commit f940b95b67420878c889b20827b63609f17a7ea1
Author: shihao <3127647737@qq.com>
Date:   Fri Nov 28 21:20:40 2025 +0800

    init

diff --git a/.claude/settings.local.json b/.claude/settings.local.json
new file mode 100644
index 0000000..ab891c2
--- /dev/null
+++ b/.claude/settings.local.json
@@ -0,0 +1,33 @@
+{
+  "permissions": {
+    "allow": [
+      "mcp__chrome-devtools__list_pages",
+      "mcp__chrome-devtools__navigate_page",
+      "mcp__chrome-devtools__take_snapshot",
+      "mcp__chrome-devtools__fill",
+      "mcp__chrome-devtools__click",
+      "mcp__chrome-devtools__list_console_messages",
+      "mcp__chrome-devtools__list_network_requests",
+      "mcp__chrome-devtools__get_console_message",
+      "mcp__chrome-devtools__get_network_request",
+      "Bash(python:*)",
+      "mcp__chrome-devtools__fill_form",
+      "mcp__chrome-devtools__handle_dialog",
+      "Bash(grep:*)",
+      "mcp__sequential-thinking__sequentialthinking",
+      "mcp__chrome-devtools__wait_for",
+      "Bash(curl:*)",
+      "Bash(mysql:*)",
+      "Bash(dir:*)",
+      "WebSearch",
+      "Bash(findstr:*)",
+      "Bash(cmd /c \"del NUL\")",
+      "Bash(find:*)",
+      "Bash(for file in *.html)",
+      "Bash(do sed -i 's/ui-components.css?v=2/ui-components.css?v=3/g' \"$file\")",
+      "Bash(done)"
+    ],
+    "deny": [],
+    "ask": []
+  }
+}
diff --git a/.env.example b/.env.example
new file mode 100644
index 0000000..a9f5ff8
--- /dev/null
+++ b/.env.example
@@ -0,0 +1,21 @@
+# Flask configuration
+FLASK_APP=app.py
+FLASK_ENV=development
+SECRET_KEY=your-secret-key-here
+
+# Database configuration
+DB_HOST=localhost
+DB_PORT=3306
+DB_USER=root
+DB_PASSWORD=your-password
+DB_NAME=video_parser
+
+# Redis configuration
+REDIS_HOST=localhost
+REDIS_PORT=6379
+REDIS_DB=0
+REDIS_PASSWORD=
+
+# Application configuration
+MAX_CONCURRENT=3
+SESSION_LIFETIME=7200
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..9f7170c
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,19 @@
+__pycache__/
+*.py[cod]
+*$py.class
+*.so
+.Python
+env/
+venv/
+ENV/
+.venv
+.env
+instance/
+.pytest_cache/
+.coverage
+htmlcov/
+dist/
+build/
+*.egg-info/
+.DS_Store
+*.log
diff --git a/API文档.md b/API文档.md
new file mode 100644
index 0000000..619fbe0
--- /dev/null
+++ b/API文档.md
@@ -0,0 +1,393 @@
+# Short Video Parsing Platform - API Documentation
+
+## Basic Information
+
+- Base URL: `http://your-domain.com`
+- Data format: JSON
+- Character encoding: UTF-8
+
+## Authentication APIs
+
+### 1. Send Verification Code
+
+**Endpoint**: `/auth/send-code`
+
+**Method**: `POST`
+
+**Request Parameters**:
+
+```json
+{
+    "email": "user@example.com",
+    "purpose": "register"
+}
+```
+
+| Parameter | Type | Required | Description |
+|-----------|------|----------|-------------|
+| email | string | Yes | Email address |
+| purpose | string | Yes | Purpose: register / reset_password / forgot_password |
+
+**Response Example**:
+
+```json
+{
+    "success": true,
+    "message": "验证码已发送"
+}
+```
+
+### 2. User Registration
+
+**Endpoint**: `/auth/register`
+
+**Method**: `POST`
+
+**Request Parameters**:
+
+```json
+{
+    "username": "testuser",
+    "email": "user@example.com",
+    "password": "password123",
+    "code": "123456"
+}
+```
+
+**Response Example**:
+
+```json
+{
+    "success": true,
+    "message": "注册成功"
+}
+```
+
+### 3. User Login
+
+**Endpoint**: `/auth/login`
+
+**Method**: `POST`
+
+**Request Parameters**:
+
+```json
+{
+    "email": "user@example.com",
+    "password": "password123"
+}
+```
+
+**Response Example**:
+
+```json
+{
+    "success": true,
+    "message": "登录成功",
+    "user": {
+        "id": 1,
+        "username": "testuser",
+        "email": "user@example.com"
+    }
+}
+```
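+For reference, here is a minimal sketch of the sign-up and login flow using Python `requests`. The base URL, credentials, and verification code are placeholders; a `requests.Session` keeps the session cookie that the login-protected endpoints below rely on:
+
+```python
+import requests
+
+BASE = "http://localhost:5000"  # placeholder; use your deployment's base URL
+session = requests.Session()    # persists the session cookie across calls
+
+# 1. Request an email verification code for registration.
+r = session.post(f"{BASE}/auth/send-code",
+                 json={"email": "user@example.com", "purpose": "register"})
+print(r.json())
+
+# 2. Register with the code received by email.
+r = session.post(f"{BASE}/auth/register",
+                 json={"username": "testuser", "email": "user@example.com",
+                       "password": "password123", "code": "123456"})
+print(r.json())
+
+# 3. Log in; subsequent requests on this session are authenticated.
+r = session.post(f"{BASE}/auth/login",
+                 json={"email": "user@example.com", "password": "password123"})
+print(r.json())
+```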
+### 4. User Logout
+
+**Endpoint**: `/auth/logout`
+
+**Method**: `POST`
+
+**Requires login**: Yes
+
+**Response Example**:
+
+```json
+{
+    "success": true,
+    "message": "已退出登录"
+}
+```
+
+### 5. Reset Password
+
+**Endpoint**: `/auth/reset-password`
+
+**Method**: `POST`
+
+**Request Parameters**:
+
+```json
+{
+    "email": "user@example.com",
+    "code": "123456",
+    "new_password": "newpassword123"
+}
+```
+
+**Response Example**:
+
+```json
+{
+    "success": true,
+    "message": "密码重置成功"
+}
+```
+
+## Parsing APIs
+
+### 1. Parse Video
+
+**Endpoint**: `/api/parse`
+
+**Method**: `POST`
+
+**Request Parameters**:
+
+```json
+{
+    "url": "https://v.douyin.com/xxxxx/"
+}
+```
+
+**Response Example (completed immediately)**:
+
+```json
+{
+    "success": true,
+    "status": "completed",
+    "data": {
+        "cover": "https://...",
+        "video_url": "https://...",
+        "title": "Video title",
+        "description": "Video description"
+    },
+    "response_time": 1234
+}
+```
+
+**Response Example (queued)**:
+
+```json
+{
+    "success": true,
+    "status": "queued",
+    "task_id": "uuid-string",
+    "message": "任务已加入队列,请稍候...",
+    "queue_status": {
+        "queued": 5,
+        "processing": 3
+    }
+}
+```
+
+### 2. Get Task Result
+
+**Endpoint**: `/api/task/<task_id>`
+
+**Method**: `GET`
+
+**Response Example**:
+
+```json
+{
+    "success": true,
+    "status": "completed",
+    "data": {
+        "cover": "https://...",
+        "video_url": "https://...",
+        "title": "Video title",
+        "description": "Video description"
+    }
+}
+```
+
+### 3. Get Queue Status
+
+**Endpoint**: `/api/queue-status`
+
+**Method**: `GET`
+
+**Response Example**:
+
+```json
+{
+    "success": true,
+    "queue_status": {
+        "queued": 5,
+        "processing": 3
+    }
+}
+```
+
+## Admin APIs
+
+### 1. Admin Login
+
+**Endpoint**: `/admin/login`
+
+**Method**: `POST`
+
+**Request Parameters**:
+
+```json
+{
+    "username": "admin",
+    "password": "password",
+    "code_2fa": "123456"
+}
+```
+
+### 2. Get Dashboard Data
+
+**Endpoint**: `/admin/dashboard`
+
+**Method**: `GET`
+
+**Requires admin privileges**: Yes
+
+**Response Example**:
+
+```json
+{
+    "success": true,
+    "data": {
+        "today": {
+            "total": 100,
+            "success": 95,
+            "fail": 5
+        },
+        "total_users": 50,
+        "total_parses": 1000,
+        "active_apis": 5
+    }
+}
+```
+
+### 3. Get User List
+
+**Endpoint**: `/admin/users`
+
+**Method**: `GET`
+
+**Query Parameters**:
+
+| Parameter | Type | Required | Description |
+|-----------|------|----------|-------------|
+| page | int | No | Page number, default 1 |
+| per_page | int | No | Items per page, default 20 |
+
+### 4. Update User
+
+**Endpoint**: `/admin/users/<user_id>`
+
+**Method**: `PUT`
+
+**Request Parameters**:
+
+```json
+{
+    "group_id": 3,
+    "is_active": true
+}
+```
+
+### 5. List Parser APIs
+
+**Endpoint**: `/admin/apis`
+
+**Method**: `GET`
+
+### 6. Create Parser API
+
+**Endpoint**: `/admin/apis`
+
+**Method**: `POST`
+
+**Request Parameters**:
+
+```json
+{
+    "name": "API name",
+    "platform": "douyin",
+    "api_url": "https://api.example.com",
+    "api_key": "optional-key",
+    "weight": 1,
+    "is_enabled": true
+}
+```
+
+### 7. Update Site Configuration
+
+**Endpoint**: `/admin/config`
+
+**Method**: `PUT`
+
+**Request Parameters**:
+
+```json
+{
+    "site_title": "短视频解析平台",
+    "max_concurrent": "5"
+}
+```
+
+### 8. Test SMTP Configuration
+
+**Endpoint**: `/admin/smtp/test`
+
+**Method**: `POST`
+
+**Request Parameters**:
+
+```json
+{
+    "email": "test@example.com"
+}
+```
+
+### 9. Get Parse Statistics
+
+**Endpoint**: `/admin/stats/parse`
+
+**Method**: `GET`
+
+**Query Parameters**:
+
+| Parameter | Type | Required | Description |
+|-----------|------|----------|-------------|
+| days | int | No | Number of days to cover, default 7 |
+
+## Error Codes
+
+| Code | Description |
+|------|-------------|
+| 200 | Success |
+| 400 | Invalid request parameters |
+| 401 | Unauthorized / login failed |
+| 403 | Forbidden |
+| 404 | Resource not found |
+| 429 | Too many requests |
+| 500 | Server error |
+
+## Rate Limits
+
+- Guests: up to 5 parses per day
+- Regular users: up to 10 parses per day
+- VIP users: up to 50 parses per day
+- SVIP users: up to 200 parses per day
+
+Administrators can customize each group's rate-limit policy in the admin panel.
+
+## Supported Platforms
+
+- Douyin (douyin)
+- TikTok (tiktok)
+- Bilibili (bilibili)
+
+## Notes
+
+1. All POST requests must set `Content-Type: application/json`
+2. Endpoints that require login return a 401 error when unauthenticated
+3. Exceeding the rate limit returns a 429 error
+4. Parse tasks may be queued; poll for the result as shown in the sketch below
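+As an illustration, a minimal polling loop with Python `requests`. The base URL, timeout, and polling interval are arbitrary placeholder choices; the endpoint paths and response fields follow this document:
+
+```python
+import time
+
+import requests
+
+BASE = "http://localhost:5000"  # placeholder; use your deployment's base URL
+
+# Submit the parse request; it either completes immediately or is queued.
+result = requests.post(f"{BASE}/api/parse",
+                       json={"url": "https://v.douyin.com/xxxxx/"}).json()
+
+if result.get("status") == "queued":
+    task_id = result["task_id"]
+    deadline = time.time() + 60          # give up after 60 s (arbitrary)
+    while time.time() < deadline:
+        result = requests.get(f"{BASE}/api/task/{task_id}").json()
+        if result.get("status") == "completed":
+            break                        # result is ready
+        time.sleep(2)                    # poll every 2 s (arbitrary)
+
+print(result.get("data"))
+```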
diff --git a/CLAUDE.md b/CLAUDE.md
new file mode 100644
index 0000000..c1a25d2
--- /dev/null
+++ b/CLAUDE.md
@@ -0,0 +1,191 @@
+# CLAUDE.md
+
+This file provides guidance to Claude Code (claude.ai/code) when working with code in this repository.
+
+## Project Overview
+
+This is a short-video parsing platform that aggregates multiple third-party parsing APIs (Douyin, TikTok, Bilibili) and provides a user system, rate limiting, queue management, and an admin panel.
+
+Tech stack: Python + Flask + MySQL + Redis
+
+## Common Commands
+
+### Development Environment Setup
+
+```bash
+# Install dependencies
+pip install -r requirements.txt
+
+# Configure environment variables
+cp .env.example .env
+# Then edit .env to set up the database and Redis connections
+
+# Create the database
+mysql -u root -p
+CREATE DATABASE video_parser CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci;
+
+# Import the database schema
+mysql -u root -p video_parser < database/schema.sql
+
+# Initialize the admin account (username: shihao, password: 80012029Lz)
+python init_admin.py
+
+# Initialize parser API data
+python init_data.py
+```
+
+### Running the Application
+
+```bash
+# Development mode
+python app.py
+
+# Production mode (with Gunicorn)
+gunicorn -w 4 -b 0.0.0.0:5000 app:app
+```
+
+### Testing and Debugging
+
+```bash
+# Test the database connection
+python test_db.py
+
+# Check the environment configuration
+python check_env.py
+
+# Test email sending
+python test_email.py
+
+# Run the scheduled tasks (health checks) standalone
+python scheduler.py
+```
+
+### Redis Service
+
+```bash
+# Start Redis on Windows (the project bundles Redis-x64-5.0.14.1)
+cd Redis-x64-5.0.14.1
+redis-server.exe redis.windows.conf
+```
+
+## Core Architecture
+
+### Parser Factory Pattern (parsers/factory.py)
+
+- `ParserFactory.create_parser(api_config)` - creates the parser instance matching an API configuration's platform
+- `ParserFactory.get_parser_for_platform(platform)` - returns a parser for the given platform, with load balancing (Bilibili uses weighted random selection)
+- `ParserFactory.detect_platform(video_url)` - detects which platform a video URL belongs to
+
+All parsers inherit from `BaseParser` (parsers/base.py) and must implement `parse(video_url)`, returning the unified format:
+```python
+{
+    "cover": "cover URL",
+    "video_url": "video URL",
+    "title": "title",
+    "description": "description"
+}
+```
+
+**Adding a new platform parser** (a sketch follows this list):
+1. Create a new parser file under `parsers/` (e.g. `kuaishou.py`)
+2. Subclass `BaseParser` and implement `parse()`
+3. Add the platform mapping in `create_parser()` in `parsers/factory.py`
+4. Add the new API configuration to the `parser_apis` database table
+5. Add a URL detection rule to `ParserFactory.detect_platform()`
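+A minimal sketch of steps 1-2 for a hypothetical `parsers/kuaishou.py`. The upstream request/response fields and the way the API config is passed in are illustrative assumptions; mirror whatever `BaseParser` and the existing adapters (e.g. `parsers/douyin.py`) actually do:
+
+```python
+# parsers/kuaishou.py - hypothetical example
+import requests
+
+from parsers.base import BaseParser
+
+
+class KuaishouParser(BaseParser):
+    def __init__(self, api_config):
+        # Assumption: the factory passes in the parser_apis row (dict-like).
+        self.api_config = api_config
+
+    def parse(self, video_url):
+        # Call the upstream parsing API configured in the parser_apis table.
+        resp = requests.get(self.api_config["api_url"],
+                            params={"url": video_url}, timeout=10)
+        resp.raise_for_status()
+        data = resp.json()
+        # Map the upstream response onto the unified format above.
+        return {
+            "cover": data.get("cover"),
+            "video_url": data.get("video_url"),
+            "title": data.get("title"),
+            "description": data.get("description"),
+        }
+```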
+### Queue and Concurrency Control (utils/queue.py)
+
+- `ParseQueue` - parse-task queue manager, supporting both Redis and in-memory queue modes
+  - `add_task()` - add a task to the queue
+  - `get_task()` - fetch a task from the queue
+  - `complete_task()` - finish a task and store its result
+  - `get_result()` - fetch a task's result
+  - **Fallback**: if the Redis connection fails, it automatically falls back to the in-memory queue (data is lost on restart)
+
+- `ConcurrencyController` - concurrency controller
+  - `can_process()` - check whether a new task can be processed (based on the `max_concurrent` setting)
+  - `wait_for_slot()` - wait for a free slot
+
+### Health Checks and Scheduled Tasks (scheduler.py)
+
+Uses APScheduler to periodically check the health of each platform's parsing APIs:
+- Runs at the interval configured in the `health_check_config` table
+- Records check results in the `health_check_logs` table
+- Updates `health_status` and `fail_count` in the `parser_apis` table
+
+### Route Structure
+
+- `routes/main.py` - home page routes
+- `routes/auth.py` - user authentication (registration, login, verification codes, password reset)
+- `routes/parser.py` - parsing API (`/api/parse`, `/api/task/<task_id>`, `/api/queue-status`)
+- `routes/admin.py` - admin panel (user management, API management, configuration, statistics)
+
+### Data Models (models/__init__.py)
+
+Core models:
+- `User` / `UserGroup` - users and user groups (each group can have its own rate-limit policy)
+- `Admin` - administrators (with 2FA support)
+- `ParserAPI` - parser API configuration (weight, health status, statistics)
+- `SMTPConfig` - SMTP configuration (multiple configs with load balancing)
+- `ParseLog` / `DailyParseStat` - parse logs and daily statistics
+- `HealthCheckConfig` / `HealthCheckLog` - health-check configuration and logs
+
+### Rate Limiting
+
+Daily rate limits based on user group:
+- Guests (group_id=1): 5/day
+- Regular users (group_id=2): 10/day
+- VIP users (group_id=3): 50/day
+- SVIP users (group_id=4): 200/day
+
+Rate-limit counters are stored in the `daily_parse_stats` table, keyed by user_id or ip_address.
+
+## Application Initialization Flow (app.py)
+
+1. Create the Flask app and load the configuration (config.py)
+2. Initialize the database (Flask-SQLAlchemy)
+3. Initialize the login manager (Flask-Login)
+4. Initialize the Redis connection (with fallback to the in-memory queue)
+5. Register blueprints (auth, parser, admin, main)
+6. Start the scheduled-task scheduler (non-debug mode, or main process only)
+
+## Environment Variables (.env)
+
+Required settings:
+- `SECRET_KEY` - Flask secret key
+- `DB_HOST`, `DB_PORT`, `DB_USER`, `DB_PASSWORD`, `DB_NAME` - MySQL settings
+- `REDIS_HOST`, `REDIS_PORT`, `REDIS_DB`, `REDIS_PASSWORD` - Redis settings (optional; falls back to the in-memory queue on failure)
+- `MAX_CONCURRENT` - maximum number of concurrent parses (default 3)
+- `SESSION_LIFETIME` - session lifetime in seconds (default 7200)
+
+**Notes**:
+- SMTP settings live in the `smtp_configs` database table and are managed through the admin panel
+- Site settings (logo, title, announcements, etc.) live in the `site_configs` table
+- `init_admin.py` and `init_data.py` must be run before the first start
+
+## Access URLs
+
+- Front end: `http://localhost:5000`
+- Admin panel: `http://localhost:5000/admin/login` (default account: shihao / 80012029Lz)
+- API documentation: see `API文档.md`
+
+## FAQ
+
+### Database
+- After changing the database schema, apply SQL updates manually (the project does not use a migration tool)
+- Make sure the database charset is `utf8mb4` so that emoji and other special characters store correctly
+
+### Redis
+- The Redis server for Windows is bundled (`Redis-x64-5.0.14.1/`)
+- When Redis is unavailable the system falls back to the in-memory queue, but queued data is lost on restart
+- A standalone Redis service is recommended in production
+
+### Email
+- Configure SMTP in the admin panel on first deployment (or use the defaults in `开发文档/默认SMPT.md`)
+- Test email delivery with `python test_email.py`
+- Multiple SMTP configs are load-balanced to improve delivery success rates
+
+### Parser APIs
+- Bilibili is served by 3 load-balanced APIs, with traffic split by weight
+- Failed health checks trigger email alerts to administrators
+- APIs are configured in the `parser_apis` table and can be enabled/disabled from the admin panel
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..6712a27
--- /dev/null
+++ b/README.md
@@ -0,0 +1,103 @@
+# Short Video Parsing Platform
+
+A web platform that aggregates multiple short-video parsing APIs and supports parsing videos from Douyin, TikTok, Bilibili, and other platforms.
+
+## Features
+
+- Multi-platform video parsing (Douyin, TikTok, Bilibili)
+- User system (registration, login, email verification)
+- User groups and rate-limit policies
+- Queueing and concurrency control
+- Admin panel
+- SMTP email system
+- Health checks and scheduled tasks
+- Statistics and logging
+
+## Tech Stack
+
+- Backend: Python + Flask
+- Database: MySQL
+- Cache: Redis
+- Frontend: HTML + CSS + JavaScript (responsive layout)
+
+## Installation and Deployment
+
+### 1. Install dependencies
+
+```bash
+pip install -r requirements.txt
+```
+
+### 2. Configure environment variables
+
+Copy `.env.example` to `.env` and adjust the settings:
+
+```bash
+cp .env.example .env
+```
+
+### 3. Initialize the database
+
+```bash
+mysql -u root -p < database/schema.sql
+```
+
+### 4. Initialize the admin account
+
+Run the initialization script to create the default admin account:
+
+```bash
+python init_admin.py
+```
+
+Default admin account:
+- Username: shihao
+- Password: 80012029Lz
+
+### 5. Run the application
+
+```bash
+python app.py
+```
+
+Visit http://localhost:5000
+
+## Directory Structure
+
+```
+JieXi/
+├── app.py               # Application entry point
+├── config.py            # Configuration
+├── requirements.txt     # Dependencies
+├── .env                 # Environment variables
+├── database/            # Database
+│   └── schema.sql       # Database schema
+├── models/              # Data models
+│   └── __init__.py
+├── routes/              # Routes
+│   ├── main.py          # Home page routes
+│   ├── auth.py          # Authentication routes
+│   ├── parser.py        # Parsing routes
+│   └── admin.py         # Admin routes
+├── parsers/             # Parser adapters
+│   ├── base.py          # Base adapter
+│   ├── douyin.py        # Douyin adapter
+│   ├── tiktok.py        # TikTok adapter
+│   └── bilibili.py      # Bilibili adapter
+├── utils/               # Utilities
+│   ├── email.py         # Email sending
+│   ├── queue.py         # Queue management
+│   └── security.py      # Security helpers
+├── templates/           # Templates
+│   ├── index.html       # Home page
+│   ├── admin/           # Admin pages
+│   └── auth/            # Auth pages
+└── static/              # Static assets
+    ├── css/
+    ├── js/
+    └── images/
+```
+
+## Developer Documentation
+
+See the `开发文档/` directory for detailed developer documentation.
diff --git a/Redis-x64-5.0.14.1/00-RELEASENOTES b/Redis-x64-5.0.14.1/00-RELEASENOTES
new file mode 100644
index 0000000..21b4375
--- /dev/null
+++ b/Redis-x64-5.0.14.1/00-RELEASENOTES
@@ -0,0 +1,3630 @@
+Redis 5.0 release notes
+=======================
+
+--------------------------------------------------------------------------------
+Upgrade urgency levels:
+
+LOW: No need to upgrade unless there are new features you want to use.
+MODERATE: Program an upgrade of the server, but it's not urgent.
+HIGH: There is a critical bug that may affect a subset of users. Upgrade!
+CRITICAL: There is a critical bug affecting MOST USERS. Upgrade ASAP.
+SECURITY: There are security fixes in the release.
+-------------------------------------------------------------------------------- + +================================================================================ +Redis 5.0.14 Released Mon Oct 4 12:00:00 IDT 2021 +================================================================================ + +Upgrade urgency: SECURITY, contains fixes to security issues. + +Security Fixes: +* (CVE-2021-41099) Integer to heap buffer overflow handling certain string + commands and network payloads, when proto-max-bulk-len is manually configured + to a non-default, very large value [reported by yiyuaner]. +* (CVE-2021-32762) Integer to heap buffer overflow issue in redis-cli and + redis-sentinel parsing large multi-bulk replies on some older and less common + platforms [reported by Microsoft Vulnerability Research]. +* (CVE-2021-32687) Integer to heap buffer overflow with intsets, when + set-max-intset-entries is manually configured to a non-default, very large + value [reported by Pawel Wieczorkiewicz, AWS]. +* (CVE-2021-32675) Denial Of Service when processing RESP request payloads with + a large number of elements on many connections. +* (CVE-2021-32672) Random heap reading issue with Lua Debugger [reported by + Meir Shpilraien]. +* (CVE-2021-32628) Integer to heap buffer overflow handling ziplist-encoded + data types, when configuring a large, non-default value for + hash-max-ziplist-entries, hash-max-ziplist-value, zset-max-ziplist-entries + or zset-max-ziplist-value [reported by sundb]. +* (CVE-2021-32627) Integer to heap buffer overflow issue with streams, when + configuring a non-default, large value for proto-max-bulk-len and + client-query-buffer-limit [reported by sundb]. +* (CVE-2021-32626) Specially crafted Lua scripts may result with Heap buffer + overflow [reported by Meir Shpilraien]. + +================================================================================ +Redis 5.0.13 Released Wed Jul 21 16:32:19 IDT 2021 +================================================================================ + +Upgrade urgency: SECURITY, contains fixes to security issues that affect +authenticated client connections on 32-bit versions. MODERATE otherwise. + +Fix integer overflow in BITFIELD on 32-bit versions (CVE-2021-32761). +An integer overflow bug in Redis version 2.2 or newer can be exploited using the +BITFIELD command to corrupt the heap and potentially result with remote code +execution. + +Bug fixes: +* Fix overflows on 32-bit versions in GETBIT, SETBIT, BITCOUNT, BITPOS, and BITFIELD (#9191) +* Fix ziplist length updates on big-endian platforms (#2080) + +================================================================================ +Redis 5.0.12 Released Mon Mar 1 17:29:52 IST 2021 +================================================================================ + +Upgrade urgency: LOW, fixes a compilation issue. + +Bug fixes: +* Fix compilation error on non-glibc systems if jemalloc is not used (#8533) + +================================================================================ +Redis 5.0.11 Released Mon Feb 22 16:48:25 IST 2021 +================================================================================ + +Upgrade urgency: SECURITY if you use 32bit build of redis (see bellow), LOW +otherwise. + +Integer overflow on 32-bit systems (CVE-2021-21309): +Redis 4.0 or newer uses a configurable limit for the maximum supported bulk +input size. By default, it is 512MB which is a safe value for all platforms. 
+If the limit is significantly increased, receiving a large request from a client +may trigger several integer overflow scenarios, which would result with buffer +overflow and heap corruption. + +Bug fixes: +* Avoid 32-bit overflows when proto-max-bulk-len is set high (#8522) +* Fix an issue where a forked process deletes the parent's pidfile (#8231) +* Fix flock cluster config may cause failure to restart after kill -9 (#7674) +* Avoid an out-of-bounds read in the redis-sentinel (#7443) + +Platform and deployment-related changes: +* Fix setproctitle related crashes. (#8150, #8088) + Caused various crashes on startup, mainly on Apple M1 chips or under + instrumentation. +* Add a check for an ARM64 Linux kernel bug (#8224) + Due to the potential severity of this issue, Redis will refuse to run on + affected platforms by default. + +Modules: +* RM_ZsetRem: Delete key if empty, the bug could leave empty zset keys (#8453) + +================================================================================ +Redis 5.0.10 Released Mon Oct 26 09:21:49 IST 2020 +================================================================================ + +Upgrade urgency: SECURITY if you use an affected platform (see below). + Otherwise the upgrade urgency is MODERATE. + +This release fixes a potential heap overflow when using a heap allocator other +than jemalloc or glibc's malloc. See: +https://github.com/redis/redis/pull/7963 + +Other fixes in this release: + +* Avoid case of Lua scripts being consistently aborted due to OOM +* XPENDING will not update consumer's seen-time +* A blocked XREADGROUP didn't propagated the XSETID to replicas / AOF +* UNLINK support for streams +* RESTORE ABSTTL won't store expired keys into the DB +* Hide AUTH from MONITOR +* Cluster: reduce spurious PFAIL/FAIL states upon delayed PONG receival +* Cluster: Fix case of clusters mixing accidentally by gossip +* Cluster: Allow blocked XREAD on a cluster replica +* Cluster: Optimize memory usage CLUSTER SLOTS command +* RedisModule_ValueLength support for stream data type +* Minor fixes in redis-check-rdb and redis-cli +* Fix redis-check-rdb support for modules aux data +* Add fsync in replica when full RDB payload was received + +Full list of commits: + +Yossi Gottlieb in commit ce0d74d8f: + Fix wrong zmalloc_size() assumption. (#7963) + 1 file changed, 3 deletions(-) + +Yossi Gottlieb in commit 066699240: + Backport Lua 5.2.2 stack overflow fix. (#7733) + 1 file changed, 1 insertion(+), 1 deletion(-) + +WuYunlong in commit 8a90c7ef3: + Add fsync to readSyncBulkPayload(). (#7839) + 1 file changed, 11 insertions(+) + +Ariel Shtul in commit f0df2bb3c: + Fix redis-check-rdb support for modules aux data (#7826) + 3 files changed, 21 insertions(+), 1 deletion(-) + +hwware in commit 7add2a412: + fix memory leak in sentinel connection sharing + 1 file changed, 1 insertion(+) + +Oran Agra in commit 315e648f8: + Allow blocked XREAD on a cluster replica (#7881) + 3 files changed, 100 insertions(+), 2 deletions(-) + +guybe7 in commit 4967ee94e: + Modules: Invalidate saved_oparray after use (#7688) + 1 file changed, 2 insertions(+) + +antirez in commit 065003e8f: + Modules: remove spurious call from moduleHandleBlockedClients(). + 1 file changed, 1 deletion(-) + +Angus Pearson in commit 6cdf62928: + Fix broken interval and repeat bahaviour in redis-cli (incluing cluster mode) + 1 file changed, 11 insertions(+), 6 deletions(-) + +antirez in commit cb6a4971c: + Cluster: introduce data_received field. 
+ 2 files changed, 27 insertions(+), 10 deletions(-) + +Madelyn Olson in commit 83f4de865: + Hide AUTH from monitor + 1 file changed, 1 insertion(+), 1 deletion(-) + +Guy Benoish in commit 3ba08d185: + Support streams in general module API functions + 3 files changed, 11 insertions(+), 1 deletion(-) + +Itamar Haber in commit 109c0635c: + Expands lazyfree's effort estimate to include Streams (#5794) + 1 file changed, 24 insertions(+) + +huangzhw in commit 235210d5b: + defrag.c activeDefragSdsListAndDict when defrag sdsele, We can't use (#7492) + 1 file changed, 1 insertion(+), 1 deletion(-) + +Oran Agra in commit fdd3162fe: + RESTORE ABSTTL skip expired keys - leak (#7511) + 1 file changed, 1 insertion(+) + +Oran Agra in commit 6139d6d18: + RESTORE ABSTTL won't store expired keys into the db (#7472) + 4 files changed, 45 insertions(+), 15 deletions(-) + +Liu Zhen in commit 0f502c58d: + fix clusters mixing accidentally by gossip + 1 file changed, 10 insertions(+), 2 deletions(-) + +Guy Benoish in commit 37fd50718: + XPENDING should not update consumer's seen-time + 4 files changed, 30 insertions(+), 18 deletions(-) + +antirez in commit a3ca53e4a: + Also use propagate() in streamPropagateGroupID(). + 1 file changed, 11 insertions(+), 1 deletion(-) + +yanhui13 in commit 7a62eb96e: + optimize the output of cluster slots + 1 file changed, 7 insertions(+), 4 deletions(-) + +srzhao in commit 0efb93d0c: + Check OOM at script start to get stable lua OOM state. + 3 files changed, 11 insertions(+), 4 deletions(-) + +================================================================================ +Redis 5.0.9 Released Thu Apr 17 12:41:00 CET 2020 +================================================================================ + +Upgrade urgency:CRITICAL if you use Streams with AOF ore replicas. + Otherwise the upgrade urgency is LOW. + +This release has a speed improvement and a critical fix: + + * FIX: XREADGROUP when fetching data in a blocking way, would not + emit the XCLAIM in the AOF file and to replicas. This means + that the last ID is not updated, and that restarting the server + will have the effect of reprocessing some entries. + * NEW: Clients blocked on the same key are now unblocked on + O(1) time. Backported from Redis 6. + +Commits: + + 1fc8ef81a Fix XCLAIM propagation in AOF/replicas for blocking XREADGROUP. + a5e24eabc Speedup: unblock clients on keys in O(1). + +================================================================================ +Redis 5.0.8 Released Thu Mar 12 16:05:41 CET 2020 +================================================================================ + +Upgrade urgency HIGH: This release fixes security issues. + +This is a list of fixes in this release: + +Salvatore Sanfilippo in commit 2bea502d: + Merge pull request #6975 from dustinmm80/add-arm-latomic-linking +Dustin Collins in commit b5931405: + Fix Pi building needing -latomic, backport + 1 file changed, 9 insertions(+) + +srzhao in commit fd441300: + fix impl of aof-child whitelist SIGUSR1 feature. 
+ 1 file changed, 5 insertions(+), 4 deletions(-) + +Ariel in commit 77ff332b: + fix ThreadSafeContext lock/unlock function names + 1 file changed, 2 insertions(+), 2 deletions(-) + +Guy Benoish in commit 4f0f799c: + XREADGROUP should propagate XCALIM/SETID in MULTI/EXEC + 1 file changed, 2 insertions(+), 2 deletions(-) + +Oran Agra in commit 0c1273c3: + Fix client flags to be int64 in module.c + 1 file changed, 3 insertions(+), 3 deletions(-) + +Guy Benoish in commit 708a4e8a: + Fix small bugs related to replica and monitor ambiguity + 2 files changed, 8 insertions(+), 6 deletions(-) + +WuYunlong in commit eac4115d: + Fix lua related memory leak. + 1 file changed, 1 insertion(+) + +antirez in commit d075df17: + Simplify #6379 changes. + 2 files changed, 4 insertions(+), 9 deletions(-) + +WuYunlong in commit 80a49c37: + Free allocated sds in pfdebugCommand() to avoid memory leak. + 1 file changed, 1 insertion(+) + +antirez in commit 60870d3a: + Jump to right label on AOF parsing error. + 1 file changed, 6 insertions(+), 4 deletions(-) + +antirez in commit d90f599b: + Free fakeclient argv on AOF error. + 1 file changed, 11 insertions(+), 3 deletions(-) + +WuYunlong in commit 8ee3bddf: + Fix potential memory leak of rioWriteBulkStreamID(). + 1 file changed, 4 insertions(+), 1 deletion(-) + +WuYunlong in commit 4780fe78: + Fix potential memory leak of clusterLoadConfig(). + 1 file changed, 20 insertions(+), 5 deletions(-) + +Leo Murillo in commit f3b77510: + Fix bug on KEYS command where pattern starts with * followed by \x00 (null char). + 1 file changed, 1 insertion(+), 1 deletion(-) + +Guy Benoish in commit 7f3fcedb: + Blocking XREAD[GROUP] should always reply with valid data (or timeout) + 3 files changed, 44 insertions(+), 10 deletions(-) + +antirez in commit f93b2fa5: + XCLAIM: Create the consumer only on successful claims. + 1 file changed, 4 insertions(+), 2 deletions(-) + +Guy Benoish in commit 89682d96: + Stream: Handle streamID-related edge cases + 4 files changed, 54 insertions(+), 4 deletions(-) + +antirez in commit 920e108f: + Fix ip and missing mode in RM_GetClusterNodeInfo(). + 1 file changed, 5 insertions(+), 2 deletions(-) + +antirez in commit 7569b210: + Inline protocol: handle empty strings well. + 1 file changed, 2 insertions(+), 6 deletions(-) + +Khem Raj in commit 3c610b4e: + Mark extern definition of SDS_NOINIT in sds.h + 1 file changed, 1 insertion(+), 1 deletion(-) + +Seunghoon Woo in commit 16b2d07f: + [FIX] revisit CVE-2015-8080 vulnerability + 1 file changed, 6 insertions(+), 4 deletions(-) + +yz1509 in commit 19f33585: + avoid sentinel changes promoted_slave to be its own replica. + 1 file changed, 1 insertion(+), 1 deletion(-) + +================================================================================ +Redis 5.0.7 Released Tue Nov 19 17:52:44 CET 2019 +================================================================================ + +Upgrade urgency HIGH: many issues fixed, some may have an impact. + +Hi all, Redis 5.0.7 fixes a number of bugs, none is very critical, however +there are a few that may have an impact. It's a good idea to upgrade. +There are fixes in the area of replication from modules commands and +callbacks, AOF fsync (non critical issue), memory leaks (very rare and small), +streams beahvior (non critical), and a potential crash in commands +processing multiple keys at the same time that is there for years, and happens +very rarely, but is not impossible to trigger. + +The following is the list of commits in this release. 
+ +antirez in commit 4d2a31ae: + Test: fix implementation-dependent test after code change. + 1 file changed, 2 insertions(+), 2 deletions(-) + +Oran Agra in commit 9f63fc98: + RED-31295 - redis: avoid race between dlopen and thread creation + 2 files changed, 17 insertions(+) + +antirez in commit 1a9e70c1: + Cluster: fix memory leak of cached master. + 2 files changed, 9 insertions(+), 4 deletions(-) + +Guy Benoish in commit 69b1b5be: + Fix usage of server.stream_node_max_* + 1 file changed, 3 insertions(+), 3 deletions(-) + +喜欢兰花山丘 in commit 1fd97ee7: + Update mkreleasehdr.sh + 1 file changed, 1 insertion(+), 1 deletion(-) + +antirez in commit 1a9855d7: + Remove additional space from comment. + 1 file changed, 1 insertion(+), 1 deletion(-) + +antirez in commit 32a6e3e4: + Fix stream test after addition of 0-0 ID test. + 1 file changed, 3 insertions(+), 3 deletions(-) + +Yuan Zhou in commit c9e6cda9: + aof: fix assignment for aof_fsync_offset + 1 file changed, 1 insertion(+), 1 deletion(-) + +antirez in commit d3eeacf9: + Merge branch '5.0' of github.com:/antirez/redis into 5.0 +antirez in commit da5dc458: + Rename var to fixed_time_expire now that is more general. + 4 files changed, 7 insertions(+), 8 deletions(-) + +antirez in commit 0fefed25: + Fix patch provided in #6554. + 1 file changed, 8 insertions(+), 1 deletion(-) + +zhaozhao.zz in commit e9fbc960: + expires & blocking: handle ready keys as call() + 1 file changed, 5 insertions(+) + +Guy Benoish in commit 08ec8f71: + XADD with ID 0-0 stores an empty key + 2 files changed, 14 insertions(+) + +Loris Cro in commit 09e1fe27: + fix unreported overflow in autogerenared stream IDs + 2 files changed, 19 insertions(+), 10 deletions(-) + +Salvatore Sanfilippo in commit 09f9e4b0: + Merge pull request #6600 from oranagra/5_module_flags +Oran Agra in commit 8d8d68dd: + module documentation mismatches: loading and fork child for 5.0 branch + 3 files changed, 11 insertions(+) + +antirez in commit 7a7fbe70: + Modules: RM_GetContextFlags(): remove non Redis 5 features. + 1 file changed, 3 deletions(-) + +antirez in commit b5830486: + Modules: fix moduleCreateArgvFromUserFormat() casting bug. + 1 file changed, 1 insertion(+), 1 deletion(-) + +antirez in commit b7a2a53a: + module: fix propagation API bug. + 1 file changed, 2 insertions(+), 2 deletions(-) + +antirez in commit 278bd6e3: + Modules: add new flags to context, replica state + more. + 2 files changed, 48 insertions(+), 1 deletion(-) + +antirez in commit 61d9a154: + Modules: RM_Call(): give pointer to documentation. + 1 file changed, 4 insertions(+), 1 deletion(-) + +antirez in commit 0e7ea0aa: + Modules: RM_Call/Replicate() ability to exclude AOF/replicas. + 1 file changed, 28 insertions(+), 9 deletions(-) + +antirez in commit 3b38164e: + Modules: RM_Replicate() in thread safe contexts. + 1 file changed, 26 insertions(+), 4 deletions(-) + +antirez in commit ef9fe9b0: + Modules: implement RM_Replicate() from async callbacks. + 2 files changed, 33 insertions(+), 2 deletions(-) + +antirez in commit 8066d2a1: + Modules: handle propagation when ctx is freed. Flag modules commands ctx. + 1 file changed, 23 insertions(+), 19 deletions(-) + +antirez in commit d3f4dec4: + Update PR #6537: use a fresh time outside call(). + 3 files changed, 27 insertions(+), 7 deletions(-) + +antirez in commit 33f42665: + Update PR #6537 patch to for generality. 
+ 4 files changed, 40 insertions(+), 23 deletions(-) + +zhaozhao.zz in commit 68d71d83: + expires: refactoring judgment about whether a key is expired + 3 files changed, 3 insertions(+), 1 deletion(-) + +antirez in commit 62588dbf: + Modules: fix thread safe context creation crash. + 1 file changed, 1 insertion(+), 1 deletion(-) + +================================================================================ +Redis 5.0.6 Released Wed Sep 25 12:33:56 CEST 2019 +================================================================================ + +Upgrade urgency CRITICAL: Only in case of exposed instances to untrusted users. + +This Redis release, 5.0.6, is a bugfix and enhancement release. The most +important bugfix is a corruption related to the HyperLogLog. A malformed +HyperLogLog string could cause an invalid access to the memory. At a first +glance the vulnerability appears to be not exploitable but just a DoS. The +way to trigger the issue is complex, we'll not provide any information about +how to do that for the users safety. + +Other significant changes in this release: + +* New modules APIs merged from Redis unstable to Redis 5. +* Some memory optimization related to objects creation. +* Fixes to flushSlaveOutputBuffer() that make sure that SHUTDOWN will + transfer pending buffers to replicas. + +This is the full list of commits: + +antirez in commit 7a41047a: + RDB: fix MODULE_AUX loading by continuing to next opcode. + 1 file changed, 1 insertion(+) + +Oran Agra in commit 4eb3028b: + missing per-skiplist overheads in MEMORY USAGE + 1 file changed, 3 insertions(+), 1 deletion(-) + +Oran Agra in commit 5d09f9bc: + RM_Log - add support for logging without a context or context without module + 1 file changed, 6 insertions(+), 4 deletions(-) + +antirez in commit 2810de9f: + Cluster: abort loading nodes data if vars arguments are unbalanced. + 1 file changed, 1 insertion(+) + +antirez in commit f5c63ce0: + More strict checks and better comments in flushSlaveOutputBuffers(). + 1 file changed, 18 insertions(+), 7 deletions(-) + +antirez in commit 7f289c3b: + Improve comment in flushSlavesOutputBuffers(). + 1 file changed, 2 insertions(+), 1 deletion(-) + +antirez in commit 7ab62d4b: + Replication: clarify why repl_put_online_on_ack exists at all. + 2 files changed, 34 insertions(+), 10 deletions(-) + +zhaozhao.zz in commit 495dd0da: + networking: flushSlavesOutputBuffers bugfix + 1 file changed, 2 insertions(+), 4 deletions(-) + +Salvatore Sanfilippo in commit c1ccf0f1: + Merge pull request #6366 from oranagra/5.0_rm_reply_cstring +Salvatore Sanfilippo in commit a50dad73: + Merge pull request #6365 from oranagra/5.0_module_aux +Oran Agra in commit d6294d05: + RM_ReplyWithCString was missing registration + 1 file changed, 1 insertion(+) + +Oran Agra in commit 8c56fc86: + Fix to module aux data rdb format for backwards compatibility with old check-rdb + 1 file changed, 9 insertions(+), 1 deletion(-) + +Oran Agra in commit 98b1314f: + Implement module api for aux data in rdb + 9 files changed, 431 insertions(+), 18 deletions(-) + +antirez in commit 08b03e23: + redis-cli: always report server errors on read errors. + 1 file changed, 8 insertions(+), 1 deletion(-) + +wubostc in commit 239069de: + Reduce the calling stack + 1 file changed, 2 insertions(+), 3 deletions(-) + +antirez in commit 90bf6313: + Make EMBSTR case of #6261 more obvious. 
+ 1 file changed, 1 insertion(+), 1 deletion(-) + +chendianqiang in commit 2f8a0749: + make memory usage consistent of robj with OBJ_ENCODING_INT + 1 file changed, 9 insertions(+), 4 deletions(-) + +antirez in commit 436ed56d: + HyperLogLog: fix the fix of a corruption bug. + 1 file changed, 1 insertion(+), 2 deletions(-) + +John Sully in commit 680f89fb: + Fix HLL corruption bug + 1 file changed, 1 insertion(+) + +swilly22 in commit 388efbf8: + Extend REDISMODULE_CTX_FLAGS to indicate if redis is currently loading from either RDB or AOF + 2 files changed, 5 insertions(+) + +Itamar Haber in commit 0ccbdcee: + Uses addReplyBulkCString + 1 file changed, 1 insertion(+), 1 deletion(-) + +Itamar Haber in commit 707a59c6: + Adds RedisModule_ReplyWithCString + 2 files changed, 13 insertions(+) + +================================================================================ +Redis 5.0.5 Released Wed May 15 17:57:41 CEST 2019 +================================================================================ + +Upgrade urgency CRITICAL: This release fixes an important AOF fysnc bug + and other less critical issues. + + +Dear user, + +Redis 5.0.5 fixes an important issue with AOF and adds multiple very useful +modules APIs. Moreover smaller bugs in other parts of Redis are fixed in +this release. + +The AOF bug +----------- + +The AOF bug happens when the fsync policy is set to "everysec", which is the +default: if the write load in the server drops immediately, the commands +executed in the latest second may not be fsync-ed to disk as it should. +This may lead to data loss in case the write load drops immediately and +successively a server crash happens. + +Other things in this release +---------------------------- + +* Streams: a bug in the iterator could prevent certain items to be returned in + range queries under specific conditions. +* Memleak in bitfieldCommand fixed. +* Modules API: Preserve client->id for blocked clients. +* Fix memory leak when rewriting config file in case of write errors. +* New modules API: RedisModule_GetKeyNameFromIO(). +* Fix non critical bugs in diskless replication. +* New mdouels API: command filtering. See RedisModule_RegisterCommandFilter(); +* Tests improved to be more deterministic. +* Fix a Redis Cluster bug, manual failover may abort because of the master + sending PINGs to the replicas. + +The following is the full list of commmits. + +Regards, +Salvatore + +Christian Zeller in commit 1cac9b4b: + Typo fixes in CONTRIBUTING + 1 file changed, 2 insertions(+), 2 deletions(-) + +antirez in commit f63c5c7b: + Update CONTRIBUTING with present info. + 1 file changed, 15 insertions(+), 5 deletions(-) + +antirez in commit 668661da: + Narrow the effects of PR #6029 to the exact state. + 1 file changed, 17 insertions(+), 5 deletions(-) + +chendianqiang in commit 3c2800e3: + stop ping when client pause + 1 file changed, 1 insertion(+), 1 deletion(-) + +antirez in commit 7ac7ffd5: + Test: fix slowlog test false positive. + 1 file changed, 3 insertions(+), 1 deletion(-) + +antirez in commit cc101721: + Make comment in getClientOutputBufferMemoryUsage() describing the present. + 1 file changed, 1 insertion(+), 8 deletions(-) + +WuYunlong in commit 72420b0d: + Do not active expire keys in the background when the switch is off. 
+ 1 file changed, 6 insertions(+), 4 deletions(-) + +liaotonglang in commit 33a50d24: + delete sdsTest() from REDIS_TEST + 1 file changed, 2 deletions(-) + +zhaozhao.zz in commit 6a92836f: + test cases: skiptill -> skip-till + 1 file changed, 1 insertion(+), 1 deletion(-) + +Oran Agra in commit f179f71e: + make replication tests more stable on slow machines + 3 files changed, 34 insertions(+), 4 deletions(-) + +Yossi Gottlieb in commit 1825a4ec: + Add runtest-moduleapi with commandfilter coverage. + 5 files changed, 63 insertions(+), 28 deletions(-) + +Yossi Gottlieb in commit 9d20fdb4: + fix: missing initialization. + 3 files changed, 1 insertion(+) + +antirez in commit ded1980e: + Test: disable module testing for now. + 1 file changed, 1 deletion(-) + +Yossi Gottlieb in commit c3df78c2: + CommandFilter API: REDISMODULE_CMDFILTER_NOSELF. + 4 files changed, 62 insertions(+), 15 deletions(-) + +Yossi Gottlieb in commit 8d38ef20: + CommandFilter API: fix UnregisterCommandFilter. + 1 file changed, 2 insertions(+), 3 deletions(-) + +Yossi Gottlieb in commit 9b7009b1: + CommandFilter API: Add unregister option. + 4 files changed, 126 insertions(+), 32 deletions(-) + +Yossi Gottlieb in commit 05802442: + CommandFilter API: Extend documentation. + 1 file changed, 43 insertions(+), 5 deletions(-) + +Yossi Gottlieb in commit d5194daf: + CommandFilter API: hellofilter and tests. + 2 files changed, 47 insertions(+), 5 deletions(-) + +Yossi Gottlieb in commit 8897c154: + CommandFilter API: Support Lua and RM_call() flows. + 2 files changed, 18 insertions(+), 7 deletions(-) + +Yossi Gottlieb in commit 6dd5bad4: + CommandFilter API: More cleanup. + 2 files changed, 10 insertions(+), 29 deletions(-) + +Yossi Gottlieb in commit 83026101: + Add command filter Module API tests. + 2 files changed, 28 insertions(+) + +Yossi Gottlieb in commit dc5edc7b: + Add command filtering argument handling API. + 3 files changed, 132 insertions(+), 13 deletions(-) + +Yossi Gottlieb in commit 5f29e2e2: + Initial command filter experiment. + 6 files changed, 161 insertions(+), 2 deletions(-) + +Oran Agra in commit e1839ab3: + diskless fork kept streaming RDB to a disconnected slave + 1 file changed, 10 insertions(+) + +Oran Agra in commit 3b207b89: + diskless replication - notify slave when rdb transfer failed + 1 file changed, 1 insertion(+) + +antirez in commit 7e350b09: + More sensible name for function: restartAOFAfterSYNC(). + 1 file changed, 7 insertions(+), 3 deletions(-) + +antirez in commit 91238a60: + Mostly aesthetic changes to restartAOF(). + 1 file changed, 7 insertions(+), 3 deletions(-) + +oranagra in commit ee2da67c: + bugfix to restartAOF, exit will never happen since retry will get negative. + 1 file changed, 5 insertions(+), 4 deletions(-) + +Oran Agra in commit 78022492: + Add log when server dies of SIGTERM during loading + 1 file changed, 1 insertion(+) + +Yossi Gottlieb in commit 232dca7f: + Add RedisModule_GetKeyNameFromIO(). + 8 files changed, 30 insertions(+), 17 deletions(-) + +antirez in commit 7f98129a: + MANIFESTO: simplicity and lock-in. + 1 file changed, 5 insertions(+), 1 deletion(-) + +antirez in commit 71265fe3: + MANIFESTO v2. 
+ 1 file changed, 41 insertions(+), 6 deletions(-) + +yongman in commit 8115be6e: + Fix uint64_t hash value in active defrag + 1 file changed, 3 insertions(+), 3 deletions(-) + +Angus Pearson in commit 90e7b5a9: + Enlarge error buffer in redis-check-aof.c to remove compiler warning of output truncation through snprintf format string + 1 file changed, 1 insertion(+), 1 deletion(-) + +zhaozhao.zz in commit 43151baf: + fix memory leak when rewrite config file + 1 file changed, 3 insertions(+), 4 deletions(-) + +唐权 in commit d3c17c9d: + Update ziplist.c + 1 file changed, 1 insertion(+), 1 deletion(-) + +stan011 in commit 296bd097: + change the comments there may have a mis type + 1 file changed, 1 insertion(+), 1 deletion(-) + +Yossi Gottlieb in commit e08c9c15: + Preserve client->id for blocked clients. + 1 file changed, 4 insertions(+), 1 deletion(-) + +zhaozhao.zz in commit c6b1252f: + aof: enhance AOF_FSYNC_EVERYSEC, more details in #5985 + 2 files changed, 32 insertions(+), 3 deletions(-) + +David Carlier in commit ce54e299: + build fix + 1 file changed, 1 insertion(+) + +yongman in commit c9274498: + Fix memleak in bitfieldCommand + 1 file changed, 8 insertions(+), 2 deletions(-) + +James Rouzier in commit 635d8d83: + Fix start and end key initialize + 1 file changed, 2 insertions(+), 2 deletions(-) + +Salvatore Sanfilippo in commit 7c23e534: + Merge pull request #6047 from abhaynahar/removed-obsolete-warning-5.0 +abhay in commit 9ea8ec42: + removed obsolete warning as per - https://github.com/antirez/redis/issues/5291 + 1 file changed, 1 insertion(+), 7 deletions(-) + +antirez in commit 1b7407fa: + Aesthetic change to #5962 to conform to Redis style. + 1 file changed, 1 insertion(+), 3 deletions(-) + +Oran Agra in commit 3bbf9747: + slave corrupts replication stream when module blocked client uses large reply (or POSTPONED_ARRAY) + 3 files changed, 15 insertions(+), 6 deletions(-) + +================================================================================ +Redis 5.0.4 Released Mon Mar 18 17:12:53 CET 2019 +================================================================================ + +Upgrade urgency HIGH: This release fixes several Redis stability issues. + +Dear Redis users, this release includes a number of fixes for bugs that may +result in Redis crashing in special conditions (not normal usage, but specific +artificial conditions), fixes to certain Redis behaviors especially around +Redis streams, and finally a set of new APIs for Redis Modules. + +Specifically: + +* Hyperloglog different coding errors leading to potential crashes were fixed. +* A replication bug leading to a potential crash in case of plain misuse of handshake commands was fixed. +* XCLAIM command incrementing of number of deliveries was fixed. +* LFU field management in objects was improved. +* A potential overflow in the redis-check-aof was fixed. +* A memory leak in case of API misuse was fixed. +* ZPOP* behavior when count is 0 is fixed. +* A few redis-cli --cluster bugs were fixed, plus a few improvements. +* Many other smaller bugs. + +We suggest to upgrade Redis, especially in case your instance is facing +untrusted users (for instance Cloud providers) because several of these +bugs could result in unwanted crashes. + +This is the list of commits: + +antirez in commit 84bdd440: + HyperLogLog: fix comment in hllCount(). + 1 file changed, 2 insertions(+), 2 deletions(-) + +antirez in commit ef1833b3: + HyperLogLog: handle wrong offset in the base case. 
+ 1 file changed, 2 insertions(+), 6 deletions(-) + +antirez in commit 623afd5e: + HyperLogLog: speedup fuzz test. + 1 file changed, 1 insertion(+), 2 deletions(-) + +antirez in commit 12b5ff10: + HyperLogLog: enlarge reghisto variable for safety. + 1 file changed, 6 insertions(+), 1 deletion(-) + +antirez in commit 254d897e: + HyperLogLog: dense/sparse repr parsing fuzz test. + 1 file changed, 29 insertions(+) + +John Sully in commit 7f79849c: + Fix hyperloglog corruption + 1 file changed, 6 insertions(+) + +Brad Solomon in commit 3ef2c831: + Provide an uninstall target in Makefile + 1 file changed, 3 insertions(+) + +antirez in commit 57aea463: + redis-check-aof: fix potential overflow. + 1 file changed, 2 insertions(+), 2 deletions(-) + +antirez in commit ba5145b8: + Fix objectSetLRUOrLFU() when LFU underflows. + 1 file changed, 11 insertions(+), 7 deletions(-) + +antirez in commit 76c59f0e: + Fix ZPOP return type when COUNT=0. Related to #5799. + 1 file changed, 1 insertion(+), 1 deletion(-) + +antirez in commit 1c636714: + Improve comments after merging #5834. + 2 files changed, 14 insertions(+), 8 deletions(-) + +Guy Benoish in commit 6a3fca4c: + Trim SDS free space of retained module strings + 4 files changed, 28 insertions(+), 5 deletions(-) + +Guy Benoish in commit 9ec144ea: + Fix mismatching keyspace notification classes + 2 files changed, 2 insertions(+), 2 deletions(-) + +Guy Benoish in commit d04b5211: + Fix zlexrangespec mem-leak in genericZrangebylexCommand + 1 file changed, 4 insertions(+), 1 deletion(-) + +Guy Benoish in commit 516f1c77: + Use memtoll() in 'CONFIG SET client-output-buffer-limit' + 1 file changed, 2 insertions(+), 2 deletions(-) + +Guy Benoish in commit 8db67a55: + Increase string2ld's buffer size (and fix HINCRBYFLOAT) + 2 files changed, 5 insertions(+), 1 deletion(-) + +Guy Benoish in commit db3d626b: + Check server.verbosity in RM_LogRaw + 1 file changed, 2 insertions(+) + +Guy Benoish in commit 71439a07: + ZPOP should return an empty array if COUNT=0 + 1 file changed, 4 insertions(+), 1 deletion(-) + +antirez in commit c8a26834: + Modules shared API: export new core APIs. + 2 files changed, 6 insertions(+) + +antirez in commit a13ba750: + Modules shared API: also unregister the module as user. + 1 file changed, 23 insertions(+) + +antirez in commit 500e5117: + Modules shared API: prevent unloading of used modules. + 1 file changed, 10 insertions(+), 4 deletions(-) + +antirez in commit 7854daa1: + Modules shared API: unregister APIs function. + 1 file changed, 25 insertions(+) + +antirez in commit d38d82af: + Modules shared API: initial core functions. + 2 files changed, 89 insertions(+), 1 deletion(-) + +antirez in commit 4d747bb8: + Revert shared APIs to modify the design. + 3 files changed, 120 deletions(-) + +MeirShpilraien in commit 8824b509: + added module ability to register api to be used by other modules + 3 files changed, 120 insertions(+) + +zhaozhao.zz in commit 000b055b: + Streams: checkType before XGROUP CREATE + 1 file changed, 7 insertions(+), 5 deletions(-) + +antirez in commit 9b2a0d54: + Fix BZPOP arity, backport from fix in cd2743c. 
+ 1 file changed, 2 insertions(+), 2 deletions(-) + +chendianqiang in commit 134b2582: + optimize cluster failover + 1 file changed, 1 insertion(+) + +Steve Webster in commit 1293e2a5: + Only increment delivery count if JUSTID option is omitted + 2 files changed, 18 insertions(+), 3 deletions(-) + +Steve Webster in commit 3cc4f469: + Increment delivery counter on XCLAIM unless RETRYCOUNT specified + 2 files changed, 35 insertions(+), 2 deletions(-) + +antirez in commit f4edd2b9: + Merge branch '5.0' of github.com:/antirez/redis into 5.0 +swilly22 in commit cedcc54e: + document additional flag of RM_GetContextFlags + 1 file changed, 3 insertions(+) + +swilly22 in commit 26e98da2: + Extend REDISMODULE_CTX_FLAGS to indicate if command was sent by master + 2 files changed, 6 insertions(+) + +Salvatore Sanfilippo in commit 0e910939: + Merge pull request #5879 from meierfra-ergon/redis-cli-assume-yes +antirez in commit 67452e91: + Make comment in #5911 stay inside 80 cols. + 1 file changed, 2 insertions(+), 1 deletion(-) + +John Sully in commit 30f666ef: + Replicas aren't allowed to run the replicaof command + 1 file changed, 8 insertions(+) + +Frank Meier in commit bc6c1c40: + extend use of cluster-yes option to other confimation questions + 1 file changed, 9 insertions(+) + +antirez in commit 76419d8d: + Merge branch '5.0' of github.com:/antirez/redis into 5.0 +Oran Agra in commit 72ba6069: + redis-cli add support for --memkeys, fix --bigkeys for module types + 1 file changed, 132 insertions(+), 81 deletions(-) + +chendianqiang in commit 2ca21753: + fix replicationid will not change for server.masterhost==NULL in cluster mode when restart slave + 1 file changed, 1 insertion(+), 1 deletion(-) + +Salvatore Sanfilippo in commit bd7ddd79: + Merge pull request #5870 from fengweiyuan/5.0 +varianfeng in commit d13bc143: + fix corrupt_rdb.c bug.Let the name of input rdb file name be valid. + 1 file changed, 2 insertions(+), 1 deletion(-) + +artix in commit 44c5bce0: + Cluster Manager: fix replica assigment anti-affinity (create) + 1 file changed, 6 insertions(+) + +artix in commit f066e526: + Cluster Manager: remove unused code elements + 1 file changed, 8 insertions(+), 13 deletions(-) + +Zhicheng Wei in commit 23214966: + fix clusterManagerGetAntiAffinityScore double free otypes + 1 file changed, 2 insertions(+), 1 deletion(-) + +antirez in commit 80bccd71: + Remove debugging printf from replication.tcl test. + 1 file changed, 1 deletion(-) + +================================================================================ +Redis 5.0.3 Released Tue Dec 11 18:17:26 CET 2018 +================================================================================ + +Upgrade urgency HIGH: Redis 5 is consolidating, upgrading is a good idea. + However there is nothing very critical here, but certain + issues resolved could lead to very rare crashes. + +Welcome to Redis 5.0.3, several interesting bug fixes here: + +* Redis no longer panics when you send data to a replica-mode connection that + is in MONITOR or SYNC mode. + +* Fixes to certain sorted set edge cases. You are unlikely to ever notice those + issues, but now it is more correct. + +* Certain BSD variants now are better supported: build & register logging + on crash. + +* The networking core now recovers if an IPv6 address is listed in bind but + is actually not able to work because there is no such protocol in the + system. + +* redis-cli cluster mode improved in many ways. 
Especially the fix subcommand + work was enhanced to cover other edge cases that were still not covered + after the work done for Redis 5. + +* MEMORY USAGE is now more accurate. + +* DEBUG DIGEST-VALUE added in case you want to make sure a given set of keys + (and not the whole DB) are excatly the same between two instances. + +* Fix a potential crash in the networking code related to recent changes + to the way the reply is consumed. + +* Reject EXEC containing write commands against an instance that changed role + from master to replica during our transaction. + +* Fix a crash in KEYS and other commands using pattern matching, in an edge + case where the pattern contains a zero byte. + +* Fix eviction during AOF loading due to maxmemory triggered by commands + executed in loading state. + +The following is the list of commmits if you want to check credits or dig +further in the details. + +commit 2c6ee0f9b3d9ca48c6da8bd18796186784216bff +Author: antirez +Date: Wed Dec 12 11:37:15 2018 +0100 + + freeMemoryIfNeeded() small refactoring. + + Related to issue #5686 and PR #5689. + +commit 107e93e75acfd5def0252efb6870751940816395 +Author: zhaozhao.zz +Date: Wed Dec 12 00:25:24 2018 +0800 + + evict: don't care about mem if loading + + When loading data, we call processEventsWhileBlocked + to process events and execute commands. + But if we are loading AOF it's dangerous, because + processCommand would call freeMemoryIfNeeded to evict, + and that will break data consistency, see issue #5686. + +antirez in commit ee93dc0b: + Crashing is too much in addReplyErrorLength(). + 1 file changed, 6 deletions(-) + +hdmg in commit c55254a5: + fix comments fault discription + 1 file changed, 1 insertion(+), 1 deletion(-) + +lsytj0413 in commit dfd25013: + fix a typo: craeted -> created + 1 file changed, 1 insertion(+), 1 deletion(-) + +antirez in commit 392a2566: + stringmatchlen() fuzz test added. + 3 files changed, 22 insertions(+) + +antirez in commit 7602f695: + Fix stringmatchlen() read past buffer bug. + 1 file changed, 1 insertion(+), 1 deletion(-) + +zhaozhao.zz in commit c4f3585e: + multi: ignore multiState's cmd_flags when loading AOF + 1 file changed, 1 insertion(+), 1 deletion(-) + +antirez in commit d037e987: + Reject EXEC containing write commands against RO replica. + 2 files changed, 20 insertions(+) + +artix in commit e00ab324: + Cluster Manager: - Multiple owners checking in 'fix'/'check' commands is + now optional (using --cluster-search-multiple-owners). - Updated help. + 1 file changed, 14 insertions(+), 5 deletions(-) + +artix in commit 94f64de3: + Cluster Manager: FixOpenSlot now correctly updates in-memory cluster + configuration. Improved output messages. + 1 file changed, 17 insertions(+), 5 deletions(-) + +artix in commit 752d636f: + Cluster Manager: 'fix' command now handles open slots with migrating state + in one node and importing state in multiple nodes. + 1 file changed, 74 insertions(+), 6 deletions(-) + +artix in commit 552091f9: + Cluster Manager: setting new slot owner is now handled atomically in + 'fix' command. + 1 file changed, 72 insertions(+), 31 deletions(-) + +artix in commit 2280f4f7: + Cluster Manager: code cleanup. + 1 file changed, 41 insertions(+), 87 deletions(-) + +artix in commit e084b8cc: + Cluster Manager: check/fix commands now handle multiple owners even + if all slots are covered and not open. 
+ 1 file changed, 129 insertions(+), 6 deletions(-) + +zhaozhao.zz in commit fa726e2a: + remove useless tryObjectEncoding in debug assert + 1 file changed, 1 deletion(-) + +Oran Agra in commit 40244b10: + fix #5580, display fragmentation and rss overhead bytes as signed + 2 files changed, 6 insertions(+), 6 deletions(-) + +zhaozhao.zz in commit beab3151: + networking: current_client should not be NULL when trim qb_pos + 1 file changed, 1 insertion(+), 1 deletion(-) + +antirez in commit 07ccb642: + Remove no longer relevant comment in processCommand(). + 1 file changed, 2 insertions(+), 6 deletions(-) + +antirez in commit 60fdaf07: + DEBUG DIGEST-VALUE implemented. + 1 file changed, 17 insertions(+), 3 deletions(-) + +antirez in commit 48b31b0d: + DEBUG DIGEST refactoring: extract function to digest a value. + 1 file changed, 142 insertions(+), 131 deletions(-) + +yura in commit ef3ff402: + redis-cli reshard/rebalance: ability to force replacement on existing keys + 1 file changed, 6 insertions(+), 5 deletions(-) + +Thomas Orozco in commit ee223fb8: + cli: pass auth through REDISCLI_AUTH + 1 file changed, 14 insertions(+) + +yongman in commit 41295e55: + Fix cluster call reply format readable + 1 file changed, 1 insertion(+), 1 deletion(-) + +Oran Agra in commit 0ed3970f: + fix small test suite race conditions + 3 files changed, 11 insertions(+) + +zhaozhao.zz in commit 605dddbb: + MEMORY command: make USAGE more accurate + 1 file changed, 7 insertions(+), 6 deletions(-) + +yongman in commit 1f43bf29: + Fix choose a random master node for slot assignment + 1 file changed, 29 insertions(+), 5 deletions(-) + +Weiliang Li in commit 69f0c678: + fix comment typo in util.c + 1 file changed, 1 insertion(+), 1 deletion(-) + +Chris Lamb in commit bc53a3ab: + Clarify the "Creating Server TCP listening socket" error. + 1 file changed, 1 insertion(+), 1 deletion(-) + +Chris Lamb in commit fefe5460: + Don't treat unsupported protocols as fatal errors + 1 file changed, 4 insertions(+) + +David Carlier in commit a8862972: + OpenBSD support. + 3 files changed, 74 insertions(+), 1 deletion(-) + +David Carlier in commit 5e86daf9: + Backtrace/register dump on BSD. + 3 files changed, 97 insertions(+), 3 deletions(-) + +Guy Benoish in commit 7c8cf5ac: + Don't call sdscmp() with shared.maxstring or shared.minstring + 2 files changed, 23 insertions(+), 9 deletions(-) + +Qu Chen in commit 39e9eda3: + Add unit test for stream XCLAIM command. + 1 file changed, 48 insertions(+) + +antirez in commit 62485232: + Abort instead of crashing when loading bad stream master key. + 1 file changed, 3 insertions(+) + +Madelyn Olson in commit a5487309: + Fixed a serverPanic when sending an invalid command to a monitor client + 1 file changed, 1 insertion(+), 1 deletion(-) + +================================================================================ +Redis 5.0.2 Released Thu Nov 22 11:22:37 CET 2018 +================================================================================ + +Upgrade urgency: CRITICAL if you use streams and consumer groups. + HIGH if you use redis-cli with Redis Cluster. + LOW otherwise. + +Welcome to Redis 5.0.2. This release fixes two issues with Streams consumer +groups, where items could be returned duplicated by XREADGROUP when accessing +the history, and another bug where XREADGROUP can report some history even +if the comsumer pending list is empty. Both problems were addressed and unit +tests to avoid regressions implemented. 
+
+Moreover, this release fixes some issues with redis-cli when in cluster mode.
+Finally, some FreeBSD and DragonFly build problems are now resolved. The list
+of commits is below.
+
+Enjoy,
+Salvatore
+
+David Carlier in commit e8b4291a:
+ DragonFlyBSD little build fix
+ 2 files changed, 6 insertions(+), 1 deletion(-)
+
+yongman in commit 8fcfd374:
+ skip slave nodes when sending cluster setslot command
+ 1 file changed, 1 insertion(+)
+
+yongman in commit d7089ddd:
+ Fix pointer access and memory leak in redis-cli.
+ 1 file changed, 6 insertions(+), 3 deletions(-)
+
+antirez in commit 17b4cd83:
+ Test: regression test for #5570.
+ 1 file changed, 15 insertions(+)
+
+antirez in commit 45123169:
+ Stream: fix XREADGROUP history reading of deleted messages.
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+David Carlier in commit 5ad588f0:
+ only FreeBSD change/little warning addressing
+ 2 files changed, 7 insertions(+), 4 deletions(-)
+
+David Carlier in commit 11801e1a:
+ tweak form feedback
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+David Carlier in commit c1f13575:
+ allow flavors
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+David Carlier in commit 275a2d49:
+ Fix clang build.
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+antirez in commit 44ad5141:
+ Test: regression test for #5577.
+ 1 file changed, 24 insertions(+)
+
+antirez in commit c7951f43:
+ Streams: fix XREADGROUP history reading when CG last_id is low.
+ 1 file changed, 12 insertions(+), 9 deletions(-)
+
+antirez in commit a69bc5be:
+ t_stream.c comment resized to 80 cols.
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+antirez in commit 5314099d:
+ Redis 5 changelog: don't expect Lua replies to be ordered.
+ 1 file changed, 14 insertions(+), 5 deletions(-)
+
+================================================================================
+Redis 5.0.1 Released Wed Nov 07 13:09:30 CET 2018
+================================================================================
+
+Upgrade urgency: URGENT if you use Redis Streams. MODERATE otherwise.
+
+Hi all, this is the first patch level release of Redis 5. It contains
+both fixes and improvements. Here is a list of the major ones; however,
+read the commit messages at the end of the changelog if you want to know
+more about the smaller things. Let's start with the new features:
+
+* Sentinel now supports authentication! Check the Sentinel official doc
+ for more info.
+
+* Redis-cli cluster "fix" is now able to fix a big number of clusters put
+ in a bad condition. Previously many corner cases were not covered.
+
+Now the critical fixes:
+
+1. Fix RESTORE mismatch reply when certain keys already expired.
+2. Fix a non-trivial XCLAIM issue: sometimes the command returned a wrong
+ entry or desynchronized the protocol.
+
+And now the other fixes:
+
+3. Stack trace generation on the Raspberry Pi (and 32bit ARM) fixed.
+4. Don't evict expired keys when the KEYS command is called, in order to
+ avoid a mass deletion event. However, expired keys are not displayed
+ by KEYS, as usual.
+5. Improvements in the computation of the memory used, when estimating
+ the AOF buffers.
+6. XRANGE COUNT of 0 fixed.
+7. "key misses" stats accounting fixed. Many cache misses were not counted.
+8. When in MULTI state, return an OOM error while accumulating commands
+ if there is no more memory available.
+9. Fix build on FreeBSD and possibly others.
+10. Fix a crash in Redis modules, thread-safe context reply accumulation.
+11. 
Fix a race condition when producing the RDB file for full SYNC. +12. Disable protected mode in Sentinel. +13. More commands now have the HELP subcommand. +14. Fixed an issue about adaptive server HZ timer. +15. Fix cluster-replica-no-failover option name. + +Finally, this is the list of commits. Enjoy Redis 5.0.1! + +antirez in commit c801283f: + Fix cluster-replica-no-failover option name. + 1 file changed, 1 insertion(+), 1 deletion(-) + +antirez in commit 4c4f50e1: + MEMORY command: make strcasecmp() conditional like the following. + 1 file changed, 1 insertion(+), 2 deletions(-) + +Itamar Haber in commit a7b46e0e: + Uppercases subcommands in MEMORY HELP + 1 file changed, 5 insertions(+), 5 deletions(-) + +Itamar Haber in commit 80e129d9: + Standardizes `MEMORY HELP` subcommand + 1 file changed, 13 insertions(+), 14 deletions(-) + +valentino in commit 88805cbb: + fix short period of server.hz being uninitialized + 1 file changed, 1 insertion(+), 1 deletion(-) + +Itamar Haber in commit 6b402733: + Adds HELP to LATENCY + 1 file changed, 14 insertions(+), 2 deletions(-) + +yongman in commit 1c637de9: + fix malloc in clusterManagerComputeReshardTable + 1 file changed, 1 insertion(+), 1 deletion(-) + +artix in commit 90b52fde: + Cluster Manager: removed unused var. + 1 file changed, 1 insertion(+), 2 deletions(-) + +artix in commit 89cbb5df: + Cluster Manager: further improvements to "fix": - clusterManagerFixOpenSlot: ensure that the slot is unassigned before ADDSLOTS - clusterManagerFixSlotsCoverage: after cold migration, the slot configuration is now updated on all the nodes. + 1 file changed, 49 insertions(+), 10 deletions(-) + +artix in commit 175515c9: + Cluster Manager: fixed string parsing issue in clusterManagerGetConfigSignature + 1 file changed, 3 insertions(+), 3 deletions(-) + +artix in commit 3997dd6e: + Cluster Manager: better fix subcommand. + 1 file changed, 78 insertions(+), 20 deletions(-) + +artix in commit bd80291c: + Cluster Manager: fixed typos in comments. + 1 file changed, 3 insertions(+), 3 deletions(-) + +artix in commit 4369cbce: + Cluster Manager: fixed 'DELSLOT' subcommand typo. + 1 file changed, 1 insertion(+), 1 deletion(-) + +antirez in commit 1ed821e2: + Fix XCLAIM missing entry bug. + 1 file changed, 3 insertions(+), 2 deletions(-) + +michael-grunder in commit b49bcd01: + Use typedef'd mstime_t instead of time_t + 1 file changed, 1 insertion(+), 1 deletion(-) + +antirez in commit 09d1849e: + Improve streamReplyWithRange() top comment. + 1 file changed, 9 insertions(+), 5 deletions(-) + +antirez in commit bdf6306f: + Add support for Sentinel authentication. + 1 file changed, 17 insertions(+), 3 deletions(-) + +antirez in commit 50222af5: + Disable protected mode in Sentinel mode. + 1 file changed, 1 insertion(+) + +antirez in commit 643ee6e3: + When replica kills a pending RDB save during SYNC, log it. + 1 file changed, 6 insertions(+) + +Andrey Bugaevskiy in commit 8b609c99: + Move child termination to readSyncBulkPayload + 1 file changed, 6 insertions(+), 7 deletions(-) + +Andrey Bugaevskiy in commit 27102605: + Prevent RDB autosave from overwriting full resync results + 1 file changed, 7 insertions(+) + +antirez in commit a677923d: + asyncCloseClientOnOutputBufferLimitReached(): don't free fake clients. + 1 file changed, 1 insertion(+) + +David Carlier in commit 427e440a: + needs it for the global + 1 file changed, 1 insertion(+) + +David Carlier in commit 28f9ca4e: + Fix non Linux build. 
+ 3 files changed, 20 insertions(+), 1 deletion(-)
+
+zhaozhao.zz in commit 4bf9efe2:
+ MULTI: OOM err if cannot free enough memory in MULTI/EXEC context
+ 1 file changed, 5 insertions(+), 2 deletions(-)
+
+antirez in commit 4fbd7a39:
+ Add command fingerprint comment for XSETID.
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+Itamar Haber in commit 2480db53:
+ Plugs a potential underflow
+ 1 file changed, 1 insertion(+)
+
+Itamar Haber in commit e5e4d2ef:
+ Corrects inline documentation of syntax
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+zhaozhao.zz in commit 713800d2:
+ if we read a expired key, misses++
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+antirez in commit e79ee263:
+ Fix XRANGE COUNT option for value of 0.
+ 1 file changed, 8 insertions(+), 2 deletions(-)
+
+antirez in commit 505cc70f:
+ Fix typo in streamReplyWithRange() top comment.
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+Damien Tournoud in commit 3c36561d:
+ Overhead is the allocated size of the AOF buffer, not its length
+ 2 files changed, 2 insertions(+), 2 deletions(-)
+
+antirez in commit 3761582f:
+ Simplify part of the #5470 patch.
+ 1 file changed, 11 insertions(+), 12 deletions(-)
+
+zhaozhao.zz in commit edc47a3a:
+ do not delete expired keys in KEYS command
+ 1 file changed, 34 insertions(+), 27 deletions(-)
+
+antirez in commit 9872af6d:
+ Use guide comments to make changes in #5462 more obvious.
+ 1 file changed, 6 insertions(+)
+
+youjiali1995 in commit 3f399c3b:
+ migrate: fix mismatch of RESTORE reply when some keys have expired.
+ 1 file changed, 8 insertions(+), 6 deletions(-)
+
+hujie in commit eaaff621:
+ fix typo in config.c
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+hujiecs in commit 43ebb7ee:
+ several typos fixed, optimize MSETNX to avoid unnecessary loop
+ 4 files changed, 4 insertions(+), 4 deletions(-)
+
+antirez in commit de8fdaac:
+ Remove useless complexity from MSET implementation.
+ 1 file changed, 5 insertions(+), 7 deletions(-)
+
+antirez in commit dc8f1112:
+ Fix again stack generation on the Raspberry Pi.
+ 1 file changed, 4 insertions(+)
+
+antirez in commit 83a6e81d:
+ Get rid of the word slave in the release note of Redis 5.
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+================================================================================
+Redis 5.0.0 Released Wed Oct 17 13:28:26 CEST 2018
+================================================================================
+
+Upgrade urgency CRITICAL: Several fixes to streams AOF and replication.
+
+Hi all and welcome to the first stable release of Redis 5! \o/
+
+To start a quick recap of what's new in Redis 5:
+
+1. The new Stream data type. https://redis.io/topics/streams-intro
+2. New Redis modules APIs: Timers, Cluster and Dictionary APIs.
+3. RDB now stores LFU and LRU information.
+4. The cluster manager was ported from Ruby (redis-trib.rb) to C code
+ inside redis-cli. Check `redis-cli --cluster help` for more info.
+5. New sorted set commands: ZPOPMIN/MAX and blocking variants.
+6. Active defragmentation version 2.
+7. Improvements in HyperLogLog implementations.
+8. Better memory reporting capabilities.
+9. Many commands with sub-commands now have a HELP subcommand.
+10. Better performance when clients connect and disconnect often.
+11. Many bug fixes and other random improvements.
+12. Jemalloc was upgraded to version 5.1.
+13. CLIENT UNBLOCK and CLIENT ID.
+14. The LOLWUT command was added. http://antirez.com/news/123
+15. 
We no longer use the "slave" word except for API backward compatibility.
+16. Different optimizations in the networking layer.
+17. Lua improvements:
+ - Better propagation of Lua scripts to replicas / AOF.
+ - Lua scripts can now timeout and get in -BUSY state in the replica as well.
+18. Dynamic HZ to balance idle CPU usage with responsiveness.
+19. The Redis core was refactored and improved in many ways.
+
+However, the list above really does not do justice to the changes in Redis 5,
+since the core was improved in many ways during the development of the new
+version. Certain changes were also back-ported into Redis 4 once they were
+deemed safe, because many improvements were hard to distinguish from fixes.
+
+The most important user-facing improvement is without doubt the introduction
+of the first new general-purpose data type in years: streams.
+
+Note that we worked to improve and fix streams until a few hours ago, so while
+we are not aware of critical bugs in this release, it should surely be handled
+with some care for the first weeks. Bug reports will be highly appreciated and
+we are ready to work immediately to release 5.0.1 once there is enough
+important stuff to justify a new release (probably soon).
+
+People not using streams will probably have a more production-ready
+experience with Redis 5, also because many internals are shared with Redis 4,
+so the jump is not as big as it was between 3.2 and 4 in terms of how things
+work internally.
+
+Well, many thanks to the Redis community and the developers that made
+this release possible, contributing bug reports, patches, new features, working
+on the clients, sometimes debugging problems for days. Also thanks to everybody
+that adopted Redis for their use cases, making things work for users worldwide.
+
+The list of commits in this release follows.
+
+Cheers,
+Salvatore
+
+antirez in commit bcc0916d:
+ Fix conditional in XGROUP.
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+antirez in commit 1b2f23f3:
+ Update help.h for redis-cli.
+ 1 file changed, 57 insertions(+), 7 deletions(-)
+
+antirez in commit de0ae56c:
+ Tests for XGROUP CREATE MKSTREAM.
+ 1 file changed, 11 insertions(+)
+
+antirez in commit 56c3dfa1:
+ Fix XGROUP CREATE MKSTREAM handling of .
+ 1 file changed, 7 insertions(+), 2 deletions(-)
+
+antirez in commit 2687f228:
+ Process MKSTREAM option of XGROUP CREATE at a later time.
+ 1 file changed, 28 insertions(+), 17 deletions(-)
+
+zhaozhao.zz in commit cfbaf8f1:
+ Scripting & Streams: some commands need right flags
+ 1 file changed, 5 insertions(+), 5 deletions(-)
+
+antirez in commit 4e4099b9:
+ XGROUP CREATE: MKSTREAM option for automatic stream creation.
+ 1 file changed, 29 insertions(+), 5 deletions(-)
+
+zhaozhao.zz in commit 6dd4d864:
+ Streams: Tests modified XSTREAM -> XSETID
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+zhaozhao.zz in commit 3aff0e8c:
+ Streams: rewrite empty streams with certain lastid
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+antirez in commit 880b563e:
+ Tests modified to use XADD MAXLEN 0 + XSETID.
+ 1 file changed, 12 insertions(+), 26 deletions(-)
+
+antirez in commit 83c87835:
+ Streams: rewrite empty streams with XADD MAXLEN 0. Use XSETID.
+ 1 file changed, 18 insertions(+), 12 deletions(-)
+
+antirez in commit fd22e3ac:
+ XSETID: accept IDs based on last entry.
+ 1 file changed, 18 insertions(+), 5 deletions(-)
+
+antirez in commit dfab3cba:
+ Streams: XSTREAM SETID -> XSETID.
+ 3 files changed, 17 insertions(+), 67 deletions(-) + +zhaozhao.zz in commit a3fb28ed: + Streams: rewrite id in XSTREAM CREATE * + 1 file changed, 4 insertions(+) + +zhaozhao.zz in commit f4b4db13: + Streams: add tests for aof rewrite + 1 file changed, 23 insertions(+) + +zhaozhao.zz in commit d22f1ef0: + Stream & AOF: rewrite stream in correct way + 1 file changed, 32 insertions(+), 16 deletions(-) + +zhaozhao.zz in commit 6455274d: + Streams: add tests for XSTREAM command + 1 file changed, 39 insertions(+) + +zhaozhao.zz in commit 0edbe953: + Streams: add a new command XTREAM + 3 files changed, 67 insertions(+) + +Hamid Alaei in commit 9714bba2: + fix timer context selected database + 1 file changed, 3 insertions(+), 1 deletion(-) + +antirez in commit eb53f15a: + Make comment about nack->consumer test for minidle more obvious. + 1 file changed, 4 insertions(+), 2 deletions(-) + +antirez in commit a77f836e: + Streams: use propagate_last_id itself as streamPropagateGroupID trigger. + 1 file changed, 2 insertions(+), 2 deletions(-) + +antirez in commit 0f0610eb: + Streams: better naming: lastid_updated -> propagate_last_id. + 1 file changed, 6 insertions(+), 6 deletions(-) + +zhaozhao.zz in commit a745e423: + Streams: panic if streamID invalid after check, should not be possible. + 1 file changed, 2 insertions(+), 1 deletion(-) + +zhaozhao.zz in commit 9974be13: + Streams: propagate lastid in XCLAIM when it has effect + 1 file changed, 13 insertions(+), 6 deletions(-) + +zhaozhao.zz in commit 69a628d0: + Streams: XCLAIM ignore minidle if NACK is created by FORCE + 1 file changed, 4 insertions(+), 2 deletions(-) + +zhaozhao.zz in commit a04b43c7: + Streams: bugfix XCLAIM should propagate group name not consumer name + 1 file changed, 1 insertion(+), 1 deletion(-) + +Sergey Chupov in commit 8977a90c: + fixed typos in readme + 1 file changed, 2 insertions(+), 2 deletions(-) + +antirez in commit 3a745674: + redis.conf typo fixed: ingore -> ignore. + 1 file changed, 1 insertion(+), 1 deletion(-) + +antirez in commit 22770d76: + Rax: radix tree updated to latest version from antirez/rax. + 2 files changed, 233 insertions(+), 68 deletions(-) + +antirez in commit fbac534f: + Test: avoid time related false positive in RESTORE test. + 1 file changed, 1 insertion(+), 1 deletion(-) + +antirez in commit 49872337: + LOLWUT: capitalize Nees. + 1 file changed, 1 insertion(+), 1 deletion(-) + +antirez in commit 80c471f5: + Test: cgroup propagation test also for NOACK variant. + 1 file changed, 39 insertions(+), 29 deletions(-) + +antirez in commit 8defa5da: + Test: consumer group last ID slave propagation test. + 1 file changed, 39 insertions(+) + +zhaozhao.zz in commit e1e3eaca: + Avoid recreate write handler for protected client. + 1 file changed, 4 insertions(+) + +antirez in commit b501fd5d: + Fix propagation of consumer groups last ID. + 3 files changed, 56 insertions(+), 9 deletions(-) + + +================================================================================ +Redis 5.0-rc6 Released Wed Oct 10 11:03:54 CEST 2018 +================================================================================ + +Upgrade urgency HIGH: Many bugs fixed especially in the context of streams. + +This is probably the last release candidate of Redis 5. The Redis 5 GA version +will be released 17th of October. 
The main highlights of this release are:
+
+* Critical AOF bug, as old as AOF itself: if an open MULTI/EXEC block is at
+ the end of the AOF file, Redis would still read the half-transaction when
+ reloading back the AOF.
+* The slave name was removed from logs and documentation; now replica is used
+ instead.
+* LOLWUT command added.
+* New modules APIs: Disable Redis Cluster redirection.
+* New modules APIs: Sorted dictionaries data type.
+* Modules APIs fixes: timer / cluster messages callbacks can now call RM_Call().
+* Fix for #5024 - commandstats for multi-exec were logged as EXEC.
+* A number of optimizations and fixes for the stream data type.
+* Many other stability improvements.
+
+This is the list of commits and contributors:
+
+antirez in commit 9a6fa7d0:
+ changelog.tcl: get optional argument for number of commits.
+ 1 file changed, 8 insertions(+), 3 deletions(-)
+
+antirez in commit 101e419f:
+ Free protected clients asynchronously.
+ 1 file changed, 7 insertions(+)
+
+antirez in commit 726debb8:
+ Actually use the protectClient() API where needed.
+ 2 files changed, 8 insertions(+), 9 deletions(-)
+
+antirez in commit 0b87f78a:
+ Introduce protectClient() + some refactoring.
+ 2 files changed, 60 insertions(+), 18 deletions(-)
+
+zhaozhao.zz in commit 6aa8ac70:
+ debug: avoid free client unexpectedly when reload & loadaof
+ 1 file changed, 8 insertions(+), 2 deletions(-)
+
+antirez in commit 48040b02:
+ aof.c: improve indentation and change warning message.
+ 1 file changed, 11 insertions(+), 4 deletions(-)
+
+zhaozhao.zz in commit 7cc20569:
+ AOF: discard if we lost EXEC when loading aof
+ 2 files changed, 14 insertions(+), 3 deletions(-)
+
+antirez in commit 2007d30c:
+ Refactoring of XADD / XTRIM MAXLEN rewriting.
+ 1 file changed, 15 insertions(+), 22 deletions(-)
+
+zhaozhao.zz in commit 6a298110:
+ Streams: add test cases for XADD/XTRIM maxlen
+ 1 file changed, 46 insertions(+)
+
+zhaozhao.zz in commit 041161b7:
+ Streams: propagate specified MAXLEN instead of approximated
+ 1 file changed, 35 insertions(+), 6 deletions(-)
+
+zhaozhao.zz in commit f04d799b:
+ Streams: reset approx_maxlen in every maxlen loop
+ 1 file changed, 2 insertions(+)
+
+zhaozhao.zz in commit affd9365:
+ Streams: XTRIM will return an error if MAXLEN with a count < 0
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+
+zhaozhao.zz in commit 4c405ad0:
+ Streams: propagate original MAXLEN argument in XADD context
+ 1 file changed, 3 insertions(+), 12 deletions(-)
+
+antirez in commit 5c6d4b4a:
+ Fix typo in replicationCron() comment.
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+antirez in commit a67a8dbf:
+ Fix typo in design comment of bio.c.
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+antirez in commit c4ab5a05:
+ xclaimCommand(): fix comment typos.
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+antirez in commit dc0b628a:
+ streamAppendItem(): Update the radix tree pointer only if changed.
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+antirez in commit 4566fbc7:
+ Listpack: optionally force reallocation on inserts.
+ 1 file changed, 20 insertions(+)
+
+antirez in commit 5eca170c:
+ Fix printf type mismatch in genRedisInfoString().
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+antirez in commit 260b53a2:
+ streamIteratorRemoveEntry(): set back lp only if pointer changed.
+ 1 file changed, 2 insertions(+), 1 deletion(-) + +zhaozhao.zz in commit 5d12f9d9: + Streams: update listpack with new pointer in XDEL + 1 file changed, 3 insertions(+) + +zhaozhao.zz in commit 6b7ad838: + bugfix: replace lastcmd with cmd when rewrite BRPOPLPUSH as RPOPLPUSH + 1 file changed, 1 insertion(+), 1 deletion(-) + +Oran Agra in commit 3454a043: + script cache memory in INFO and MEMORY includes both script code and overheads + 2 files changed, 3 insertions(+), 3 deletions(-) + +Oran Agra in commit d6aeca86: + fix #5024 - commandstats for multi-exec were logged as EXEC. + 2 files changed, 63 insertions(+), 2 deletions(-) + +antirez in commit a996b2a2: + Fix XINFO comment for consistency. + 1 file changed, 1 insertion(+), 1 deletion(-) + +Bruce Merry in commit 1a8447b6: + Fix invalid use of sdsZmallocSize on an embedded string + 1 file changed, 1 insertion(+), 1 deletion(-) + +Bruce Merry in commit 8dde46ad: + Fix incorrect memory usage accounting in zrealloc + 3 files changed, 24 insertions(+), 2 deletions(-) + +Hamid Alaei in commit b362a1b7: + fix dict get on not found + 1 file changed, 1 insertion(+), 1 deletion(-) + +antirez in commit 55e9df8a: + Try to avoid issues with GCC pragmas and older compilers. + 1 file changed, 7 insertions(+), 4 deletions(-) + +antirez in commit b0d22702: + Modules: hellodict example WIP #3: KEYRANGE. + 1 file changed, 40 insertions(+) + +antirez in commit af2f6682: + Modules: Modules: dictionary API WIP #13: Compare API exported. + 2 files changed, 6 insertions(+) + +antirez in commit f9a3e6ef: + Modules: Modules: dictionary API WIP #12: DictCompare API. + 1 file changed, 8 insertions(+) + +antirez in commit 01e0341a: + Modules: Modules: dictionary API WIP #11: DictCompareC API. + 1 file changed, 18 insertions(+) + +antirez in commit f9b3ce9a: + Modules: hellodict example WIP #1: GET command. + 1 file changed, 18 insertions(+) + +antirez in commit 36e66d86: + Modules: hellodict example WIP #1: SET command. + 1 file changed, 74 insertions(+) + +antirez in commit e33fdbe8: + Modules: remove useless defines in hellotimer.c + 2 files changed, 6 insertions(+), 4 deletions(-) + +antirez in commit 1c8b2248: + Modules: fix top comment of hellotimer.c + 1 file changed, 1 insertion(+), 1 deletion(-) + +Guy Korland in commit 7ded552d: + add missing argument to function doc + 1 file changed, 1 insertion(+), 1 deletion(-) + +Pavel Skuratovich in commit f92b3273: + Fix typo in comment + 1 file changed, 1 insertion(+), 1 deletion(-) + +antirez in commit 57b6c343: + Modules: dictionary API WIP #10: export API to modules. + 2 files changed, 60 insertions(+) + +antirez in commit 3f82e59c: + Modules: dictionary API WIP #9: iterator returning string object. + 1 file changed, 23 insertions(+), 6 deletions(-) + +antirez in commit 6a73aca3: + Modules: dictionary API WIP #8: Iterator next/prev. + 1 file changed, 42 insertions(+) + +antirez in commit ef8413db: + Modules: dictionary API WIP #7: don't store the context. + 1 file changed, 7 insertions(+), 8 deletions(-) + +antirez in commit 05579e38: + Modules: dictionary API WIP #6: implement automatic memory management. + 1 file changed, 21 insertions(+), 7 deletions(-) + +antirez in commit 11c53f8c: + Modules: dictionary API work in progress #5: rename API for consistency. + 1 file changed, 25 insertions(+), 25 deletions(-) + +antirez in commit 0bd7091b: + Modules: change RedisModuleString API to allow NULL context. 
+ 1 file changed, 33 insertions(+), 12 deletions(-) + +antirez in commit 5fc16f17: + Modules: dictionary API work in progress #4: reseek API. + 1 file changed, 25 insertions(+), 6 deletions(-) + +antirez in commit 45b7f779: + Modules: dictionary API work in progress #3: Iterator creation. + 1 file changed, 41 insertions(+), 1 deletion(-) + +antirez in commit 8576b0ae: + Modules: dictionary API work in progress #2: Del API. + 1 file changed, 17 insertions(+), 2 deletions(-) + +antirez in commit 4b0fa7a7: + Modules: dictionary API work in progress #1. + 2 files changed, 95 insertions(+), 1 deletion(-) + +antirez in commit 28210760: + Module cluster flags: use RM_SetClusterFlags() in the example. + 2 files changed, 11 insertions(+) + +antirez in commit 18c5ab93: + Module cluster flags: add RM_SetClusterFlags() API. + 3 files changed, 33 insertions(+) + +antirez in commit 4ce6bff2: + Module cluster flags: add hooks for NO_FAILOVER flag. + 1 file changed, 4 insertions(+), 2 deletions(-) + +antirez in commit 2ba52889: + Module cluster flags: add hooks for NO_REDIRECTION flag. + 3 files changed, 14 insertions(+), 4 deletions(-) + +antirez in commit 6a39ece6: + Module cluster flags: initial vars / defines added. + 5 files changed, 20 insertions(+) + +antirez in commit 0ff35370: + Modules: rename the reused static client to something more general. + 1 file changed, 10 insertions(+), 8 deletions(-) + +antirez in commit 2d11ee95: + Modules: associate a fake client to timer context callback. + 1 file changed, 2 insertions(+) + +antirez in commit 851b2ed3: + Modules: associate a fake client to cluster message context callback. + 1 file changed, 2 insertions(+) + +artix in commit 148e4911: + Cluster Manager: clusterManagerFixOpenSlot now counts node's keys in slot if node is neither migrating nor importing. + 1 file changed, 20 insertions(+), 1 deletion(-) + +Guy Korland in commit 8afca145: + No need to return "OK" + 1 file changed, 1 insertion(+), 1 deletion(-) + +Guy Korland in commit 9a278db2: + typo fix + 1 file changed, 1 insertion(+), 1 deletion(-) + +antirez in commit 26479036: + Revert "fix repeat argument issue and reduce unnessary loop times for redis-cli." + 1 file changed, 7 insertions(+), 12 deletions(-) + +Guy Korland in commit 27b7fb5a: + Fix few typos + 1 file changed, 10 insertions(+), 10 deletions(-) + +Guy Korland in commit 233aa2d3: + RedisModule_HashSet call must end with NULL + 1 file changed, 3 insertions(+), 1 deletion(-) + +antirez in commit a8494072: + Sentinel: document how to undo a renamed command. + 1 file changed, 6 insertions(+), 1 deletion(-) + +antirez in commit 6c8a8f2e: + LOLWUT: split the command from version-specific implementations. + 3 files changed, 297 insertions(+), 241 deletions(-) + +antirez in commit 5c758406: + Slave removal: add a few forgotten aliases for CONFIG SET. + 1 file changed, 10 insertions(+) + +antirez in commit 2da823c4: + LOLWUT: add Redis version in the output. + 1 file changed, 3 insertions(+), 1 deletion(-) + +antirez in commit bfcba420: + LOLWUT: Ness -> Nees. + 1 file changed, 1 insertion(+), 1 deletion(-) + +antirez in commit efed898a: + LOLWUT: Limit maximum CPU effort. + 1 file changed, 5 insertions(+) + +antirez in commit eb0fbd71: + LOLWUT: change padding conditional to a more direct one. + 1 file changed, 1 insertion(+), 1 deletion(-) + +Slobodan Mišković in commit ed08feb7: + Fix spelling descrive -> describe + 1 file changed, 1 insertion(+), 1 deletion(-) + +antirez in commit 2ffb4413: + LOLWUT: fix crash when col < 2. 
+ 1 file changed, 1 insertion(+), 1 deletion(-) + +antirez in commit 55dae693: + LOLWUT: fix structure typo in comment. + 1 file changed, 1 insertion(+), 1 deletion(-) + +antirez in commit 9b3098b9: + LOLWUT: Fix license copyright year. + 1 file changed, 1 insertion(+), 1 deletion(-) + +antirez in commit 263dbadc: + LOLWUT: increase the translation factor. + 1 file changed, 2 insertions(+), 2 deletions(-) + +antirez in commit a622f6c0: + LOLWUT: change default size to fit a normal terminal better. + 1 file changed, 6 insertions(+), 6 deletions(-) + +antirez in commit 38b0d25a: + LOLWUT: wrap it into a proper command. + 4 files changed, 40 insertions(+), 15 deletions(-) + +antirez in commit 34ebd898: + LOLWUT: draw Schotter by Georg Nees. + 1 file changed, 47 insertions(+), 3 deletions(-) + +antirez in commit 46286e64: + LOLWUT: draw rotated squares using trivial trigonometry. + 1 file changed, 44 insertions(+) + +antirez in commit 2d4143fd: + LOLWUT: draw lines using Bresenham algorithm. + 1 file changed, 26 insertions(+), 2 deletions(-) + +antirez in commit 3546d9ce: + LOLWUT: Rendering of the virtual canvas to a string. + 1 file changed, 78 insertions(+), 7 deletions(-) + +antirez in commit b404a6ce: + LOLWUT: show the output verbatim in redis-cli. + 1 file changed, 1 insertion(+) + +antirez in commit e30ba94f: + LOLWUT: canvas structure and BSD license on top. + 1 file changed, 46 insertions(+) + +antirez in commit 9c771145: + LOLWUT: Emit Braille unicode according to pixel pattern. + 1 file changed, 23 insertions(+) + +Jakub Vrana in commit 4a1d6c7d: + Slave removal: capitalize Replica + 2 files changed, 5 insertions(+), 5 deletions(-) + +antirez in commit 72e0368a: + Slave removal: remove slave from integration tests descriptions. + 8 files changed, 36 insertions(+), 36 deletions(-) + +antirez in commit c7841c2b: + Slave removal: remove slave from top-level tests descriptions. + 3 files changed, 12 insertions(+), 12 deletions(-) + +antirez in commit 1b9b19ba: + Slave removal: remove slave from object.c. + 1 file changed, 1 insertion(+), 1 deletion(-) + +antirez in commit 7da266e6: + Slave removal: remove slave from the README. + 1 file changed, 7 insertions(+), 7 deletions(-) + +antirez in commit 93d803c9: + Slave removal: server.c logs fixed. + 1 file changed, 5 insertions(+), 5 deletions(-) + +antirez in commit 89434032: + Slave removal: remove slave from sentinel.conf when possible. + 1 file changed, 18 insertions(+), 18 deletions(-) + +antirez in commit 7673d88d: + Slave removal: replace very few things in Sentinel. + 1 file changed, 12 insertions(+), 8 deletions(-) + +antirez in commit f1de29b3: + Slave removal: scripting.c logs and other stuff fixed. + 1 file changed, 6 insertions(+), 2 deletions(-) + +antirez in commit 53fe558e: + Slave removal: replication.c logs fixed. + 1 file changed, 35 insertions(+), 35 deletions(-) + +antirez in commit c92b02dd: + Slave removal: networking.c logs fixed. + 1 file changed, 5 insertions(+), 5 deletions(-) + +antirez in commit be76ed0c: + Slave removal: blocked.c logs fixed. + 1 file changed, 1 insertion(+), 1 deletion(-) + +antirez in commit 3fd73151: + Slave removal: Make obvious in redis.conf what a replica is. + 1 file changed, 5 insertions(+) + +antirez in commit a22168e4: + Slave removal: slave mode -> replica mode text in redis-cli. + 1 file changed, 1 insertion(+), 1 deletion(-) + +antirez in commit 0e222fbe: + Slave removal: fix typo of replicaof. 
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+antirez in commit 34a5615e:
+ Slave removal: slave -> replica in redis.conf and output buffer option.
+ 3 files changed, 132 insertions(+), 129 deletions(-)
+
+antirez in commit 1d2fcf6f:
+ Slave removal: Convert cluster.c log messages and command names.
+ 1 file changed, 12 insertions(+), 11 deletions(-)
+
+antirez in commit 2546158d:
+ Slave removal: config.c converted + config rewriting hacks.
+ 1 file changed, 117 insertions(+), 38 deletions(-)
+
+antirez in commit c0952c0d:
+ Slave removal: redis-cli --slave -> --replica.
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+antirez in commit 1f37f1dd:
+ Slave removal: SLAVEOF -> REPLICAOF. SLAVEOF is now an alias.
+ 3 files changed, 4 insertions(+), 3 deletions(-)
+
+Amin Mesbah in commit 7928f578:
+ Use geohash limit defines in constraint check
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+Jeffrey Lovitz in commit bb2bed78:
+ CLI Help text loop verifies arg count
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+youjiali1995 in commit 246980d0:
+ sentinel: fix randomized sentinelTimer.
+ 1 file changed, 1 insertion(+), 3 deletions(-)
+
+youjiali1995 in commit fa7de8c4:
+ bio: fix bioWaitStepOfType.
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+Weiliang Li in commit 7642f9d5:
+ fix usage typo in redis-cli
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+================================================================================
+Redis 5.0 RC5 Released Thu Sep 06 12:54:29 CEST 2018
+================================================================================
+
+Upgrade urgency HIGH: Several important bugs fixed.
+
+Hi all,
+
+This is release candidate number five, and it has a lot of bug fixes inside,
+together with a few big changes to the Redis behavior from the point of view
+of replication of scripts and handling of the maxmemory directive in slaves.
+Make sure to read the whole list!
+
+* BREAKING BEHAVIOR: Slaves now ignore maxmemory by default.
+* BREAKING BEHAVIOR: Now scripts are always replicated for their effects, and
+ the script itself is never sent to slaves/AOF.
+* Improvement: Big pipelining performance improved significantly.
+* Fix: Rewrite BRPOPLPUSH as RPOPLPUSH to propagate.
+* Fix: False positives in tests.
+* Fix: Certain command flags were modified because they were not correct.
+* Fix: Fix blocking XREAD for streams that are empty.
+* Improvement: Allow scripts to timeout on slaves as well.
+* Fix: Different corner cases due to CLIENT PAUSE are now fixed.
+* Improvement: Optimize parsing large bulk greater than 32k.
+* Fix: Propagate read-only scripts as SCRIPT LOAD, not as EVAL.
+
+The following is the list of commits, so that you can read the details and
+check the credits.
+
+antirez in commit 1d1bf7f0:
+ Document that effects replication is Redis 5 default.
+ 1 file changed, 8 insertions(+)
+
+antirez in commit cfd969c7:
+ Fix scripting tests now that we default to commands repl.
+ 1 file changed, 8 insertions(+), 1 deletion(-)
+
+antirez in commit 3e1fb5ff:
+ Use commands (effects) replication by default in scripts.
+ 3 files changed, 8 insertions(+), 1 deletion(-)
+
+antirez in commit c6c71abe:
+ Safer script stop condition on OOM.
+ 1 file changed, 5 insertions(+), 2 deletions(-)
+
+antirez in commit dfbce91a:
+ Propagate read-only scripts as SCRIPT LOAD.
+ 1 file changed, 16 insertions(+), 3 deletions(-)
+
+antirez in commit 1705e42e:
+ Don't perform eviction when re-entering the event loop.
+ 1 file changed, 7 insertions(+), 2 deletions(-) + +antirez in commit a0dd6f82: + Clarify why remaining may be zero in readQueryFromClient(). + 1 file changed, 2 insertions(+) + +zhaozhao.zz in commit 2eed31a5: + networking: fix unexpected negative or zero readlen + 1 file changed, 1 insertion(+), 1 deletion(-) + +antirez in commit 37fb606c: + Merge branch '5.0' of github.com:/antirez/redis into 5.0 +zhaozhao.zz in commit 1898e6ce: + networking: optimize parsing large bulk greater than 32k + 1 file changed, 13 insertions(+), 10 deletions(-) + +antirez in commit 82fc63d1: + Unblocked clients API refactoring. See #4418. + 4 files changed, 33 insertions(+), 15 deletions(-) + +zhaozhao.zz in commit 839bb52c: + if master is already unblocked, do not unblock it twice + 1 file changed, 1 insertion(+), 1 deletion(-) + +zhaozhao.zz in commit 2e1cd82d: + fix multiple unblock for clientsArePaused() + 1 file changed, 3 insertions(+), 3 deletions(-) + +antirez in commit 17233080: + Make pending buffer processing safe for CLIENT_MASTER client. + 3 files changed, 22 insertions(+), 13 deletions(-) + +antirez in commit 42bce87a: + Test: processing of master stream in slave -BUSY state. + 1 file changed, 44 insertions(+) + +antirez in commit 8bf42f60: + After slave Lua script leaves busy state, re-process the master buffer. + 2 files changed, 5 insertions(+), 2 deletions(-) + +antirez in commit c2b104c7: + While the slave is busy, just accumulate master input. + 2 files changed, 6 insertions(+), 1 deletion(-) + +antirez in commit 7b75f4ae: + Allow scripts to timeout even if from the master instance. + 1 file changed, 6 insertions(+), 11 deletions(-) + +antirez in commit adc4e031: + Allow scripts to timeout on slaves as well. + 2 files changed, 10 insertions(+), 3 deletions(-) + +dejun.xdj in commit 20ec1f0c: + Revise the comments of latency command. + 1 file changed, 2 insertions(+), 1 deletion(-) + +Chris Lamb in commit 8e5423eb: + Correct "did not received" -> "did not receive" typos/grammar. + 6 files changed, 10 insertions(+), 10 deletions(-) + +zhaozhao.zz in commit 395063d7: + remove duplicate bind in sentinel.conf + 1 file changed, 10 deletions(-) + +Salvatore Sanfilippo in commit b221ca41: + Merge pull request #5300 from SaschaRoland/xread-block-5299 +Sascha Roland in commit eea0d3c5: + #5299 Fix blocking XREAD for streams that ran dry + 1 file changed, 2 insertions(+), 2 deletions(-) + +antirez in commit 4cb9ee11: + Add maxmemory slave behavior change in the change log. + 1 file changed, 8 insertions(+) + +zhaozhao.zz in commit 5ad888ba: + Supplement to PR #4835, just take info/memory/command as random commands + 1 file changed, 3 insertions(+), 3 deletions(-) + +zhaozhao.zz in commit d928487f: + some commands' flags should be set correctly, issue #4834 + 1 file changed, 14 insertions(+), 14 deletions(-) + +Oran Agra in commit af675f0a: + Fix unstable tests on slow machines. + 3 files changed, 23 insertions(+), 17 deletions(-) + +antirez in commit f2cd16be: + Document slave-ignore-maxmemory in redis.conf. + 1 file changed, 20 insertions(+) + +antirez in commit 02d729b4: + Make slave-ignore-maxmemory configurable. + 1 file changed, 9 insertions(+) + +antirez in commit 447da44d: + Introduce repl_slave_ignore_maxmemory flag internally. + 3 files changed, 7 insertions(+) + +antirez in commit 868b2925: + Better variable meaning in processCommand(). + 1 file changed, 2 insertions(+), 2 deletions(-) + +antirez in commit 319f2ee6: + Re-apply rebased #2358. 
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+zhaozhao.zz in commit 22c166da:
+ block: format code
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+zhaozhao.zz in commit c03c5913:
+ block: rewrite BRPOPLPUSH as RPOPLPUSH to propagate
+ 3 files changed, 5 insertions(+), 1 deletion(-)
+
+zhaozhao.zz in commit fcd5ef16:
+ networking: make setProtocolError simple and clear
+ 1 file changed, 11 insertions(+), 13 deletions(-)
+
+zhaozhao.zz in commit 656e4b2f:
+ networking: just move qb_pos instead of sdsrange in processInlineBuffer
+ 1 file changed, 2 insertions(+), 3 deletions(-)
+
+zhaozhao.zz in commit 2c7972ce:
+ networking: just return C_OK if multibulk processing saw a <= 0 length.
+ 1 file changed, 2 insertions(+), 5 deletions(-)
+
+zhaozhao.zz in commit 1203a04f:
+ adjust qbuf to 26 in test case for client list
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+zhaozhao.zz in commit aff86fa1:
+ pipeline: do not sdsrange querybuf unless all commands processed
+ 2 files changed, 48 insertions(+), 40 deletions(-)
+
+Chris Lamb in commit 45a6c5be:
+ Use SOURCE_DATE_EPOCH over unreproducible uname + date calls.
+ 1 file changed, 3 insertions(+)
+
+Chris Lamb in commit 186df148:
+ Make some defaults explicit in the sentinel.conf for package maintainers
+ 1 file changed, 25 insertions(+)
+
+dejun.xdj in commit b59f04a0:
+ Streams: ID of xclaim command starts from the sixth argument.
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+shenlongxing in commit a3f2437b:
+ Fix stream command paras
+ 2 files changed, 7 insertions(+), 7 deletions(-)
+
+antirez in commit df911235:
+ Fix AOF comment to report the current behavior.
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+
+
+================================================================================
+Redis 5.0 RC4 Released Fri Aug 03 13:51:02 CEST 2018
+================================================================================
+
+Upgrade urgency
+
+ HIGH: Many non critical but important issues fixed.
+ CRITICAL for Stream users: Many important bugs fixed.
+
+Hi all, welcome to Redis 5.0 RC4.
+
+This release is a huge step forward in Redis 5 maturity and fixes a number
+of issues. It also provides interesting improvements. Here I'll summarize
+the biggest ones, but later you can find the full list of commits:
+
+Fixes:
+
+* A number of fixes related to Streams: stability and correctness.
+* Fix dbRandomKey() potential infinite loop.
+* Improve eviction LFU/LRU when keys are created by INCR commands family.
+* Active defragmentation is now working on Redis 5.
+* Fix corner case in Redis Cluster / Sentinel failover, by resetting the
+ disconnection time with master in a more appropriate place.
+* Use a private version of localtime() to avoid potential deadlocks.
+* Different redis-cli non critical fixes.
+* Fix rare replication stream corruption with disk-based replication.
+
+Improvements:
+
+* Sentinel: add an option to deny online script reconfiguration.
+* Improved RESTORE command.
+* Sentinel command renaming: allows using Sentinel with Redis instances
+ that have non-standard command names.
+* CLIENT ID and CLIENT UNBLOCK.
+* CLIENT LIST now supports a TYPE option.
+* redis-cli --cluster now supports authentication.
+* redis-trib is now deprecated (use redis-cli --cluster).
+* Better slave output buffers efficiency.
+* Faster INFO when there are many clients connected.
+* Dynamic HZ feature.
+* Improvements in what the MEMORY command is able to report.
+* Add year in log. 
(WARNING: may be incompatible with log scraping tools) +* Lazy freeing now works even when values are overwritten (for instance SET). +* Faster ZADD when elements scores are updated. +* Improvements to the test suite, including many new options. + +antirez in commit a4d1201e: + Test suite: add --loop option. + 1 file changed, 12 insertions(+), 5 deletions(-) + +antirez in commit 273d8191: + Test suite: new --stop option. + 1 file changed, 13 insertions(+), 4 deletions(-) + +antirez in commit fbbcc6a6: + Streams IDs parsing refactoring. + 1 file changed, 32 insertions(+), 17 deletions(-) + +antirez in commit 70c4bcb7: + Test: new sorted set skiplist order consistency. + 1 file changed, 26 insertions(+) + +antirez in commit 63addc5c: + Fix zslUpdateScore() edge case. + 1 file changed, 2 insertions(+), 2 deletions(-) + +antirez in commit 724740cc: + More commenting of zslUpdateScore(). + 1 file changed, 2 insertions(+) + +antirez in commit ddc87eef: + Explain what's the point of zslUpdateScore() in top comment. + 1 file changed, 5 insertions(+) + +antirez in commit 741f29ea: + Remove old commented zslUpdateScore() from source. + 1 file changed, 13 deletions(-) + +antirez in commit 20116836: + Optimize zslUpdateScore() as asked in #5179. + 1 file changed, 44 insertions(+) + +antirez in commit 8c297e8b: + zsetAdd() refactored adding zslUpdateScore(). + 1 file changed, 18 insertions(+), 7 deletions(-) + +dejun.xdj in commit bd2f3f6b: + Streams: rearrange the usage of '-' and '+' IDs in stream commands. + 1 file changed, 13 insertions(+), 13 deletions(-) + +dejun.xdj in commit c0c06b84: + Streams: add mmid_supp argument in streamParseIDOrReply(). + 1 file changed, 6 insertions(+), 2 deletions(-) + +antirez in commit ab237a8e: + Minor improvements to PR #5187. + 2 files changed, 13 insertions(+), 6 deletions(-) + +Oran Agra in commit 1ce3cf7a: + test suite conveniency improvements + 3 files changed, 79 insertions(+), 3 deletions(-) + +Oran Agra in commit 36622899: + add DEBUG LOG, to to assist test suite debugging + 1 file changed, 4 insertions(+) + +antirez in commit 83d4311a: + Cluster cron announce IP minor refactoring. + 1 file changed, 6 insertions(+), 3 deletions(-) + +shenlongxing in commit a633f8e1: + Fix cluster-announce-ip memory leak + 1 file changed, 3 insertions(+), 2 deletions(-) + +antirez in commit 24c45538: + Tranfer -> transfer typo fixed. + 1 file changed, 1 insertion(+), 1 deletion(-) + +zhaozhao.zz in commit c609f240: + refactor dbOverwrite to make lazyfree work + 4 files changed, 27 insertions(+), 12 deletions(-) + +antirez in commit 9e971739: + Refactoring: replace low-level checks with writeCommandsDeniedByDiskError(). + 2 files changed, 6 insertions(+), 13 deletions(-) + +antirez in commit 0e77cef0: + Fix writeCommandsDeniedByDiskError() inverted return value. + 1 file changed, 2 insertions(+), 2 deletions(-) + +antirez in commit acfe9d13: + Better top comment for writeCommandsDeniedByDiskError(). + 1 file changed, 8 insertions(+), 1 deletion(-) + +antirez in commit 4e933e00: + Introduce writeCommandsDeniedByDiskError(). + 2 files changed, 24 insertions(+) + +WuYunlong in commit 41607dfd: + Consider aof write error as well as rdb in lua script. + 1 file changed, 14 insertions(+), 4 deletions(-) + +Salvatore Sanfilippo in commit 1d073a64: + Merge pull request #5168 from rpv-tomsk/issue-5033 +Guy Korland in commit 2db31fd4: + Few typo fixes + 1 file changed, 13 insertions(+), 13 deletions(-) + +antirez in commit 64242757: + Add year in log. 
+ 1 file changed, 1 insertion(+), 1 deletion(-) + +antirez in commit 50be4a1f: + Document dynamic-hz in the example redis.conf. + 1 file changed, 16 insertions(+) + +antirez in commit 9a76472d: + Make dynamic hz actually configurable. + 1 file changed, 9 insertions(+) + +antirez in commit a330d06c: + Control dynamic HZ via server configuration. + 2 files changed, 13 insertions(+), 6 deletions(-) + +antirez in commit d42602ff: + Dynamic HZ: adapt cron frequency to number of clients. + 2 files changed, 17 insertions(+), 5 deletions(-) + +antirez in commit 7b5f0223: + Dynamic HZ: separate hz from the configured hz. + 3 files changed, 15 insertions(+), 9 deletions(-) + +antirez in commit 037b00de: + Remove useless conditional from emptyDb(). + 1 file changed, 1 deletion(-) + +antirez in commit 0e97ae79: + Make emptyDb() change introduced in #4852 simpler to read. + 1 file changed, 8 insertions(+), 3 deletions(-) + +zhaozhao.zz in commit f7740faf: + optimize flushdb, avoid useless loops + 1 file changed, 5 insertions(+), 2 deletions(-) + +zhaozhao.zz in commit 0c008376: + Streams: fix xdel memory leak + 1 file changed, 1 insertion(+) + +antirez in commit dc600a25: + Example the magic +1 in migrateCommand(). + 1 file changed, 4 insertions(+) + +antirez in commit d6827ab6: + Make changes of PR #5154 hopefully simpler. + 1 file changed, 10 insertions(+), 5 deletions(-) + +WuYunlong in commit 89ec1453: + Do not migrate already expired keys. + 1 file changed, 6 insertions(+), 2 deletions(-) + +Pavel Rochnyack in commit cd25ed17: + INFO CPU: higher precision of reported values + 1 file changed, 8 insertions(+), 8 deletions(-) + +antirez in commit 6bfb4745: + Streams: refactoring of next entry seek in the iterator. + 1 file changed, 11 insertions(+), 7 deletions(-) + +zhaozhao.zz in commit 4724548e: + Streams: skip master fileds only when we are going forward in streamIteratorGetID + 1 file changed, 8 insertions(+), 5 deletions(-) + +Oran Agra in commit 4b79fdf1: + fix slave buffer test suite false positives + 1 file changed, 1 insertion(+), 1 deletion(-) + +antirez in commit a1e081f7: + string2ll(): better commenting. + 1 file changed, 6 insertions(+) + +dsomeshwar in commit 8b4fe752: + removing redundant check + 1 file changed, 3 deletions(-) + +antirez in commit 9e5bf047: + Restore string2ll() to original version. + 1 file changed, 7 insertions(+), 2 deletions(-) + +Oran Agra in commit c2ecdcde: + fix recursion typo in zmalloc_usable + 1 file changed, 1 insertion(+), 1 deletion(-) + +antirez in commit 4f742bd6: + string2ll(): remove duplicated check for special case. + 1 file changed, 1 insertion(+), 6 deletions(-) + +antirez in commit a4efac00: + string2ll(): test for NULL pointer in all the cases. + 1 file changed, 1 insertion(+), 1 deletion(-) + +antirez in commit 2c07c107: + Change 42 to 1000 as warning level for cached scripts. + 1 file changed, 3 insertions(+), 3 deletions(-) + +Itamar Haber in commit 270903d6: + Adds Lua overheads to MEMORY STATS, smartens the MEMORY DOCTOR + 3 files changed, 30 insertions(+), 4 deletions(-) + +Itamar Haber in commit faf3dbfc: + Adds memory information about the script's cache to INFO + 3 files changed, 12 insertions(+) + +antirez in commit 49841a54: + Fix merge errors. + 2 files changed, 7 deletions(-) + +antirez in commit 77a7ec72: + Merge branch 'unstable' into 5.0 branch +antirez in commit 4ff47a0b: + Top comment clientsCron(). 
+ 1 file changed, 19 insertions(+), 4 deletions(-) + +antirez in commit aba68552: + Clarify that clientsCronTrackExpansiveClients() indexes may jump ahead. + 1 file changed, 9 insertions(+), 1 deletion(-) + +antirez in commit be88c0b1: + Rename INFO CLIENT max buffers field names for correctness. + 1 file changed, 2 insertions(+), 2 deletions(-) + +antirez in commit 0cf3794e: + Fix wrong array index variable in getExpansiveClientsInfo(). + 1 file changed, 1 insertion(+), 1 deletion(-) + +antirez in commit ea3a20c5: + Change INFO CLIENTS sections to report pre-computed max/min client buffers. + 1 file changed, 5 insertions(+), 5 deletions(-) + +antirez in commit 8f7e496b: + Rename var in clientsCronTrackExpansiveClients() for clarity. + 1 file changed, 3 insertions(+), 3 deletions(-) + +antirez in commit 8d617596: + Implement a function to retrieve the expansive clients mem usage. + 1 file changed, 12 insertions(+) + +antirez in commit 85a1b4f8: + clientsCronTrackExpansiveClients() actual implementation. + 1 file changed, 14 insertions(+), 1 deletion(-) + +antirez in commit d4c5fc57: + clientsCronTrackExpansiveClients() skeleton and ideas. + 1 file changed, 23 insertions(+) + +antirez in commit 1c95c075: + Make vars used only by INFO CLIENTS local to the block. + 1 file changed, 1 insertion(+), 1 deletion(-) + +Salvatore Sanfilippo in commit 16b8d364: + Merge pull request #4727 from kingpeterpaule/redis-fix-info-cli +antirez in commit 0aca977c: + Merge branch 'unstable' of github.com:/antirez/redis into unstable +antirez in commit 313b2240: + In addReplyErrorLength() only panic when replying to slave. + 1 file changed, 4 insertions(+), 3 deletions(-) + +antirez in commit 6183f059: + Refine comment in addReplyErrorLength() about replying to masters/slaves. + 1 file changed, 11 insertions(+) + +Salvatore Sanfilippo in commit 22e9321c: + Merge pull request #5138 from oranagra/improve_defrag_test +Oran Agra in commit f89c93c8: + make active defrag test more stable + 2 files changed, 6 insertions(+), 5 deletions(-) + +Salvatore Sanfilippo in commit 8213f64d: + Merge pull request #5122 from trevor211/allowWritesWhenAofDisabled +Salvatore Sanfilippo in commit 46fd9278: + Merge pull request #4237 from aspirewit/update-comment +antirez in commit 6201f7b4: + Streams: better error when $ is given with XREADGROUP. + 1 file changed, 5 insertions(+), 2 deletions(-) + +Salvatore Sanfilippo in commit 4bff45c7: + Merge pull request #5136 from 0xtonyxia/fix-xread-id-parse +antirez in commit afc7e08a: + Panic when we are sending an error to our master/slave. + 1 file changed, 5 insertions(+) + +Salvatore Sanfilippo in commit e03358c0: + Merge pull request #5135 from oranagra/rare_repl_corruption +dejun.xdj in commit 846cf12a: + Streams: remove meaningless if condition. + 1 file changed, 1 insertion(+), 1 deletion(-) + +dejun.xdj in commit 6501b6bb: + Streams: return an error message if using xreadgroup with '$' ID. + 1 file changed, 5 insertions(+) + +Oran Agra in commit d5559898: + fix rare replication stream corruption with disk-based replication + 3 files changed, 18 insertions(+), 9 deletions(-) + +antirez in commit cefe21d2: + dict.c: remove a few trailing spaces. 
+ 1 file changed, 2 insertions(+), 2 deletions(-) + +Salvatore Sanfilippo in commit 4fc20992: + Merge pull request #5128 from kingpeterpaule/remove-one-loop-in-freeMemoryIfNeeded +Salvatore Sanfilippo in commit 9fbd49bb: + Merge pull request #5113 from 0xtonyxia/using-compare-func-instead +Salvatore Sanfilippo in commit cab39676: + Merge pull request #5127 from oranagra/sds_req_type +antirez in commit f9c84d6d: + Hopefully improve commenting of #5126. + 2 files changed, 22 insertions(+), 10 deletions(-) + +Salvatore Sanfilippo in commit e22a1218: + Merge pull request #5126 from oranagra/slave_buf_memory_2 +Salvatore Sanfilippo in commit 28dd8dd1: + Merge pull request #5132 from soloestoy/propagate-xdel-correctly +Oran Agra in commit bf680b6f: + slave buffers were wasteful and incorrectly counted causing eviction + 10 files changed, 182 insertions(+), 50 deletions(-) + +zhaozhao.zz in commit 73306c6f: + Streams: correctly propagate xdel if needed + 1 file changed, 7 insertions(+), 3 deletions(-) + +antirez in commit 103c5a1a: + Add a few comments to streamIteratorRemoveEntry(). + 1 file changed, 4 insertions(+) + +Salvatore Sanfilippo in commit a317f55d: + Merge pull request #5131 from soloestoy/optimize-xdel +antirez in commit 185e0d9c: + Modify XINFO field from last-id to last-generated-id. + 1 file changed, 1 insertion(+), 1 deletion(-) + +Salvatore Sanfilippo in commit 4215e74b: + Merge pull request #5129 from soloestoy/xinfo-show-last-id +zhaozhao.zz in commit c9324f81: + Streams: free lp if all elements are deleted + 1 file changed, 9 insertions(+), 4 deletions(-) + +paule in commit b6ce7d5d: + Update dict.c + 1 file changed, 4 insertions(+), 2 deletions(-) + +zhaozhao.zz in commit b4ba5ac8: + Streams: show last id for streams and groups + 1 file changed, 6 insertions(+), 2 deletions(-) + +peterpaule in commit 816fc6cb: + remove one ineffective loop in dictGetSomeKeys. + 1 file changed, 1 insertion(+), 1 deletion(-) + +Oran Agra in commit b05a22e2: + bugfix in sdsReqType creating 64bit sds headers on 32bit systems + 1 file changed, 3 insertions(+), 1 deletion(-) + +dejun.xdj in commit 491682a6: + Streams: using streamCompareID() instead of direct compare in block.c. + 1 file changed, 1 insertion(+), 4 deletions(-) + +dejun.xdj in commit a2177cd2: + Streams: add streamCompareID() declaration in stream.h. + 1 file changed, 1 insertion(+) + +dejun.xdj in commit 0484dbcf: + Streams: using streamCompareID() instead of direct compare. + 1 file changed, 2 insertions(+), 6 deletions(-) + +WuYunlong in commit 2d4366c5: + Accept write commands if persisting is disabled, event if we do have problems persisting on disk previously. + 1 file changed, 2 insertions(+), 1 deletion(-) + +Salvatore Sanfilippo in commit ab33bcd3: + Merge pull request #5120 from andrewsensus/leap-year-comment-patch-1 +antirez in commit 2352a519: + Test: XDEL fuzz testing. Remove and check stage. + 1 file changed, 15 insertions(+) + +antirez in commit 3d7d20b7: + Test: fix lshuffle by providing the "K" combinator. + 1 file changed, 2 insertions(+) + +antirez in commit 967ad364: + Test: add lshuffle in the Tcl utility functions set. + 1 file changed, 14 insertions(+) + +antirez in commit d6efd5fc: + Test: XDEL fuzz testing, stream creation. 
+ 1 file changed, 20 insertions(+) + +andrewsensus in commit 8dc08ae2: + update leap year comment + 1 file changed, 1 insertion(+), 1 deletion(-) + +antirez in commit 69997153: + Merge branch 'unstable' of github.com:/antirez/redis into unstable +antirez in commit a93f8f09: + Test: XDEL basic test. + 1 file changed, 12 insertions(+) + +Salvatore Sanfilippo in commit a44a5934: + Merge pull request #5119 from trevor211/fixSlowlogConfig +WuYunlong in commit d6ba4fd5: + Fix config set slowlog-log-slower-than and condition in createLatencyReport + 2 files changed, 2 insertions(+), 2 deletions(-) + +WuYunlong in commit b3660be8: + Add test in slowlog.tcl + 1 file changed, 10 insertions(+) + +artix in commit d4182a0a: + Cluster Manager: more checks on --cluster-weight option. + 1 file changed, 12 insertions(+), 2 deletions(-) + +artix in commit d222eda9: + Redis-trib deprecated: it no longer works and it outputs a warning to the user. + 1 file changed, 103 insertions(+), 1804 deletions(-) + +artix in commit 513eb572: + Cluster Manager: auth support (-a argument). + 1 file changed, 41 insertions(+), 19 deletions(-) + +Salvatore Sanfilippo in commit f3980bb9: + Merge pull request #5115 from shenlongxing/patch-1 +Shen Longxing in commit c2a85fb3: + Delete unused role checking. + 1 file changed, 2 insertions(+), 6 deletions(-) + +Salvatore Sanfilippo in commit 4cb5bd4e: + Merge pull request #4820 from charpty/wip-serverc-simplify +antirez in commit 8d6b7861: + Add regression test for #5111. + 1 file changed, 15 insertions(+) + +antirez in commit b6260a02: + Streams: when re-delivering because of SETID, reset deliveries counter. + 1 file changed, 2 insertions(+), 2 deletions(-) + +antirez in commit a7c180e5: + Simplify duplicated NACK #5112 fix. + 1 file changed, 18 insertions(+), 21 deletions(-) + +Salvatore Sanfilippo in commit bf4def0f: + Merge pull request #5112 from 0xtonyxia/fix-xreadgroup-crash-after-setid +Salvatore Sanfilippo in commit 16770551: + Merge pull request #5114 from oranagra/defrag_32 +Oran Agra in commit 920158ec: + Active defrag fixes for 32bit builds (again) + 1 file changed, 2 insertions(+), 2 deletions(-) + +Salvatore Sanfilippo in commit f45e7901: + Merge pull request #4967 from JingchengLi/unstable +tengfeng in commit 9505dd20: + fix repeat argument issue and reduce unnessary loop times for redis-cli. + 1 file changed, 12 insertions(+), 7 deletions(-) + +antirez in commit 0420c327: + Merge branch 'unstable' of github.com:/antirez/redis into unstable +antirez in commit 28e95c7c: + Streams: fix typo "consumer". + 1 file changed, 1 insertion(+), 1 deletion(-) + +antirez in commit a8c1bb31: + Streams: fix new XREADGROUP sync logic. + 1 file changed, 13 insertions(+), 3 deletions(-) + +antirez in commit 1a02b5f6: + Streams: make blocking for > a truly special case. + 1 file changed, 29 insertions(+), 4 deletions(-) + +antirez in commit a71e8148: + Streams: send an error to consumers blocked on non-existing group. + 1 file changed, 5 insertions(+), 1 deletion(-) + +antirez in commit 09327f11: + Streams: fix unblocking logic into a consumer group. + 1 file changed, 24 insertions(+), 14 deletions(-) + +dejun.xdj in commit 3f8a3efe: + Streams: fix xreadgroup crash after xgroup SETID is sent. + 1 file changed, 20 insertions(+), 15 deletions(-) + +Salvatore Sanfilippo in commit 7239e9ca: + Merge pull request #5095 from 0xtonyxia/fix-indentation +dejun.xdj in commit 61f12973: + Bugfix: PEL is incorrect when consumer is blocked using xreadgroup with NOACK option. 
+ 4 files changed, 6 insertions(+), 1 deletion(-) + +antirez in commit b67f0276: + redis-cli: fix #4990 additional argument in help. + 1 file changed, 1 insertion(+) + +antirez in commit 18d65849: + redis-cli: fix #5096 double error message. + 1 file changed, 1 insertion(+), 1 deletion(-) + +antirez in commit 84620327: + redis-cli: cliConnect() flags CC_FORCE and CC_QUIET. + 1 file changed, 23 insertions(+), 13 deletions(-) + +Amit Dey in commit a3a5a25f: + fixing broken link in CONTRIBUTING + 1 file changed, 1 insertion(+), 1 deletion(-) + +dejun.xdj in commit 289d8d9c: + CLIENT UNBLOCK: fix client unblock help message. + 1 file changed, 1 insertion(+), 1 deletion(-) + +minkikim89 in commit 62a4a8c1: + fix whitespace in redis-cli.c + 1 file changed, 362 insertions(+), 362 deletions(-) + +WuYunlong in commit 0a5805d7: + fix compile warning in addReplySubcommandSyntaxError + 1 file changed, 1 insertion(+), 1 deletion(-) + +antirez in commit cb78c842: + Use nolocks_localtime() for safer logging. + 2 files changed, 8 insertions(+), 2 deletions(-) + +antirez in commit 81778d91: + Cache timezone and daylight active flag for safer logging. + 2 files changed, 14 insertions(+), 1 deletion(-) + +antirez in commit 18d8205b: + Localtime: clarify is_leap_year() working with comments. + 1 file changed, 4 insertions(+), 4 deletions(-) + +antirez in commit 29644144: + Localtime: fix comment about leap year. + 1 file changed, 1 insertion(+), 1 deletion(-) + +antirez in commit 0ea39aa4: + Localtime: fix daylight saving adjustment. Use * not +. + 1 file changed, 1 insertion(+), 1 deletion(-) + +antirez in commit 6614d305: + Localtime: fix daylight time documentation and computation. + 1 file changed, 14 insertions(+), 4 deletions(-) + +dejun.xdj in commit 46f5a2ca: + Fix indentation. + 2 files changed, 14 insertions(+), 14 deletions(-) + +antirez in commit 243c5a7a: + Localtime: add a test main() function to check the output. + 1 file changed, 15 insertions(+), 1 deletion(-) + +antirez in commit c25ee35a: + Localtime: day of month is 1 based. Convert from 0 based "days". + 1 file changed, 2 insertions(+), 2 deletions(-) + +antirez in commit b9f33830: + Localtime: fix timezone adjustment. + 1 file changed, 2 insertions(+), 1 deletion(-) + +antirez in commit 0c12cbed: + Localtime: compute year, month and day of the month. + 1 file changed, 26 insertions(+) + +antirez in commit 06ca400f: + Localtime: basics initial calculations. Year missing. + 1 file changed, 30 insertions(+), 2 deletions(-) + +antirez in commit 6a529067: + Localtime function skeleton and file added. + 1 file changed, 44 insertions(+) + +Jack Drogon in commit 93238575: + Fix typo + 40 files changed, 99 insertions(+), 99 deletions(-) + +antirez in commit 94b3ee61: + Clarify the pending_querybuf field of clients. + 1 file changed, 4 insertions(+), 3 deletions(-) + +antirez in commit 549b8b99: + Improve style of PR #5084. + 1 file changed, 8 insertions(+), 2 deletions(-) + +Salvatore Sanfilippo in commit 526b30a7: + Merge pull request #5084 from chendq8/pending-querybuf +antirez in commit 677d10b2: + Set repl_down_since to zero on state change. + 1 file changed, 2 insertions(+), 1 deletion(-) + +Salvatore Sanfilippo in commit 02e38516: + Merge pull request #5081 from trevor211/fixClusterFailover +chendianqiang in commit cbb2ac07: + Merge branch 'unstable' into pending-querybuf +antirez in commit 2edcafb3: + addReplySubSyntaxError() renamed to addReplySubcommandSyntaxError(). 
+ 12 files changed, 14 insertions(+), 14 deletions(-) + +Salvatore Sanfilippo in commit bc6a0045: + Merge pull request #4998 from itamarhaber/module_command_help +Salvatore Sanfilippo in commit ee09b5ed: + Merge pull request #5071 from akshaynagpal/patch-2 +Salvatore Sanfilippo in commit f03ad962: + Merge pull request #5068 from shenlongxing/fix-rename-command +Salvatore Sanfilippo in commit e4881cd0: + Merge pull request #5090 from trevor211/test_helper_tcl +WuYunlong in commit 2833cfbe: + fix tests/test_helper.tcl with --wait-server option. Issue #5063 added --wait-server option, but can not work. + 1 file changed, 1 deletion(-) + +chendianqiang in commit 7de1ada0: + limit the size of pending-querybuf in masterclient + 4 files changed, 48 insertions(+) + +WuYunlong in commit 2e167f7d: + fix server.repl_down_since resetting, so that slaves could failover automatically as expected. + 1 file changed, 1 insertion(+), 1 deletion(-) + +WuYunlong in commit aeb7bc3e: + cluster.tcl: Add master consecutively down test. + 1 file changed, 77 insertions(+) + +antirez in commit d751d98b: + Change CLIENT LIST TYPE help string. + 1 file changed, 2 insertions(+), 2 deletions(-) + +Salvatore Sanfilippo in commit a0b05a04: + Merge pull request #5075 from soloestoy/client-list-types +Salvatore Sanfilippo in commit aa2c390e: + Merge pull request #5074 from soloestoy/fix-compile-warning +Salvatore Sanfilippo in commit a4ef94d2: + Merge pull request #5076 from 0xtonyxia/add-no-auth-warning-option +dejun.xdj in commit 9f185626: + Check if password is used on command line interface. + 1 file changed, 1 insertion(+), 1 deletion(-) + +dejun.xdj in commit 1139070a: + Fix trailing white space. + 1 file changed, 1 insertion(+), 1 deletion(-) + +dejun.xdj in commit bbd0ca95: + Fix code format issue. + 1 file changed, 4 insertions(+), 4 deletions(-) + +dejun.xdj in commit 7becf54e: + Don't output password warning message when --no-auth-warning is used. + 1 file changed, 10 insertions(+), 1 deletion(-) + +dejun.xdj in commit bde05e9c: + Avoid -Woverlength-strings compile warning. + 1 file changed, 5 insertions(+), 3 deletions(-) + +antirez in commit 5baf50d8: + Rax library updated (node callback). + 2 files changed, 19 insertions(+), 5 deletions(-) + +dejun.xdj in commit 0b74fd67: + Add --no-auth-warning help message. + 1 file changed, 2 insertions(+) + +zhaozhao.zz in commit b9cbd04b: + clients: add type option for client list + 4 files changed, 20 insertions(+), 6 deletions(-) + +zhaozhao.zz in commit f5538642: + clients: show pubsub flag in client list + 1 file changed, 1 insertion(+) + +zhaozhao.zz in commit 1fcf2737: + fix some compile warnings + 2 files changed, 2 insertions(+), 2 deletions(-) + +Akshay Nagpal in commit 007e3cbd: + Added link to Google Group + 1 file changed, 3 insertions(+), 1 deletion(-) + +antirez in commit ab55f9da: + Make CLIENT HELP output nicer to the eyes. + 1 file changed, 11 insertions(+), 11 deletions(-) + +antirez in commit 75f1a7bd: + Merge branch 'unstable' of github.com:/antirez/redis into unstable +antirez in commit 4a70ff74: + Add unblock in CLIENT HELP. + 1 file changed, 1 insertion(+) + +shenlongxing in commit 3c27db1c: + fix empty string for sentinel rename-command + 1 file changed, 5 insertions(+) + +Salvatore Sanfilippo in commit f7b21bc7: + Merge pull request #5066 from oranagra/defrag_jemalloc5_fix +Salvatore Sanfilippo in commit 730a4cfa: + Merge pull request #5067 from mpaltun/mpaltun-doc-fix +antirez in commit 2214043b: + CLIENT UNBLOCK: support unblocking by error. 
+ 1 file changed, 22 insertions(+), 3 deletions(-) + +Mustafa Paltun in commit 010dc172: + Update t_stream.c + 1 file changed, 2 insertions(+), 2 deletions(-) + +Mustafa Paltun in commit 6d0acb33: + Update help.h + 1 file changed, 1 insertion(+), 1 deletion(-) + +Oran Agra in commit de495ee7: + minor fix in creating a stream NACK for rdb and defrag tests + 2 files changed, 2 insertions(+), 2 deletions(-) + +antirez in commit 71295ee3: + CLIENT UNBLOCK implemented. + 1 file changed, 22 insertions(+) + +antirez in commit fb39bfd7: + Take clients in a ID -> Client handle dictionary. + 3 files changed, 6 insertions(+) + +antirez in commit ed65d734: + CLIENT ID implemented. + 1 file changed, 4 insertions(+) + +Salvatore Sanfilippo in commit 345b4809: + Merge pull request #5063 from oranagra/test_suite_improvements +Salvatore Sanfilippo in commit 35c5f3fa: + Merge pull request #5065 from oranagra/defrag_jemalloc5 +Oran Agra in commit 5616d4c6: + add active defrag support for streams + 6 files changed, 230 insertions(+), 25 deletions(-) + +Oran Agra in commit e8099cab: + add defrag hint support into jemalloc 5 + 3 files changed, 43 insertions(+) + +Oran Agra in commit 751eea24: + test suite infra improvements and fix + 2 files changed, 19 insertions(+) + +Salvatore Sanfilippo in commit bb666d44: + Merge pull request #5027 from tigertv/unstable +antirez in commit b9058c73: + Merge branch 'unstable' of github.com:/antirez/redis into unstable +antirez in commit 43831779: + Sentinel: test command renaming feature. + 1 file changed, 10 insertions(+) + +Salvatore Sanfilippo in commit eb052ba9: + Merge pull request #5059 from guybe7/fix_restore_warning +antirez in commit 27178a3f: + Fix type of argslen in sendSynchronousCommand(). + 1 file changed, 1 insertion(+), 1 deletion(-) + +antirez in commit 1f1e724f: + Remove black space. + 1 file changed, 1 insertion(+), 1 deletion(-) + +Salvatore Sanfilippo in commit aa5eaad4: + Merge pull request #5037 from madolson/repl-auth-fix +antirez in commit 3cf8dd2c: + Sentinel: fix SENTINEL SET error reporting. + 1 file changed, 18 insertions(+), 9 deletions(-) + +Madelyn Olson in commit 45731edc: + Addressed comments + 1 file changed, 1 insertion(+), 1 deletion(-) + +Madelyn Olson in commit e8d68b6b: + Fixed replication authentication with whitespace in password + 1 file changed, 12 insertions(+), 5 deletions(-) + +antirez in commit fc0c9c80: + Sentinel: drop the renamed-command entry in a more natural way. + 1 file changed, 4 insertions(+), 7 deletions(-) + +antirez in commit 8ba670f5: + Sentinel command renaming: document it into sentinel.conf. + 1 file changed, 19 insertions(+) + +antirez in commit a8a76bda: + Merge branch 'unstable' of github.com:/antirez/redis into unstable +antirez in commit 2358de68: + Sentinel command renaming: use case sensitive hashing for the dict. + 1 file changed, 2 insertions(+), 1 deletion(-) + +antirez in commit a9c50088: + Sentinel command renaming: fix CONFIG SET event logging. + 1 file changed, 17 insertions(+), 1 deletion(-) + +antirez in commit b72cecd7: + Sentinel command renaming: fix CONFIG SET after refactoring. + 1 file changed, 5 insertions(+), 5 deletions(-) + +antirez in commit 91a384a5: + Sentinel command renaming: implement SENTINEL SET. + 1 file changed, 19 insertions(+) + +antirez in commit 903582dd: + Sentinel: make SENTINEL SET able to handle different arities. + 1 file changed, 19 insertions(+), 12 deletions(-) + +antirez in commit c303e768: + Sentinel command renaming: config rewriting. 
+ 1 file changed, 12 insertions(+) + +antirez in commit 60df7dbe: + Sentinel command renaming: rename-command option parsing. + 1 file changed, 11 insertions(+) + +antirez in commit 72e8a33b: + Sentinel command renaming: base machanism implemented. + 1 file changed, 64 insertions(+), 15 deletions(-) + +Guy Benoish in commit dfcc20f4: + Fix compiler warning in restoreCommand + 1 file changed, 1 insertion(+), 1 deletion(-) + +Salvatore Sanfilippo in commit cf7fcdbe: + Merge pull request #4634 from soloestoy/special-auth +Salvatore Sanfilippo in commit 70b7fa2c: + Merge pull request #5049 from youjiali1995/fix-load-rdb +Salvatore Sanfilippo in commit 54d66d39: + Merge pull request #5053 from michael-grunder/zpopminmax-keypos +Salvatore Sanfilippo in commit 199e704a: + Merge pull request #5050 from shenlongxing/fix-typo +michael-grunder in commit db6b99f9: + Update ZPOPMIN/ZPOPMAX command declaration + 1 file changed, 2 insertions(+), 2 deletions(-) + +Salvatore Sanfilippo in commit a16aa03a: + Merge pull request #5051 from oranagra/streams_mem_estimate +Oran Agra in commit 20e10dc7: + fix streams memory estimation, missing raxSeek + 1 file changed, 2 insertions(+), 1 deletion(-) + +shenlongxing in commit ec55df11: + fix typo + 4 files changed, 4 insertions(+), 4 deletions(-) + +youjiali1995 in commit df6644fe: + Fix rdbLoadIntegerObject() to create shared objects when needed. + 1 file changed, 1 insertion(+), 1 deletion(-) + +Salvatore Sanfilippo in commit 1527bcad: + Merge pull request #5036 from bepahol/unstable +Salvatore Sanfilippo in commit c1e82405: + Merge pull request #5039 from oranagra/rdb_dbsize_hint +Salvatore Sanfilippo in commit 79f55eed: + Merge pull request #5040 from oranagra/memrev64ifbe_fix +Salvatore Sanfilippo in commit c6f4118c: + Merge pull request #5045 from guybe7/restore_fix +Guy Benoish in commit b5197f1f: + Enhance RESTORE with RDBv9 new features + 5 files changed, 100 insertions(+), 22 deletions(-) + +Salvatore Sanfilippo in commit c6fdebf5: + Merge pull request #5042 from oranagra/malloc_usable_size_libc +Oran Agra in commit 482785ac: + add malloc_usable_size for libc malloc + 2 files changed, 8 insertions(+), 3 deletions(-) + +Salvatore Sanfilippo in commit 4da29630: + Merge pull request #5023 from FX-HAO/unstable +antirez in commit e7219025: + Test RDB stream encoding saving/loading. + 1 file changed, 17 insertions(+) + +Salvatore Sanfilippo in commit 5f5e1199: + Merge pull request #5041 from oranagra/redis-rdb-check_rdbLoadMillisecondTime +antirez in commit 4848fbec: + Modules: convert hash to hash table for big objects. + 1 file changed, 3 insertions(+) + +Oran Agra in commit f31b0405: + fix redis-rdb-check to provide proper arguments to rdbLoadMillisecondTime + 2 files changed, 3 insertions(+), 2 deletions(-) + +antirez in commit 333c98c4: + AOF: remove no longer used variable "now". + 1 file changed, 1 deletion(-) + +antirez in commit e94b2053: + Modify clusterRedirectClient() to handle ZPOP and XREAD. + 1 file changed, 5 insertions(+), 1 deletion(-) + +Oran Agra in commit 26229aa6: + use safe macro (non empty) in memrev64ifbe to eliminate empty if warning + 1 file changed, 3 insertions(+), 3 deletions(-) + +Oran Agra in commit 5cd3c952: + 64 bit RDB_OPCODE_RESIZEDB in rdb saving + 1 file changed, 3 insertions(+), 7 deletions(-) + +antirez in commit ba92b517: + Remove AOF optimization to skip expired keys. 
+ 1 file changed, 3 deletions(-)
+
+Benjamin Holst in commit 36524060:
+ Update README.md
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+antirez in commit 44571088:
+ Merge branch 'unstable' of github.com:/antirez/redis into unstable
+antirez in commit 6967d0bd:
+ Revert fix #4976 just leaving the flush() part.
+ 1 file changed, 1 insertion(+), 5 deletions(-)
+
+antirez in commit 0ed0dc3c:
+ Fix incrDecrCommand() to create shared objects when needed.
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+antirez in commit bd92389c:
+ Refactor createObjectFromLongLong() to be suitable for value objects.
+ 2 files changed, 33 insertions(+), 2 deletions(-)
+
+Salvatore Sanfilippo in commit 3518bb66:
+ Merge pull request #5020 from shenlongxing/fix-config
+antirez in commit 20766608:
+ Streams: fix xreadGetKeys() for correctness.
+ 1 file changed, 19 insertions(+), 5 deletions(-)
+
+Salvatore Sanfilippo in commit e670ccff:
+ Merge pull request #4857 from youjiali1995/fix-command-getkeys
+antirez in commit a0b27dae:
+ Streams: fix xreadGetKeys() buffer overflow.
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+antirez in commit 62f9ac6f:
+ Streams: Change XADD MAXLEN handling of values <= 0.
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+Max Vetrov in commit d4c4f20a:
+ Update sort.c
+ 1 file changed, 1 insertion(+), 3 deletions(-)
+
+antirez in commit 79a1c19a:
+ XADD MAXLEN should return an error for values < 0.
+ 1 file changed, 5 insertions(+)
+
+Salvatore Sanfilippo in commit 2e0ab4a8:
+ Merge pull request #4976 from trevor211/fixDebugLoadaof
+Salvatore Sanfilippo in commit 94658303:
+ Merge pull request #4758 from soloestoy/rdb-save-incremental-fsync
+antirez in commit 6a66b93b:
+ Sentinel: add an option to deny online script reconfiguration.
+ 2 files changed, 41 insertions(+)
+
+antirez in commit d353023a:
+ Merge branch 'unstable' of github.com:/antirez/redis into unstable
+antirez in commit d6e8fe77:
+ Fix infinite loop in dbRandomKey().
+ 1 file changed, 13 insertions(+)
+
+Salvatore Sanfilippo in commit 40d5df65:
+ Merge pull request #5008 from zwkno1/unstable
+Salvatore Sanfilippo in commit 8bc3ffcb:
+ Merge pull request #5021 from soloestoy/fix-exists
+Salvatore Sanfilippo in commit 6c7847a1:
+ Merge pull request #5018 from soloestoy/optimize-reply
+antirez in commit 1e92fde3:
+ Fix SCAN bug regression test, avoiding empty SREM call.
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+Fuxin Hao in commit a4f658b2:
+ Fix update_zmalloc_stat_alloc in zrealloc
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+================================================================================
+Redis 5.0 RC3 Released Wed Jun 14 9:51:44 CEST 2018
+================================================================================
+
+Upgrade urgency LOW:
+
+This release fixes compilation of Redis RC2. Due to an error, the commit from
+unstable updating the Rax library was not merged into the 5.0 branch.
+
+================================================================================
+Redis 5.0 RC2 Released Wed Jun 13 12:49:13 CEST 2018
+================================================================================
+
+Upgrade urgency CRITICAL: This release fixes important security issues.
+                    HIGH: This release fixes a SCAN commands family bug.
+                MODERATE: This release fixes a PSYNC2 edge case with expires.
+                MODERATE: Sentinel related fixes.
+                     LOW: All the other issues
+
+NOTE: This release breaks API compatibility with certain APIs that were
+introduced in Redis 5.0 RC1.
Notably ZPOP* now returns score/element in reverse
+order. The XINFO <key> special form was removed, now XINFO STREAM <key> must be
+used to obtain general information about the stream.
+
+Redis 5.0 RC2 fixes a number of important issues:
+
+* Important security issues related to the Lua scripting engine.
+  Please check https://github.com/antirez/redis/issues/5017
+  for more information.
+
+* A bug with SCAN, SSCAN, HSCAN and ZSCAN that could prevent them from
+  returning all the elements. We also added a regression test that can trigger
+  the issue often when present, and may in theory be able to find unrelated
+  regressions.
+
+* A PSYNC2 bug is fixed: Redis should not expire keys when saving RDB files
+  because otherwise it is no longer possible to use such an RDB file as a base
+  for partial resynchronization. It would no longer represent the right state.
+
+* Compatibility of AOF with the RDB preamble when the RDB checksum is disabled.
+
+* A Sentinel bug that in some cases prevented Sentinel from immediately
+  detecting that the master was down: the bug added a delay to the detection.
+
+* XREADGROUP would crash when the master had attached slaves.
+
+* Replication and event generation of several stream commands were fixed.
+
+* XREVRANGE fixed: in some cases it could fail to return elements, crash the
+  server, or in general misbehave.
+
+* ZPOP can now unblock multiple clients in a sane way.
+
+* Other minor issues.
+
+Moreover this release adds new features:
+
+* XGROUP DESTROY and XGROUP SETID.
+
+* RDB loading speedup.
+
+* Configurable stream macro node limits (number of elements / bytes).
+
+* Several smaller improvements.
+
+The following is the list of commits composing the release; please check
+the commit messages and authors for credits.
+
+antirez in commit 9fdcc159:
+ Security: fix redis-cli buffer overflow.
+ 1 file changed, 16 insertions(+), 11 deletions(-)
+
+antirez in commit cf760071:
+ Security: fix Lua struct package offset handling.
+ 1 file changed, 6 insertions(+), 2 deletions(-)
+
+antirez in commit a57595ca:
+ Security: more cmsgpack fixes by @soloestoy.
+ 1 file changed, 7 insertions(+)
+
+antirez in commit 8783fb94:
+ Security: update Lua struct package for security.
+ 1 file changed, 23 insertions(+), 23 deletions(-)
+
+antirez in commit 8cb9344b:
+ Security: fix Lua cmsgpack library stack overflow.
+ 1 file changed, 3 insertions(+)
+
+赵磊 in commit 59080f60:
+ Fix dictScan(): It can't scan all buckets when dict is shrinking.
+ 1 file changed, 14 insertions(+), 11 deletions(-)
+
+dejun.xdj in commit ac2a824a:
+ Fix redis-cli memory leak when sending set preference command.
+ 1 file changed, 2 insertions(+)
+
+dejun.xdj in commit c7197ff5:
+ Check if the repeat value is positive in while loop of cliSendCommand().
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+dejun.xdj in commit 3f77777f:
+ Change the type of repeat argument to long for function cliSendCommand.
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+dejun.xdj in commit 7a565d72:
+ Fix negtive repeat command value issue.
+ 1 file changed, 11 insertions(+), 3 deletions(-)
+
+dejun.xdj in commit 64bf60fb:
+ Detect and stop saving history for auth command with repeat option.
+ 1 file changed, 17 insertions(+), 10 deletions(-)
+
+dejun.xdj in commit 5bed12aa:
+ Change the warning message a little bit to avoid trademark issuses.
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+dejun.xdj in commit d71c4961:
+ Stop saving auth command in redis-cli history.
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+dejun.xdj in commit fca99e41:
+ Add warning message when using password on command line
+ 1 file changed, 1 insertion(+)
+
+antirez in commit 01407a3a:
+ Don't expire keys while loading RDB from AOF preamble.
+ 3 files changed, 5 insertions(+), 5 deletions(-)
+
+WuYunlong in commit fb5408cf:
+ Fix rdb save by allowing dumping of expire keys, so that when we add a new slave, and do a failover, eighter by manual or not, other local slaves will delete the expired keys properly.
+ 2 files changed, 3 insertions(+), 7 deletions(-)
+
+antirez in commit 0b8b6df4:
+ Backport hiredis issue 525 fix to compile on FreeBSD.
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+antirez in commit e98627c5:
+ Add INIT INFO to the provided init script.
+ 1 file changed, 8 insertions(+)
+
+antirez in commit 17f5de89:
+ Fix ae.c when a timer finalizerProc adds an event.
+ 2 files changed, 10 insertions(+), 6 deletions(-)
+
+antirez in commit 266e6423:
+ Sentinel: fix delay in detecting ODOWN.
+ 1 file changed, 9 insertions(+), 5 deletions(-)
+
+zhaozhao.zz in commit eafaf172:
+ AOF & RDB: be compatible with rdbchecksum no
+ 1 file changed, 9 insertions(+), 7 deletions(-)
+
+huijing.whj in commit 4630da37:
+ fix int overflow problem in freeMemoryIfNeeded
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+================================================================================
+Redis 5.0 RC1 Released Tue May 29 14:14:11 CEST 2018
+================================================================================
+
+Upgrade urgency LOW: This is the first RC of Redis 5.
+
+Introduction to the Redis 5 release
+===================================
+
+Redis 5 is a release focused on a few important features. While Redis 4
+was very focused on operations, Redis 5 changes are mostly user-facing,
+with the implementation of new data types and operations on top of existing
+types. The following are the major features of this release:
+
+1. The new Stream data type. https://redis.io/topics/streams-intro
+2. New Redis modules APIs: Timers, Cluster and Dictionary APIs.
+3. RDB now stores LFU and LRU information.
+4. The cluster manager was ported from Ruby (redis-trib.rb) to C code
+   inside redis-cli. Check `redis-cli --cluster help` for more info.
+5. New sorted set commands: ZPOPMIN/MAX and blocking variants.
+6. Active defragmentation version 2.
+7. Improvements in HyperLogLog implementations.
+8. Better memory reporting capabilities.
+9. Many commands with sub-commands now have a HELP subcommand.
+10. Better performance when clients connect and disconnect often.
+11. Many bug fixes and other random improvements.
+12. Jemalloc was upgraded to version 5.1.
+13. CLIENT UNBLOCK and CLIENT ID.
+14. The LOLWUT command was added. http://antirez.com/news/123
+15. We no longer use the "slave" word except for API backward compatibility.
+16. Different optimizations in the networking layer.
+17. Lua improvements:
+    - Better propagation of Lua scripts to slaves / AOF.
+    - Lua scripts can now time out and get into a -BUSY state in the slave as well.
+18. Dynamic HZ to balance idle CPU usage with responsiveness.
+19. The Redis core was refactored and improved in many ways.
+
+Thanks to all the users and developers who made this release possible.
+We'll follow up with more RC releases, until the code looks production ready
+and we don't get reports of serious issues for a while.
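+
+As a quick taste of some of the new user-facing commands listed above, here is
+a minimal illustrative redis-cli session (the key names and the returned
+stream ID are made-up examples for this note, not output captured from a real
+server):
+
+    127.0.0.1:6379> XADD mystream * temperature 19.8
+    "1526919030474-0"
+    127.0.0.1:6379> XLEN mystream
+    (integer) 1
+    127.0.0.1:6379> ZADD myzset 1 "one" 2 "two"
+    (integer) 2
+    127.0.0.1:6379> ZPOPMIN myzset
+    1) "one"
+    2) "1"
+    127.0.0.1:6379> CLIENT ID
+    (integer) 42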
+
+A special thank you for the amount of work put into this release
+(in decreasing number of commits) by:
+
+Fabio Nicotra
+Soloestoy
+Itamar Haber
+Oran Agra
+Dvir Volk
+dejun.xdj
+Guy Benoish
+Charsyam
+Otmar Ertl
+Jan-Erik Rediger
+Spinlock
+
+Migrating from 4.0 to 5.0
+=========================
+
+Redis 4.0 is mostly a strict subset of 5.0, so you should not have any problem
+upgrading your application from 4.0 to 5.0. However this is a list of small
+non-backward compatible changes introduced in the 5.0 release:
+
+* redis-cli now implements the cluster management tool. We still ship the
+  old redis-trib, but new fixes will be implemented only in redis-cli.
+  See `redis-cli --cluster help` for more info.
+
+* The RDB format changed. Redis 5.0 is still able to read 4.0 (and all the
+  past versions) files, but not the other way around.
+
+* Certain log formats and sentences are different in Redis 5.0.
+
+* Now by default maxmemory is ignored by slaves, and used only once a slave
+  is promoted to master. It means that in setups where you want slaves to
+  enforce maxmemory in an independent way from the master (that will anyway
+  stream the key eviction DEL commands), you should activate this feature
+  manually and make sure you understand that it breaks consistency if writes
+  are not always idempotent. TLDR: the new behavior is much better for 99.999%
+  of use cases; revert it only if you really know what you are doing.
+
+* Scripts are only replicated by their *effects* and not by sending EVAL/EVALSHA
+  to slaves or the AOF log itself. This is much better in the general case
+  and in the future we want to totally remove the other possibility of
+  propagating scripts the old way (as EVAL). However you can still turn this
+  back to the default via the non-documented (if not here) Redis configuration
+  directive "lua-replicate-commands yes" or
+  "DEBUG lua-always-replicate-commands 0". However note that Redis 6 may
+  completely remove this feature.
+
+* Because of the above change related to scripts replication, certain Redis
+  commands that in Redis 4 had their result ordered lexicographically before
+  being passed to Lua via the return value of redis.call(), now have a behavior
+  more similar to calling the commands directly from a normal client. For
+  instance the ordering of elements returned by SMEMBERS or SDIFF is now
+  undetermined in Lua, exactly as it is by default when calling the commands
+  from a non-scripting context.
+
+--------------------------------------------------------------------------------
+
+Credits: For each release, a list of changes with the relative author is
+provided. Where not specified the implementation and design are done by
+Salvatore Sanfilippo. Thanks to Redis Labs for making all this possible.
+Also many thanks to all the other contributors and the amazing community
+we have.
+
+Commit messages may contain additional credits.
+
+Enjoy,
+Salvatore
diff --git a/Redis-x64-5.0.14.1/EventLog.dll b/Redis-x64-5.0.14.1/EventLog.dll
new file mode 100644
index 0000000..3cf1453
Binary files /dev/null and b/Redis-x64-5.0.14.1/EventLog.dll differ
diff --git a/Redis-x64-5.0.14.1/README.txt b/Redis-x64-5.0.14.1/README.txt
new file mode 100644
index 0000000..b06e115
--- /dev/null
+++ b/Redis-x64-5.0.14.1/README.txt
@@ -0,0 +1,19 @@
+Redis for Windows - https://github.com/tporadowski/redis
+========================================================
+
+This version of Redis (https://redis.io/) is an unofficial port to Windows OS
+based on work contributed by Microsoft Open Technologies Inc. It is maintained
+by Tomasz Poradowski (tomasz@poradowski.com, http://www.poradowski.com/en/).
+
+Contents of this package:
+- *.exe - various Redis for Windows executables compiled for the x64 platform,
+- *.pdb - accompanying PDB files useful for debugging purposes,
+- license.txt - license information (BSD-like),
+- RELEASENOTES.txt - Windows-specific release notes,
+- 00-RELEASENOTES - changelog of the original Redis project; those changes are
+  ported back to this Windows version.
+
+For more information - please visit https://github.com/tporadowski/redis
+
+If you find this version of Redis useful and would like to support ongoing
+development - please consider sponsoring my work at https://github.com/sponsors/tporadowski
diff --git a/Redis-x64-5.0.14.1/RELEASENOTES.txt b/Redis-x64-5.0.14.1/RELEASENOTES.txt
new file mode 100644
index 0000000..bdcebc6
--- /dev/null
+++ b/Redis-x64-5.0.14.1/RELEASENOTES.txt
@@ -0,0 +1,99 @@
+Redis for Windows - https://github.com/tporadowski/redis
+========================================================
+
+This file provides information about Windows-specific changes to Redis.
+For release notes related to the original Redis project - please see 00-RELEASENOTES.
+
+--------------------------------------------------------
+
+2021-10-18: Redis 5.0.14 for Windows
+https://github.com/tporadowski/redis/releases/tag/v5.0.14
+
+Bugfix/maintenance release of Redis for Windows, updated to be in sync with
+redis/5.0.14 (https://github.com/redis/redis/releases/tag/5.0.14).
+Additionally "SCRIPT DEBUG SYNC" is now available.
+
+--------------------------------------------------------
+
+2020-11-08: Redis 5.0.10 for Windows
+https://github.com/tporadowski/redis/releases/tag/v5.0.10
+
+Bugfix/maintenance release of Redis for Windows, updated to be in sync with
+redis/5.0.10 (https://github.com/redis/redis/releases/tag/5.0.10)
+
+NOTE: the active memory defragmentation feature ("activedefrag" option) is turned OFF.
+
+--------------------------------------------------------
+
+2020-05-02: Redis 5.0.9 for Windows
+https://github.com/tporadowski/redis/releases/tag/v5.0.9
+
+First release of Redis 5.x for Windows, updated to be in sync with antirez/5.0.9.
+
+--------------------------------------------------------
+
+2020-01-26: Redis 4.0.14.2 for Windows
+https://github.com/tporadowski/redis/releases/tag/v4.0.14.2
+
+This is a hotfix release of the 4.0.14 branch that fixes #50 related to running in
+Sentinel mode.
+
+--------------------------------------------------------
+
+2020-01-15: Redis 4.0.14.1 for Windows
+https://github.com/tporadowski/redis/releases/tag/v4.0.14.1
+
+This is a hotfix release of the 4.0.14 branch that fixes 2 Windows-specific issues:
+
+ * #46 - added support for older Windows versions (prior to Windows 8 and Windows Server 2012)
+ * #47 - fixed a problem with parsing command-line arguments.
+
+--------------------------------------------------------
+
+2019-08-29: Redis 4.0.14 for Windows
+https://github.com/tporadowski/redis/releases/tag/v4.0.14
+
+Redis 4.0.14 for Windows is a merge of Windows-specific changes from the latest (unsupported) 3.2.100 release from MSOpenTech and the original Redis 4.0.14.
+
+--------------------------------------------------------
+
+2018-10-01: Redis for Windows 4.0.2.3 (alpha)
+
+This 4.0.2.3 release is still an alpha version, but contains enhancements and fixes for:
+
+ * #14: decrease logging verbosity of some cluster-related messages
+ * #23: ZRANK/ZREVRANK bugfix (win-port only)
+ * failed unit tests (bdcf80e).
+
+--------------------------------------------------------
+
+2018-03-26: Redis for Windows 4.0.2.2 (alpha)
+https://github.com/tporadowski/redis/releases/tag/v4.0.2.2-alpha
+
+This 4.0.2.2 release is still an alpha version, but contains a fix to issue #12
+(crash when rewriting the AOF file - this issue was specific to the Windows port only).
+
+--------------------------------------------------------
+
+2018-03-17: Redis for Windows 4.0.2.1 (alpha)
+https://github.com/tporadowski/redis/releases/tag/v4.0.2.1-alpha
+
+This 4.0.2.1 release is still an alpha version, but contains a fix to issue #11,
+which was related to sending back larger amounts of data to Redis clients
+(this issue was specific to the Windows port only).
+
+--------------------------------------------------------
+
+2017-11-22: Redis 4.0.2 for Windows (alpha)
+https://github.com/tporadowski/redis/releases/tag/v4.0.2-alpha
+
+Alpha version of Redis 4.0.2 for Windows.
+
+Redis 4.0.2 for Windows is a merge of Windows-specific changes from the latest (unsupported) 3.2.100 release
+from MSOpenTech and Redis 4.0.2; this alpha release consists of:
+
+ * all Redis 4.0.2 features except modules,
+ * all executables of Redis (redis-server, redis-cli, redis-benchmark, redis-check-aof, redis-check-rdb).
+
+The main difference from official Redis 4.0.2 (apart from the current lack of module support) is an old version
+of the jemalloc-win dependency, which is planned to be updated to the same version in a beta release.
diff --git a/Redis-x64-5.0.14.1/dump(ShiHao的冲突副本1_2025-10-26 21-28-13).rdb b/Redis-x64-5.0.14.1/dump(ShiHao的冲突副本1_2025-10-26 21-28-13).rdb new file mode 100644 index 0000000..4b57122 Binary files /dev/null and b/Redis-x64-5.0.14.1/dump(ShiHao的冲突副本1_2025-10-26 21-28-13).rdb differ diff --git a/Redis-x64-5.0.14.1/dump.rdb b/Redis-x64-5.0.14.1/dump.rdb new file mode 100644 index 0000000..0266497 Binary files /dev/null and b/Redis-x64-5.0.14.1/dump.rdb differ diff --git a/Redis-x64-5.0.14.1/redis-benchmark.exe b/Redis-x64-5.0.14.1/redis-benchmark.exe new file mode 100644 index 0000000..b7e27b9 Binary files /dev/null and b/Redis-x64-5.0.14.1/redis-benchmark.exe differ diff --git a/Redis-x64-5.0.14.1/redis-benchmark.pdb b/Redis-x64-5.0.14.1/redis-benchmark.pdb new file mode 100644 index 0000000..0682a66 Binary files /dev/null and b/Redis-x64-5.0.14.1/redis-benchmark.pdb differ diff --git a/Redis-x64-5.0.14.1/redis-check-aof.exe b/Redis-x64-5.0.14.1/redis-check-aof.exe new file mode 100644 index 0000000..5b1b425 Binary files /dev/null and b/Redis-x64-5.0.14.1/redis-check-aof.exe differ diff --git a/Redis-x64-5.0.14.1/redis-check-aof.pdb b/Redis-x64-5.0.14.1/redis-check-aof.pdb new file mode 100644 index 0000000..0d5726e Binary files /dev/null and b/Redis-x64-5.0.14.1/redis-check-aof.pdb differ diff --git a/Redis-x64-5.0.14.1/redis-check-rdb.exe b/Redis-x64-5.0.14.1/redis-check-rdb.exe new file mode 100644 index 0000000..5b1b425 Binary files /dev/null and b/Redis-x64-5.0.14.1/redis-check-rdb.exe differ diff --git a/Redis-x64-5.0.14.1/redis-check-rdb.pdb b/Redis-x64-5.0.14.1/redis-check-rdb.pdb new file mode 100644 index 0000000..0d5726e Binary files /dev/null and b/Redis-x64-5.0.14.1/redis-check-rdb.pdb differ diff --git a/Redis-x64-5.0.14.1/redis-cli.exe b/Redis-x64-5.0.14.1/redis-cli.exe new file mode 100644 index 0000000..40a73ba Binary files /dev/null and b/Redis-x64-5.0.14.1/redis-cli.exe differ diff --git a/Redis-x64-5.0.14.1/redis-cli.pdb b/Redis-x64-5.0.14.1/redis-cli.pdb new file mode 100644 index 0000000..121bae1 Binary files /dev/null and b/Redis-x64-5.0.14.1/redis-cli.pdb differ diff --git a/Redis-x64-5.0.14.1/redis-server.exe b/Redis-x64-5.0.14.1/redis-server.exe new file mode 100644 index 0000000..5b1b425 Binary files /dev/null and b/Redis-x64-5.0.14.1/redis-server.exe differ diff --git a/Redis-x64-5.0.14.1/redis-server.pdb b/Redis-x64-5.0.14.1/redis-server.pdb new file mode 100644 index 0000000..0d5726e Binary files /dev/null and b/Redis-x64-5.0.14.1/redis-server.pdb differ diff --git a/Redis-x64-5.0.14.1/redis.windows-service.conf b/Redis-x64-5.0.14.1/redis.windows-service.conf new file mode 100644 index 0000000..0b6b1da --- /dev/null +++ b/Redis-x64-5.0.14.1/redis.windows-service.conf @@ -0,0 +1,1336 @@ +# Redis configuration file example + +# Note on units: when memory size is needed, it is possible to specify +# it in the usual form of 1k 5GB 4M and so forth: +# +# 1k => 1000 bytes +# 1kb => 1024 bytes +# 1m => 1000000 bytes +# 1mb => 1024*1024 bytes +# 1g => 1000000000 bytes +# 1gb => 1024*1024*1024 bytes +# +# units are case insensitive so 1GB 1Gb 1gB are all the same. + +################################## INCLUDES ################################### + +# Include one or more other config files here. This is useful if you +# have a standard template that goes to all Redis servers but also need +# to customize a few per-server settings. Include files can include +# other files, so use this wisely. 
+#
+# Notice option "include" won't be rewritten by command "CONFIG REWRITE"
+# from admin or Redis Sentinel. Since Redis always uses the last processed
+# line as the value of a configuration directive, you'd better put includes
+# at the beginning of this file to avoid overwriting config changes at runtime.
+#
+# If instead you are interested in using includes to override configuration
+# options, it is better to use include as the last line.
+#
+# include .\path\to\local.conf
+# include c:\path\to\other.conf

+################################## MODULES #####################################
+
+# Load modules at startup. If the server is not able to load modules
+# it will abort. It is possible to use multiple loadmodule directives.
+#
+# loadmodule .\path\to\my_module.dll
+# loadmodule c:\path\to\other_module.dll
+
+################################## NETWORK #####################################
+
+# By default, if no "bind" configuration directive is specified, Redis listens
+# for connections from all the network interfaces available on the server.
+# It is possible to listen to just one or multiple selected interfaces using
+# the "bind" configuration directive, followed by one or more IP addresses.
+#
+# Examples:
+#
+# bind 192.168.1.100 10.0.0.1
+# bind 127.0.0.1 ::1
+#
+# ~~~ WARNING ~~~ If the computer running Redis is directly exposed to the
+# internet, binding to all the interfaces is dangerous and will expose the
+# instance to everybody on the internet. So by default we uncomment the
+# following bind directive, that will force Redis to listen only on
+# the IPv4 loopback interface address (this means Redis will be able to
+# accept connections only from clients running on the same computer it
+# is running on).
+#
+# IF YOU ARE SURE YOU WANT YOUR INSTANCE TO LISTEN TO ALL THE INTERFACES
+# JUST COMMENT THE FOLLOWING LINE.
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+bind 127.0.0.1
+
+# Protected mode is a layer of security protection whose goal is to avoid
+# Redis instances left open on the internet being accessed and exploited.
+#
+# When protected mode is on and if:
+#
+# 1) The server is not binding explicitly to a set of addresses using the
+# "bind" directive.
+# 2) No password is configured.
+#
+# The server only accepts connections from clients connecting from the
+# IPv4 and IPv6 loopback addresses 127.0.0.1 and ::1, and from Unix domain
+# sockets.
+#
+# By default protected mode is enabled. You should disable it only if
+# you are sure you want clients from other hosts to connect to Redis
+# even if no authentication is configured, nor a specific set of interfaces
+# are explicitly listed using the "bind" directive.
+protected-mode yes
+
+# Accept connections on the specified port, default is 6379 (IANA #815344).
+# If port 0 is specified Redis will not listen on a TCP socket.
+port 6379
+
+# TCP listen() backlog.
+#
+# In high requests-per-second environments you need a high backlog in order
+# to avoid slow client connection issues. Note that the Linux kernel
+# will silently truncate it to the value of /proc/sys/net/core/somaxconn so
+# make sure to raise both the value of somaxconn and tcp_max_syn_backlog
+# in order to get the desired effect.
+tcp-backlog 511
+
+# Unix socket.
+#
+# Specify the path for the Unix socket that will be used to listen for
+# incoming connections. There is no default, so Redis will not listen
+# on a unix socket when not specified.
+#
+# unixsocket /tmp/redis.sock
+# unixsocketperm 700
+
+# Close the connection after a client is idle for N seconds (0 to disable)
+timeout 0
+
+# TCP keepalive.
+#
+# If non-zero, use SO_KEEPALIVE to send TCP ACKs to clients in absence
+# of communication. This is useful for two reasons:
+#
+# 1) Detect dead peers.
+# 2) Keep the connection alive from the point of view of network
+# equipment in the middle.
+#
+# On Linux, the specified value (in seconds) is the period used to send ACKs.
+# Note that to close the connection double the time is needed.
+# On other kernels the period depends on the kernel configuration.
+#
+# A reasonable value for this option is 300 seconds, which is the new
+# Redis default starting with Redis 3.2.1.
+tcp-keepalive 300
+
+################################# GENERAL #####################################
+
+# By default Redis does not run as a daemon. Use 'yes' if you need it.
+# Note that Redis will write a pid file in /var/run/redis.pid when daemonized.
+# NOT SUPPORTED ON WINDOWS daemonize no
+
+# If you run Redis from upstart or systemd, Redis can interact with your
+# supervision tree. Options:
+# supervised no - no supervision interaction
+# supervised upstart - signal upstart by putting Redis into SIGSTOP mode
+# supervised systemd - signal systemd by writing READY=1 to $NOTIFY_SOCKET
+# supervised auto - detect upstart or systemd method based on
+# UPSTART_JOB or NOTIFY_SOCKET environment variables
+# Note: these supervision methods only signal "process is ready."
+# They do not enable continuous liveness pings back to your supervisor.
+# NOT SUPPORTED ON WINDOWS supervised no
+
+# If a pid file is specified, Redis writes it where specified at startup
+# and removes it at exit.
+#
+# When the server runs non daemonized, no pid file is created if none is
+# specified in the configuration. When the server is daemonized, the pid file
+# is used even if not specified, defaulting to "/var/run/redis.pid".
+#
+# Creating a pid file is best effort: if Redis is not able to create it
+# nothing bad happens, the server will start and run normally.
+# NOT SUPPORTED ON WINDOWS pidfile /var/run/redis.pid
+
+# Specify the server verbosity level.
+# This can be one of:
+# debug (a lot of information, useful for development/testing)
+# verbose (many rarely useful info, but not a mess like the debug level)
+# notice (moderately verbose, what you want in production probably)
+# warning (only very important / critical messages are logged)
+loglevel notice
+
+# Specify the log file name. Also 'stdout' can be used to force
+# Redis to log on the standard output.
+logfile "server_log.txt"
+
+# To enable logging to the Windows EventLog, just set 'syslog-enabled' to
+# yes, and optionally update the other syslog parameters to suit your needs.
+# If Redis is installed and launched as a Windows Service, this will
+# automatically be enabled.
+syslog-enabled yes
+
+# Specify the source name of the events in the Windows Application log.
+syslog-ident redis
+
+# Specify the syslog facility. Must be USER or between LOCAL0-LOCAL7.
+# NOT SUPPORTED ON WINDOWS syslog-facility local0
+
+# Set the number of databases. The default database is DB 0; you can select
+# a different one on a per-connection basis using SELECT <dbid> where
+# dbid is a number between 0 and 'databases'-1
+databases 16
+
+# By default Redis shows an ASCII art logo only when started to log to the
+# standard output and if the standard output is a TTY.
Basically this means
+# that normally a logo is displayed only in interactive sessions.
+#
+# However it is possible to force the pre-4.0 behavior and always show an
+# ASCII art logo in startup logs by setting the following option to yes.
+always-show-logo yes
+
+################################ SNAPSHOTTING ################################
+#
+# Save the DB on disk:
+#
+# save <seconds> <changes>
+#
+# Will save the DB if both the given number of seconds and the given
+# number of write operations against the DB occurred.
+#
+# In the example below the behaviour will be to save:
+# after 900 sec (15 min) if at least 1 key changed
+# after 300 sec (5 min) if at least 10 keys changed
+# after 60 sec if at least 10000 keys changed
+#
+# Note: you can disable saving completely by commenting out all "save" lines.
+#
+# It is also possible to remove all the previously configured save
+# points by adding a save directive with a single empty string argument
+# like in the following example:
+#
+# save ""
+
+save 900 1
+save 300 10
+save 60 10000
+
+# By default Redis will stop accepting writes if RDB snapshots are enabled
+# (at least one save point) and the latest background save failed.
+# This will make the user aware (in a hard way) that data is not persisting
+# on disk properly, otherwise chances are that no one will notice and some
+# disaster will happen.
+#
+# If the background saving process starts working again, Redis will
+# automatically allow writes again.
+#
+# However if you have set up proper monitoring of the Redis server
+# and persistence, you may want to disable this feature so that Redis will
+# continue to work as usual even if there are problems with disk,
+# permissions, and so forth.
+stop-writes-on-bgsave-error yes
+
+# Compress string objects using LZF when dumping .rdb databases?
+# By default that's set to 'yes' as it's almost always a win.
+# If you want to save some CPU in the saving child set it to 'no' but
+# the dataset will likely be bigger if you have compressible values or keys.
+rdbcompression yes
+
+# Since version 5 of RDB a CRC64 checksum is placed at the end of the file.
+# This makes the format more resistant to corruption but there is a performance
+# hit to pay (around 10%) when saving and loading RDB files, so you can disable it
+# for maximum performance.
+#
+# RDB files created with checksum disabled have a checksum of zero that will
+# tell the loading code to skip the check.
+rdbchecksum yes
+
+# The filename to use for the DB dump
+dbfilename dump.rdb
+
+# The working directory.
+#
+# The DB will be written inside this directory, with the filename specified
+# above using the 'dbfilename' configuration directive.
+#
+# The Append Only File will also be created inside this directory.
+#
+# Note that you must specify a directory here, not a file name.
+dir ./
+
+################################# REPLICATION #################################
+
+# Master-Replica replication. Use replicaof to make a Redis instance a copy of
+# another Redis server. A few things to understand ASAP about Redis replication.
+#
+# +------------------+      +---------------+
+# | Master           | ---> | Replica       |
+# | (receive writes) |      |  (exact copy) |
+# +------------------+      +---------------+
+#
+# 1) Redis replication is asynchronous, but you can configure a master to
+# stop accepting writes if it appears to be not connected with at least
+# a given number of replicas.
+# 2) Redis replicas are able to perform a partial resynchronization with the
+# master if the replication link is lost for a relatively small amount of
+# time. You may want to configure the replication backlog size (see the next
+# sections of this file) with a sensible value depending on your needs.
+# 3) Replication is automatic and does not need user intervention. After a
+# network partition replicas automatically try to reconnect to masters
+# and resynchronize with them.
+#
+# replicaof <masterip> <masterport>

+# If the master is password protected (using the "requirepass" configuration
+# directive below) it is possible to tell the replica to authenticate before
+# starting the replication synchronization process, otherwise the master will
+# refuse the replica request.
+#
+# masterauth <master-password>
+
+# When a replica loses its connection with the master, or when the replication
+# is still in progress, the replica can act in two different ways:
+#
+# 1) if replica-serve-stale-data is set to 'yes' (the default) the replica will
+# still reply to client requests, possibly with out of date data, or the
+# data set may just be empty if this is the first synchronization.
+#
+# 2) if replica-serve-stale-data is set to 'no' the replica will reply with
+# an error "SYNC with master in progress" to all kinds of commands
+# except INFO, REPLICAOF, AUTH, PING, SHUTDOWN, REPLCONF, ROLE, CONFIG,
+# SUBSCRIBE, UNSUBSCRIBE, PSUBSCRIBE, PUNSUBSCRIBE, PUBLISH, PUBSUB,
+# COMMAND, POST, HOST: and LATENCY.
+#
+replica-serve-stale-data yes
+
+# You can configure a replica instance to accept writes or not. Writing against
+# a replica instance may be useful to store some ephemeral data (because data
+# written on a replica will be easily deleted after resync with the master) but
+# may also cause problems if clients are writing to it because of a
+# misconfiguration.
+#
+# Since Redis 2.6 by default replicas are read-only.
+#
+# Note: read only replicas are not designed to be exposed to untrusted clients
+# on the internet. It's just a protection layer against misuse of the instance.
+# Still a read only replica exports by default all the administrative commands
+# such as CONFIG, DEBUG, and so forth. To a limited extent you can improve
+# security of read only replicas using 'rename-command' to shadow all the
+# administrative / dangerous commands.
+replica-read-only yes
+
+# Replication SYNC strategy: disk or socket.
+#
+# -------------------------------------------------------
+# WARNING: DISKLESS REPLICATION IS EXPERIMENTAL CURRENTLY
+# -------------------------------------------------------
+#
+# New replicas and reconnecting replicas that are not able to continue the
+# replication process by just receiving differences need to do what is called
+# a "full synchronization". An RDB file is transmitted from the master to the
+# replicas. The transmission can happen in two different ways:
+#
+# 1) Disk-backed: The Redis master creates a new process that writes the RDB
+# file on disk. Later the file is transferred by the parent
+# process to the replicas incrementally.
+# 2) Diskless: The Redis master creates a new process that directly writes the
+# RDB file to replica sockets, without touching the disk at all.
+#
+# With disk-backed replication, while the RDB file is generated, more replicas
+# can be queued and served with the RDB file as soon as the current child producing
+# the RDB file finishes its work.
With diskless replication instead once +# the transfer starts, new replicas arriving will be queued and a new transfer +# will start when the current one terminates. +# +# When diskless replication is used, the master waits a configurable amount of +# time (in seconds) before starting the transfer in the hope that multiple replicas +# will arrive and the transfer can be parallelized. +# +# With slow disks and fast (large bandwidth) networks, diskless replication +# works better. +repl-diskless-sync no + +# When diskless replication is enabled, it is possible to configure the delay +# the server waits in order to spawn the child that transfers the RDB via socket +# to the replicas. +# +# This is important since once the transfer starts, it is not possible to serve +# new replicas arriving, that will be queued for the next RDB transfer, so the server +# waits a delay in order to let more replicas arrive. +# +# The delay is specified in seconds, and by default is 5 seconds. To disable +# it entirely just set it to 0 seconds and the transfer will start ASAP. +repl-diskless-sync-delay 5 + +# Replicas send PINGs to server in a predefined interval. It's possible to change +# this interval with the repl_ping_replica_period option. The default value is 10 +# seconds. +# +# repl-ping-replica-period 10 + +# The following option sets the replication timeout for: +# +# 1) Bulk transfer I/O during SYNC, from the point of view of replica. +# 2) Master timeout from the point of view of replicas (data, pings). +# 3) Replica timeout from the point of view of masters (REPLCONF ACK pings). +# +# It is important to make sure that this value is greater than the value +# specified for repl-ping-replica-period otherwise a timeout will be detected +# every time there is low traffic between the master and the replica. +# +# repl-timeout 60 + +# Disable TCP_NODELAY on the replica socket after SYNC? +# +# If you select "yes" Redis will use a smaller number of TCP packets and +# less bandwidth to send data to replicas. But this can add a delay for +# the data to appear on the replica side, up to 40 milliseconds with +# Linux kernels using a default configuration. +# +# If you select "no" the delay for data to appear on the replica side will +# be reduced but more bandwidth will be used for replication. +# +# By default we optimize for low latency, but in very high traffic conditions +# or when the master and replicas are many hops away, turning this to "yes" may +# be a good idea. +repl-disable-tcp-nodelay no + +# Set the replication backlog size. The backlog is a buffer that accumulates +# replica data when replicas are disconnected for some time, so that when a replica +# wants to reconnect again, often a full resync is not needed, but a partial +# resync is enough, just passing the portion of data the replica missed while +# disconnected. +# +# The bigger the replication backlog, the longer the time the replica can be +# disconnected and later be able to perform a partial resynchronization. +# +# The backlog is only allocated once there is at least a replica connected. +# +# repl-backlog-size 1mb + +# After a master has no longer connected replicas for some time, the backlog +# will be freed. The following option configures the amount of seconds that +# need to elapse, starting from the time the last replica disconnected, for +# the backlog buffer to be freed. 
+# +# Note that replicas never free the backlog for timeout, since they may be +# promoted to masters later, and should be able to correctly "partially +# resynchronize" with the replicas: hence they should always accumulate backlog. +# +# A value of 0 means to never release the backlog. +# +# repl-backlog-ttl 3600 + +# The replica priority is an integer number published by Redis in the INFO output. +# It is used by Redis Sentinel in order to select a replica to promote into a +# master if the master is no longer working correctly. +# +# A replica with a low priority number is considered better for promotion, so +# for instance if there are three replicas with priority 10, 100, 25 Sentinel will +# pick the one with priority 10, that is the lowest. +# +# However a special priority of 0 marks the replica as not able to perform the +# role of master, so a replica with priority of 0 will never be selected by +# Redis Sentinel for promotion. +# +# By default the priority is 100. +replica-priority 100 + +# It is possible for a master to stop accepting writes if there are less than +# N replicas connected, having a lag less or equal than M seconds. +# +# The N replicas need to be in "online" state. +# +# The lag in seconds, that must be <= the specified value, is calculated from +# the last ping received from the replica, that is usually sent every second. +# +# This option does not GUARANTEE that N replicas will accept the write, but +# will limit the window of exposure for lost writes in case not enough replicas +# are available, to the specified number of seconds. +# +# For example to require at least 3 replicas with a lag <= 10 seconds use: +# +# min-replicas-to-write 3 +# min-replicas-max-lag 10 +# +# Setting one or the other to 0 disables the feature. +# +# By default min-replicas-to-write is set to 0 (feature disabled) and +# min-replicas-max-lag is set to 10. + +# A Redis master is able to list the address and port of the attached +# replicas in different ways. For example the "INFO replication" section +# offers this information, which is used, among other tools, by +# Redis Sentinel in order to discover replica instances. +# Another place where this info is available is in the output of the +# "ROLE" command of a master. +# +# The listed IP and address normally reported by a replica is obtained +# in the following way: +# +# IP: The address is auto detected by checking the peer address +# of the socket used by the replica to connect with the master. +# +# Port: The port is communicated by the replica during the replication +# handshake, and is normally the port that the replica is using to +# listen for connections. +# +# However when port forwarding or Network Address Translation (NAT) is +# used, the replica may be actually reachable via different IP and port +# pairs. The following two options can be used by a replica in order to +# report to its master a specific set of IP and port, so that both INFO +# and ROLE will report those values. +# +# There is no need to use both the options if you need to override just +# the port or the IP address. +# +# replica-announce-ip 5.5.5.5 +# replica-announce-port 1234 + +################################## SECURITY ################################### + +# Require clients to issue AUTH before processing any other +# commands. This might be useful in environments in which you do not trust +# others with access to the host running redis-server. 
+# +# This should stay commented out for backward compatibility and because most +# people do not need auth (e.g. they run their own servers). +# +# Warning: since Redis is pretty fast an outside user can try up to +# 150k passwords per second against a good box. This means that you should +# use a very strong password otherwise it will be very easy to break. +# +# requirepass foobared + +# Command renaming. +# +# It is possible to change the name of dangerous commands in a shared +# environment. For instance the CONFIG command may be renamed into something +# hard to guess so that it will still be available for internal-use tools +# but not available for general clients. +# +# Example: +# +# rename-command CONFIG b840fc02d524045429941cc15f59e41cb7be6c52 +# +# It is also possible to completely kill a command by renaming it into +# an empty string: +# +# rename-command CONFIG "" +# +# Please note that changing the name of commands that are logged into the +# AOF file or transmitted to replicas may cause problems. + +################################### CLIENTS #################################### + +# Set the max number of connected clients at the same time. By default +# this limit is set to 10000 clients, however if the Redis server is not +# able to configure the process file limit to allow for the specified limit +# the max number of allowed clients is set to the current file limit +# minus 32 (as Redis reserves a few file descriptors for internal uses). +# +# Once the limit is reached Redis will close all the new connections sending +# an error 'max number of clients reached'. +# +# maxclients 10000 + +############################## MEMORY MANAGEMENT ################################ + +# If Redis is to be used as an in-memory-only cache without any kind of +# persistence, then the fork() mechanism used by the background AOF/RDB +# persistence is unnecessary. As an optimization, all persistence can be +# turned off in the Windows version of Redis. This will redirect heap +# allocations to the system heap allocator, and disable commands that would +# otherwise cause fork() operations: BGSAVE and BGREWRITEAOF. +# This flag may not be combined with any of the other flags that configure +# AOF and RDB operations. +# persistence-available [(yes)|no] + +# Set a memory usage limit to the specified amount of bytes. +# When the memory limit is reached Redis will try to remove keys +# according to the eviction policy selected (see maxmemory-policy). +# +# If Redis can't remove keys according to the policy, or if the policy is +# set to 'noeviction', Redis will start to reply with errors to commands +# that would use more memory, like SET, LPUSH, and so on, and will continue +# to reply to read-only commands like GET. +# +# This option is usually useful when using Redis as an LRU or LFU cache, or to +# set a hard memory limit for an instance (using the 'noeviction' policy). +# +# WARNING: If you have replicas attached to an instance with maxmemory on, +# the size of the output buffers needed to feed the replicas are subtracted +# from the used memory count, so that network problems / resyncs will +# not trigger a loop where keys are evicted, and in turn the output +# buffer of replicas is full with DELs of keys evicted triggering the deletion +# of more keys, and so forth until the database is completely emptied. +# +# In short... 
if you have replicas attached it is suggested that you set a lower
+# limit for maxmemory so that there is some free RAM on the system for replica
+# output buffers (but this is not needed if the policy is 'noeviction').
+#
+# WARNING: not setting maxmemory will cause Redis to terminate with an
+# out-of-memory exception if the heap limit is reached.
+#
+# NOTE: since Redis uses the system paging file to allocate the heap memory,
+# the Working Set memory usage shown by the Windows Task Manager or by other
+# tools such as ProcessExplorer will not always be accurate. For example, right
+# after a background save of the RDB or the AOF files, the working set value
+# may drop significantly. In order to check the correct amount of memory used
+# by the redis-server to store the data, use the INFO client command. The INFO
+# command shows only the memory used to store the redis data, not the extra
+# memory used by the Windows process for its own requirements. The extra amount
+# of memory not reported by the INFO command can be calculated by subtracting
+# the used_memory_peak reported by the INFO command from the Peak Working Set
+# reported by the Windows Task Manager.
+#
+# maxmemory <bytes>
+
+# MAXMEMORY POLICY: how Redis will select what to remove when maxmemory
+# is reached. You can select among the following eight behaviors:
+#
+# volatile-lru -> Evict using approximated LRU among the keys with an expire set.
+# allkeys-lru -> Evict any key using approximated LRU.
+# volatile-lfu -> Evict using approximated LFU among the keys with an expire set.
+# allkeys-lfu -> Evict any key using approximated LFU.
+# volatile-random -> Remove a random key among the ones with an expire set.
+# allkeys-random -> Remove a random key, any key.
+# volatile-ttl -> Remove the key with the nearest expire time (minor TTL)
+# noeviction -> Don't evict anything, just return an error on write operations.
+#
+# LRU means Least Recently Used
+# LFU means Least Frequently Used
+#
+# LRU, LFU and volatile-ttl are all implemented using approximated
+# randomized algorithms.
+#
+# Note: with any of the above policies, Redis will return an error on write
+# operations, when there are no suitable keys for eviction.
+#
+# At the time of writing these commands are: set setnx setex append
+# incr decr rpush lpush rpushx lpushx linsert lset rpoplpush sadd
+# sinter sinterstore sunion sunionstore sdiff sdiffstore zadd zincrby
+# zunionstore zinterstore hset hsetnx hmset hincrby incrby decrby
+# getset mset msetnx exec sort
+#
+# The default is:
+#
+# maxmemory-policy noeviction
+
+# LRU, LFU and minimal TTL algorithms are not precise algorithms but approximated
+# algorithms (in order to save memory), so you can tune them for speed or
+# accuracy. By default Redis will check five keys and pick the one that was
+# used least recently; you can change the sample size using the following
+# configuration directive.
+#
+# The default of 5 produces good enough results. 10 approximates true LRU
+# very closely but costs more CPU. 3 is faster but not very accurate.
+#
+# maxmemory-samples 5
+
+# Starting from Redis 5, by default a replica will ignore its maxmemory setting
+# (unless it is promoted to master after a failover or manually). It means
+# that the eviction of keys will be just handled by the master, sending the
+# DEL commands to the replica as keys are evicted on the master side.
+#
+# This behavior ensures that masters and replicas stay consistent, and is usually
+# what you want, however if your replica is writable, or you want the replica to have
+# a different memory setting, and you are sure all the writes performed to the
+# replica are idempotent, then you may change this default (but be sure to understand
+# what you are doing).
+#
+# Note that since the replica by default does not evict, it may end up using more
+# memory than the amount set via maxmemory (there are certain buffers that may
+# be larger on the replica, or data structures may sometimes take more memory and so
+# forth). So make sure you monitor your replicas and ensure they have enough
+# memory to never hit a real out-of-memory condition before the master hits
+# the configured maxmemory setting.
+#
+# replica-ignore-maxmemory yes
+
+############################# LAZY FREEING ####################################
+
+# Redis has two primitives to delete keys. One is called DEL and is a blocking
+# deletion of the object. It means that the server stops processing new commands
+# in order to reclaim all the memory associated with an object in a synchronous
+# way. If the key deleted is associated with a small object, the time needed
+# in order to execute the DEL command is very small and comparable to most other
+# O(1) or O(log_N) commands in Redis. However if the key is associated with an
+# aggregated value containing millions of elements, the server can block for
+# a long time (even seconds) in order to complete the operation.
+#
+# For the above reasons Redis also offers non blocking deletion primitives
+# such as UNLINK (non blocking DEL) and the ASYNC option of FLUSHALL and
+# FLUSHDB commands, in order to reclaim memory in background. Those commands
+# are executed in constant time. Another thread will incrementally free the
+# object in the background as fast as possible.
+#
+# DEL, UNLINK and ASYNC option of FLUSHALL and FLUSHDB are user-controlled.
+# It's up to the design of the application to understand when it is a good
+# idea to use one or the other. However the Redis server sometimes has to
+# delete keys or flush the whole database as a side effect of other operations.
+# Specifically Redis deletes objects independently of a user call in the
+# following scenarios:
+#
+# 1) On eviction, because of the maxmemory and maxmemory policy configurations,
+# in order to make room for new data, without going over the specified
+# memory limit.
+# 2) Because of expire: when a key with an associated time to live (see the
+# EXPIRE command) must be deleted from memory.
+# 3) Because of a side effect of a command that stores data on a key that may
+# already exist. For example the RENAME command may delete the old key
+# content when it is replaced with another one. Similarly SUNIONSTORE
+# or SORT with STORE option may delete existing keys. The SET command
+# itself removes any old content of the specified key in order to replace
+# it with the specified string.
+# 4) During replication, when a replica performs a full resynchronization with
+# its master, the content of the whole database is removed in order to
+# load the RDB file just transferred.
+#
+# In all the above cases the default is to delete objects in a blocking way,
+# as if DEL was called.
However you can configure each case specifically
+# in order to instead release memory in a non-blocking way, as if UNLINK
+# was called, using the following configuration directives:
+
+lazyfree-lazy-eviction no
+lazyfree-lazy-expire no
+lazyfree-lazy-server-del no
+replica-lazy-flush no
+
+############################## APPEND ONLY MODE ###############################
+
+# By default Redis asynchronously dumps the dataset on disk. This mode is
+# good enough in many applications, but an issue with the Redis process or
+# a power outage may result in a few minutes of lost writes (depending on
+# the configured save points).
+#
+# The Append Only File is an alternative persistence mode that provides
+# much better durability. For instance using the default data fsync policy
+# (see later in the config file) Redis can lose just one second of writes in a
+# dramatic event like a server power outage, or a single write if something
+# goes wrong with the Redis process itself while the operating system is
+# still running correctly.
+#
+# AOF and RDB persistence can be enabled at the same time without problems.
+# If the AOF is enabled on startup Redis will load the AOF, that is the file
+# with the best durability guarantees.
+#
+# Please check http://redis.io/topics/persistence for more information.
+
+appendonly no
+
+# The name of the append only file (default: "appendonly.aof")
+
+appendfilename "appendonly.aof"
+
+# The fsync() call tells the Operating System to actually write data on disk
+# instead of waiting for more data in the output buffer. Some OSes will really
+# flush data to disk, while others will just try to do it ASAP.
+#
+# Redis supports three different modes:
+#
+# no: don't fsync, just let the OS flush the data when it wants. Faster.
+# always: fsync after every write to the append only log. Slow, Safest.
+# everysec: fsync only one time every second. Compromise.
+#
+# The default is "everysec", as that's usually the right compromise between
+# speed and data safety. It's up to you to understand if you can relax this to
+# "no" that will let the operating system flush the output buffer when
+# it wants, for better performance (but if you can live with the idea of
+# some data loss consider the default persistence mode that's snapshotting),
+# or on the contrary, use "always" that's very slow but a bit safer than
+# everysec.
+#
+# For more details please check the following article:
+# http://antirez.com/post/redis-persistence-demystified.html
+#
+# If unsure, use "everysec".
+
+# appendfsync always
+appendfsync everysec
+# appendfsync no
+
+# When the AOF fsync policy is set to always or everysec, and a background
+# saving process (a background save or AOF log background rewriting) is
+# performing a lot of I/O against the disk, in some Linux configurations
+# Redis may block too long on the fsync() call. Note that there is no fix for
+# this currently, as even performing fsync in a different thread will block
+# our synchronous write(2) call.
+#
+# In order to mitigate this problem it's possible to use the following option
+# that will prevent fsync() from being called in the main process while a
+# BGSAVE or BGREWRITEAOF is in progress.
+#
+# This means that while another child is saving, the durability of Redis is
+# the same as "appendfsync no". In practical terms, this means that it is
+# possible to lose up to 30 seconds of log in the worst scenario (with the
+# default Linux settings).
+#
+# If you have latency problems turn this to "yes".
Otherwise leave it as
+# "no" that is the safest pick from the point of view of durability.
+
+no-appendfsync-on-rewrite no
+
+# Automatic rewrite of the append only file.
+# Redis is able to automatically rewrite the log file implicitly calling
+# BGREWRITEAOF when the AOF log size grows by the specified percentage.
+#
+# This is how it works: Redis remembers the size of the AOF file after the
+# latest rewrite (if no rewrite has happened since the restart, the size of
+# the AOF at startup is used).
+#
+# This base size is compared to the current size. If the current size is
+# bigger than the base size by the specified percentage, the rewrite is
+# triggered. Also you need to specify a minimal size for the AOF file to be
+# rewritten, this is useful to avoid rewriting the AOF file even if the
+# percentage increase is reached but it is still pretty small.
+#
+# Specify a percentage of zero in order to disable the automatic AOF
+# rewrite feature.
+
+auto-aof-rewrite-percentage 100
+auto-aof-rewrite-min-size 64mb
+
+# An AOF file may be found to be truncated at the end during the Redis
+# startup process, when the AOF data gets loaded back into memory.
+# This may happen when the system where Redis is running
+# crashes, especially when an ext4 filesystem is mounted without the
+# data=ordered option (however this can't happen when Redis itself
+# crashes or aborts but the operating system still works correctly).
+#
+# Redis can either exit with an error when this happens, or load as much
+# data as possible (the default now) and start if the AOF file is found
+# to be truncated at the end. The following option controls this behavior.
+#
+# If aof-load-truncated is set to yes, a truncated AOF file is loaded and
+# the Redis server starts emitting a log to inform the user of the event.
+# Otherwise if the option is set to no, the server aborts with an error
+# and refuses to start. When the option is set to no, the user is required
+# to fix the AOF file using the "redis-check-aof" utility before restarting
+# the server.
+#
+# Note that if the AOF file is found to be corrupted in the middle, the
+# server will still exit with an error. This option only applies when
+# Redis tries to read more data from the AOF file but not enough bytes
+# are found.
+aof-load-truncated yes
+
+# When rewriting the AOF file, Redis is able to use an RDB preamble in the
+# AOF file for faster rewrites and recoveries. When this option is turned
+# on the rewritten AOF file is composed of two different stanzas:
+#
+# [RDB file][AOF tail]
+#
+# When loading Redis recognizes that the AOF file starts with the "REDIS"
+# string and loads the prefixed RDB file, and continues loading the AOF
+# tail.
+aof-use-rdb-preamble yes
+
+################################ LUA SCRIPTING ###############################
+
+# Max execution time of a Lua script in milliseconds.
+#
+# If the maximum execution time is reached Redis will log that a script is
+# still in execution after the maximum allowed time and will start to
+# reply to queries with an error.
+#
+# When a long running script exceeds the maximum execution time only the
+# SCRIPT KILL and SHUTDOWN NOSAVE commands are available. The first can be
+# used to stop a script that has not yet called any write commands. The second
+# is the only way to shut down the server in case a write command was
+# already issued by the script and the user doesn't want to wait for its
+# natural termination.
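+#
+# As a hedged illustration only (these are the two stock commands mentioned
+# above, issued here via redis-cli from a second connection):
+#
+#    redis-cli SCRIPT KILL        # stops a script that has not yet written
+#    redis-cli SHUTDOWN NOSAVE    # last resort once the script has written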
+#
+# Set it to 0 or a negative value for unlimited execution without warnings.
+lua-time-limit 5000
+
+################################ REDIS CLUSTER ###############################
+
+# Normal Redis instances can't be part of a Redis Cluster; only nodes that are
+# started as cluster nodes can. In order to start a Redis instance as a
+# cluster node enable cluster support by uncommenting the following:
+#
+# cluster-enabled yes
+
+# Every cluster node has a cluster configuration file. This file is not
+# intended to be edited by hand. It is created and updated by Redis nodes.
+# Every Redis Cluster node requires a different cluster configuration file.
+# Make sure that instances running in the same system do not have
+# overlapping cluster configuration file names.
+#
+# cluster-config-file nodes-6379.conf
+
+# Cluster node timeout is the amount of milliseconds a node must be unreachable
+# for it to be considered in failure state.
+# Most other internal time limits are multiples of the node timeout.
+#
+# cluster-node-timeout 15000
+
+# A replica of a failing master will avoid starting a failover if its data
+# looks too old.
+#
+# There is no simple way for a replica to actually have an exact measure of
+# its "data age", so the following two checks are performed:
+#
+# 1) If there are multiple replicas able to failover, they exchange messages
+# in order to try to give an advantage to the replica with the best
+# replication offset (more data from the master processed).
+# Replicas will try to get their rank by offset, and apply to the start
+# of the failover a delay proportional to their rank.
+#
+# 2) Every single replica computes the time of the last interaction with
+# its master. This can be the last ping or command received (if the master
+# is still in the "connected" state), or the time that elapsed since the
+# disconnection with the master (if the replication link is currently down).
+# If the last interaction is too old, the replica will not try to failover
+# at all.
+#
+# Point "2" can be tuned by the user. Specifically a replica will not perform
+# the failover if, since the last interaction with the master, the time
+# elapsed is greater than:
+#
+# (node-timeout * replica-validity-factor) + repl-ping-replica-period
+#
+# So for example if node-timeout is 30 seconds, and the replica-validity-factor
+# is 10, and assuming a default repl-ping-replica-period of 10 seconds, the
+# replica will not try to failover if it was not able to talk with the master
+# for longer than 310 seconds.
+#
+# A large replica-validity-factor may allow replicas with too old data to fail
+# over a master, while a too small value may prevent the cluster from being
+# able to elect a replica at all.
+#
+# For maximum availability, it is possible to set the replica-validity-factor
+# to a value of 0, which means that replicas will always try to failover the
+# master regardless of the last time they interacted with the master.
+# (However they'll always try to apply a delay proportional to their
+# offset rank).
+#
+# Zero is the only value able to guarantee that when all the partitions heal
+# the cluster will always be able to continue.
+#
+# cluster-replica-validity-factor 10
+
+# Cluster replicas are able to migrate to orphaned masters, that are masters
+# that are left without working replicas. This improves the cluster's ability
+# to resist failures, as otherwise an orphaned master can't be failed over
+# in case of failure if it has no working replicas.
+#
+# Replicas migrate to orphaned masters only if there are still at least a
+# given number of other working replicas for their old master. This number
+# is the "migration barrier". A migration barrier of 1 means that a replica
+# will migrate only if there is at least 1 other working replica for its master
+# and so forth. It usually reflects the number of replicas you want for every
+# master in your cluster.
+#
+# Default is 1 (replicas migrate only if their masters remain with at least
+# one replica). To disable migration just set it to a very large value.
+# A value of 0 can be set but is useful only for debugging and dangerous
+# in production.
+#
+# cluster-migration-barrier 1
+
+# By default Redis Cluster nodes stop accepting queries if they detect there
+# is at least one hash slot uncovered (no available node is serving it).
+# This way if the cluster is partially down (for example a range of hash slots
+# are no longer covered) the whole cluster eventually becomes unavailable.
+# It automatically becomes available again as soon as all the slots are covered.
+#
+# However sometimes you want the subset of the cluster which is working,
+# to continue to accept queries for the part of the key space that is still
+# covered. In order to do so, just set the cluster-require-full-coverage
+# option to no.
+#
+# cluster-require-full-coverage yes
+
+# This option, when set to yes, prevents replicas from trying to fail over
+# their master during master failures. However the master can still perform a
+# manual failover, if forced to do so.
+#
+# This is useful in different scenarios, especially in the case of multiple
+# data center operations, where we want one side to never be promoted except
+# in the case of a total DC failure.
+#
+# cluster-replica-no-failover no
+
+# In order to set up your cluster make sure to read the documentation
+# available at the http://redis.io web site.
+
+########################## CLUSTER DOCKER/NAT support ########################
+
+# In certain deployments, Redis Cluster node address discovery fails, because
+# addresses are NAT-ted or because ports are forwarded (the typical case is
+# Docker and other containers).
+#
+# In order to make Redis Cluster work in such environments, a static
+# configuration where each node knows its public address is needed. The
+# following three options are used for this purpose, and are:
+#
+# * cluster-announce-ip
+# * cluster-announce-port
+# * cluster-announce-bus-port
+#
+# Each instructs the node about its address, client port, and cluster message
+# bus port. The information is then published in the header of the bus packets
+# so that other nodes will be able to correctly map the address of the node
+# publishing the information.
+#
+# If the above options are not used, the normal Redis Cluster auto-detection
+# will be used instead.
+#
+# Note that when remapped, the bus port may not be at the fixed offset of
+# clients port + 10000, so you can specify any port and bus-port depending
+# on how they get remapped. If the bus-port is not set, a fixed offset of
+# 10000 will be used as usual.
+#
+# Example:
+#
+# cluster-announce-ip 10.1.1.5
+# cluster-announce-port 6379
+# cluster-announce-bus-port 6380
+
+################################## SLOW LOG ###################################
+
+# The Redis Slow Log is a system to log queries that exceeded a specified
+# execution time.
The execution time does not include the I/O operations
+# like talking with the client, sending the reply and so forth,
+# but just the time needed to actually execute the command (this is the only
+# stage of command execution where the thread is blocked and can not serve
+# other requests in the meantime).
+#
+# You can configure the slow log with two parameters: one tells Redis the
+# execution time, in microseconds, that a command must exceed in order to
+# get logged, and the other parameter is the length of the
+# slow log. When a new command is logged the oldest one is removed from the
+# queue of logged commands.
+
+# The following time is expressed in microseconds, so 1000000 is equivalent
+# to one second. Note that a negative number disables the slow log, while
+# a value of zero forces the logging of every command.
+slowlog-log-slower-than 10000
+
+# There is no limit to this length. Just be aware that it will consume memory.
+# You can reclaim memory used by the slow log with SLOWLOG RESET.
+slowlog-max-len 128
+
+################################ LATENCY MONITOR ##############################
+
+# The Redis latency monitoring subsystem samples different operations
+# at runtime in order to collect data related to possible sources of
+# latency of a Redis instance.
+#
+# Via the LATENCY command this information is available to the user that can
+# print graphs and obtain reports.
+#
+# The system only logs operations that were performed in a time equal to or
+# greater than the amount of milliseconds specified via the
+# latency-monitor-threshold configuration directive. When its value is set
+# to zero, the latency monitor is turned off.
+#
+# By default latency monitoring is disabled since it is mostly not needed
+# if you don't have latency issues, and collecting data has a performance
+# impact, that while very small, can be measured under big load. Latency
+# monitoring can easily be enabled at runtime using the command
+# "CONFIG SET latency-monitor-threshold <milliseconds>" if needed.
+latency-monitor-threshold 0
+
+############################# EVENT NOTIFICATION ##############################
+
+# Redis can notify Pub/Sub clients about events happening in the key space.
+# This feature is documented at http://redis.io/topics/notifications
+#
+# For instance if keyspace events notification is enabled, and a client
+# performs a DEL operation on key "foo" stored in Database 0, two
+# messages will be published via Pub/Sub:
+#
+# PUBLISH __keyspace@0__:foo del
+# PUBLISH __keyevent@0__:del foo
+#
+# It is possible to select the events that Redis will notify among a set
+# of classes. Every class is identified by a single character:
+#
+# K Keyspace events, published with __keyspace@<db>__ prefix.
+# E Keyevent events, published with __keyevent@<db>__ prefix.
+# g Generic commands (non-type specific) like DEL, EXPIRE, RENAME, ...
+# $ String commands
+# l List commands
+# s Set commands
+# h Hash commands
+# z Sorted set commands
+# x Expired events (events generated every time a key expires)
+# e Evicted events (events generated when a key is evicted for maxmemory)
+# A Alias for g$lshzxe, so that the "AKE" string means all the events.
+#
+# The "notify-keyspace-events" directive takes as its argument a string that
+# is composed of zero or multiple characters. The empty string means that
+# notifications are disabled.
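+#
+# As a hedged illustration of consuming these events (assuming database 0 and
+# the channel naming shown above), a client can watch them with a pattern
+# subscription:
+#
+#    redis-cli PSUBSCRIBE '__keyevent@0__:*'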
+#
+# Example: to enable list and generic events, from the point of view of the
+# event name, use:
+#
+# notify-keyspace-events Elg
+#
+# Example 2: to get the stream of the expired keys by subscribing to channel
+# name __keyevent@0__:expired use:
+#
+# notify-keyspace-events Ex
+#
+# By default all notifications are disabled because most users don't need
+# this feature and the feature has some overhead. Note that if you don't
+# specify at least one of K or E, no events will be delivered.
+notify-keyspace-events ""
+
+############################### ADVANCED CONFIG ###############################
+
+# Hashes are encoded using a memory efficient data structure when they have a
+# small number of entries, and the biggest entry does not exceed a given
+# threshold. These thresholds can be configured using the following directives.
+hash-max-ziplist-entries 512
+hash-max-ziplist-value 64
+
+# Lists are also encoded in a special way to save a lot of space.
+# The number of entries allowed per internal list node can be specified
+# as a fixed maximum size or a maximum number of elements.
+# For a fixed maximum size, use -5 through -1, meaning:
+# -5: max size: 64 Kb <-- not recommended for normal workloads
+# -4: max size: 32 Kb <-- not recommended
+# -3: max size: 16 Kb <-- probably not recommended
+# -2: max size: 8 Kb <-- good
+# -1: max size: 4 Kb <-- good
+# Positive numbers mean store up to _exactly_ that number of elements
+# per list node.
+# The highest performing option is usually -2 (8 Kb size) or -1 (4 Kb size),
+# but if your use case is unique, adjust the settings as necessary.
+list-max-ziplist-size -2
+
+# Lists may also be compressed.
+# Compress depth is the number of quicklist ziplist nodes from *each* side of
+# the list to *exclude* from compression. The head and tail of the list
+# are always uncompressed for fast push/pop operations. Settings are:
+# 0: disable all list compression
+# 1: depth 1 means "don't start compressing until after 1 node into the list,
+# going from either the head or tail"
+# So: [head]->node->node->...->node->[tail]
+# [head], [tail] will always be uncompressed; inner nodes will compress.
+# 2: [head]->[next]->node->node->...->node->[prev]->[tail]
+# 2 here means: don't compress head or head->next or tail->prev or tail,
+# but compress all nodes between them.
+# 3: [head]->[next]->[next]->node->node->...->node->[prev]->[prev]->[tail]
+# etc.
+list-compress-depth 0
+
+# Sets have a special encoding in just one case: when a set is composed
+# of just strings that happen to be integers in radix 10 in the range
+# of 64 bit signed integers.
+# The following configuration setting sets the limit on the size of the
+# set in order to use this special memory saving encoding.
+set-max-intset-entries 512
+
+# Similarly to hashes and lists, sorted sets are also specially encoded in
+# order to save a lot of space. This encoding is only used when the length and
+# elements of a sorted set are below the following limits:
+zset-max-ziplist-entries 128
+zset-max-ziplist-value 64
+
+# HyperLogLog sparse representation bytes limit. The limit includes the
+# 16-byte header. When a HyperLogLog using the sparse representation crosses
+# this limit, it is converted into the dense representation.
+#
+# A value greater than 16000 is totally useless, since at that point the
+# dense representation is more memory efficient.
+#
+# The suggested value is ~ 3000 in order to have the benefits of
+# the space efficient encoding without slowing down too much PFADD,
+# which is O(N) with the sparse encoding. The value can be raised to
+# ~ 10000 when CPU is not a concern, but space is, and the data set is
+# composed of many HyperLogLogs with cardinality in the 0 - 15000 range.
+hll-sparse-max-bytes 3000
+
+# Streams macro node max size / items. The stream data structure is a radix
+# tree of big nodes that encode multiple items inside. Using this configuration
+# it is possible to configure how big a single node can be in bytes, and the
+# maximum number of items it may contain before switching to a new node when
+# appending new stream entries. If any of the following settings are set to
+# zero, the limit is ignored, so for instance it is possible to set just a
+# max entries limit by setting max-bytes to 0 and max-entries to the desired
+# value.
+stream-node-max-bytes 4096
+stream-node-max-entries 100
+
+# Active rehashing uses 1 millisecond every 100 milliseconds of CPU time in
+# order to help rehashing the main Redis hash table (the one mapping top-level
+# keys to values). The hash table implementation Redis uses (see dict.c)
+# performs a lazy rehashing: the more operations you run against a hash table
+# that is rehashing, the more rehashing "steps" are performed, so if the
+# server is idle the rehashing is never complete and some more memory is used
+# by the hash table.
+#
+# The default is to use this millisecond 10 times every second in order to
+# actively rehash the main dictionaries, freeing memory when possible.
+#
+# If unsure:
+# use "activerehashing no" if you have hard latency requirements and it is
+# not a good thing in your environment that Redis can reply from time to time
+# to queries with a 2 millisecond delay.
+#
+# use "activerehashing yes" if you don't have such hard requirements but
+# want to free memory asap when possible.
+activerehashing yes
+
+# The client output buffer limits can be used to force disconnection of clients
+# that are not reading data from the server fast enough for some reason (a
+# common reason is that a Pub/Sub client can't consume messages as fast as the
+# publisher can produce them).
+#
+# The limit can be set differently for the three different classes of clients:
+#
+# normal -> normal clients including MONITOR clients
+# replica -> replica clients
+# pubsub -> clients subscribed to at least one pubsub channel or pattern
+#
+# The syntax of every client-output-buffer-limit directive is the following:
+#
+# client-output-buffer-limit <class> <hard limit> <soft limit> <soft seconds>
+#
+# A client is immediately disconnected once the hard limit is reached, or if
+# the soft limit is reached and remains reached for the specified number of
+# seconds (continuously).
+# So for instance if the hard limit is 32 megabytes and the soft limit is
+# 16 megabytes / 10 seconds, the client will get disconnected immediately
+# if the size of the output buffers reaches 32 megabytes, but will also get
+# disconnected if the client reaches 16 megabytes and continuously stays over
+# the limit for 10 seconds.
+#
+# By default normal clients are not limited because they don't receive data
+# without asking (in a push way), but just after a request, so only
+# asynchronous clients may create a scenario where data is requested faster
+# than it can be read.
+#
+# Instead there is a default limit for pubsub and replica clients, since
+# subscribers and replicas receive data in a push fashion.
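+#
+# As a hedged illustration only (the numbers below are hypothetical, the
+# syntax is the one documented above): to disconnect a pubsub client
+# immediately at 64mb, or after it stays over 8mb for 30 seconds, one
+# could use:
+#
+#    client-output-buffer-limit pubsub 64mb 8mb 30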
+#
+# Both the hard and the soft limit can be disabled by setting them to zero.
+client-output-buffer-limit normal 0 0 0
+client-output-buffer-limit replica 256mb 64mb 60
+client-output-buffer-limit pubsub 32mb 8mb 60
+
+# Client query buffers accumulate new commands. They are limited to a fixed
+# amount by default in order to avoid that a protocol desynchronization (for
+# instance due to a bug in the client) will lead to unbounded memory usage in
+# the query buffer. However you can configure it here if you have very special
+# needs, such as huge multi/exec requests or the like.
+#
+# client-query-buffer-limit 1gb
+
+# In the Redis protocol, bulk requests, that is, elements representing single
+# strings, are normally limited to 512 mb. However you can change this limit
+# here.
+#
+# proto-max-bulk-len 512mb
+
+# Redis calls an internal function to perform many background tasks, like
+# closing connections of clients that timed out, purging expired keys that are
+# never requested, and so forth.
+#
+# Not all tasks are performed with the same frequency, but Redis checks for
+# tasks to perform according to the specified "hz" value.
+#
+# By default "hz" is set to 10. Raising the value will use more CPU when
+# Redis is idle, but at the same time will make Redis more responsive when
+# there are many keys expiring at the same time, and timeouts may be
+# handled with more precision.
+#
+# The range is between 1 and 500, however a value over 100 is usually not
+# a good idea. Most users should use the default of 10 and raise this up to
+# 100 only in environments where very low latency is required.
+hz 10
+
+# Normally it is useful to have an HZ value which is proportional to the
+# number of clients connected. This is useful in order, for instance, to
+# avoid processing too many clients for each background task invocation,
+# preventing latency spikes.
+#
+# Since the default HZ value is conservatively set to 10, Redis
+# offers, and enables by default, the ability to use an adaptive HZ value
+# which is temporarily raised when there are many connected clients.
+#
+# When dynamic HZ is enabled, the actual configured HZ will be used as
+# a baseline, but multiples of the configured HZ value will actually be
+# used as needed once more clients are connected. In this way an idle
+# instance will use very little CPU time while a busy instance will be
+# more responsive.
+dynamic-hz yes
+
+# When a child rewrites the AOF file, if the following option is enabled
+# the file will be fsync-ed every 32 MB of data generated. This is useful
+# in order to commit the file to the disk more incrementally and avoid
+# big latency spikes.
+aof-rewrite-incremental-fsync yes
+
+# When Redis saves the RDB file, if the following option is enabled
+# the file will be fsync-ed every 32 MB of data generated. This is useful
+# in order to commit the file to the disk more incrementally and avoid
+# big latency spikes.
+rdb-save-incremental-fsync yes
+
+# Redis LFU eviction (see maxmemory setting) can be tuned. However it is a good
+# idea to start with the default settings and only change them after investigating
+# how to improve performance and how the keys' LFU values change over time, which
+# is possible to inspect via the OBJECT FREQ command.
+#
+# There are two tunable parameters in the Redis LFU implementation: the
+# counter logarithm factor and the counter decay time. It is important to
+# understand what the two parameters mean before changing them.
+#
+# The LFU counter is just 8 bits per key, its maximum value is 255, so Redis
+# uses a probabilistic increment with logarithmic behavior. Given the value
+# of the old counter, when a key is accessed, the counter is incremented in
+# this way:
+#
+# 1. A random number R between 0 and 1 is extracted.
+# 2. A probability P is calculated as 1/(old_value*lfu_log_factor+1).
+# 3. The counter is incremented only if R < P.
+#
+# For example, with the default lfu-log-factor of 10, a key whose counter is
+# already at 100 is incremented with probability 1/(100*10+1) ~= 0.001, i.e.
+# roughly once every thousand further accesses.
+#
+# The default lfu-log-factor is 10. This is a table of how the frequency
+# counter changes with a different number of accesses with different
+# logarithmic factors:
+#
+# +--------+------------+------------+------------+------------+------------+
+# | factor | 100 hits   | 1000 hits  | 100K hits  | 1M hits    | 10M hits   |
+# +--------+------------+------------+------------+------------+------------+
+# | 0      | 104        | 255        | 255        | 255        | 255        |
+# +--------+------------+------------+------------+------------+------------+
+# | 1      | 18         | 49         | 255        | 255        | 255        |
+# +--------+------------+------------+------------+------------+------------+
+# | 10     | 10         | 18         | 142        | 255        | 255        |
+# +--------+------------+------------+------------+------------+------------+
+# | 100    | 8          | 11         | 49         | 143        | 255        |
+# +--------+------------+------------+------------+------------+------------+
+#
+# NOTE: The above table was obtained by running the following commands:
+#
+# redis-benchmark -n 1000000 incr foo
+# redis-cli object freq foo
+#
+# NOTE 2: The counter initial value is 5 in order to give new objects a chance
+# to accumulate hits.
+#
+# The counter decay time is the time, in minutes, that must elapse in order
+# for the key counter to be divided by two (or decremented if its value
+# is <= 10).
+#
+# The default value for the lfu-decay-time is 1. A special value of 0 means to
+# decay the counter every time it happens to be scanned.
+#
+# lfu-log-factor 10
+# lfu-decay-time 1
+
+################################## INCLUDES ###################################
+
+# Include one or more other config files here. This is useful if you
+# have a standard template that goes to all Redis servers but also need
+# to customize a few per-server settings. Include files can include
+# other files, so use this wisely.
+#
+# include /path/to/local.conf
+# include /path/to/other.conf
diff --git a/Redis-x64-5.0.14.1/redis.windows.conf b/Redis-x64-5.0.14.1/redis.windows.conf
new file mode 100644
index 0000000..8233249
--- /dev/null
+++ b/Redis-x64-5.0.14.1/redis.windows.conf
@@ -0,0 +1,1336 @@
+# Redis configuration file example
+
+# Note on units: when memory size is needed, it is possible to specify
+# it in the usual form of 1k 5GB 4M and so forth:
+#
+# 1k => 1000 bytes
+# 1kb => 1024 bytes
+# 1m => 1000000 bytes
+# 1mb => 1024*1024 bytes
+# 1g => 1000000000 bytes
+# 1gb => 1024*1024*1024 bytes
+#
+# units are case insensitive so 1GB 1Gb 1gB are all the same.
+
+################################## INCLUDES ###################################
+
+# Include one or more other config files here. This is useful if you
+# have a standard template that goes to all Redis servers but also need
+# to customize a few per-server settings. Include files can include
+# other files, so use this wisely.
+#
+# Notice option "include" won't be rewritten by command "CONFIG REWRITE"
+# from admin or Redis Sentinel. Since Redis always uses the last processed
+# line as value of a configuration directive, you'd better put includes
+# at the beginning of this file to avoid overwriting config changes at runtime.
+#
+# If instead you are interested in using includes to override configuration
+# options, it is better to use include as the last line.
+#
+# include .\path\to\local.conf
+# include c:\path\to\other.conf
+
+################################## MODULES #####################################
+
+# Load modules at startup. If the server is not able to load modules
+# it will abort. It is possible to use multiple loadmodule directives.
+#
+# loadmodule .\path\to\my_module.dll
+# loadmodule c:\path\to\other_module.dll
+
+################################## NETWORK #####################################
+
+# By default, if no "bind" configuration directive is specified, Redis listens
+# for connections from all the network interfaces available on the server.
+# It is possible to listen to just one or multiple selected interfaces using
+# the "bind" configuration directive, followed by one or more IP addresses.
+#
+# Examples:
+#
+# bind 192.168.1.100 10.0.0.1
+# bind 127.0.0.1 ::1
+#
+# ~~~ WARNING ~~~ If the computer running Redis is directly exposed to the
+# internet, binding to all the interfaces is dangerous and will expose the
+# instance to everybody on the internet. So by default we uncomment the
+# following bind directive, that will force Redis to listen only on
+# the IPv4 loopback interface address (this means Redis will be able to
+# accept connections only from clients running on the same computer it
+# is running on).
+#
+# IF YOU ARE SURE YOU WANT YOUR INSTANCE TO LISTEN TO ALL THE INTERFACES
+# JUST COMMENT THE FOLLOWING LINE.
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+bind 127.0.0.1
+
+# Protected mode is a layer of security protection, in order to avoid that
+# Redis instances left open on the internet are accessed and exploited.
+#
+# When protected mode is on and if:
+#
+# 1) The server is not binding explicitly to a set of addresses using the
+# "bind" directive.
+# 2) No password is configured.
+#
+# The server only accepts connections from clients connecting from the
+# IPv4 and IPv6 loopback addresses 127.0.0.1 and ::1, and from Unix domain
+# sockets.
+#
+# By default protected mode is enabled. You should disable it only if
+# you are sure you want clients from other hosts to connect to Redis
+# even if no authentication is configured, nor a specific set of interfaces
+# are explicitly listed using the "bind" directive.
+protected-mode yes
+
+# Accept connections on the specified port, default is 6379 (IANA #815344).
+# If port 0 is specified Redis will not listen on a TCP socket.
+port 6379
+
+# TCP listen() backlog.
+#
+# In high requests-per-second environments you need a high backlog in order
+# to avoid slow client connection issues. Note that the Linux kernel
+# will silently truncate it to the value of /proc/sys/net/core/somaxconn so
+# make sure to raise both the value of somaxconn and tcp_max_syn_backlog
+# in order to get the desired effect.
+tcp-backlog 511
+
+# Unix socket.
+#
+# Specify the path for the Unix socket that will be used to listen for
+# incoming connections. There is no default, so Redis will not listen
+# on a unix socket when not specified.
+#
+# unixsocket /tmp/redis.sock
+# unixsocketperm 700
+
+# Close the connection after a client is idle for N seconds (0 to disable)
+timeout 0
+
+# TCP keepalive.
+#
+# If non-zero, use SO_KEEPALIVE to send TCP ACKs to clients in absence
+# of communication. This is useful for two reasons:
+#
+# 1) Detect dead peers.
+# 2) Keep the connection alive from the point of view of network
+# equipment in the middle.
+#
+# On Linux, the specified value (in seconds) is the period used to send ACKs.
+# Note that to close the connection double the time is needed.
+# On other kernels the period depends on the kernel configuration.
+#
+# A reasonable value for this option is 300 seconds, which is the new
+# Redis default starting with Redis 3.2.1.
+tcp-keepalive 300
+
+################################# GENERAL #####################################
+
+# By default Redis does not run as a daemon. Use 'yes' if you need it.
+# Note that Redis will write a pid file in /var/run/redis.pid when daemonized.
+# NOT SUPPORTED ON WINDOWS daemonize no
+
+# If you run Redis from upstart or systemd, Redis can interact with your
+# supervision tree. Options:
+# supervised no - no supervision interaction
+# supervised upstart - signal upstart by putting Redis into SIGSTOP mode
+# supervised systemd - signal systemd by writing READY=1 to $NOTIFY_SOCKET
+# supervised auto - detect upstart or systemd method based on
+# UPSTART_JOB or NOTIFY_SOCKET environment variables
+# Note: these supervision methods only signal "process is ready."
+# They do not enable continuous liveness pings back to your supervisor.
+# NOT SUPPORTED ON WINDOWS supervised no
+
+# If a pid file is specified, Redis writes it where specified at startup
+# and removes it at exit.
+#
+# When the server runs non daemonized, no pid file is created if none is
+# specified in the configuration. When the server is daemonized, the pid file
+# is used even if not specified, defaulting to "/var/run/redis.pid".
+#
+# Creating a pid file is best effort: if Redis is not able to create it
+# nothing bad happens, the server will start and run normally.
+# NOT SUPPORTED ON WINDOWS pidfile /var/run/redis.pid
+
+# Specify the server verbosity level.
+# This can be one of:
+# debug (a lot of information, useful for development/testing)
+# verbose (many rarely useful info, but not a mess like the debug level)
+# notice (moderately verbose, what you want in production probably)
+# warning (only very important / critical messages are logged)
+loglevel notice
+
+# Specify the log file name. Also 'stdout' can be used to force
+# Redis to log on the standard output.
+logfile ""
+
+# To enable logging to the Windows EventLog, just set 'syslog-enabled' to
+# yes, and optionally update the other syslog parameters to suit your needs.
+# If Redis is installed and launched as a Windows Service, this will
+# automatically be enabled.
+# syslog-enabled no
+
+# Specify the source name of the events in the Windows Application log.
+# syslog-ident redis
+
+# Specify the syslog facility. Must be USER or between LOCAL0-LOCAL7.
+# NOT SUPPORTED ON WINDOWS syslog-facility local0
+
+# Set the number of databases. The default database is DB 0, you can select
+# a different one on a per-connection basis using SELECT <dbid> where
+# dbid is a number between 0 and 'databases'-1
+databases 16
+
+# By default Redis shows an ASCII art logo only when started to log to the
+# standard output and if the standard output is a TTY. Basically this means
+# that normally a logo is displayed only in interactive sessions.
+#
+# However it is possible to force the pre-4.0 behavior and always show an
+# ASCII art logo in startup logs by setting the following option to yes.
+always-show-logo yes
+
+################################ SNAPSHOTTING ################################
+#
+# Save the DB on disk:
+#
+# save <seconds> <changes>
+#
+# Will save the DB if both the given number of seconds and the given
+# number of write operations against the DB occurred.
+#
+# In the example below the behaviour will be to save:
+# after 900 sec (15 min) if at least 1 key changed
+# after 300 sec (5 min) if at least 10 keys changed
+# after 60 sec if at least 10000 keys changed
+#
+# Note: you can disable saving completely by commenting out all "save" lines.
+#
+# It is also possible to remove all the previously configured save
+# points by adding a save directive with a single empty string argument
+# like in the following example:
+#
+# save ""
+
+save 900 1
+save 300 10
+save 60 10000
+
+# By default Redis will stop accepting writes if RDB snapshots are enabled
+# (at least one save point) and the latest background save failed.
+# This will make the user aware (in a hard way) that data is not persisting
+# on disk properly, otherwise chances are that no one will notice and some
+# disaster will happen.
+#
+# If the background saving process starts working again Redis will
+# automatically allow writes again.
+#
+# However if you have set up proper monitoring of the Redis server
+# and persistence, you may want to disable this feature so that Redis will
+# continue to work as usual even if there are problems with disk,
+# permissions, and so forth.
+stop-writes-on-bgsave-error yes
+
+# Compress string objects using LZF when dumping .rdb databases?
+# By default that's set to 'yes' as it's almost always a win.
+# If you want to save some CPU in the saving child set it to 'no' but
+# the dataset will likely be bigger if you have compressible values or keys.
+rdbcompression yes
+
+# Since version 5 of RDB a CRC64 checksum is placed at the end of the file.
+# This makes the format more resistant to corruption but there is a performance
+# hit to pay (around 10%) when saving and loading RDB files, so you can disable it
+# for maximum performance.
+#
+# RDB files created with checksum disabled have a checksum of zero that will
+# tell the loading code to skip the check.
+rdbchecksum yes
+
+# The filename where the DB will be dumped
+dbfilename dump.rdb
+
+# The working directory.
+#
+# The DB will be written inside this directory, with the filename specified
+# above using the 'dbfilename' configuration directive.
+#
+# The Append Only File will also be created inside this directory.
+#
+# Note that you must specify a directory here, not a file name.
+dir ./
+
+################################# REPLICATION #################################
+
+# Master-Replica replication. Use replicaof to make a Redis instance a copy of
+# another Redis server. A few things to understand ASAP about Redis replication.
+#
+#   +------------------+      +---------------+
+#   |      Master      | ---> |    Replica    |
+#   | (receive writes) |      |  (exact copy) |
+#   +------------------+      +---------------+
+#
+# 1) Redis replication is asynchronous, but you can configure a master to
+# stop accepting writes if it appears to be not connected with at least
+# a given number of replicas.
+# 2) Redis replicas are able to perform a partial resynchronization with the
+# master if the replication link is lost for a relatively small amount of
+# time. You may want to configure the replication backlog size (see the next
+# sections of this file) with a sensible value depending on your needs.
+# 3) Replication is automatic and does not need user intervention. After a
+# network partition replicas automatically try to reconnect to masters
+# and resynchronize with them.
+#
+# replicaof <masterip> <masterport>
+
+# If the master is password protected (using the "requirepass" configuration
+# directive below) it is possible to tell the replica to authenticate before
+# starting the replication synchronization process, otherwise the master will
+# refuse the replica request.
+#
+# masterauth <master-password>
+
+# When a replica loses its connection with the master, or when the replication
+# is still in progress, the replica can act in two different ways:
+#
+# 1) if replica-serve-stale-data is set to 'yes' (the default) the replica will
+# still reply to client requests, possibly with out of date data, or the
+# data set may just be empty if this is the first synchronization.
+#
+# 2) if replica-serve-stale-data is set to 'no' the replica will reply with
+# an error "SYNC with master in progress" to all kinds of commands
+# except INFO, REPLICAOF, AUTH, PING, SHUTDOWN, REPLCONF, ROLE, CONFIG,
+# SUBSCRIBE, UNSUBSCRIBE, PSUBSCRIBE, PUNSUBSCRIBE, PUBLISH, PUBSUB,
+# COMMAND, POST, HOST: and LATENCY.
+#
+replica-serve-stale-data yes
+
+# You can configure a replica instance to accept writes or not. Writing against
+# a replica instance may be useful to store some ephemeral data (because data
+# written on a replica will be easily deleted after resync with the master) but
+# may also cause problems if clients are writing to it because of a
+# misconfiguration.
+#
+# Since Redis 2.6 by default replicas are read-only.
+#
+# Note: read only replicas are not designed to be exposed to untrusted clients
+# on the internet. It's just a protection layer against misuse of the instance.
+# Still a read only replica exports by default all the administrative commands
+# such as CONFIG, DEBUG, and so forth. To a limited extent you can improve
+# security of read only replicas using 'rename-command' to shadow all the
+# administrative / dangerous commands.
+replica-read-only yes
+
+# Replication SYNC strategy: disk or socket.
+#
+# -------------------------------------------------------
+# WARNING: DISKLESS REPLICATION IS EXPERIMENTAL CURRENTLY
+# -------------------------------------------------------
+#
+# New replicas and reconnecting replicas that are not able to continue the replication
+# process just receiving differences, need to do what is called a "full
+# synchronization". An RDB file is transmitted from the master to the replicas.
+# The transmission can happen in two different ways:
+#
+# 1) Disk-backed: The Redis master creates a new process that writes the RDB
+# file on disk. Later the file is transferred by the parent
+# process to the replicas incrementally.
+# 2) Diskless: The Redis master creates a new process that directly writes the
+# RDB file to replica sockets, without touching the disk at all.
+#
+# With disk-backed replication, while the RDB file is generated, more replicas
+# can be queued and served with the RDB file as soon as the current child producing
+# the RDB file finishes its work. With diskless replication instead once
+# the transfer starts, new replicas arriving will be queued and a new transfer
+# will start when the current one terminates.
+#
+# When diskless replication is used, the master waits a configurable amount of
+# time (in seconds) before starting the transfer in the hope that multiple replicas
+# will arrive and the transfer can be parallelized.
+#
+# With slow disks and fast (large bandwidth) networks, diskless replication
+# works better.
+repl-diskless-sync no
+
+# When diskless replication is enabled, it is possible to configure the delay
+# the server waits in order to spawn the child that transfers the RDB via socket
+# to the replicas.
+#
+# This is important since once the transfer starts, it is not possible to serve
+# new replicas arriving, that will be queued for the next RDB transfer, so the server
+# waits a delay in order to let more replicas arrive.
+#
+# The delay is specified in seconds, and by default is 5 seconds. To disable
+# it entirely just set it to 0 seconds and the transfer will start ASAP.
+repl-diskless-sync-delay 5
+
+# Replicas send PINGs to the server at a predefined interval. It's possible to
+# change this interval with the repl_ping_replica_period option. The default
+# value is 10 seconds.
+#
+# repl-ping-replica-period 10
+
+# The following option sets the replication timeout for:
+#
+# 1) Bulk transfer I/O during SYNC, from the point of view of replica.
+# 2) Master timeout from the point of view of replicas (data, pings).
+# 3) Replica timeout from the point of view of masters (REPLCONF ACK pings).
+#
+# It is important to make sure that this value is greater than the value
+# specified for repl-ping-replica-period otherwise a timeout will be detected
+# every time there is low traffic between the master and the replica.
+#
+# repl-timeout 60
+
+# Disable TCP_NODELAY on the replica socket after SYNC?
+#
+# If you select "yes" Redis will use a smaller number of TCP packets and
+# less bandwidth to send data to replicas. But this can add a delay for
+# the data to appear on the replica side, up to 40 milliseconds with
+# Linux kernels using a default configuration.
+#
+# If you select "no" the delay for data to appear on the replica side will
+# be reduced but more bandwidth will be used for replication.
+#
+# By default we optimize for low latency, but in very high traffic conditions
+# or when the master and replicas are many hops away, turning this to "yes" may
+# be a good idea.
+repl-disable-tcp-nodelay no
+
+# Set the replication backlog size. The backlog is a buffer that accumulates
+# replica data when replicas are disconnected for some time, so that when a replica
+# wants to reconnect again, often a full resync is not needed, but a partial
+# resync is enough, just passing the portion of data the replica missed while
+# disconnected.
+#
+# The bigger the replication backlog, the longer the time the replica can be
+# disconnected and later be able to perform a partial resynchronization.
+#
+# The backlog is only allocated once there is at least a replica connected.
+#
+# repl-backlog-size 1mb
+
+# After a master has had no connected replicas for some time, the backlog
+# will be freed. The following option configures the amount of seconds that
+# need to elapse, starting from the time the last replica disconnected, for
+# the backlog buffer to be freed.
+#
+# Note that replicas never free the backlog for timeout, since they may be
+# promoted to masters later, and should be able to correctly "partially
+# resynchronize" with the replicas: hence they should always accumulate backlog.
+#
+# A value of 0 means to never release the backlog.
+#
+# repl-backlog-ttl 3600
+
+# The replica priority is an integer number published by Redis in the INFO output.
+# It is used by Redis Sentinel in order to select a replica to promote into a
+# master if the master is no longer working correctly.
+# +# A replica with a low priority number is considered better for promotion, so +# for instance if there are three replicas with priority 10, 100, 25 Sentinel will +# pick the one with priority 10, that is the lowest. +# +# However a special priority of 0 marks the replica as not able to perform the +# role of master, so a replica with priority of 0 will never be selected by +# Redis Sentinel for promotion. +# +# By default the priority is 100. +replica-priority 100 + +# It is possible for a master to stop accepting writes if there are less than +# N replicas connected, having a lag less or equal than M seconds. +# +# The N replicas need to be in "online" state. +# +# The lag in seconds, that must be <= the specified value, is calculated from +# the last ping received from the replica, that is usually sent every second. +# +# This option does not GUARANTEE that N replicas will accept the write, but +# will limit the window of exposure for lost writes in case not enough replicas +# are available, to the specified number of seconds. +# +# For example to require at least 3 replicas with a lag <= 10 seconds use: +# +# min-replicas-to-write 3 +# min-replicas-max-lag 10 +# +# Setting one or the other to 0 disables the feature. +# +# By default min-replicas-to-write is set to 0 (feature disabled) and +# min-replicas-max-lag is set to 10. + +# A Redis master is able to list the address and port of the attached +# replicas in different ways. For example the "INFO replication" section +# offers this information, which is used, among other tools, by +# Redis Sentinel in order to discover replica instances. +# Another place where this info is available is in the output of the +# "ROLE" command of a master. +# +# The listed IP and address normally reported by a replica is obtained +# in the following way: +# +# IP: The address is auto detected by checking the peer address +# of the socket used by the replica to connect with the master. +# +# Port: The port is communicated by the replica during the replication +# handshake, and is normally the port that the replica is using to +# listen for connections. +# +# However when port forwarding or Network Address Translation (NAT) is +# used, the replica may be actually reachable via different IP and port +# pairs. The following two options can be used by a replica in order to +# report to its master a specific set of IP and port, so that both INFO +# and ROLE will report those values. +# +# There is no need to use both the options if you need to override just +# the port or the IP address. +# +# replica-announce-ip 5.5.5.5 +# replica-announce-port 1234 + +################################## SECURITY ################################### + +# Require clients to issue AUTH before processing any other +# commands. This might be useful in environments in which you do not trust +# others with access to the host running redis-server. +# +# This should stay commented out for backward compatibility and because most +# people do not need auth (e.g. they run their own servers). +# +# Warning: since Redis is pretty fast an outside user can try up to +# 150k passwords per second against a good box. This means that you should +# use a very strong password otherwise it will be very easy to break. +# +# requirepass foobared + +# Command renaming. +# +# It is possible to change the name of dangerous commands in a shared +# environment. 
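
As a quick aside on requirepass above: from redis-py, authentication is just the `password` argument. A sketch, assuming requirepass is set on the server (the password here is a placeholder):

```python
import redis

# Placeholder credentials; substitute your own requirepass value.
r = redis.Redis(host='localhost', port=6379,
                password='your-strong-password', decode_responses=True)
try:
    r.ping()
    print('authenticated')
except redis.AuthenticationError:
    print('wrong or missing password')
```

Back to command renaming:
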
For instance the CONFIG command may be renamed into something +# hard to guess so that it will still be available for internal-use tools +# but not available for general clients. +# +# Example: +# +# rename-command CONFIG b840fc02d524045429941cc15f59e41cb7be6c52 +# +# It is also possible to completely kill a command by renaming it into +# an empty string: +# +# rename-command CONFIG "" +# +# Please note that changing the name of commands that are logged into the +# AOF file or transmitted to replicas may cause problems. + +################################### CLIENTS #################################### + +# Set the max number of connected clients at the same time. By default +# this limit is set to 10000 clients, however if the Redis server is not +# able to configure the process file limit to allow for the specified limit +# the max number of allowed clients is set to the current file limit +# minus 32 (as Redis reserves a few file descriptors for internal uses). +# +# Once the limit is reached Redis will close all the new connections sending +# an error 'max number of clients reached'. +# +# maxclients 10000 + +############################## MEMORY MANAGEMENT ################################ + +# If Redis is to be used as an in-memory-only cache without any kind of +# persistence, then the fork() mechanism used by the background AOF/RDB +# persistence is unnecessary. As an optimization, all persistence can be +# turned off in the Windows version of Redis. This will redirect heap +# allocations to the system heap allocator, and disable commands that would +# otherwise cause fork() operations: BGSAVE and BGREWRITEAOF. +# This flag may not be combined with any of the other flags that configure +# AOF and RDB operations. +# persistence-available [(yes)|no] + +# Set a memory usage limit to the specified amount of bytes. +# When the memory limit is reached Redis will try to remove keys +# according to the eviction policy selected (see maxmemory-policy). +# +# If Redis can't remove keys according to the policy, or if the policy is +# set to 'noeviction', Redis will start to reply with errors to commands +# that would use more memory, like SET, LPUSH, and so on, and will continue +# to reply to read-only commands like GET. +# +# This option is usually useful when using Redis as an LRU or LFU cache, or to +# set a hard memory limit for an instance (using the 'noeviction' policy). +# +# WARNING: If you have replicas attached to an instance with maxmemory on, +# the size of the output buffers needed to feed the replicas are subtracted +# from the used memory count, so that network problems / resyncs will +# not trigger a loop where keys are evicted, and in turn the output +# buffer of replicas is full with DELs of keys evicted triggering the deletion +# of more keys, and so forth until the database is completely emptied. +# +# In short... if you have replicas attached it is suggested that you set a lower +# limit for maxmemory so that there is some free RAM on the system for replica +# output buffers (but this is not needed if the policy is 'noeviction'). +# +# WARNING: not setting maxmemory will cause Redis to terminate with an +# out-of-memory exception if the heap limit is reached. +# +# NOTE: since Redis uses the system paging file to allocate the heap memory, +# the Working Set memory usage showed by the Windows Task Manager or by other +# tools such as ProcessExplorer will not always be accurate. 
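
Two of the figures discussed above can be checked from any client. A sketch, assuming a local instance:

```python
import redis

r = redis.Redis(host='localhost', port=6379, decode_responses=True)

# Effective client limit (after the file-descriptor adjustment noted above).
print(r.config_get('maxclients'))
print(r.info('clients')['connected_clients'])

# used_memory is the figure the note above recommends over OS working-set stats.
print(r.info('memory')['used_memory_human'])
```

Back to the Windows working-set caveat:
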
For example, right after a background save of the RDB or the AOF files, the
+# working set value may drop significantly. In order to check the correct
+# amount of memory used by the redis-server to store the data, use the INFO
+# command. The INFO command shows only the memory used to store the redis
+# data, not the extra memory used by the Windows process for its own
+# requirements. The extra amount of memory not reported by the INFO command
+# can be calculated by subtracting the used_memory_peak reported by the INFO
+# command from the Peak Working Set reported by the Windows Task Manager.
+#
+# maxmemory <bytes>

+# MAXMEMORY POLICY: how Redis will select what to remove when maxmemory
+# is reached. You can select among the following behaviors:
+#
+# volatile-lru -> Evict using approximated LRU among the keys with an expire set.
+# allkeys-lru -> Evict any key using approximated LRU.
+# volatile-lfu -> Evict using approximated LFU among the keys with an expire set.
+# allkeys-lfu -> Evict any key using approximated LFU.
+# volatile-random -> Remove a random key among the ones with an expire set.
+# allkeys-random -> Remove a random key, any key.
+# volatile-ttl -> Remove the key with the nearest expire time (shortest TTL)
+# noeviction -> Don't evict anything, just return an error on write operations.
+#
+# LRU means Least Recently Used
+# LFU means Least Frequently Used
+#
+# LRU, LFU and volatile-ttl are all implemented using approximated
+# randomized algorithms.
+#
+# Note: with any of the above policies, Redis will return an error on write
+# operations, when there are no suitable keys for eviction.
+#
+# At the date of writing these commands are: set setnx setex append
+# incr decr rpush lpush rpushx lpushx linsert lset rpoplpush sadd
+# sinter sinterstore sunion sunionstore sdiff sdiffstore zadd zincrby
+# zunionstore zinterstore hset hsetnx hmset hincrby incrby decrby
+# getset mset msetnx exec sort
+#
+# The default is:
+#
+# maxmemory-policy noeviction

+# LRU, LFU and minimal TTL algorithms are not precise algorithms but
+# approximated algorithms (in order to save memory), so you can tune them for
+# speed or accuracy. By default Redis will check five keys and pick the one
+# that was used least recently; you can change the sample size using the
+# following configuration directive.
+#
+# The default of 5 produces good enough results. 10 approximates true LRU
+# very closely but costs more CPU. 3 is faster but not very accurate.
+#
+# maxmemory-samples 5

+# Starting from Redis 5, by default a replica will ignore its maxmemory setting
+# (unless it is promoted to master after a failover or manually). It means
+# that the eviction of keys will be handled only by the master, which sends
+# the DEL commands to the replica as keys evict on the master side.
+#
+# This behavior ensures that masters and replicas stay consistent, and is
+# usually what you want. However, if your replica is writable, or you want the
+# replica to have a different memory setting, and you are sure all the writes
+# performed to the replica are idempotent, then you may change this default
+# (but be sure to understand what you are doing).
+#
+# Note that since the replica by default does not evict, it may end up using
+# more memory than the one set via maxmemory (there are certain buffers that
+# may be larger on the replica, or data structures may sometimes take more
+# memory and so forth).
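
One way to keep an eye on exactly this (the replica addresses are placeholders, a monitoring sketch rather than a definitive check):

```python
import redis

# Placeholder replica addresses; substitute your own deployment.
replicas = [('10.0.0.11', 6379), ('10.0.0.12', 6379)]

for host, port in replicas:
    r = redis.Redis(host=host, port=port, decode_responses=True)
    used = int(r.info('memory')['used_memory'])
    cap = int(r.config_get('maxmemory')['maxmemory'])
    if cap and used > 0.9 * cap:
        print(f'{host}: {used} bytes used, within 10% of maxmemory {cap}')
```
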
So make sure you monitor your replicas and make sure they have enough +# memory to never hit a real out-of-memory condition before the master hits +# the configured maxmemory setting. +# +# replica-ignore-maxmemory yes + +############################# LAZY FREEING #################################### + +# Redis has two primitives to delete keys. One is called DEL and is a blocking +# deletion of the object. It means that the server stops processing new commands +# in order to reclaim all the memory associated with an object in a synchronous +# way. If the key deleted is associated with a small object, the time needed +# in order to execute the DEL command is very small and comparable to most other +# O(1) or O(log_N) commands in Redis. However if the key is associated with an +# aggregated value containing millions of elements, the server can block for +# a long time (even seconds) in order to complete the operation. +# +# For the above reasons Redis also offers non blocking deletion primitives +# such as UNLINK (non blocking DEL) and the ASYNC option of FLUSHALL and +# FLUSHDB commands, in order to reclaim memory in background. Those commands +# are executed in constant time. Another thread will incrementally free the +# object in the background as fast as possible. +# +# DEL, UNLINK and ASYNC option of FLUSHALL and FLUSHDB are user-controlled. +# It's up to the design of the application to understand when it is a good +# idea to use one or the other. However the Redis server sometimes has to +# delete keys or flush the whole database as a side effect of other operations. +# Specifically Redis deletes objects independently of a user call in the +# following scenarios: +# +# 1) On eviction, because of the maxmemory and maxmemory policy configurations, +# in order to make room for new data, without going over the specified +# memory limit. +# 2) Because of expire: when a key with an associated time to live (see the +# EXPIRE command) must be deleted from memory. +# 3) Because of a side effect of a command that stores data on a key that may +# already exist. For example the RENAME command may delete the old key +# content when it is replaced with another one. Similarly SUNIONSTORE +# or SORT with STORE option may delete existing keys. The SET command +# itself removes any old content of the specified key in order to replace +# it with the specified string. +# 4) During replication, when a replica performs a full resynchronization with +# its master, the content of the whole database is removed in order to +# load the RDB file just transferred. +# +# In all the above cases the default is to delete objects in a blocking way, +# like if DEL was called. However you can configure each case specifically +# in order to instead release memory in a non-blocking way like if UNLINK +# was called, using the following configuration directives: + +lazyfree-lazy-eviction no +lazyfree-lazy-expire no +lazyfree-lazy-server-del no +replica-lazy-flush no + +############################## APPEND ONLY MODE ############################### + +# By default Redis asynchronously dumps the dataset on disk. This mode is +# good enough in many applications, but an issue with the Redis process or +# a power outage may result into a few minutes of writes lost (depending on +# the configured save points). +# +# The Append Only File is an alternative persistence mode that provides +# much better durability. 
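
Before the AOF details, the difference between the blocking and lazy primitives above is easy to feel from a client. A sketch against a scratch instance (do not run this on real data):

```python
import time

import redis

r = redis.Redis(host='localhost', port=6379, decode_responses=True)

# Build a large hash so the freeing cost is visible.
r.delete('big')
for start in range(0, 200_000, 10_000):
    r.hset('big', mapping={f'f{i}': 'x' for i in range(start, start + 10_000)})

t = time.perf_counter()
r.unlink('big')                 # returns quickly; a background thread
print(time.perf_counter() - t)  # frees the object incrementally
```

Back to the append-only file:
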
For instance using the default data fsync policy +# (see later in the config file) Redis can lose just one second of writes in a +# dramatic event like a server power outage, or a single write if something +# wrong with the Redis process itself happens, but the operating system is +# still running correctly. +# +# AOF and RDB persistence can be enabled at the same time without problems. +# If the AOF is enabled on startup Redis will load the AOF, that is the file +# with the better durability guarantees. +# +# Please check http://redis.io/topics/persistence for more information. + +appendonly no + +# The name of the append only file (default: "appendonly.aof") + +appendfilename "appendonly.aof" + +# The fsync() call tells the Operating System to actually write data on disk +# instead of waiting for more data in the output buffer. Some OS will really flush +# data on disk, some other OS will just try to do it ASAP. +# +# Redis supports three different modes: +# +# no: don't fsync, just let the OS flush the data when it wants. Faster. +# always: fsync after every write to the append only log. Slow, Safest. +# everysec: fsync only one time every second. Compromise. +# +# The default is "everysec", as that's usually the right compromise between +# speed and data safety. It's up to you to understand if you can relax this to +# "no" that will let the operating system flush the output buffer when +# it wants, for better performances (but if you can live with the idea of +# some data loss consider the default persistence mode that's snapshotting), +# or on the contrary, use "always" that's very slow but a bit safer than +# everysec. +# +# More details please check the following article: +# http://antirez.com/post/redis-persistence-demystified.html +# +# If unsure, use "everysec". + +# appendfsync always +appendfsync everysec +# appendfsync no + +# When the AOF fsync policy is set to always or everysec, and a background +# saving process (a background save or AOF log background rewriting) is +# performing a lot of I/O against the disk, in some Linux configurations +# Redis may block too long on the fsync() call. Note that there is no fix for +# this currently, as even performing fsync in a different thread will block +# our synchronous write(2) call. +# +# In order to mitigate this problem it's possible to use the following option +# that will prevent fsync() from being called in the main process while a +# BGSAVE or BGREWRITEAOF is in progress. +# +# This means that while another child is saving, the durability of Redis is +# the same as "appendfsync none". In practical terms, this means that it is +# possible to lose up to 30 seconds of log in the worst scenario (with the +# default Linux settings). +# +# If you have latency problems turn this to "yes". Otherwise leave it as +# "no" that is the safest pick from the point of view of durability. + +no-appendfsync-on-rewrite no + +# Automatic rewrite of the append only file. +# Redis is able to automatically rewrite the log file implicitly calling +# BGREWRITEAOF when the AOF log size grows by the specified percentage. +# +# This is how it works: Redis remembers the size of the AOF file after the +# latest rewrite (if no rewrite has happened since the restart, the size of +# the AOF at startup is used). +# +# This base size is compared to the current size. If the current size is +# bigger than the specified percentage, the rewrite is triggered. 
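
A sketch of the growth check described so far (the min-size guard comes next):

```python
def aof_rewrite_due(current_size: int, base_size: int, pct: int = 100) -> bool:
    """Mirror of the percentage trigger: True once the AOF has grown by
    auto-aof-rewrite-percentage over the size recorded after the last
    rewrite (or at startup). The min-size guard discussed below is omitted."""
    if pct == 0:                 # a percentage of zero disables rewrites
        return False
    return current_size > base_size * (1 + pct / 100)

# 130 MB current vs. 64 MB base with the default 100%: 130 > 128, so rewrite.
print(aof_rewrite_due(130 * 1024**2, 64 * 1024**2))   # True
```
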
Also,
+# you need to specify a minimal size for the AOF file to be rewritten; this
+# is useful to avoid rewriting the AOF file when the percentage increase
+# is reached but the file is still quite small.
+#
+# Specify a percentage of zero in order to disable the automatic AOF
+# rewrite feature.

+auto-aof-rewrite-percentage 100
+auto-aof-rewrite-min-size 64mb

+# An AOF file may be found to be truncated at the end during the Redis
+# startup process, when the AOF data gets loaded back into memory.
+# This may happen when the system where Redis is running
+# crashes, especially when an ext4 filesystem is mounted without the
+# data=ordered option (however this can't happen when Redis itself
+# crashes or aborts but the operating system still works correctly).
+#
+# Redis can either exit with an error when this happens, or load as much
+# data as possible (the default now) and start if the AOF file is found
+# to be truncated at the end. The following option controls this behavior.
+#
+# If aof-load-truncated is set to yes, a truncated AOF file is loaded and
+# the Redis server starts emitting a log to inform the user of the event.
+# Otherwise if the option is set to no, the server aborts with an error
+# and refuses to start. When the option is set to no, the user is required
+# to fix the AOF file using the "redis-check-aof" utility before restarting
+# the server.
+#
+# Note that if the AOF file is found to be corrupted in the middle, the
+# server will still exit with an error. This option only applies when
+# Redis tries to read more data from the AOF file but not enough bytes
+# are found.
+aof-load-truncated yes

+# When rewriting the AOF file, Redis is able to use an RDB preamble in the
+# AOF file for faster rewrites and recoveries. When this option is turned
+# on the rewritten AOF file is composed of two different stanzas:
+#
+# [RDB file][AOF tail]
+#
+# When loading, Redis recognizes that the AOF file starts with the "REDIS"
+# string, loads the prefixed RDB file, and continues loading the AOF
+# tail.
+aof-use-rdb-preamble yes

+################################ LUA SCRIPTING ###############################

+# Max execution time of a Lua script in milliseconds.
+#
+# If the maximum execution time is reached Redis will log that a script is
+# still in execution after the maximum allowed time and will start to
+# reply to queries with an error.
+#
+# When a long running script exceeds the maximum execution time only the
+# SCRIPT KILL and SHUTDOWN NOSAVE commands are available. The first can be
+# used to stop a script that has not yet called any write commands. The second
+# is the only way to shut down the server in the case a write command was
+# already issued by the script but the user doesn't want to wait for the
+# natural termination of the script.
+#
+# Set it to 0 or a negative value for unlimited execution without warnings.
+lua-time-limit 5000

+################################ REDIS CLUSTER ###############################

+# Normal Redis instances can't be part of a Redis Cluster; only nodes that are
+# started as cluster nodes can. In order to start a Redis instance as a
+# cluster node enable the cluster support by uncommenting the following:
+#
+# cluster-enabled yes

+# Every cluster node has a cluster configuration file. This file is not
+# intended to be edited by hand. It is created and updated by Redis nodes.
+# Every Redis Cluster node requires a different cluster configuration file.
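
Stepping back to lua-time-limit for a second: once a script overruns it, normal commands fail with a BUSY error until the script is stopped. A hedged redis-py sketch of the recovery side:

```python
import redis

r = redis.Redis(host='localhost', port=6379, decode_responses=True)

try:
    r.ping()
except redis.exceptions.ResponseError as e:
    # 'BUSY' means some client's script has run past lua-time-limit.
    if 'BUSY' in str(e):
        r.script_kill()   # only works while the script has not written yet
```

Cluster configuration files, continued:
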
+#
+# Make sure that instances running in the same system do not have
+# overlapping cluster configuration file names.
+#
+# cluster-config-file nodes-6379.conf

+# Cluster node timeout is the amount of milliseconds a node must be
+# unreachable for it to be considered in a failure state.
+# Most other internal time limits are multiples of the node timeout.
+#
+# cluster-node-timeout 15000

+# A replica of a failing master will avoid starting a failover if its data
+# looks too old.
+#
+# There is no simple way for a replica to actually have an exact measure of
+# its "data age", so the following two checks are performed:
+#
+# 1) If there are multiple replicas able to failover, they exchange messages
+#    in order to try to give an advantage to the replica with the best
+#    replication offset (more data from the master processed).
+#    Replicas will try to get their rank by offset, and apply to the start
+#    of the failover a delay proportional to their rank.
+#
+# 2) Every single replica computes the time of the last interaction with
+#    its master. This can be the last ping or command received (if the master
+#    is still in the "connected" state), or the time that elapsed since the
+#    disconnection with the master (if the replication link is currently
+#    down). If the last interaction is too old, the replica will not try to
+#    failover at all.
+#
+# Point "2" can be tuned by the user. Specifically, a replica will not
+# perform the failover if, since the last interaction with the master, the
+# time elapsed is greater than:
+#
+#   (node-timeout * replica-validity-factor) + repl-ping-replica-period
+#
+# So for example if node-timeout is 30 seconds, and the
+# replica-validity-factor is 10, and assuming a default
+# repl-ping-replica-period of 10 seconds, the replica will not try to
+# failover if it was not able to talk with the master for longer than
+# 310 seconds.
+#
+# A large replica-validity-factor may allow replicas with too old data to
+# fail over a master, while a too small value may prevent the cluster from
+# being able to elect a replica at all.
+#
+# For maximum availability, it is possible to set the replica-validity-factor
+# to a value of 0, which means that replicas will always try to failover the
+# master regardless of the last time they interacted with the master.
+# (However they'll always try to apply a delay proportional to their
+# offset rank).
+#
+# Zero is the only value able to guarantee that when all the partitions heal
+# the cluster will always be able to continue.
+#
+# cluster-replica-validity-factor 10

+# Cluster replicas are able to migrate to orphaned masters, i.e. masters
+# that are left without working replicas. This improves the cluster's
+# ability to resist failures, as otherwise an orphaned master can't be
+# failed over in case of failure if it has no working replicas.
+#
+# Replicas migrate to orphaned masters only if there are still at least a
+# given number of other working replicas for their old master. This number
+# is the "migration barrier". A migration barrier of 1 means that a replica
+# will migrate only if there is at least 1 other working replica for its
+# master and so forth. It usually reflects the number of replicas you want
+# for every master in your cluster.
+#
+# The default is 1 (replicas migrate only if their masters remain with at
+# least one replica). To disable migration just set it to a very large value.
+# A value of 0 can be set but is useful only for debugging and dangerous
+# in production.
+#
+# cluster-migration-barrier 1

+# By default Redis Cluster nodes stop accepting queries if they detect there
+# is at least one hash slot uncovered (no available node is serving it).
+# This way if the cluster is partially down (for example a range of hash
+# slots is no longer covered) the whole cluster eventually becomes
+# unavailable. It automatically becomes available again as soon as all the
+# slots are covered.
+#
+# However sometimes you want the subset of the cluster which is working
+# to continue to accept queries for the part of the key space that is still
+# covered. In order to do so, just set the cluster-require-full-coverage
+# option to no.
+#
+# cluster-require-full-coverage yes

+# This option, when set to yes, prevents replicas from trying to fail over
+# their master during master failures. However the master can still perform
+# a manual failover, if forced to do so.
+#
+# This is useful in different scenarios, especially in the case of multiple
+# data center operations, where we want one side never to be promoted except
+# in the case of a total DC failure.
+#
+# cluster-replica-no-failover no

+# In order to set up your cluster make sure to read the documentation
+# available at the http://redis.io web site.

+########################## CLUSTER DOCKER/NAT support ########################

+# In certain deployments, Redis Cluster nodes address discovery fails,
+# because addresses are NAT-ted or because ports are forwarded (the typical
+# case is Docker and other containers).
+#
+# In order to make Redis Cluster work in such environments, a static
+# configuration where each node knows its public address is needed. The
+# following options are used for this scope, and are:
+#
+# * cluster-announce-ip
+# * cluster-announce-port
+# * cluster-announce-bus-port
+#
+# Each instructs the node about its address, client port, and cluster message
+# bus port. The information is then published in the header of the bus
+# packets so that other nodes will be able to correctly map the address of
+# the node publishing the information.
+#
+# If the above options are not used, the normal Redis Cluster auto-detection
+# will be used instead.
+#
+# Note that when remapped, the bus port may not be at the fixed offset of
+# client port + 10000, so you can specify any port and bus-port depending
+# on how they get remapped. If the bus-port is not set, a fixed offset of
+# 10000 will be used as usual.
+#
+# Example:
+#
+# cluster-announce-ip 10.1.1.5
+# cluster-announce-port 6379
+# cluster-announce-bus-port 6380

+################################## SLOW LOG ###################################

+# The Redis Slow Log is a system to log queries that exceeded a specified
+# execution time. The execution time does not include the I/O operations
+# like talking with the client, sending the reply and so forth,
+# but just the time needed to actually execute the command (this is the only
+# stage of command execution where the thread is blocked and can not serve
+# other requests in the meantime).
+#
+# You can configure the slow log with two parameters: one tells Redis
+# what is the execution time, in microseconds, to exceed in order for the
+# command to get logged, and the other parameter is the length of the
+# slow log. When a new command is logged the oldest one is removed from the
+# queue of logged commands.

+# The following time is expressed in microseconds, so 1000000 is equivalent
+# to one second.
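
Once the two directives below are set, the log can be read and cleared from any client. A sketch, assuming a local instance:

```python
import redis

r = redis.Redis(host='localhost', port=6379, decode_responses=True)

# redis-py parses each entry into a dict: id, start_time, duration (in
# microseconds, matching the threshold's unit) and the command itself.
for entry in r.slowlog_get(10):
    print(entry['id'], entry['duration'], entry['command'])

r.slowlog_reset()   # drops the entries and reclaims their memory
```
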
Note that a negative number disables the slow log, while a value of zero
+# forces the logging of every command.
+slowlog-log-slower-than 10000

+# There is no limit to this length. Just be aware that it will consume
+# memory. You can reclaim memory used by the slow log with SLOWLOG RESET.
+slowlog-max-len 128

+################################ LATENCY MONITOR ##############################

+# The Redis latency monitoring subsystem samples different operations
+# at runtime in order to collect data related to possible sources of
+# latency of a Redis instance.
+#
+# Via the LATENCY command this information is available to the user that can
+# print graphs and obtain reports.
+#
+# The system only logs operations that were performed in a time equal to or
+# greater than the amount of milliseconds specified via the
+# latency-monitor-threshold configuration directive. When its value is set
+# to zero, the latency monitor is turned off.
+#
+# By default latency monitoring is disabled since it is mostly not needed
+# if you don't have latency issues, and collecting data has a performance
+# impact that, while very small, can be measured under heavy load. Latency
+# monitoring can easily be enabled at runtime using the command
+# "CONFIG SET latency-monitor-threshold <milliseconds>" if needed.
+latency-monitor-threshold 0

+############################# EVENT NOTIFICATION ##############################

+# Redis can notify Pub/Sub clients about events happening in the key space.
+# This feature is documented at http://redis.io/topics/notifications
+#
+# For instance if keyspace events notification is enabled, and a client
+# performs a DEL operation on key "foo" stored in the Database 0, two
+# messages will be published via Pub/Sub:
+#
+# PUBLISH __keyspace@0__:foo del
+# PUBLISH __keyevent@0__:del foo
+#
+# It is possible to select the events that Redis will notify among a set
+# of classes. Every class is identified by a single character:
+#
+#  K     Keyspace events, published with __keyspace@<db>__ prefix.
+#  E     Keyevent events, published with __keyevent@<db>__ prefix.
+#  g     Generic commands (non-type specific) like DEL, EXPIRE, RENAME, ...
+#  $     String commands
+#  l     List commands
+#  s     Set commands
+#  h     Hash commands
+#  z     Sorted set commands
+#  x     Expired events (events generated every time a key expires)
+#  e     Evicted events (events generated when a key is evicted for maxmemory)
+#  A     Alias for g$lshzxe, so that the "AKE" string means all the events.
+#
+# The "notify-keyspace-events" directive takes as its argument a string that
+# is composed of zero or multiple characters. The empty string means that
+# notifications are disabled.
+#
+# Example: to enable list and generic events, from the point of view of the
+# event name, use:
+#
+#  notify-keyspace-events Elg
+#
+# Example 2: to get the stream of the expired keys subscribing to channel
+# name __keyevent@0__:expired use:
+#
+#  notify-keyspace-events Ex
+#
+# By default all notifications are disabled because most users don't need
+# this feature and the feature has some overhead. Note that if you don't
+# specify at least one of K or E, no events will be delivered.
+notify-keyspace-events ""

+############################### ADVANCED CONFIG ###############################

+# Hashes are encoded using a memory efficient data structure when they have a
+# small number of entries, and the biggest entry does not exceed a given
+# threshold. These thresholds can be configured using the following directives.
+hash-max-ziplist-entries 512
+hash-max-ziplist-value 64

+# Lists are also encoded in a special way to save a lot of space.
+# The number of entries allowed per internal list node can be specified
+# as a fixed maximum size or a maximum number of elements.
+# For a fixed maximum size, use -5 through -1, meaning:
+# -5: max size: 64 Kb  <-- not recommended for normal workloads
+# -4: max size: 32 Kb  <-- not recommended
+# -3: max size: 16 Kb  <-- probably not recommended
+# -2: max size: 8 Kb   <-- good
+# -1: max size: 4 Kb   <-- good
+# Positive numbers mean store up to _exactly_ that number of elements
+# per list node.
+# The highest performing option is usually -2 (8 Kb size) or -1 (4 Kb size),
+# but if your use case is unique, adjust the settings as necessary.
+list-max-ziplist-size -2

+# Lists may also be compressed.
+# Compress depth is the number of quicklist ziplist nodes from *each* side of
+# the list to *exclude* from compression. The head and tail of the list
+# are always uncompressed for fast push/pop operations. Settings are:
+# 0: disable all list compression
+# 1: depth 1 means "don't start compressing until after 1 node into the list,
+#    going from either the head or tail"
+#    So: [head]->node->node->...->node->[tail]
+#    [head], [tail] will always be uncompressed; inner nodes will compress.
+# 2: [head]->[next]->node->node->...->node->[prev]->[tail]
+#    2 here means: don't compress head or head->next or tail->prev or tail,
+#    but compress all nodes between them.
+# 3: [head]->[next]->[next]->node->node->...->node->[prev]->[prev]->[tail]
+# etc.
+list-compress-depth 0

+# Sets have a special encoding in just one case: when a set is composed
+# of just strings that happen to be integers in radix 10 in the range
+# of 64 bit signed integers.
+# The following configuration setting sets the limit on the size of the
+# set in order to use this special memory saving encoding.
+set-max-intset-entries 512

+# Similarly to hashes and lists, sorted sets are also specially encoded in
+# order to save a lot of space. This encoding is only used when the length and
+# elements of a sorted set are below the following limits:
+zset-max-ziplist-entries 128
+zset-max-ziplist-value 64

+# HyperLogLog sparse representation bytes limit. The limit includes the
+# 16 bytes header. When a HyperLogLog using the sparse representation crosses
+# this limit, it is converted into the dense representation.
+#
+# A value greater than 16000 is totally useless, since at that point the
+# dense representation is more memory efficient.
+#
+# The suggested value is ~ 3000 in order to have the benefits of
+# the space efficient encoding without slowing down too much PFADD,
+# which is O(N) with the sparse encoding. The value can be raised to
+# ~ 10000 when CPU is not a concern, but space is, and the data set is
+# composed of many HyperLogLogs with cardinality in the 0 - 15000 range.
+hll-sparse-max-bytes 3000

+# Streams macro node max size / items. The stream data structure is a radix
+# tree of big nodes that encode multiple items inside. Using this configuration
+# it is possible to configure how big a single node can be in bytes, and the
+# maximum number of items it may contain before switching to a new node when
+# appending new stream entries. If any of the following settings are set to
+# zero, the limit is ignored, so for instance it is possible to set just a
+# max entries limit by setting max-bytes to 0 and max-entries to the desired
+# value.
+stream-node-max-bytes 4096
+stream-node-max-entries 100

+# Active rehashing uses 1 millisecond every 100 milliseconds of CPU time in
+# order to help rehashing the main Redis hash table (the one mapping top-level
+# keys to values). The hash table implementation Redis uses (see dict.c)
+# performs a lazy rehashing: the more operations you run against a hash table
+# that is rehashing, the more rehashing "steps" are performed, so if the
+# server is idle the rehashing is never complete and some more memory is used
+# by the hash table.
+#
+# The default is to use this millisecond 10 times every second in order to
+# actively rehash the main dictionaries, freeing memory when possible.
+#
+# If unsure:
+# use "activerehashing no" if you have hard latency requirements and it is
+# not a good thing in your environment that Redis can reply from time to time
+# to queries with 2 milliseconds delay.
+#
+# use "activerehashing yes" if you don't have such hard requirements but
+# want to free memory asap when possible.
+activerehashing yes

+# The client output buffer limits can be used to force disconnection of clients
+# that are not reading data from the server fast enough for some reason (a
+# common reason is that a Pub/Sub client can't consume messages as fast as the
+# publisher can produce them).
+#
+# The limit can be set differently for the three different classes of clients:
+#
+# normal -> normal clients including MONITOR clients
+# replica -> replica clients
+# pubsub -> clients subscribed to at least one pubsub channel or pattern
+#
+# The syntax of every client-output-buffer-limit directive is the following:
+#
+# client-output-buffer-limit <class> <hard limit> <soft limit> <soft seconds>
+#
+# A client is immediately disconnected once the hard limit is reached, or if
+# the soft limit is reached and remains reached for the specified number of
+# seconds (continuously).
+# So for instance if the hard limit is 32 megabytes and the soft limit is
+# 16 megabytes / 10 seconds, the client will get disconnected immediately
+# if the size of the output buffers reaches 32 megabytes, but will also get
+# disconnected if the client reaches 16 megabytes and continuously overcomes
+# the limit for 10 seconds.
+#
+# By default normal clients are not limited because they don't receive data
+# without asking (in a push way), but just after a request, so only
+# asynchronous clients may create a scenario where data is requested faster
+# than it can be read.
+#
+# Instead there is a default limit for pubsub and replica clients, since
+# subscribers and replicas receive data in a push fashion.
+#
+# Both the hard and the soft limit can be disabled by setting them to zero.
+client-output-buffer-limit normal 0 0 0
+client-output-buffer-limit replica 256mb 64mb 60
+client-output-buffer-limit pubsub 32mb 8mb 60

+# Client query buffers accumulate new commands. They are limited to a fixed
+# amount by default in order to prevent a protocol desynchronization (for
+# instance due to a bug in the client) from leading to unbounded memory usage
+# in the query buffer. However you can configure it here if you have very
+# special needs, such as huge multi/exec requests or alike.
+#
+# client-query-buffer-limit 1gb

+# In the Redis protocol, bulk requests, that is, elements representing single
+# strings, are normally limited to 512 MB. However you can change this limit
+# here.
+#
+# proto-max-bulk-len 512mb

+# Redis calls an internal function to perform many background tasks, like
+# closing connections of clients that timed out, purging expired keys that
+# are never requested, and so forth.
+#
+# Not all tasks are performed with the same frequency, but Redis checks for
+# tasks to perform according to the specified "hz" value.
+#
+# By default "hz" is set to 10. Raising the value will use more CPU when
+# Redis is idle, but at the same time will make Redis more responsive when
+# there are many keys expiring at the same time, and timeouts may be
+# handled with more precision.
+#
+# The range is between 1 and 500, however a value over 100 is usually not
+# a good idea. Most users should use the default of 10 and raise this up to
+# 100 only in environments where very low latency is required.
+hz 10

+# Normally it is useful to have an HZ value which is proportional to the
+# number of clients connected. This is useful in order, for instance, to
+# avoid processing too many clients for each background task invocation,
+# preventing latency spikes.
+#
+# Since the default HZ value is conservatively set to 10, Redis
+# offers, and enables by default, the ability to use an adaptive HZ value
+# which will temporarily rise when there are many connected clients.
+#
+# When dynamic HZ is enabled, the actual configured HZ will be used as
+# a baseline, but multiples of the configured HZ value will be actually
+# used as needed once more clients are connected. In this way an idle
+# instance will use very little CPU time while a busy instance will be
+# more responsive.
+dynamic-hz yes

+# When a child rewrites the AOF file, if the following option is enabled
+# the file will be fsync-ed every 32 MB of data generated. This is useful
+# in order to commit the file to the disk more incrementally and avoid
+# big latency spikes.
+aof-rewrite-incremental-fsync yes

+# When Redis saves an RDB file, if the following option is enabled
+# the file will be fsync-ed every 32 MB of data generated. This is useful
+# in order to commit the file to the disk more incrementally and avoid
+# big latency spikes.
+rdb-save-incremental-fsync yes

+# Redis LFU eviction (see the maxmemory setting) can be tuned. However it is
+# a good idea to start with the default settings and only change them after
+# investigating how to improve performance and how the keys' LFU changes over
+# time, which is possible to inspect via the OBJECT FREQ command.
+#
+# There are two tunable parameters in the Redis LFU implementation: the
+# counter logarithm factor and the counter decay time. It is important to
+# understand what the two parameters mean before changing them.
+#
+# The LFU counter is just 8 bits per key, its maximum value is 255, so Redis
+# uses a probabilistic increment with logarithmic behavior. Given the value
+# of the old counter, when a key is accessed, the counter is incremented in
+# this way:
+#
+# 1. A random number R between 0 and 1 is extracted.
+# 2. A probability P is calculated as 1/(old_value*lfu_log_factor+1).
+# 3. The counter is incremented only if R < P.
+#
+# The default lfu-log-factor is 10.
This is a table of how the frequency
+# counter changes with a different number of accesses with different
+# logarithmic factors:
+#
+# +--------+------------+------------+------------+------------+------------+
+# | factor | 100 hits   | 1000 hits  | 100K hits  | 1M hits    | 10M hits   |
+# +--------+------------+------------+------------+------------+------------+
+# | 0      | 104        | 255        | 255        | 255        | 255        |
+# +--------+------------+------------+------------+------------+------------+
+# | 1      | 18         | 49         | 255        | 255        | 255        |
+# +--------+------------+------------+------------+------------+------------+
+# | 10     | 10         | 18         | 142        | 255        | 255        |
+# +--------+------------+------------+------------+------------+------------+
+# | 100    | 8          | 11         | 49         | 143        | 255        |
+# +--------+------------+------------+------------+------------+------------+
+#
+# NOTE: The above table was obtained by running the following commands:
+#
+#   redis-benchmark -n 1000000 incr foo
+#   redis-cli object freq foo
+#
+# NOTE 2: The counter initial value is 5 in order to give new objects a chance
+# to accumulate hits.
+#
+# The counter decay time is the time, in minutes, that must elapse in order
+# for the key counter to be divided by two (or decremented, if its value
+# is <= 10).
+#
+# The default value for the lfu-decay-time is 1. A special value of 0 means to
+# decay the counter every time it happens to be scanned.
+#
+# lfu-log-factor 10
+# lfu-decay-time 1

+################################## INCLUDES ###################################

+# Include one or more other config files here. This is useful if you
+# have a standard template that applies to all Redis servers but also need
+# to customize a few per-server settings. Include files can include
+# other files, so use this wisely.
+#
+# include /path/to/local.conf
+# include /path/to/other.conf
diff --git a/app.py b/app.py
new file mode 100644
index 0000000..287525c
--- /dev/null
+++ b/app.py
@@ -0,0 +1,71 @@
+from flask import Flask
+from flask_login import LoginManager
+from config import Config
+import redis
+
+login_manager = LoginManager()
+redis_client = None
+
+def create_app():
+    app = Flask(__name__)
+    app.config.from_object(Config)
+
+    # 导入并初始化 db
+    from models import db
+    db.init_app(app)
+
+    # 初始化登录管理器
+    login_manager.init_app(app)
+    login_manager.login_view = 'auth.login'
+
+    # 用户加载器
+    @login_manager.user_loader
+    def load_user(user_id):
+        from models import User
+        return User.query.get(int(user_id))
+
+    # 初始化Redis
+    global redis_client
+    try:
+        redis_client = redis.Redis(
+            host=app.config['REDIS_HOST'],
+            port=app.config['REDIS_PORT'],
+            db=app.config['REDIS_DB'],
+            password=app.config['REDIS_PASSWORD'],
+            decode_responses=True
+        )
+        # 测试连接
+        redis_client.ping()
+        print("[OK] Redis连接成功")
+    except Exception as e:
+        print(f"[ERROR] Redis连接失败: {e}")
+        print("提示: 请确保Redis服务已启动")
+        redis_client = None
+
+    # 注册蓝图
+    from routes.auth import auth_bp
+    from routes.parser import parser_bp
+    from routes.admin import admin_bp
+    from routes.main import main_bp
+
+    app.register_blueprint(auth_bp, url_prefix='/auth')
+    app.register_blueprint(parser_bp, url_prefix='/api')
+    app.register_blueprint(admin_bp, url_prefix='/admin')
+    app.register_blueprint(main_bp)
+
+    # 初始化定时任务(仅在非调试模式或主进程中启动)
+    # 注意:初始化脚本运行时不启动调度器
+    import os
+    if os.environ.get('SKIP_SCHEDULER') != 'true':
+        if not app.debug or os.environ.get('WERKZEUG_RUN_MAIN') == 'true':
+            try:
+                from scheduler import init_scheduler
+                init_scheduler(app)
+            except Exception as e:
+                print(f"定时任务启动失败: {e}")
+
+    return app

+if 
__name__ == '__main__': + app = create_app() + app.run(debug=True, host='0.0.0.0', port=5000) diff --git a/config.py b/config.py new file mode 100644 index 0000000..bafbf62 --- /dev/null +++ b/config.py @@ -0,0 +1,40 @@ +import os +from dotenv import load_dotenv + +load_dotenv() + +class Config: + SECRET_KEY = os.getenv('SECRET_KEY', 'dev-secret-key-change-in-production') + + # 数据库配置 + SQLALCHEMY_DATABASE_URI = f"mysql+pymysql://{os.getenv('DB_USER', 'root')}:{os.getenv('DB_PASSWORD', '')}@{os.getenv('DB_HOST', 'localhost')}:{os.getenv('DB_PORT', '3306')}/{os.getenv('DB_NAME', 'video_parser')}?charset=utf8mb4" + SQLALCHEMY_TRACK_MODIFICATIONS = False + SQLALCHEMY_ECHO = False + + # 数据库连接池配置(解决连接超时问题) + SQLALCHEMY_ENGINE_OPTIONS = { + 'pool_size': 10, # 连接池大小 + 'pool_recycle': 3600, # 连接回收时间(1小时),防止 MySQL 8小时超时 + 'pool_pre_ping': True, # 每次从池中取连接前先 ping 一下,确保连接有效 + 'pool_timeout': 30, # 获取连接的超时时间 + 'max_overflow': 20, # 超过 pool_size 后最多创建的连接数 + 'connect_args': { + 'connect_timeout': 10 # MySQL 连接超时时间 + } + } + + # Redis配置 + REDIS_HOST = os.getenv('REDIS_HOST', 'localhost') + REDIS_PORT = int(os.getenv('REDIS_PORT', 6379)) + REDIS_DB = int(os.getenv('REDIS_DB', 0)) + REDIS_PASSWORD = os.getenv('REDIS_PASSWORD', None) + + # 会话配置 + PERMANENT_SESSION_LIFETIME = int(os.getenv('SESSION_LIFETIME', 7200)) + + # 并发配置 + MAX_CONCURRENT = int(os.getenv('MAX_CONCURRENT', 3)) + + # 验证码配置 + VERIFICATION_CODE_EXPIRE = 600 # 10分钟 + VERIFICATION_CODE_LENGTH = 6 diff --git a/database/schema.sql b/database/schema.sql new file mode 100644 index 0000000..c897522 --- /dev/null +++ b/database/schema.sql @@ -0,0 +1,191 @@ +-- 短视频解析平台数据库表结构设计 +-- 数据库: video_parser +-- 字符集: utf8mb4 + +-- 1. 用户表 +CREATE TABLE IF NOT EXISTS `users` ( + `id` INT AUTO_INCREMENT PRIMARY KEY, + `username` VARCHAR(50) NOT NULL UNIQUE COMMENT '用户名', + `email` VARCHAR(100) NOT NULL UNIQUE COMMENT '邮箱', + `password` VARCHAR(255) NOT NULL COMMENT '加密后的密码', + `group_id` INT NOT NULL DEFAULT 2 COMMENT '用户分组ID', + `register_ip` VARCHAR(45) COMMENT '注册IP', + `last_login_ip` VARCHAR(45) COMMENT '最近登录IP', + `total_parse_count` INT DEFAULT 0 COMMENT '总解析次数', + `is_active` TINYINT(1) DEFAULT 1 COMMENT '账号是否激活', + `created_at` DATETIME DEFAULT CURRENT_TIMESTAMP, + `updated_at` DATETIME DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, + INDEX `idx_email` (`email`), + INDEX `idx_group_id` (`group_id`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COMMENT='用户表'; + +-- 2. 用户分组表 +CREATE TABLE IF NOT EXISTS `user_groups` ( + `id` INT AUTO_INCREMENT PRIMARY KEY, + `name` VARCHAR(50) NOT NULL UNIQUE COMMENT '分组名称', + `daily_limit` INT NOT NULL DEFAULT 10 COMMENT '每日解析次数限制', + `description` VARCHAR(255) COMMENT '分组描述', + `created_at` DATETIME DEFAULT CURRENT_TIMESTAMP, + `updated_at` DATETIME DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COMMENT='用户分组表'; + +-- 插入默认分组 +INSERT INTO `user_groups` (`id`, `name`, `daily_limit`, `description`) VALUES +(1, '游客', 5, '未登录用户'), +(2, '普通用户', 10, '已注册用户'), +(3, 'VIP用户', 50, 'VIP会员'), +(4, 'SVIP用户', 200, 'SVIP会员'); + +-- 3. 
管理员表 +CREATE TABLE IF NOT EXISTS `admins` ( + `id` INT AUTO_INCREMENT PRIMARY KEY, + `username` VARCHAR(50) NOT NULL UNIQUE COMMENT '管理员用户名', + `password` VARCHAR(255) NOT NULL COMMENT '加密后的密码', + `email` VARCHAR(100) COMMENT '管理员邮箱', + `totp_secret` VARCHAR(100) COMMENT '2FA密钥', + `is_2fa_enabled` TINYINT(1) DEFAULT 0 COMMENT '是否启用2FA', + `last_login_ip` VARCHAR(45) COMMENT '最近登录IP', + `last_login_at` DATETIME COMMENT '最近登录时间', + `created_at` DATETIME DEFAULT CURRENT_TIMESTAMP, + `updated_at` DATETIME DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COMMENT='管理员表'; + +-- 插入默认管理员账号 (密码需要在应用中加密后插入) +-- 用户名: shihao, 密码: 80012029Lz + +-- 4. 解析接口配置表 +CREATE TABLE IF NOT EXISTS `parser_apis` ( + `id` INT AUTO_INCREMENT PRIMARY KEY, + `name` VARCHAR(100) NOT NULL COMMENT '接口名称', + `platform` VARCHAR(50) NOT NULL COMMENT '平台类型(douyin/tiktok/bilibili)', + `api_url` VARCHAR(500) NOT NULL COMMENT 'API地址', + `api_key` VARCHAR(255) COMMENT 'API密钥', + `weight` INT DEFAULT 1 COMMENT '权重(用于负载均衡)', + `is_enabled` TINYINT(1) DEFAULT 1 COMMENT '是否启用', + `health_status` TINYINT(1) DEFAULT 1 COMMENT '健康状态(1正常/0异常)', + `last_check_at` DATETIME COMMENT '最后检查时间', + `fail_count` INT DEFAULT 0 COMMENT '连续失败次数', + `total_calls` INT DEFAULT 0 COMMENT '总调用次数', + `success_calls` INT DEFAULT 0 COMMENT '成功调用次数', + `avg_response_time` INT DEFAULT 0 COMMENT '平均响应时间(ms)', + `created_at` DATETIME DEFAULT CURRENT_TIMESTAMP, + `updated_at` DATETIME DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, + INDEX `idx_platform` (`platform`), + INDEX `idx_enabled` (`is_enabled`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COMMENT='解析接口配置表'; + +-- 5. 站点配置表 +CREATE TABLE IF NOT EXISTS `site_config` ( + `id` INT AUTO_INCREMENT PRIMARY KEY, + `config_key` VARCHAR(100) NOT NULL UNIQUE COMMENT '配置键', + `config_value` TEXT COMMENT '配置值', + `config_type` VARCHAR(50) DEFAULT 'string' COMMENT '配置类型(string/int/json)', + `description` VARCHAR(255) COMMENT '配置描述', + `updated_at` DATETIME DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COMMENT='站点配置表'; + +-- 插入默认配置 +INSERT INTO `site_config` (`config_key`, `config_value`, `config_type`, `description`) VALUES +('site_title', '短视频解析平台', 'string', '网站标题'), +('site_logo', '', 'string', '网站Logo URL'), +('site_notice', '欢迎使用短视频解析平台', 'string', '网站公告'), +('site_footer', '', 'string', '网站底部信息'), +('max_concurrent', '3', 'int', '最大并发解析数'), +('guest_daily_limit', '5', 'int', '游客每日解析次数'), +('user_daily_limit', '10', 'int', '普通用户每日解析次数'); + +-- 6. 
SMTP配置表 +CREATE TABLE IF NOT EXISTS `smtp_config` ( + `id` INT AUTO_INCREMENT PRIMARY KEY, + `name` VARCHAR(100) NOT NULL COMMENT 'SMTP配置名称', + `host` VARCHAR(255) NOT NULL COMMENT 'SMTP服务器地址', + `port` INT NOT NULL COMMENT 'SMTP端口', + `username` VARCHAR(255) NOT NULL COMMENT 'SMTP用户名', + `password` VARCHAR(255) NOT NULL COMMENT 'SMTP密码', + `from_email` VARCHAR(255) NOT NULL COMMENT '发件人邮箱', + `from_name` VARCHAR(100) COMMENT '发件人名称', + `use_tls` TINYINT(1) DEFAULT 1 COMMENT '是否使用TLS', + `is_enabled` TINYINT(1) DEFAULT 1 COMMENT '是否启用', + `is_default` TINYINT(1) DEFAULT 0 COMMENT '是否为默认配置', + `weight` INT DEFAULT 1 COMMENT '权重(用于负载均衡)', + `send_count` INT DEFAULT 0 COMMENT '发送次数', + `fail_count` INT DEFAULT 0 COMMENT '失败次数', + `created_at` DATETIME DEFAULT CURRENT_TIMESTAMP, + `updated_at` DATETIME DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COMMENT='SMTP配置表'; + +-- 插入默认SMTP配置 +INSERT INTO `smtp_config` (`name`, `host`, `port`, `username`, `password`, `from_email`, `from_name`, `is_default`) VALUES +('默认SMTP', 'smtp.resend.com', 587, 'resend', 're_TSgVYFuT_HG9g3Mu6P4i6LEdYgqVpW6S5', 'noreply@example.com', '短视频解析平台', 1); + +-- 7. 邮箱验证码表 +CREATE TABLE IF NOT EXISTS `email_verification` ( + `id` INT AUTO_INCREMENT PRIMARY KEY, + `email` VARCHAR(100) NOT NULL COMMENT '邮箱地址', + `code` VARCHAR(10) NOT NULL COMMENT '验证码', + `purpose` VARCHAR(50) NOT NULL COMMENT '用途(register/reset_password/forgot_password)', + `is_used` TINYINT(1) DEFAULT 0 COMMENT '是否已使用', + `expires_at` DATETIME NOT NULL COMMENT '过期时间', + `created_at` DATETIME DEFAULT CURRENT_TIMESTAMP, + INDEX `idx_email_code` (`email`, `code`), + INDEX `idx_expires` (`expires_at`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COMMENT='邮箱验证码表'; + +-- 8. 解析记录表 +CREATE TABLE IF NOT EXISTS `parse_logs` ( + `id` BIGINT AUTO_INCREMENT PRIMARY KEY, + `user_id` INT COMMENT '用户ID(NULL表示游客)', + `ip_address` VARCHAR(45) NOT NULL COMMENT '请求IP', + `platform` VARCHAR(50) NOT NULL COMMENT '平台类型', + `video_url` VARCHAR(1000) NOT NULL COMMENT '视频链接', + `parser_api_id` INT COMMENT '使用的解析接口ID', + `status` VARCHAR(20) NOT NULL COMMENT '状态(success/failed/queued)', + `error_message` TEXT COMMENT '错误信息', + `response_time` INT COMMENT '响应时间(ms)', + `created_at` DATETIME DEFAULT CURRENT_TIMESTAMP, + INDEX `idx_user_id` (`user_id`), + INDEX `idx_ip` (`ip_address`), + INDEX `idx_platform` (`platform`), + INDEX `idx_status` (`status`), + INDEX `idx_created` (`created_at`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COMMENT='解析记录表'; + +-- 9. 每日解析统计表 +CREATE TABLE IF NOT EXISTS `daily_parse_stats` ( + `id` INT AUTO_INCREMENT PRIMARY KEY, + `user_id` INT COMMENT '用户ID(NULL表示游客)', + `ip_address` VARCHAR(45) COMMENT 'IP地址(游客使用)', + `date` DATE NOT NULL COMMENT '日期', + `parse_count` INT DEFAULT 0 COMMENT '解析次数', + `success_count` INT DEFAULT 0 COMMENT '成功次数', + `fail_count` INT DEFAULT 0 COMMENT '失败次数', + `created_at` DATETIME DEFAULT CURRENT_TIMESTAMP, + `updated_at` DATETIME DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, + UNIQUE KEY `uk_user_date` (`user_id`, `date`), + UNIQUE KEY `uk_ip_date` (`ip_address`, `date`), + INDEX `idx_date` (`date`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COMMENT='每日解析统计表'; + +-- 10. 
健康检查配置表 +CREATE TABLE IF NOT EXISTS `health_check_config` ( + `id` INT AUTO_INCREMENT PRIMARY KEY, + `platform` VARCHAR(50) NOT NULL COMMENT '平台类型', + `test_url` VARCHAR(1000) NOT NULL COMMENT '测试视频链接', + `check_interval` INT DEFAULT 300 COMMENT '检查间隔(秒)', + `is_enabled` TINYINT(1) DEFAULT 1 COMMENT '是否启用', + `alert_email` VARCHAR(255) COMMENT '告警邮箱', + `created_at` DATETIME DEFAULT CURRENT_TIMESTAMP, + `updated_at` DATETIME DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COMMENT='健康检查配置表'; + +-- 11. 健康检查日志表 +CREATE TABLE IF NOT EXISTS `health_check_logs` ( + `id` BIGINT AUTO_INCREMENT PRIMARY KEY, + `parser_api_id` INT NOT NULL COMMENT '解析接口ID', + `status` VARCHAR(20) NOT NULL COMMENT '检查结果(success/failed)', + `response_time` INT COMMENT '响应时间(ms)', + `error_message` TEXT COMMENT '错误信息', + `checked_at` DATETIME DEFAULT CURRENT_TIMESTAMP, + INDEX `idx_api_id` (`parser_api_id`), + INDEX `idx_checked` (`checked_at`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COMMENT='健康检查日志表'; diff --git a/init_admin.py b/init_admin.py new file mode 100644 index 0000000..ed8c52a --- /dev/null +++ b/init_admin.py @@ -0,0 +1,45 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +""" +初始化管理员账号脚本 +""" + +import os +os.environ['SKIP_SCHEDULER'] = 'true' + +from app import create_app +from models import db, Admin +from utils.security import hash_password + +def init_admin(): + """初始化默认管理员账号""" + app = create_app() + + with app.app_context(): + # 检查是否已存在管理员 + existing_admin = Admin.query.filter_by(username='shihao').first() + + if existing_admin: + print("管理员账号已存在!") + return + + # 创建默认管理员 + admin = Admin( + username='shihao', + password=hash_password('80012029Lz'), + email='admin@example.com' + ) + + db.session.add(admin) + db.session.commit() + + print("=" * 50) + print("管理员账号创建成功!") + print("=" * 50) + print(f"用户名: shihao") + print(f"密码: 80012029Lz") + print("=" * 50) + print("请及时登录后台修改密码!") + +if __name__ == '__main__': + init_admin() diff --git a/init_data.py b/init_data.py new file mode 100644 index 0000000..e464080 --- /dev/null +++ b/init_data.py @@ -0,0 +1,101 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +""" +初始化数据脚本 - 插入解析接口配置 +""" + +import os +os.environ['SKIP_SCHEDULER'] = 'true' + +from app import create_app +from models import db, ParserAPI, HealthCheckConfig + +def init_parser_apis(): + """初始化解析接口配置""" + app = create_app() + + with app.app_context(): + # 抖音解析接口 + douyin_api = ParserAPI( + name='抖音自部署API', + platform='douyin', + api_url='https://dy.zoomcaronline.net', + weight=1, + is_enabled=True + ) + + # TikTok解析接口 + tiktok_api = ParserAPI( + name='TikTok自部署API', + platform='tiktok', + api_url='https://dy.zoomcaronline.net', + weight=1, + is_enabled=True + ) + + # 哔哩哔哩解析接口 - 米人API + bilibili_mir = ParserAPI( + name='哔哩哔哩-米人API', + platform='bilibili', + api_url='https://api.mir6.com', + weight=1, + is_enabled=True + ) + + # 哔哩哔哩解析接口 - BugPK API + bilibili_bugpk = ParserAPI( + name='哔哩哔哩-BugPK API', + platform='bilibili', + api_url='https://api.bugpk.com', + weight=1, + is_enabled=True + ) + + # 哔哩哔哩解析接口 - 妖狐API + bilibili_yaohu = ParserAPI( + name='哔哩哔哩-妖狐API', + platform='bilibili', + api_url='https://api.yaohud.cn', + api_key='SM227DLC0ZgJ6DXJhAx', + weight=1, + is_enabled=True + ) + + # 检查是否已存在 + if not ParserAPI.query.filter_by(name='抖音自部署API').first(): + db.session.add(douyin_api) + print("✓ 已添加抖音解析接口") + + if not ParserAPI.query.filter_by(name='TikTok自部署API').first(): + db.session.add(tiktok_api) + print("✓ 已添加TikTok解析接口") + + if not 
ParserAPI.query.filter_by(name='哔哩哔哩-米人API').first(): + db.session.add(bilibili_mir) + print("✓ 已添加哔哩哔哩-米人API") + + if not ParserAPI.query.filter_by(name='哔哩哔哩-BugPK API').first(): + db.session.add(bilibili_bugpk) + print("✓ 已添加哔哩哔哩-BugPK API") + + if not ParserAPI.query.filter_by(name='哔哩哔哩-妖狐API').first(): + db.session.add(bilibili_yaohu) + print("✓ 已添加哔哩哔哩-妖狐API") + + # 添加健康检查配置 + if not HealthCheckConfig.query.filter_by(platform='bilibili').first(): + health_config = HealthCheckConfig( + platform='bilibili', + test_url='https://www.bilibili.com/video/BV1vrU6B4ELQ/', + check_interval=300, # 5分钟 + is_enabled=True, + alert_email='admin@example.com' + ) + db.session.add(health_config) + print("✓ 已添加哔哩哔哩健康检查配置") + + db.session.commit() + print("\n数据初始化完成!") + +if __name__ == '__main__': + init_parser_apis() diff --git a/models/__init__.py b/models/__init__.py new file mode 100644 index 0000000..54146e6 --- /dev/null +++ b/models/__init__.py @@ -0,0 +1,161 @@ +from datetime import datetime +from flask_sqlalchemy import SQLAlchemy + +db = SQLAlchemy() + +class User(db.Model): + __tablename__ = 'users' + + id = db.Column(db.Integer, primary_key=True) + username = db.Column(db.String(50), unique=True, nullable=False) + email = db.Column(db.String(100), unique=True, nullable=False) + password = db.Column(db.String(255), nullable=False) + group_id = db.Column(db.Integer, db.ForeignKey('user_groups.id'), default=2) + register_ip = db.Column(db.String(45)) + last_login_ip = db.Column(db.String(45)) + total_parse_count = db.Column(db.Integer, default=0) + is_active = db.Column(db.Boolean, default=True) + created_at = db.Column(db.DateTime, default=datetime.utcnow) + updated_at = db.Column(db.DateTime, default=datetime.utcnow, onupdate=datetime.utcnow) + + group = db.relationship('UserGroup', backref='users') + + @property + def is_authenticated(self): + return True + + def get_id(self): + return str(self.id) + +class UserGroup(db.Model): + __tablename__ = 'user_groups' + + id = db.Column(db.Integer, primary_key=True) + name = db.Column(db.String(50), unique=True, nullable=False) + daily_limit = db.Column(db.Integer, default=10) + description = db.Column(db.String(255)) + created_at = db.Column(db.DateTime, default=datetime.utcnow) + updated_at = db.Column(db.DateTime, default=datetime.utcnow, onupdate=datetime.utcnow) + +class Admin(db.Model): + __tablename__ = 'admins' + + id = db.Column(db.Integer, primary_key=True) + username = db.Column(db.String(50), unique=True, nullable=False) + password = db.Column(db.String(255), nullable=False) + email = db.Column(db.String(100)) + totp_secret = db.Column(db.String(100)) + is_2fa_enabled = db.Column(db.Boolean, default=False) + last_login_ip = db.Column(db.String(45)) + last_login_at = db.Column(db.DateTime) + created_at = db.Column(db.DateTime, default=datetime.utcnow) + updated_at = db.Column(db.DateTime, default=datetime.utcnow, onupdate=datetime.utcnow) + +class ParserAPI(db.Model): + __tablename__ = 'parser_apis' + + id = db.Column(db.Integer, primary_key=True) + name = db.Column(db.String(100), nullable=False) + platform = db.Column(db.String(50), nullable=False) + api_url = db.Column(db.String(500), nullable=False) + api_key = db.Column(db.String(255)) + weight = db.Column(db.Integer, default=1) + is_enabled = db.Column(db.Boolean, default=True) + health_status = db.Column(db.Boolean, default=True) + last_check_at = db.Column(db.DateTime) + fail_count = db.Column(db.Integer, default=0) + total_calls = db.Column(db.Integer, default=0) + success_calls 
= db.Column(db.Integer, default=0) + avg_response_time = db.Column(db.Integer, default=0) + created_at = db.Column(db.DateTime, default=datetime.utcnow) + updated_at = db.Column(db.DateTime, default=datetime.utcnow, onupdate=datetime.utcnow) + +class SiteConfig(db.Model): + __tablename__ = 'site_config' + + id = db.Column(db.Integer, primary_key=True) + config_key = db.Column(db.String(100), unique=True, nullable=False) + config_value = db.Column(db.Text) + config_type = db.Column(db.String(50), default='string') + description = db.Column(db.String(255)) + updated_at = db.Column(db.DateTime, default=datetime.utcnow, onupdate=datetime.utcnow) + +class SMTPConfig(db.Model): + __tablename__ = 'smtp_config' + + id = db.Column(db.Integer, primary_key=True) + name = db.Column(db.String(100), nullable=False) + host = db.Column(db.String(255), nullable=False) + port = db.Column(db.Integer, nullable=False) + username = db.Column(db.String(255), nullable=False) + password = db.Column(db.String(255), nullable=False) + from_email = db.Column(db.String(255), nullable=False) + from_name = db.Column(db.String(100)) + use_tls = db.Column(db.Boolean, default=True) + is_enabled = db.Column(db.Boolean, default=True) + is_default = db.Column(db.Boolean, default=False) + weight = db.Column(db.Integer, default=1) + send_count = db.Column(db.Integer, default=0) + fail_count = db.Column(db.Integer, default=0) + created_at = db.Column(db.DateTime, default=datetime.utcnow) + updated_at = db.Column(db.DateTime, default=datetime.utcnow, onupdate=datetime.utcnow) + +class EmailVerification(db.Model): + __tablename__ = 'email_verification' + + id = db.Column(db.Integer, primary_key=True) + email = db.Column(db.String(100), nullable=False) + code = db.Column(db.String(10), nullable=False) + purpose = db.Column(db.String(50), nullable=False) + is_used = db.Column(db.Boolean, default=False) + expires_at = db.Column(db.DateTime, nullable=False) + created_at = db.Column(db.DateTime, default=datetime.utcnow) + +class ParseLog(db.Model): + __tablename__ = 'parse_logs' + + id = db.Column(db.BigInteger, primary_key=True) + user_id = db.Column(db.Integer, db.ForeignKey('users.id')) + ip_address = db.Column(db.String(45), nullable=False) + platform = db.Column(db.String(50), nullable=False) + video_url = db.Column(db.String(1000), nullable=False) + parser_api_id = db.Column(db.Integer, db.ForeignKey('parser_apis.id')) + status = db.Column(db.String(20), nullable=False) + error_message = db.Column(db.Text) + response_time = db.Column(db.Integer) + created_at = db.Column(db.DateTime, default=datetime.utcnow) + +class DailyParseStat(db.Model): + __tablename__ = 'daily_parse_stats' + + id = db.Column(db.Integer, primary_key=True) + user_id = db.Column(db.Integer, db.ForeignKey('users.id')) + ip_address = db.Column(db.String(45)) + date = db.Column(db.Date, nullable=False) + parse_count = db.Column(db.Integer, default=0) + success_count = db.Column(db.Integer, default=0) + fail_count = db.Column(db.Integer, default=0) + created_at = db.Column(db.DateTime, default=datetime.utcnow) + updated_at = db.Column(db.DateTime, default=datetime.utcnow, onupdate=datetime.utcnow) + +class HealthCheckConfig(db.Model): + __tablename__ = 'health_check_config' + + id = db.Column(db.Integer, primary_key=True) + platform = db.Column(db.String(50), nullable=False) + test_url = db.Column(db.String(1000), nullable=False) + check_interval = db.Column(db.Integer, default=300) + is_enabled = db.Column(db.Boolean, default=True) + alert_email = 
db.Column(db.String(255)) + created_at = db.Column(db.DateTime, default=datetime.utcnow) + updated_at = db.Column(db.DateTime, default=datetime.utcnow, onupdate=datetime.utcnow) + +class HealthCheckLog(db.Model): + __tablename__ = 'health_check_logs' + + id = db.Column(db.BigInteger, primary_key=True) + parser_api_id = db.Column(db.Integer, db.ForeignKey('parser_apis.id'), nullable=False) + status = db.Column(db.String(20), nullable=False) + response_time = db.Column(db.Integer) + error_message = db.Column(db.Text) + checked_at = db.Column(db.DateTime, default=datetime.utcnow) diff --git a/parsers/__init__.py b/parsers/__init__.py new file mode 100644 index 0000000..a07ec63 --- /dev/null +++ b/parsers/__init__.py @@ -0,0 +1 @@ +# Parsers package diff --git a/parsers/base.py b/parsers/base.py new file mode 100644 index 0000000..cabd9da --- /dev/null +++ b/parsers/base.py @@ -0,0 +1,54 @@ +from abc import ABC, abstractmethod +import requests +from typing import Dict, Optional + +class BaseParser(ABC): + """解析器基类""" + + def __init__(self, api_url: str, api_key: Optional[str] = None, timeout: int = 30): + self.api_url = api_url + self.api_key = api_key + self.timeout = timeout + + @abstractmethod + def parse(self, video_url: str) -> Dict: + """ + 解析视频链接 + 返回统一格式: + { + "cover": "封面URL", + "video_url": "视频URL", + "title": "标题", + "description": "简介" + } + """ + pass + + def _make_request(self, url: str, params: Dict = None, headers: Dict = None, verify: bool = True) -> requests.Response: + """发送HTTP请求""" + try: + # 设置默认请求头 + default_headers = { + 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36', + 'Accept': 'application/json, text/plain, */*', + 'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8', + } + + # 合并自定义请求头 + if headers: + default_headers.update(headers) + + response = requests.get(url, params=params, headers=default_headers, timeout=self.timeout, verify=verify) + response.raise_for_status() + return response + except requests.RequestException as e: + raise Exception(f"请求失败: {str(e)}") + + def _normalize_response(self, cover: str, video_url: str, title: str, description: str) -> Dict: + """标准化返回数据""" + return { + "cover": cover or "", + "video_url": video_url or "", + "title": title or "", + "description": description or "" + } diff --git a/parsers/bilibili.py b/parsers/bilibili.py new file mode 100644 index 0000000..44f516b --- /dev/null +++ b/parsers/bilibili.py @@ -0,0 +1,120 @@ +from parsers.base import BaseParser +from typing import Dict +from urllib.parse import urlencode + +class BilibiliMirParser(BaseParser): + """哔哩哔哩解析器 - 米人API""" + + def parse(self, video_url: str) -> Dict: + """解析哔哩哔哩视频""" + try: + # 手动构建URL,避免双重编码 + url = f"{self.api_url}/api/bzjiexi?{urlencode({'url': video_url})}" + + response = self._make_request(url) + data = response.json() + + return self._extract_data(data) + except Exception as e: + raise Exception(f"哔哩哔哩解析失败(米人API): {str(e)}") + + def _extract_data(self, data: Dict) -> Dict: + """提取并标准化数据""" + try: + if data.get("code") == 200 or data.get("status") == "success": + video_data = data.get("data", {}) + + # 如果data是列表,取第一个元素 + if isinstance(video_data, list): + video_data = video_data[0] if video_data else {} + + cover = video_data.get("cover", "") or video_data.get("pic", "") + video_url = video_data.get("url", "") or video_data.get("video_url", "") + title = video_data.get("title", "") + description = video_data.get("desc", "") or video_data.get("description", "") + + return 
self._normalize_response(cover, video_url, title, description) + else: + raise Exception(f"解析失败: {data.get('msg', '未知错误')}") + except Exception as e: + raise Exception(f"数据提取失败: {str(e)}") + + +class BilibiliBugPKParser(BaseParser): + """哔哩哔哩解析器 - BugPK API""" + + def parse(self, video_url: str) -> Dict: + """解析哔哩哔哩视频""" + try: + # 手动构建URL,避免双重编码 + url = f"{self.api_url}/api/bilibili?{urlencode({'url': video_url})}" + + response = self._make_request(url) + data = response.json() + + return self._extract_data(data) + except Exception as e: + raise Exception(f"哔哩哔哩解析失败(BugPK API): {str(e)}") + + def _extract_data(self, data: Dict) -> Dict: + """提取并标准化数据""" + try: + if data.get("code") == 200 or data.get("status") == "success": + video_data = data.get("data", {}) + + # 如果data是列表,取第一个元素 + if isinstance(video_data, list): + video_data = video_data[0] if video_data else {} + + cover = video_data.get("cover", "") or video_data.get("pic", "") + video_url = video_data.get("url", "") or video_data.get("video_url", "") + title = video_data.get("title", "") + description = video_data.get("desc", "") or video_data.get("description", "") + + return self._normalize_response(cover, video_url, title, description) + else: + raise Exception(f"解析失败: {data.get('msg', '未知错误')}") + except Exception as e: + raise Exception(f"数据提取失败: {str(e)}") + + +class BilibiliYaohuParser(BaseParser): + """哔哩哔哩解析器 - 妖狐API""" + + def parse(self, video_url: str) -> Dict: + """解析哔哩哔哩视频""" + try: + # 手动构建URL,避免双重编码 + url = f"{self.api_url}/api/v6/video/bili?{urlencode({'key': self.api_key, 'url': video_url})}" + + response = self._make_request(url, verify=False) + data = response.json() + + return self._extract_data(data) + except Exception as e: + raise Exception(f"哔哩哔哩解析失败(妖狐API): {str(e)}") + + def _extract_data(self, data: Dict) -> Dict: + """提取并标准化数据""" + try: + if data.get("parse_type") == "video": + video_data = data.get("data", {}) + basic = video_data.get("basic", {}) + + # 提取基本信息 + cover = basic.get("cover", "") + title = basic.get("title", "") + description = basic.get("description", "") + + # 提取视频URL - 优先使用data.video_url,其次使用videos[0].url + video_url = video_data.get("video_url", "") + if not video_url: + videos = video_data.get("videos", []) + if isinstance(videos, list) and videos: + video_url = videos[0].get("url", "") + + return self._normalize_response(cover, video_url, title, description) + else: + raise Exception(f"解析失败: 不支持的类型 {data.get('parse_type')}") + except Exception as e: + raise Exception(f"数据提取失败: {str(e)}") diff --git a/parsers/douyin.py b/parsers/douyin.py new file mode 100644 index 0000000..2690314 --- /dev/null +++ b/parsers/douyin.py @@ -0,0 +1,89 @@ +from parsers.base import BaseParser +from typing import Dict +from urllib.parse import urlencode + +class DouyinParser(BaseParser): + """抖音解析器""" + + def parse(self, video_url: str) -> Dict: + """解析抖音视频""" + try: + # 步骤1: 提取视频ID + aweme_id = self._get_aweme_id(video_url) + + # 步骤2: 获取视频详细信息 + video_info = self._fetch_video_info(aweme_id) + + # 步骤3: 提取并标准化数据 + return self._extract_data(video_info) + except Exception as e: + raise Exception(f"抖音解析失败: {str(e)}") + + def _get_aweme_id(self, video_url: str) -> str: + """提取视频ID""" + # 手动构建URL,避免双重编码 + url = f"{self.api_url}/api/douyin/web/get_aweme_id?{urlencode({'url': video_url})}" + + response = self._make_request(url) + data = response.json() + + if data.get("code") != 200: + raise Exception(f"获取视频ID失败: {data.get('msg', '未知错误')}") + + return data.get("data") + + def _fetch_video_info(self, aweme_id: str) 
-> Dict: + """获取视频详细信息""" + # 手动构建URL,避免双重编码 + url = f"{self.api_url}/api/douyin/web/fetch_one_video?{urlencode({'aweme_id': aweme_id})}" + + response = self._make_request(url) + data = response.json() + + if data.get("code") != 200: + raise Exception("获取视频信息失败") + + return data.get("data", {}).get("aweme_detail", {}) + + def _extract_data(self, video_info: Dict) -> Dict: + """提取并标准化数据""" + try: + # 提取封面 + cover = video_info.get("video", {}).get("cover_original_scale", {}).get("url_list", [""])[0] + + # 提取视频URL + video_url = video_info.get("video", {}).get("play_addr", {}).get("url_list", [""])[0] + + # 提取标题(描述) + title = video_info.get("desc", "") + + # 提取作者信息作为简介 + author = video_info.get("author", {}) + author_name = author.get("nickname", "") + author_signature = author.get("signature", "") + description = f"作者: {author_name}" + if author_signature: + description += f" | {author_signature}" + + return self._normalize_response(cover, video_url, title, description) + except Exception as e: + raise Exception(f"数据提取失败: {str(e)}") + + +class DouyinDownloadParser(BaseParser): + """抖音下载解析器(直接下载)""" + + def parse(self, video_url: str) -> Dict: + """解析抖音视频(下载方式)""" + try: + download_url = f"{self.api_url}/api/download" + + # 手动构建URL,避免双重编码 + return self._normalize_response( + cover="", + video_url=f"{download_url}?{urlencode({'url': video_url})}", + title="抖音视频", + description="通过下载接口获取" + ) + except Exception as e: + raise Exception(f"抖音下载解析失败: {str(e)}") diff --git a/parsers/factory.py b/parsers/factory.py new file mode 100644 index 0000000..eb6c19a --- /dev/null +++ b/parsers/factory.py @@ -0,0 +1,87 @@ +from parsers.douyin import DouyinParser +from parsers.tiktok import TikTokParser +from parsers.bilibili import BilibiliMirParser, BilibiliBugPKParser, BilibiliYaohuParser +from models import ParserAPI +import random + +class ParserFactory: + """解析器工厂类""" + + @staticmethod + def create_parser(api_config: ParserAPI): + """根据API配置创建解析器实例""" + platform = api_config.platform.lower() + api_url = api_config.api_url + api_key = api_config.api_key + + if platform == 'douyin': + return DouyinParser(api_url, api_key) + elif platform == 'tiktok': + return TikTokParser(api_url, api_key) + elif platform == 'bilibili': + # 根据API名称选择不同的解析器 + if 'mir6' in api_url: + return BilibiliMirParser(api_url, api_key) + elif 'bugpk' in api_url: + return BilibiliBugPKParser(api_url, api_key) + elif 'yaohud' in api_url: + return BilibiliYaohuParser(api_url, api_key) + else: + return BilibiliMirParser(api_url, api_key) + else: + raise ValueError(f"不支持的平台: {platform}") + + @staticmethod + def get_parser_for_platform(platform: str): + """获取指定平台的解析器(带负载均衡)""" + from models import db + + # 查询该平台所有启用且健康的API + apis = ParserAPI.query.filter_by( + platform=platform.lower(), + is_enabled=True, + health_status=True + ).all() + + if not apis: + raise Exception(f"没有可用的{platform}解析接口") + + # 如果是哔哩哔哩,使用加权随机选择(负载均衡) + if platform.lower() == 'bilibili' and len(apis) > 1: + api = ParserFactory._weighted_random_choice(apis) + else: + # 其他平台选择第一个可用的 + api = apis[0] + + return ParserFactory.create_parser(api), api + + @staticmethod + def _weighted_random_choice(apis): + """加权随机选择""" + total_weight = sum(api.weight for api in apis) + if total_weight == 0: + return random.choice(apis) + + rand = random.uniform(0, total_weight) + current = 0 + + for api in apis: + current += api.weight + if rand <= current: + return api + + return apis[-1] + + @staticmethod + def detect_platform(video_url: str) -> str: + """检测视频链接所属平台""" + url_lower = 
video_url.lower() + + if 'douyin.com' in url_lower or 'v.douyin' in url_lower: + return 'douyin' + elif 'tiktok.com' in url_lower: + return 'tiktok' + elif 'bilibili.com' in url_lower or 'b23.tv' in url_lower: + return 'bilibili' + else: + raise ValueError("无法识别的视频平台") diff --git a/parsers/tiktok.py b/parsers/tiktok.py new file mode 100644 index 0000000..175574e --- /dev/null +++ b/parsers/tiktok.py @@ -0,0 +1,70 @@ +from parsers.base import BaseParser +from typing import Dict +from urllib.parse import urlencode + +class TikTokParser(BaseParser): + """TikTok解析器""" + + def parse(self, video_url: str) -> Dict: + """解析TikTok视频""" + try: + # 步骤1: 提取视频ID + aweme_id = self._get_aweme_id(video_url) + + # 步骤2: 获取视频详细信息 + video_info = self._fetch_video_info(aweme_id) + + # 步骤3: 提取并标准化数据 + return self._extract_data(video_info) + except Exception as e: + raise Exception(f"TikTok解析失败: {str(e)}") + + def _get_aweme_id(self, video_url: str) -> str: + """提取视频ID""" + # 手动构建URL,避免双重编码 + url = f"{self.api_url}/api/tiktok/web/get_aweme_id?{urlencode({'url': video_url})}" + + response = self._make_request(url) + data = response.json() + + if data.get("code") != 200: + raise Exception(f"获取视频ID失败: {data.get('msg', '未知错误')}") + + return data.get("data") + + def _fetch_video_info(self, aweme_id: str) -> Dict: + """获取视频详细信息""" + # 手动构建URL,避免双重编码 + url = f"{self.api_url}/api/tiktok/app/fetch_one_video?{urlencode({'aweme_id': aweme_id})}" + + response = self._make_request(url) + data = response.json() + + if data.get("code") != 200: + raise Exception("获取视频信息失败") + + # TikTok API 返回的数据直接在 data 字段下,没有 aweme_detail 层级 + return data.get("data", {}) + + def _extract_data(self, video_info: Dict) -> Dict: + """提取并标准化数据""" + try: + # TikTok数据结构与抖音类似 + cover = video_info.get("video", {}).get("cover_original_scale", {}).get("url_list", [""])[0] + if not cover: + cover = video_info.get("video", {}).get("cover", {}).get("url_list", [""])[0] + + video_url = video_info.get("video", {}).get("play_addr", {}).get("url_list", [""])[0] + + title = video_info.get("desc", "") + + author = video_info.get("author", {}) + author_name = author.get("nickname", "") + author_signature = author.get("signature", "") + description = f"Author: {author_name}" + if author_signature: + description += f" | {author_signature}" + + return self._normalize_response(cover, video_url, title, description) + except Exception as e: + raise Exception(f"数据提取失败: {str(e)}") diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 0000000..ed643d8 --- /dev/null +++ b/requirements.txt @@ -0,0 +1,15 @@ +Flask==3.0.0 +Flask-SQLAlchemy==3.1.1 +Flask-Login==0.6.3 +Flask-WTF==1.2.1 +PyMySQL==1.1.0 +cryptography==41.0.7 +bcrypt==4.1.2 +pyotp==2.9.0 +qrcode==7.4.2 +requests==2.31.0 +python-dotenv==1.0.0 +APScheduler==3.10.4 +redis==5.0.1 +email-validator==2.1.0 +Pillow==10.2.0 diff --git a/routes/__init__.py b/routes/__init__.py new file mode 100644 index 0000000..d212dab --- /dev/null +++ b/routes/__init__.py @@ -0,0 +1 @@ +# Routes package diff --git a/routes/admin.py b/routes/admin.py new file mode 100644 index 0000000..fd9a279 --- /dev/null +++ b/routes/admin.py @@ -0,0 +1,768 @@ +from flask import Blueprint, request, jsonify, session +from models import Admin, User, UserGroup, ParserAPI, SiteConfig, SMTPConfig, ParseLog, DailyParseStat, HealthCheckConfig +from models import db +from utils.security import hash_password, verify_password, get_client_ip +from utils.admin_auth import admin_required, verify_2fa, generate_2fa_secret, get_2fa_qrcode_url 
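# 用法示意(非本仓库原有代码,仅演示上文 parsers 包的调用方式;链接为占位示例):
#   platform = ParserFactory.detect_platform('https://b23.tv/xxxxx')   # => 'bilibili'
#   parser, api = ParserFactory.get_parser_for_platform(platform)      # 按权重随机选取可用接口
#   result = parser.parse('https://b23.tv/xxxxx')                      # => {"cover", "video_url", "title", "description"}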
+from utils.email import EmailService +from datetime import datetime, timedelta, date +from sqlalchemy import func +import qrcode +import io +import base64 + +admin_bp = Blueprint('admin', __name__) + +@admin_bp.route('/login', methods=['GET', 'POST']) +def login(): + """管理员登录""" + if request.method == 'GET': + from flask import render_template + return render_template('admin_login.html') + + data = request.get_json() + username = data.get('username') + password = data.get('password') + code_2fa = data.get('code_2fa') + + if not all([username, password]): + return jsonify({'success': False, 'message': '请填写完整信息'}), 400 + + admin = Admin.query.filter_by(username=username).first() + + if not admin or not verify_password(password, admin.password): + return jsonify({'success': False, 'message': '用户名或密码错误'}), 401 + + # 检查2FA + if admin.is_2fa_enabled: + if not code_2fa: + return jsonify({'success': False, 'message': '请输入2FA验证码', 'require_2fa': True}), 400 + + if not verify_2fa(admin, code_2fa): + return jsonify({'success': False, 'message': '2FA验证码错误'}), 401 + + # 更新登录信息 + admin.last_login_ip = get_client_ip(request) + admin.last_login_at = datetime.utcnow() + db.session.commit() + + # 设置会话 + session['admin_id'] = admin.id + session['admin_username'] = admin.username + + return jsonify({'success': True, 'message': '登录成功'}) + +@admin_bp.route('/logout', methods=['POST']) +@admin_required +def logout(): + """管理员登出""" + session.pop('admin_id', None) + session.pop('admin_username', None) + return jsonify({'success': True, 'message': '已退出登录'}) + +@admin_bp.route('/dashboard', methods=['GET']) +@admin_required +def dashboard(): + """仪表板页面""" + from flask import render_template + return render_template('admin_dashboard.html') + +@admin_bp.route('/api/dashboard', methods=['GET']) +@admin_required +def dashboard_api(): + """仪表板统计API""" + today = date.today() + + # 今日统计 + today_stats = db.session.query( + func.sum(DailyParseStat.parse_count).label('total'), + func.sum(DailyParseStat.success_count).label('success'), + func.sum(DailyParseStat.fail_count).label('fail') + ).filter(DailyParseStat.date == today).first() + + # 总用户数 + total_users = User.query.count() + + # 总解析次数 + total_parses = ParseLog.query.count() + + # 活跃API数 + active_apis = ParserAPI.query.filter_by(is_enabled=True, health_status=True).count() + + return jsonify({ + 'success': True, + 'data': { + 'today': { + 'total': today_stats.total or 0, + 'success': today_stats.success or 0, + 'fail': today_stats.fail or 0 + }, + 'total_users': total_users, + 'total_parses': total_parses, + 'active_apis': active_apis + } + }) + +@admin_bp.route('/users', methods=['GET']) +@admin_required +def users_page(): + """用户管理页面""" + from flask import render_template + return render_template('admin_users.html') + +@admin_bp.route('/api/users', methods=['GET']) +@admin_required +def get_users(): + """获取用户列表API""" + page = request.args.get('page', 1, type=int) + per_page = request.args.get('per_page', 20, type=int) + group_id = request.args.get('group_id', type=int) + + # 构建查询 + query = User.query + if group_id: + query = query.filter_by(group_id=group_id) + + pagination = query.order_by(User.created_at.desc()).paginate( + page=page, per_page=per_page, error_out=False + ) + + users = [{ + 'id': u.id, + 'username': u.username, + 'email': u.email, + 'group_id': u.group_id, + 'group_name': u.group.name if u.group else '', + 'total_parse_count': u.total_parse_count, + 'is_active': u.is_active, + 'created_at': u.created_at.isoformat() + } for u in pagination.items] + + 
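# 说明:paginate(..., error_out=False) 在页码越界时返回空结果而不是抛出 404;
# 下方响应同时返回 total 与 pages,便于前端渲染分页控件。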
return jsonify({ + 'success': True, + 'data': users, + 'pagination': { + 'page': page, + 'per_page': per_page, + 'total': pagination.total, + 'pages': pagination.pages + } + }) + +@admin_bp.route('/api/users/', methods=['PUT']) +@admin_required +def update_user(user_id): + """更新用户信息""" + user = User.query.get_or_404(user_id) + data = request.get_json() + + if 'group_id' in data: + user.group_id = data['group_id'] + if 'is_active' in data: + user.is_active = data['is_active'] + + db.session.commit() + return jsonify({'success': True, 'message': '更新成功'}) + +@admin_bp.route('/api/groups', methods=['GET']) +@admin_required +def get_groups(): + """获取用户分组列表""" + groups = UserGroup.query.all() + return jsonify({ + 'success': True, + 'data': [{ + 'id': g.id, + 'name': g.name, + 'daily_limit': g.daily_limit, + 'description': g.description + } for g in groups] + }) + +@admin_bp.route('/api/groups/', methods=['PUT']) +@admin_required +def update_group(group_id): + """更新用户分组""" + group = UserGroup.query.get_or_404(group_id) + data = request.get_json() + + if 'daily_limit' in data: + group.daily_limit = data['daily_limit'] + if 'description' in data: + group.description = data['description'] + + db.session.commit() + return jsonify({'success': True, 'message': '更新成功'}) + +@admin_bp.route('/apis', methods=['GET']) +@admin_required +def apis_page(): + """接口管理页面""" + from flask import render_template + return render_template('admin_apis.html') + +@admin_bp.route('/api/apis', methods=['GET']) +@admin_required +def get_apis(): + """获取解析接口列表API""" + apis = ParserAPI.query.all() + return jsonify({ + 'success': True, + 'data': [{ + 'id': a.id, + 'name': a.name, + 'platform': a.platform, + 'api_url': a.api_url, + 'weight': a.weight, + 'is_enabled': a.is_enabled, + 'health_status': a.health_status, + 'total_calls': a.total_calls, + 'success_calls': a.success_calls, + 'avg_response_time': a.avg_response_time, + 'last_check_at': a.last_check_at.isoformat() if a.last_check_at else None + } for a in apis] + }) + +@admin_bp.route('/api/apis', methods=['POST']) +@admin_required +def create_api(): + """创建解析接口""" + data = request.get_json() + + api = ParserAPI( + name=data['name'], + platform=data['platform'], + api_url=data['api_url'], + api_key=data.get('api_key'), + weight=data.get('weight', 1), + is_enabled=data.get('is_enabled', True) + ) + db.session.add(api) + db.session.commit() + + return jsonify({'success': True, 'message': '创建成功', 'id': api.id}) + +@admin_bp.route('/api/apis/', methods=['PUT']) +@admin_required +def update_api(api_id): + """更新解析接口""" + api = ParserAPI.query.get_or_404(api_id) + data = request.get_json() + + for key in ['name', 'api_url', 'api_key', 'weight', 'is_enabled']: + if key in data: + setattr(api, key, data[key]) + + db.session.commit() + return jsonify({'success': True, 'message': '更新成功'}) + +@admin_bp.route('/api/apis/', methods=['DELETE']) +@admin_required +def delete_api(api_id): + """删除解析接口""" + api = ParserAPI.query.get_or_404(api_id) + db.session.delete(api) + db.session.commit() + return jsonify({'success': True, 'message': '删除成功'}) + +@admin_bp.route('/api/apis//test', methods=['POST']) +@admin_required +def test_api(api_id): + """测试解析接口""" + from parsers.factory import ParserFactory + import time + + api = ParserAPI.query.get_or_404(api_id) + data = request.get_json() + test_url = data.get('test_url') + + if not test_url: + return jsonify({'success': False, 'message': '请提供测试链接'}), 400 + + try: + parser = ParserFactory.create_parser(api) + start_time = time.time() + result = 
parser.parse(test_url) + response_time = int((time.time() - start_time) * 1000) + + # 更新健康状态和统计 + api.health_status = True + api.fail_count = 0 + api.total_calls += 1 + api.success_calls += 1 + api.avg_response_time = int((api.avg_response_time * (api.total_calls - 1) + response_time) / api.total_calls) + api.last_check_at = datetime.utcnow() + db.session.commit() + + return jsonify({ + 'success': True, + 'message': '测试成功', + 'data': result, + 'response_time': response_time + }) + except Exception as e: + # 更新失败状态 + api.fail_count += 1 + api.total_calls += 1 + if api.fail_count >= 3: + api.health_status = False + api.last_check_at = datetime.utcnow() + db.session.commit() + + return jsonify({'success': False, 'message': f'测试失败: {str(e)}'}), 500 + +@admin_bp.route('/config', methods=['GET']) +@admin_required +def config_page(): + """站点配置页面""" + from flask import render_template + return render_template('admin_config.html') + +@admin_bp.route('/api/config', methods=['GET']) +@admin_required +def get_config(): + """获取站点配置API""" + configs = SiteConfig.query.all() + return jsonify({ + 'success': True, + 'data': {c.config_key: c.config_value for c in configs} + }) + +@admin_bp.route('/api/config', methods=['PUT']) +@admin_required +def update_config(): + """更新站点配置""" + data = request.get_json() + + for key, value in data.items(): + config = SiteConfig.query.filter_by(config_key=key).first() + if config: + config.config_value = str(value) + else: + config = SiteConfig(config_key=key, config_value=str(value)) + db.session.add(config) + + db.session.commit() + return jsonify({'success': True, 'message': '更新成功'}) + + +@admin_bp.route('/smtp', methods=['GET']) +@admin_required +def smtp_page(): + """SMTP配置页面""" + from flask import render_template + return render_template('admin_smtp.html') +@admin_bp.route('/api/smtp', methods=['GET']) +@admin_required +def get_smtp(): + """获取SMTP配置列表""" + smtps = SMTPConfig.query.all() + return jsonify({ + 'success': True, + 'data': [{ + 'id': s.id, + 'name': s.name, + 'host': s.host, + 'port': s.port, + 'username': s.username, + 'from_email': s.from_email, + 'from_name': s.from_name, + 'use_tls': s.use_tls, + 'is_enabled': s.is_enabled, + 'is_default': s.is_default, + 'weight': s.weight, + 'send_count': s.send_count, + 'fail_count': s.fail_count + } for s in smtps] + }) +@admin_bp.route('/api/smtp', methods=['POST']) +@admin_required +def create_smtp(): + """创建SMTP配置""" + data = request.get_json() + + smtp = SMTPConfig( + name=data['name'], + host=data['host'], + port=data['port'], + username=data['username'], + password=data['password'], + from_email=data['from_email'], + from_name=data.get('from_name', ''), + use_tls=data.get('use_tls', True), + is_enabled=data.get('is_enabled', True), + weight=data.get('weight', 1) + ) + db.session.add(smtp) + db.session.commit() + + return jsonify({'success': True, 'message': '创建成功', 'id': smtp.id}) + +@admin_bp.route('/api/smtp/', methods=['PUT']) +@admin_required +def update_smtp(smtp_id): + """更新SMTP配置""" + smtp = SMTPConfig.query.get_or_404(smtp_id) + data = request.get_json() + + for key in ['name', 'host', 'port', 'username', 'from_email', 'from_name', 'use_tls', 'is_enabled', 'weight']: + if key in data: + setattr(smtp, key, data[key]) + + # Only update password when user explicitly provides one, so empty edits won't wipe valid credentials. 
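# For example, a hypothetical PUT body of {"is_enabled": false} disables the config
# while leaving the stored password untouched.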
+ if 'password' in data and data.get('password'): + smtp.password = data['password'] + + db.session.commit() + return jsonify({'success': True, 'message': '更新成功'}) + +@admin_bp.route('/api/smtp/test', methods=['POST']) +@admin_required +def test_smtp(): + """测试SMTP配置""" + data = request.get_json() + email = data.get('email') + + if not email: + return jsonify({'success': False, 'message': '请提供测试邮箱'}), 400 + + try: + EmailService.send_email( + email, + '【短视频解析平台】SMTP测试邮件', + '

测试成功

如果您收到此邮件,说明SMTP配置正常。

', + html=True + ) + return jsonify({'success': True, 'message': '测试邮件已发送'}) + except Exception as e: + import traceback + error_detail = traceback.format_exc() + print(f"SMTP测试失败详细信息:\n{error_detail}") + + # 提供更友好的错误提示 + error_msg = str(e) + if 'Connection unexpectedly closed' in error_msg: + error_msg = '连接被服务器关闭,请检查:1) 端口和加密方式是否匹配 2) QQ邮箱需使用授权码而非密码 3) 用户名是否正确' + elif 'Authentication failed' in error_msg or '535' in error_msg: + error_msg = '认证失败,请检查用户名和密码(QQ邮箱需使用授权码)' + elif 'timed out' in error_msg: + error_msg = '连接超时,请检查网络和服务器地址' + + return jsonify({'success': False, 'message': f'邮件发送失败: {error_msg}'}), 500 + +@admin_bp.route('/api/stats/parse', methods=['GET']) +@admin_required +def get_parse_stats(): + """获取解析统计""" + days = request.args.get('days', 7, type=int) + start_date = date.today() - timedelta(days=days-1) + + stats = db.session.query( + DailyParseStat.date, + func.sum(DailyParseStat.parse_count).label('total'), + func.sum(DailyParseStat.success_count).label('success'), + func.sum(DailyParseStat.fail_count).label('fail') + ).filter(DailyParseStat.date >= start_date).group_by(DailyParseStat.date).all() + + return jsonify({ + 'success': True, + 'data': [{ + 'date': s.date.isoformat(), + 'total': s.total or 0, + 'success': s.success or 0, + 'fail': s.fail or 0 + } for s in stats] + }) + +@admin_bp.route('/api/stats/platform', methods=['GET']) +@admin_required +def get_platform_stats(): + """获取平台统计""" + stats = db.session.query( + ParseLog.platform, + func.count(ParseLog.id).label('count') + ).group_by(ParseLog.platform).all() + + return jsonify({ + 'success': True, + 'data': [{ + 'platform': s.platform, + 'count': s.count + } for s in stats] + }) + +@admin_bp.route('/api/2fa/enable', methods=['POST']) +@admin_required +def enable_2fa(): + """启用2FA""" + admin_id = session.get('admin_id') + admin = Admin.query.get(admin_id) + + if admin.is_2fa_enabled: + return jsonify({'success': False, 'message': '2FA已启用'}), 400 + + # 生成密钥 + secret = generate_2fa_secret() + qr_url = get_2fa_qrcode_url(admin, secret) + + # 生成二维码 + qr = qrcode.QRCode(version=1, box_size=10, border=5) + qr.add_data(qr_url) + qr.make(fit=True) + img = qr.make_image(fill_color="black", back_color="white") + + # 转换为base64 + buffer = io.BytesIO() + img.save(buffer, format='PNG') + img_str = base64.b64encode(buffer.getvalue()).decode() + + # 临时保存密钥(需要验证后才正式启用) + session['temp_2fa_secret'] = secret + + return jsonify({ + 'success': True, + 'secret': secret, + 'qr_code': f'data:image/png;base64,{img_str}' + }) + +@admin_bp.route('/api/2fa/verify', methods=['POST']) +@admin_required +def verify_2fa_setup(): + """验证并启用2FA""" + admin_id = session.get('admin_id') + admin = Admin.query.get(admin_id) + data = request.get_json() + code = data.get('code') + + secret = session.get('temp_2fa_secret') + if not secret: + return jsonify({'success': False, 'message': '请先生成2FA密钥'}), 400 + + # 验证代码 + import pyotp + totp = pyotp.TOTP(secret) + if not totp.verify(code, valid_window=1): + return jsonify({'success': False, 'message': '验证码错误'}), 400 + + # 启用2FA + admin.totp_secret = secret + admin.is_2fa_enabled = True + db.session.commit() + + session.pop('temp_2fa_secret', None) + + return jsonify({'success': True, 'message': '2FA已启用'}) + +@admin_bp.route('/api/2fa/disable', methods=['POST']) +@admin_required +def disable_2fa(): + """禁用2FA""" + admin_id = session.get('admin_id') + admin = Admin.query.get(admin_id) + data = request.get_json() + code = data.get('code') + + if not admin.is_2fa_enabled: + return jsonify({'success': False, 
'message': '2FA未启用'}), 400 + + # 验证代码 + if not verify_2fa(admin, code): + return jsonify({'success': False, 'message': '验证码错误'}), 401 + + # 禁用2FA + admin.is_2fa_enabled = False + admin.totp_secret = None + db.session.commit() + + return jsonify({'success': True, 'message': '2FA已禁用'}) + +@admin_bp.route('/api/smtp/', methods=['DELETE']) +@admin_required +def delete_smtp(smtp_id): + """删除SMTP配置""" + smtp = SMTPConfig.query.get_or_404(smtp_id) + db.session.delete(smtp) + db.session.commit() + return jsonify({'success': True, 'message': '删除成功'}) + +@admin_bp.route('/api/smtp//set-default', methods=['POST']) +@admin_required +def set_default_smtp(smtp_id): + """设置默认SMTP""" + SMTPConfig.query.update({'is_default': False}) + smtp = SMTPConfig.query.get_or_404(smtp_id) + smtp.is_default = True + db.session.commit() + return jsonify({'success': True, 'message': '已设置为默认SMTP'}) + + +@admin_bp.route('/logs', methods=['GET']) +@admin_required +def logs_page(): + """解析日志页面""" + from flask import render_template + return render_template('admin_logs.html') +@admin_bp.route('/api/logs', methods=['GET']) +@admin_required +def get_logs(): + """获取解析日志""" + page = request.args.get('page', 1, type=int) + per_page = request.args.get('per_page', 50, type=int) + platform = request.args.get('platform') + status = request.args.get('status') + + query = ParseLog.query + if platform: + query = query.filter_by(platform=platform) + if status: + query = query.filter_by(status=status) + + pagination = query.order_by(ParseLog.created_at.desc()).paginate( + page=page, per_page=per_page, error_out=False + ) + + logs = [{ + 'id': log.id, + 'user_id': log.user_id, + 'ip_address': log.ip_address, + 'platform': log.platform, + 'video_url': log.video_url, + 'status': log.status, + 'error_message': log.error_message, + 'response_time': log.response_time, + 'created_at': log.created_at.isoformat() + } for log in pagination.items] + + return jsonify({ + 'success': True, + 'data': logs, + 'pagination': { + 'page': page, + 'per_page': per_page, + 'total': pagination.total, + 'pages': pagination.pages + } + }) + + +@admin_bp.route('/health-checks', methods=['GET']) +@admin_required +def health_checks_page(): + """健康检查配置页面""" + from flask import render_template + return render_template('admin_health_checks.html') +@admin_bp.route('/api/health-checks', methods=['GET']) +@admin_required +def get_health_checks(): + """获取健康检查配置""" + configs = HealthCheckConfig.query.all() + return jsonify({ + 'success': True, + 'data': [{ + 'id': c.id, + 'platform': c.platform, + 'test_url': c.test_url, + 'check_interval': c.check_interval, + 'is_enabled': c.is_enabled, + 'alert_email': c.alert_email + } for c in configs] + }) +@admin_bp.route('/api/health-checks', methods=['POST']) +@admin_required +def create_health_check(): + """创建健康检查配置""" + data = request.get_json() + config = HealthCheckConfig( + platform=data['platform'], + test_url=data['test_url'], + check_interval=data.get('check_interval', 300), + is_enabled=data.get('is_enabled', True), + alert_email=data.get('alert_email') + ) + db.session.add(config) + db.session.commit() + return jsonify({'success': True, 'message': '创建成功', 'id': config.id}) + +@admin_bp.route('/api/health-checks/', methods=['PUT']) +@admin_required +def update_health_check(config_id): + """更新健康检查配置""" + config = HealthCheckConfig.query.get_or_404(config_id) + data = request.get_json() + + for key in ['test_url', 'check_interval', 'is_enabled', 'alert_email']: + if key in data: + setattr(config, key, data[key]) + + 
db.session.commit() + return jsonify({'success': True, 'message': '更新成功'}) + +@admin_bp.route('/api/health-checks/', methods=['DELETE']) +@admin_required +def delete_health_check(config_id): + """删除健康检查配置""" + config = HealthCheckConfig.query.get_or_404(config_id) + db.session.delete(config) + db.session.commit() + return jsonify({'success': True, 'message': '删除成功'}) + + +@admin_bp.route('/profile', methods=['GET']) +@admin_required +def profile_page(): + """账号管理页面""" + from flask import render_template + return render_template('admin_profile.html') +@admin_bp.route('/api/profile', methods=['GET']) +@admin_required +def get_profile(): + """获取管理员信息""" + admin_id = session.get('admin_id') + admin = Admin.query.get(admin_id) + return jsonify({ + 'success': True, + 'data': { + 'id': admin.id, + 'username': admin.username, + 'email': admin.email, + 'is_2fa_enabled': admin.is_2fa_enabled, + 'last_login_ip': admin.last_login_ip, + 'last_login_at': admin.last_login_at.isoformat() if admin.last_login_at else None + } + }) + +@admin_bp.route('/api/profile/password', methods=['PUT']) +@admin_required +def change_password(): + """修改管理员密码""" + admin_id = session.get('admin_id') + admin = Admin.query.get(admin_id) + data = request.get_json() + + old_password = data.get('old_password') + new_password = data.get('new_password') + + if not all([old_password, new_password]): + return jsonify({'success': False, 'message': '请填写完整信息'}), 400 + + if not verify_password(old_password, admin.password): + return jsonify({'success': False, 'message': '原密码错误'}), 401 + + admin.password = hash_password(new_password) + db.session.commit() + return jsonify({'success': True, 'message': '密码修改成功'}) + +@admin_bp.route('/api/profile/email', methods=['PUT']) +@admin_required +def change_email(): + """修改管理员邮箱""" + admin_id = session.get('admin_id') + admin = Admin.query.get(admin_id) + data = request.get_json() + + email = data.get('email') + if not email: + return jsonify({'success': False, 'message': '请提供邮箱地址'}), 400 + + admin.email = email + db.session.commit() + return jsonify({'success': True, 'message': '邮箱修改成功'}) diff --git a/routes/auth.py b/routes/auth.py new file mode 100644 index 0000000..b0f5c53 --- /dev/null +++ b/routes/auth.py @@ -0,0 +1,276 @@ +from flask import Blueprint, request, jsonify, session +from flask_login import login_user, logout_user, login_required, current_user +from models import User, EmailVerification +from models import db +from utils.security import hash_password, verify_password, generate_verification_code, get_client_ip +from utils.email import EmailService +from datetime import datetime, timedelta +import re + +auth_bp = Blueprint('auth', __name__) + +def validate_email(email): + """验证邮箱格式""" + pattern = r'^[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,}$' + return re.match(pattern, email) is not None + +@auth_bp.route('/send-code', methods=['POST']) +def send_verification_code(): + """发送验证码""" + data = request.get_json() + email = data.get('email') + purpose = data.get('purpose', 'register') + + if not email or not validate_email(email): + return jsonify({'success': False, 'message': '邮箱格式不正确'}), 400 + + # 检查用户是否已存在 + if purpose == 'register': + if User.query.filter_by(email=email).first(): + return jsonify({'success': False, 'message': '该邮箱已被注册'}), 400 + elif purpose in ['reset_password', 'forgot_password']: + if not User.query.filter_by(email=email).first(): + return jsonify({'success': False, 'message': '该邮箱未注册'}), 400 + + # 生成验证码 + code = generate_verification_code(6) + expires_at = 
datetime.utcnow() + timedelta(minutes=10) + + # 保存验证码 + verification = EmailVerification( + email=email, + code=code, + purpose=purpose, + expires_at=expires_at + ) + db.session.add(verification) + db.session.commit() + + # 发送邮件 + try: + EmailService.send_verification_code(email, code, purpose) + return jsonify({'success': True, 'message': '验证码已发送'}) + except Exception as e: + return jsonify({'success': False, 'message': str(e)}), 500 + +@auth_bp.route('/register', methods=['GET', 'POST']) +def register(): + """用户注册""" + if request.method == 'GET': + from flask import render_template + return render_template('register.html') + + data = request.get_json() + username = data.get('username') + email = data.get('email') + password = data.get('password') + code = data.get('code') + + # 验证输入 + if not all([username, email, password, code]): + return jsonify({'success': False, 'message': '请填写完整信息'}), 400 + + if not validate_email(email): + return jsonify({'success': False, 'message': '邮箱格式不正确'}), 400 + + if len(password) < 6: + return jsonify({'success': False, 'message': '密码长度至少6位'}), 400 + + # 检查用户名和邮箱是否已存在 + if User.query.filter_by(username=username).first(): + return jsonify({'success': False, 'message': '用户名已存在'}), 400 + + if User.query.filter_by(email=email).first(): + return jsonify({'success': False, 'message': '邮箱已被注册'}), 400 + + # 验证验证码 + verification = EmailVerification.query.filter_by( + email=email, + code=code, + purpose='register', + is_used=False + ).filter(EmailVerification.expires_at > datetime.utcnow()).first() + + if not verification: + return jsonify({'success': False, 'message': '验证码无效或已过期'}), 400 + + # 创建用户 + user = User( + username=username, + email=email, + password=hash_password(password), + register_ip=get_client_ip(request), + group_id=2 # 默认普通用户 + ) + db.session.add(user) + + # 标记验证码已使用 + verification.is_used = True + db.session.commit() + + return jsonify({'success': True, 'message': '注册成功'}) + +@auth_bp.route('/login', methods=['GET', 'POST']) +def login(): + """用户登录""" + if request.method == 'GET': + from flask import render_template + return render_template('login.html') + + data = request.get_json() + email = data.get('email') + password = data.get('password') + + if not all([email, password]): + return jsonify({'success': False, 'message': '请填写完整信息'}), 400 + + user = User.query.filter_by(email=email).first() + + if not user or not verify_password(password, user.password): + return jsonify({'success': False, 'message': '邮箱或密码错误'}), 401 + + if not user.is_active: + return jsonify({'success': False, 'message': '账号已被禁用'}), 403 + + # 更新登录信息 + user.last_login_ip = get_client_ip(request) + db.session.commit() + + # 登录用户 + login_user(user) + + return jsonify({ + 'success': True, + 'message': '登录成功', + 'user': { + 'id': user.id, + 'username': user.username, + 'email': user.email + } + }) + +@auth_bp.route('/logout', methods=['POST']) +@login_required +def logout(): + """用户登出""" + logout_user() + return jsonify({'success': True, 'message': '已退出登录'}) + +@auth_bp.route('/reset-password', methods=['POST']) +def reset_password(): + """重置密码""" + data = request.get_json() + email = data.get('email') + code = data.get('code') + new_password = data.get('new_password') + + if not all([email, code, new_password]): + return jsonify({'success': False, 'message': '请填写完整信息'}), 400 + + if len(new_password) < 6: + return jsonify({'success': False, 'message': '密码长度至少6位'}), 400 + + # 验证验证码 + verification = EmailVerification.query.filter_by( + email=email, + code=code, + is_used=False + 
).filter( + EmailVerification.purpose.in_(['reset_password', 'forgot_password']) + ).filter(EmailVerification.expires_at > datetime.utcnow()).first() + + if not verification: + return jsonify({'success': False, 'message': '验证码无效或已过期'}), 400 + + # 更新密码 + user = User.query.filter_by(email=email).first() + if not user: + return jsonify({'success': False, 'message': '用户不存在'}), 404 + + user.password = hash_password(new_password) + verification.is_used = True + db.session.commit() + + return jsonify({'success': True, 'message': '密码重置成功'}) + +@auth_bp.route('/user-info', methods=['GET']) +@login_required +def user_info(): + """获取当前用户信息""" + return jsonify({ + 'success': True, + 'user': { + 'id': current_user.id, + 'username': current_user.username, + 'email': current_user.email, + 'group_id': current_user.group_id, + 'total_parse_count': current_user.total_parse_count + } + }) + +@auth_bp.route('/profile', methods=['GET']) +@login_required +def profile(): + """用户个人中心页面""" + from flask import render_template + return render_template('profile.html') + +@auth_bp.route('/api/profile', methods=['GET']) +@login_required +def get_profile(): + """获取用户个人中心数据""" + from models import UserGroup, DailyParseStat, ParseLog + from datetime import date + + # 获取用户组信息 + user_group = UserGroup.query.get(current_user.group_id) + daily_limit = user_group.daily_limit if user_group else 10 + group_name = user_group.name if user_group else '普通用户' + + # 获取今日使用次数 + today = date.today() + today_stat = DailyParseStat.query.filter_by( + user_id=current_user.id, + date=today + ).first() + + today_used = today_stat.parse_count if today_stat else 0 + today_remaining = max(0, daily_limit - today_used) + + # 获取解析记录(最近20条) + parse_logs = ParseLog.query.filter_by( + user_id=current_user.id + ).order_by(ParseLog.created_at.desc()).limit(20).all() + + logs_data = [{ + 'id': log.id, + 'platform': log.platform, + 'video_url': log.video_url[:50] + '...' 
if len(log.video_url) > 50 else log.video_url, + 'status': log.status, + 'response_time': log.response_time, + 'created_at': log.created_at.strftime('%Y-%m-%d %H:%M:%S') if log.created_at else '' + } for log in parse_logs] + + return jsonify({ + 'success': True, + 'data': { + 'user': { + 'id': current_user.id, + 'username': current_user.username, + 'email': current_user.email, + 'created_at': current_user.created_at.strftime('%Y-%m-%d') if current_user.created_at else '' + }, + 'group': { + 'id': current_user.group_id, + 'name': group_name, + 'daily_limit': daily_limit + }, + 'usage': { + 'daily_limit': daily_limit, + 'today_used': today_used, + 'today_remaining': today_remaining, + 'total_parse_count': current_user.total_parse_count or 0 + }, + 'parse_logs': logs_data + } + }) diff --git a/routes/main.py b/routes/main.py new file mode 100644 index 0000000..137397f --- /dev/null +++ b/routes/main.py @@ -0,0 +1,26 @@ +from flask import Blueprint, render_template, redirect + +main_bp = Blueprint('main', __name__) + +@main_bp.route('/') +def index(): + from models import SiteConfig + config = {} + configs = SiteConfig.query.all() + for c in configs: + config[c.config_key] = c.config_value + return render_template('index.html', config=config) + +@main_bp.route('/favicon.ico') +def favicon(): + """处理 favicon 请求""" + from models import SiteConfig + favicon_config = SiteConfig.query.filter_by(config_key='site_favicon').first() + if favicon_config and favicon_config.config_value: + return redirect(favicon_config.config_value) + # 如果没有设置 favicon,尝试使用 logo + logo_config = SiteConfig.query.filter_by(config_key='site_logo').first() + if logo_config and logo_config.config_value: + return redirect(logo_config.config_value) + # 返回空响应避免 404 + return '', 204 diff --git a/routes/parser.py b/routes/parser.py new file mode 100644 index 0000000..fbface2 --- /dev/null +++ b/routes/parser.py @@ -0,0 +1,202 @@ +from flask import Blueprint, request, jsonify +from flask_login import current_user +from models import ParseLog +from models import db +from utils.security import get_client_ip +from utils.limiter import RateLimiter +from utils.queue import ParseQueue, ConcurrencyController +from parsers.factory import ParserFactory +import uuid +import time +from datetime import datetime + +parser_bp = Blueprint('parser', __name__) + +@parser_bp.route('/parse', methods=['POST']) +def parse_video(): + """解析视频""" + data = request.get_json() + video_url = data.get('url') + + if not video_url: + return jsonify({'success': False, 'message': '请提供视频链接'}), 400 + + # 获取用户信息 + user_id = current_user.id if current_user.is_authenticated else None + ip_address = get_client_ip(request) + + # 检查限流 + limit_check = RateLimiter.check_limit(user_id=user_id, ip_address=ip_address) + if not limit_check['allowed']: + return jsonify({ + 'success': False, + 'message': f"今日解析次数已达上限({limit_check['limit']}次)", + 'limit_info': limit_check + }), 429 + + # 检测平台 + try: + platform = ParserFactory.detect_platform(video_url) + except ValueError as e: + return jsonify({'success': False, 'message': str(e)}), 400 + + # 生成任务ID + task_id = str(uuid.uuid4()) + + # 添加到队列 + ParseQueue.add_task(task_id, video_url, user_id, ip_address) + + # 尝试立即处理 + if ConcurrencyController.can_process(): + result = _process_task(task_id, video_url, platform, user_id, ip_address) + return jsonify(result) + else: + # 返回任务ID,让前端轮询 + return jsonify({ + 'success': True, + 'status': 'queued', + 'task_id': task_id, + 'message': '任务已加入队列,请稍候...', + 'queue_status': 
ParseQueue.get_queue_status() + }) + +@parser_bp.route('/task/', methods=['GET']) +def get_task_result(task_id): + """获取任务结果""" + result = ParseQueue.get_result(task_id) + + if result: + return jsonify(result) + else: + # 检查是否还在队列中 + queue_status = ParseQueue.get_queue_status() + return jsonify({ + 'success': False, + 'status': 'processing', + 'message': '任务处理中...', + 'queue_status': queue_status + }) + +@parser_bp.route('/queue-status', methods=['GET']) +def queue_status(): + """获取队列状态""" + status = ParseQueue.get_queue_status() + return jsonify({ + 'success': True, + 'queue_status': status + }) + +def _process_task(task_id, video_url, platform, user_id, ip_address): + """处理解析任务""" + start_time = time.time() + + # 获取该平台所有可用的API + from models import ParserAPI + available_apis = ParserAPI.query.filter_by( + platform=platform.lower(), + is_enabled=True + ).all() + + if not available_apis: + return { + 'success': False, + 'status': 'failed', + 'message': f'没有可用的{platform}解析接口', + 'response_time': int((time.time() - start_time) * 1000) + } + + last_error = None + + # 尝试所有可用的API + for api_config in available_apis: + try: + # 创建解析器 + parser = ParserFactory.create_parser(api_config) + + # 执行解析 + result = parser.parse(video_url) + + # 计算响应时间 + response_time = int((time.time() - start_time) * 1000) + + # 记录日志 + log = ParseLog( + user_id=user_id, + ip_address=ip_address, + platform=platform, + video_url=video_url, + parser_api_id=api_config.id, + status='success', + response_time=response_time + ) + db.session.add(log) + + # 更新API统计 + api_config.total_calls += 1 + api_config.success_calls += 1 + api_config.avg_response_time = int( + (api_config.avg_response_time * (api_config.total_calls - 1) + response_time) / api_config.total_calls + ) + api_config.fail_count = 0 # 重置失败计数 + + # 更新用户统计 + if user_id: + from models import User + user = User.query.get(user_id) + user.total_parse_count += 1 + + # 更新限流计数 + RateLimiter.increment_count(user_id=user_id, ip_address=ip_address, success=True) + + db.session.commit() + + # 保存结果 + response = { + 'success': True, + 'status': 'completed', + 'data': result, + 'response_time': response_time + } + ParseQueue.complete_task(task_id, response) + + return response + + except Exception as e: + # 记录失败,继续尝试下一个API + last_error = str(e) + api_config.total_calls += 1 + api_config.fail_count += 1 + db.session.commit() + continue + + # 所有API都失败了 + # 计算响应时间 + response_time = int((time.time() - start_time) * 1000) + + # 记录失败日志 + log = ParseLog( + user_id=user_id, + ip_address=ip_address, + platform=platform, + video_url=video_url, + status='failed', + error_message=last_error or '所有接口都失败', + response_time=response_time + ) + db.session.add(log) + + # 更新限流计数(失败也计数) + RateLimiter.increment_count(user_id=user_id, ip_address=ip_address, success=False) + + db.session.commit() + + # 保存错误结果 + response = { + 'success': False, + 'status': 'failed', + 'message': last_error or '所有解析接口都失败', + 'response_time': response_time + } + ParseQueue.complete_task(task_id, response) + + return response diff --git a/scheduler.py b/scheduler.py new file mode 100644 index 0000000..6aa1a91 --- /dev/null +++ b/scheduler.py @@ -0,0 +1,57 @@ +from apscheduler.schedulers.background import BackgroundScheduler +from apscheduler.triggers.interval import IntervalTrigger +from utils.health_check import HealthChecker +from models import HealthCheckConfig, db +from app import create_app +import logging + +# 配置日志 +logging.basicConfig(level=logging.INFO) +logger = logging.getLogger(__name__) + +def init_scheduler(app): 
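# 注:下方 add_job 的 lambda 通过默认参数 platform=config.platform 在定义时绑定循环变量,
# 避免闭包晚绑定导致所有定时任务都使用最后一个平台。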
+ """初始化定时任务调度器""" + scheduler = BackgroundScheduler() + + with app.app_context(): + # 获取所有启用的健康检查配置 + configs = HealthCheckConfig.query.filter_by(is_enabled=True).all() + + for config in configs: + # 为每个平台添加定时任务 + scheduler.add_job( + func=lambda platform=config.platform: run_health_check(app, platform), + trigger=IntervalTrigger(seconds=config.check_interval), + id=f'health_check_{config.platform}', + name=f'{config.platform}健康检查', + replace_existing=True + ) + logger.info(f"已添加{config.platform}健康检查任务,间隔{config.check_interval}秒") + + scheduler.start() + logger.info("定时任务调度器已启动") + + return scheduler + +def run_health_check(app, platform): + """执行健康检查""" + with app.app_context(): + try: + logger.info(f"开始执行{platform}健康检查") + HealthChecker.check_platform(platform) + logger.info(f"{platform}健康检查完成") + except Exception as e: + logger.error(f"{platform}健康检查失败: {str(e)}") + +if __name__ == '__main__': + app = create_app() + scheduler = init_scheduler(app) + + try: + # 保持程序运行 + import time + while True: + time.sleep(1) + except (KeyboardInterrupt, SystemExit): + scheduler.shutdown() + logger.info("定时任务调度器已停止") diff --git a/static/css/admin.css b/static/css/admin.css new file mode 100644 index 0000000..df1d59b --- /dev/null +++ b/static/css/admin.css @@ -0,0 +1,237 @@ +/* Admin Dashboard Styles - 2025 */ + +.admin-layout { + min-height: 100vh; + background-color: var(--bg-body); +} + +/* Header/Navbar */ +.admin-header { + background: white; + border-bottom: 1px solid var(--secondary-200); + padding: 0.75rem 0; + position: sticky; + top: 0; + z-index: 30; + box-shadow: var(--shadow-sm); +} + +.header-container { + max-width: 1200px; + margin: 0 auto; + padding: 0 1.5rem; + display: flex; + justify-content: space-between; + align-items: center; +} + +.brand { + font-size: 1.25rem; + font-weight: 700; + color: var(--primary-600); + text-decoration: none; + display: flex; + align-items: center; + gap: 0.5rem; +} + +.nav-links { + display: flex; + gap: 0.5rem; +} + +.nav-item { + padding: 0.5rem 0.75rem; + color: var(--secondary-500); + text-decoration: none; + font-size: 0.875rem; + font-weight: 500; + border-radius: var(--radius-md); + transition: var(--transition); +} + +.nav-item:hover { + background: var(--secondary-50); + color: var(--secondary-900); +} + +.nav-item.active { + background: var(--primary-50); + color: var(--primary-700); +} + +/* Main Content */ +.main-container { + max-width: 1200px; + margin: 2rem auto; + padding: 0 1.5rem; +} + +.page-header { + margin-bottom: 2rem; + display: flex; + justify-content: space-between; + align-items: center; +} + +.page-title { + font-size: 1.5rem; + font-weight: 700; + color: var(--secondary-900); +} + +/* Stats Grid */ +.stats-grid { + display: grid; + grid-template-columns: repeat(auto-fit, minmax(240px, 1fr)); + gap: 1.5rem; + margin-bottom: 2rem; +} + +.stat-card { + background: white; + padding: 1.5rem; + border-radius: var(--radius-lg); + border: 1px solid var(--secondary-200); + box-shadow: var(--shadow-sm); + transition: var(--transition); +} + +.stat-card:hover { + transform: translateY(-2px); + box-shadow: var(--shadow-md); +} + +.stat-label { + font-size: 0.875rem; + color: var(--secondary-500); + font-weight: 500; + margin-bottom: 0.5rem; +} + +.stat-value { + font-size: 2rem; + font-weight: 700; + color: var(--secondary-900); +} + +/* Tables */ +.table-container { + background: white; + border-radius: var(--radius-lg); + border: 1px solid var(--secondary-200); + box-shadow: var(--shadow-sm); + overflow-x: auto; +} + +table { + 
width: 100%; + border-collapse: collapse; + text-align: left; +} + +thead { + background: var(--secondary-50); + border-bottom: 1px solid var(--secondary-200); +} + +th { + padding: 1rem 1.5rem; + font-size: 0.75rem; + font-weight: 600; + text-transform: uppercase; + color: var(--secondary-500); + letter-spacing: 0.05em; + white-space: nowrap; +} + +td { + padding: 1rem 1.5rem; + font-size: 0.875rem; + color: var(--secondary-800); + border-bottom: 1px solid var(--secondary-100); + white-space: nowrap; +} + +tr:last-child td { + border-bottom: none; +} + +tr:hover td { + background: var(--secondary-50); +} + +/* Badges */ +.badge { + display: inline-flex; + align-items: center; + padding: 0.25rem 0.625rem; + border-radius: 9999px; + font-size: 0.75rem; + font-weight: 600; +} + +.badge-success { + background: #dcfce7; + color: #166534; +} + +.badge-error { + background: #fee2e2; + color: #991b1b; +} + +.badge-warning { + background: #fef3c7; + color: #92400e; +} + +.badge-info { + background: #dbeafe; + color: #1e40af; +} + +.badge-neutral { + background: #f1f5f9; + color: #475569; +} + +/* Forms */ +.form-card { + background: white; + padding: 2rem; + border-radius: var(--radius-lg); + border: 1px solid var(--secondary-200); + box-shadow: var(--shadow-sm); + max-width: 600px; +} + +.form-group { + margin-bottom: 1.5rem; +} + +.form-group label { + display: block; + font-size: 0.875rem; + font-weight: 500; + color: var(--secondary-700); + margin-bottom: 0.5rem; +} + +/* Responsive */ +@media (max-width: 768px) { + .header-container { + flex-direction: column; + gap: 1rem; + } + + .nav-links { + width: 100%; + overflow-x: auto; + padding-bottom: 0.5rem; + } + + .stats-grid { + grid-template-columns: 1fr; + } +} diff --git a/static/css/ui-components.css b/static/css/ui-components.css new file mode 100644 index 0000000..9bad1ac --- /dev/null +++ b/static/css/ui-components.css @@ -0,0 +1,306 @@ +/* Modern UI Design System - 2025 */ +@import url('https://fonts.googleapis.com/css2?family=Inter:wght@300;400;500;600;700&display=swap'); + +:root { + /* Color Palette - Indigo/Violet Theme */ + --primary-50: #eef2ff; + --primary-100: #e0e7ff; + --primary-500: #6366f1; + --primary-600: #4f46e5; + --primary-700: #4338ca; + + --secondary-50: #f8fafc; + --secondary-100: #f1f5f9; + --secondary-200: #e2e8f0; + --secondary-500: #64748b; + --secondary-800: #1e293b; + --secondary-900: #0f172a; + + --success: #10b981; + --warning: #f59e0b; + --error: #ef4444; + --info: #3b82f6; + + /* Semantic Colors */ + --bg-body: #f8fafc; + --bg-card: #ffffff; + --text-main: var(--secondary-800); + --text-muted: var(--secondary-500); + --border-color: var(--secondary-200); + + /* Effects */ + --shadow-sm: 0 1px 2px 0 rgb(0 0 0 / 0.05); + --shadow-md: 0 4px 6px -1px rgb(0 0 0 / 0.1), 0 2px 4px -2px rgb(0 0 0 / 0.1); + --shadow-lg: 0 10px 15px -3px rgb(0 0 0 / 0.1), 0 4px 6px -4px rgb(0 0 0 / 0.1); + --shadow-xl: 0 20px 25px -5px rgb(0 0 0 / 0.1), 0 8px 10px -6px rgb(0 0 0 / 0.1); + + --radius-sm: 0.375rem; + --radius-md: 0.5rem; + --radius-lg: 0.75rem; + --radius-xl: 1rem; + + --transition: all 0.2s cubic-bezier(0.4, 0, 0.2, 1); +} + +* { + margin: 0; + padding: 0; + box-sizing: border-box; +} + +body { + font-family: 'Inter', system-ui, -apple-system, sans-serif; + background-color: var(--bg-body); + color: var(--text-main); + line-height: 1.5; + -webkit-font-smoothing: antialiased; +} + +/* Typography */ +h1, h2, h3, h4, h5, h6 { + color: var(--secondary-900); + font-weight: 700; + letter-spacing: -0.025em; +} + +/* Buttons 
*/ +.ui-btn { + display: inline-flex; + align-items: center; + justify-content: center; + padding: 0.625rem 1.25rem; + font-size: 0.875rem; + font-weight: 500; + border-radius: var(--radius-md); + border: 1px solid transparent; + cursor: pointer; + transition: var(--transition); + gap: 0.5rem; +} + +.ui-btn:active { + transform: scale(0.98); +} + +.ui-btn-primary { + background: linear-gradient(135deg, var(--primary-600), var(--primary-700)); + color: white; + box-shadow: 0 4px 6px -1px rgba(79, 70, 229, 0.2); +} + +.ui-btn-primary:hover { + background: linear-gradient(135deg, var(--primary-500), var(--primary-600)); + box-shadow: 0 6px 8px -1px rgba(79, 70, 229, 0.3); +} + +.ui-btn-secondary { + background: white; + border-color: var(--secondary-200); + color: var(--secondary-800); +} + +.ui-btn-secondary:hover { + background: var(--secondary-50); + border-color: var(--secondary-300); +} + +.ui-btn-danger { + background: var(--error); + color: white; +} + +.ui-btn-danger:hover { + background: #dc2626; +} + +.ui-btn-success { + background: var(--success); + color: white; +} + +/* Inputs */ +.ui-input { + width: 100%; + padding: 0.625rem 1rem; + border: 1px solid var(--secondary-200); + border-radius: var(--radius-md); + background-color: white; + color: var(--text-main); + font-size: 0.875rem; + transition: var(--transition); +} + +.ui-input:focus { + outline: none; + border-color: var(--primary-500); + box-shadow: 0 0 0 3px var(--primary-100); +} + +/* Select / Dropdown */ +select.ui-input, +.ui-select { + width: 100%; + padding: 0.625rem 2.5rem 0.625rem 1rem; + border: 1px solid var(--secondary-200); + border-radius: var(--radius-md); + background-color: white; + background-image: url("data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' width='16' height='16' viewBox='0 0 24 24' fill='none' stroke='%2364748b' stroke-width='2' stroke-linecap='round' stroke-linejoin='round'%3E%3Cpolyline points='6 9 12 15 18 9'%3E%3C/polyline%3E%3C/svg%3E"); + background-repeat: no-repeat; + background-position: right 0.75rem center; + background-size: 1rem; + color: var(--text-main); + font-size: 0.875rem; + cursor: pointer; + transition: var(--transition); + -webkit-appearance: none; + -moz-appearance: none; + appearance: none; +} + +select.ui-input:focus, +.ui-select:focus { + outline: none; + border-color: var(--primary-500); + box-shadow: 0 0 0 3px var(--primary-100); +} + +select.ui-input:hover, +.ui-select:hover { + border-color: var(--secondary-300); +} + +/* Cards */ +.ui-card { + background: var(--bg-card); + border-radius: var(--radius-lg); + border: 1px solid var(--secondary-100); + box-shadow: var(--shadow-sm); + padding: 1.5rem; +} + +/* Notifications */ +#notification-container { + position: fixed; + top: 1.5rem; + right: 1.5rem; + z-index: 50; + display: flex; + flex-direction: column; + gap: 0.75rem; +} + +.notification { + min-width: 320px; + padding: 1rem; + background: white; + border-radius: var(--radius-md); + box-shadow: var(--shadow-lg); + border-left: 4px solid transparent; + transform: translateX(100%); + opacity: 0; + transition: all 0.4s cubic-bezier(0.16, 1, 0.3, 1); + display: flex; + align-items: center; + gap: 0.75rem; +} + +.notification.show { + transform: translateX(0); + opacity: 1; +} + +.notification-success { border-left-color: var(--success); } +.notification-error { border-left-color: var(--error); } +.notification-warning { border-left-color: var(--warning); } +.notification-info { border-left-color: var(--info); } + +.notification-icon { + flex-shrink: 0; 
+ width: 24px; + height: 24px; + border-radius: 50%; + display: flex; + align-items: center; + justify-content: center; + font-size: 12px; + color: white; +} + +.notification-success .notification-icon { background: var(--success); } +.notification-error .notification-icon { background: var(--error); } +.notification-warning .notification-icon { background: var(--warning); } +.notification-info .notification-icon { background: var(--info); } + +.notification-message { + font-size: 0.875rem; + font-weight: 500; + color: var(--secondary-800); +} + +/* Modal */ +.ui-modal { + position: fixed; + inset: 0; + z-index: 40; + display: flex; + align-items: center; + justify-content: center; + opacity: 0; + pointer-events: none; + transition: opacity 0.2s; +} + +.ui-modal.show { + opacity: 1; + pointer-events: auto; +} + +.ui-modal-overlay { + position: absolute; + inset: 0; + background: rgba(15, 23, 42, 0.4); + backdrop-filter: blur(4px); +} + +.ui-modal-content { + position: relative; + background: white; + width: 90%; + max-width: 500px; + max-height: 90vh; + border-radius: var(--radius-lg); + box-shadow: var(--shadow-xl); + padding: 1.5rem; + transform: scale(0.95); + transition: transform 0.2s; + overflow-y: auto; + overflow-x: hidden; +} + +.ui-modal.show .ui-modal-content { + transform: scale(1); +} + +.ui-modal-content h3 { + font-size: 1.125rem; + margin-bottom: 1rem; +} + +.ui-modal-actions { + margin-top: 1.5rem; + display: flex; + justify-content: flex-end; + gap: 0.75rem; +} + +/* Utilities */ +.text-sm { font-size: 0.875rem; } +.text-muted { color: var(--text-muted); } +.font-medium { font-weight: 500; } +.flex { display: flex; } +.items-center { align-items: center; } +.justify-between { justify-content: space-between; } +.gap-2 { gap: 0.5rem; } +.gap-4 { gap: 1rem; } +.mt-4 { margin-top: 1rem; } +.mb-4 { margin-bottom: 1rem; } diff --git a/static/js/ui-components.js b/static/js/ui-components.js new file mode 100644 index 0000000..72cc2e6 --- /dev/null +++ b/static/js/ui-components.js @@ -0,0 +1,120 @@ +// 现代化UI组件库 +const UI = { + // 显示通知消息 + notify(message, type = 'info', duration = 3000) { + const container = document.getElementById('notification-container') || this.createNotificationContainer(); + const notification = document.createElement('div'); + notification.className = `notification notification-${type}`; + notification.innerHTML = ` +
+ <div class="notification-icon">${this.getIcon(type)}</div>
+ <div class="notification-message">${message}</div>
+ `; + container.appendChild(notification); + setTimeout(() => notification.classList.add('show'), 10); + setTimeout(() => { + notification.classList.remove('show'); + setTimeout(() => notification.remove(), 300); + }, duration); + }, + + // 显示确认对话框 + confirm(title, message, onConfirm, onCancel) { + const modal = document.createElement('div'); + modal.className = 'ui-modal'; + modal.innerHTML = ` +
+ <div class="ui-modal-overlay"></div>
+ <div class="ui-modal-content">
+ <h3>${title}</h3>
+ <p>${message}</p>
+ <div class="ui-modal-actions">
+ <button class="ui-btn ui-btn-secondary" data-action="cancel">取消</button>
+ <button class="ui-btn ui-btn-primary" data-action="confirm">确定</button>
+ </div>
+ </div>
+ `; + document.body.appendChild(modal); + setTimeout(() => modal.classList.add('show'), 10); + + modal.querySelector('[data-action="confirm"]').onclick = () => { + this.closeModal(modal); + if (onConfirm) onConfirm(); + }; + modal.querySelector('[data-action="cancel"]').onclick = () => { + this.closeModal(modal); + if (onCancel) onCancel(); + }; + modal.querySelector('.ui-modal-overlay').onclick = () => { + this.closeModal(modal); + if (onCancel) onCancel(); + }; + }, + + // 显示输入对话框 + prompt(title, placeholder, defaultValue, onConfirm, onCancel) { + const modal = document.createElement('div'); + modal.className = 'ui-modal'; + modal.innerHTML = ` +
+ <div class="ui-modal-overlay"></div>
+ <div class="ui-modal-content">
+ <h3>${title}</h3>
+ <input type="text" class="ui-input" placeholder="${placeholder}" value="${defaultValue || ''}">
+ <div class="ui-modal-actions">
+ <button class="ui-btn ui-btn-secondary" data-action="cancel">取消</button>
+ <button class="ui-btn ui-btn-primary" data-action="confirm">确定</button>
+ </div>
+ </div>
+ `; + document.body.appendChild(modal); + setTimeout(() => modal.classList.add('show'), 10); + + const input = modal.querySelector('.ui-input'); + input.focus(); + input.select(); + + const confirm = () => { + const value = input.value.trim(); + if (value) { + this.closeModal(modal); + if (onConfirm) onConfirm(value); + } + }; + + modal.querySelector('[data-action="confirm"]').onclick = confirm; + input.onkeypress = (e) => { if (e.key === 'Enter') confirm(); }; + modal.querySelector('[data-action="cancel"]').onclick = () => { + this.closeModal(modal); + if (onCancel) onCancel(); + }; + modal.querySelector('.ui-modal-overlay').onclick = () => { + this.closeModal(modal); + if (onCancel) onCancel(); + }; + }, + + // 关闭模态框 + closeModal(modal) { + modal.classList.remove('show'); + setTimeout(() => modal.remove(), 300); + }, + + // 创建通知容器 + createNotificationContainer() { + const container = document.createElement('div'); + container.id = 'notification-container'; + document.body.appendChild(container); + return container; + }, + + // 获取图标 + getIcon(type) { + const icons = { + success: '✓', + error: '✕', + warning: '⚠', + info: 'ℹ' + }; + return icons[type] || icons.info; + } +}; diff --git a/templates/admin_apis.html b/templates/admin_apis.html new file mode 100644 index 0000000..3bf374b --- /dev/null +++ b/templates/admin_apis.html @@ -0,0 +1,282 @@ + + + + + + + 接口管理 - 管理后台 + + + + + +
+ +
+ +
+ + +
+
+
加载中...
+
+
+
+ + +
+
+
+

添加接口

+
+ +
+ + +
+
+ + +
+
+ + +
+
+ + +
+
+ + +
+
+ + +
+
+ + +
+
+
+
+ + + + + + \ No newline at end of file diff --git a/templates/admin_config.html b/templates/admin_config.html new file mode 100644 index 0000000..d18af41 --- /dev/null +++ b/templates/admin_config.html @@ -0,0 +1,181 @@ + + + + + + + 站点配置 - 管理后台 + + + + + +
+ +
+ +
+ + +
+
+
+ + + 显示在浏览器标签 +
+ +
+ + + 显示在首页的主标题 +
+ +
+ + + 显示在首页主标题下方 +
+ +
+ + + 留空则不显示Logo +
+ +
+ + + 浏览器标签页显示的小图标,留空则使用Logo +
+ +
+ + + 显示在首页顶部(留空则不显示) +
+ +
+ + + 显示在页面底部 +
+ +
+ + + 同时处理的最大解析任务数(1-20) +
+ +
+ + + 未登录用户每天可解析的次数 +
+ +
+ + + 已注册用户每天可解析的次数 +
+ +
+
+ +
+
+
+
+ + + + + + \ No newline at end of file diff --git a/templates/admin_dashboard.html b/templates/admin_dashboard.html new file mode 100644 index 0000000..fc62fc9 --- /dev/null +++ b/templates/admin_dashboard.html @@ -0,0 +1,150 @@ + + + + + + + 仪表板 - 管理后台 + + + + + +
+ +
+ +
+ + +
+
+
今日解析总数
+
-
+
+
+
今日成功解析
+
-
+
+
+
总注册用户
+
-
+
+
+
活跃接口节点
+
-
+
+
+ +
+
+

实时解析动态

+ 查看全部 → +
+ +
+ + + + + + + + + + + + + + +
时间平台状态耗时
加载中...
+
+
+
+ + + + + + \ No newline at end of file diff --git a/templates/admin_health_checks.html b/templates/admin_health_checks.html new file mode 100644 index 0000000..35dcc65 --- /dev/null +++ b/templates/admin_health_checks.html @@ -0,0 +1,232 @@ + + + + + + + 健康检查配置 - 管理后台 + + + + + +
+ +
+ +
+ + +
+
+ + + + + + + + + + + + + + + + + +
ID平台测试链接检查间隔告警邮箱状态操作
加载中...
+
+
+
+ + +
+
+
+

添加健康检查配置

+
+ +
+ + +
+
+ + + 用于健康检查的测试视频链接 +
+
+ + + 建议300秒(5分钟) +
+
+ + + 接口异常时发送告警邮件 +
+
+ + +
+
+ + +
+
+
+
+ + + + + + \ No newline at end of file diff --git a/templates/admin_login.html b/templates/admin_login.html new file mode 100644 index 0000000..926abda --- /dev/null +++ b/templates/admin_login.html @@ -0,0 +1,161 @@ + + + + + + + 管理员登录 - JieXi Admin + + + + + + + + + + + + + diff --git a/templates/admin_logs.html b/templates/admin_logs.html new file mode 100644 index 0000000..f55988f --- /dev/null +++ b/templates/admin_logs.html @@ -0,0 +1,157 @@ + + + + + + + 日志审计 - 管理后台 + + + + + +
+ +
+ +
+ + +
+
+ + + + + + + + + + + + + + + + + + + +
ID时间用户IDIP地址平台视频链接状态响应时间错误信息
加载中...
+
+ +
+
+ + + + + + \ No newline at end of file diff --git a/templates/admin_profile.html b/templates/admin_profile.html new file mode 100644 index 0000000..6d517ba --- /dev/null +++ b/templates/admin_profile.html @@ -0,0 +1,292 @@ + + + + + + + 账号管理 - 管理后台 + + + + + +
+ +
+ +
+ + +
+ +
+

基本信息

+
+ +
-
+
+
+ +
-
+
+
+ +
-
+
+
+ +
+ - (IP: -) +
+
+
+ + +
+

修改邮箱

+
+
+ + +
+ +
+
+ + +
+

修改密码

+
+
+ + +
+
+ + + 密码长度至少8位 +
+
+ + +
+ +
+
+ + +
+

两步验证 (2FA)

+

启用2FA可以提高账号安全性,需要使用Google Authenticator等应用扫描二维码。

+ +
+ +
+ + +
+
+
+ + + + + + \ No newline at end of file diff --git a/templates/admin_smtp.html b/templates/admin_smtp.html new file mode 100644 index 0000000..e798ca1 --- /dev/null +++ b/templates/admin_smtp.html @@ -0,0 +1,335 @@ + + + + + + + SMTP配置 - 管理后台 + + + + + +
+ +
+ +
+ + +
+
+ + + + + + + + + + + + + + + + + + + + +
ID名称服务器端口用户名发件邮箱权重状态统计操作
加载中...
+
+
+
+ + +
+
+
+

添加SMTP配置

+
+ +
+ + +
+
+ + +
+
+ + + 常用端口:25 (不加密), 587 (TLS), 465 (SSL) +
+
+ + +
+
+ + + 创建时必填,编辑时留空表示不修改 +
+
+ + +
+
+ + +
+
+ + +
+
+ + +
+
+ + +
+
+ + +
+
+
+
+ + +
+
+
+

测试SMTP配置

+
+
+ + +
+
+ + +
+
+
+
+ + + + + + diff --git a/templates/admin_users.html b/templates/admin_users.html new file mode 100644 index 0000000..7c31d9a --- /dev/null +++ b/templates/admin_users.html @@ -0,0 +1,400 @@ + + + + + + + 用户管理 - 管理后台 + + + + + +
+ +
+ +
+ + + +
+
+
总用户数
+
-
+
+
+
普通用户
+
-
+
+
+
VIP用户
+
-
+
+
+
SVIP用户
+
-
+
+
+ + +
+ +
+ +
+
+ + + + + + + + + + + + + + + + + + +
ID用户名邮箱用户组解析次数状态注册时间操作
加载中...
+
+ + +
+
+ + +
+
+
+

编辑用户

+
+ +
+ + +
+
+ + +
+
+ + +
+
+
+
+ + +
+
+
+

用户组管理

+
+ + + + + + + + + + + + +
分组名称每日解析次数描述操作
加载中...
+
+
+ +
+
+
+ + +
+
+
+

编辑用户组

+
+ +
+ + +
+
+ + +
+
+ + +
+
+ + +
+
+
+
+ + + + + + \ No newline at end of file diff --git a/templates/index.html b/templates/index.html new file mode 100644 index 0000000..ad085a6 --- /dev/null +++ b/templates/index.html @@ -0,0 +1,460 @@ + + + + + + 短视频解析平台 - 免费高清无水印 + + + + + {% if config.get('site_notice') %} +
+
{{ config.get('site_notice') | safe }}
+
+ {% endif %} + +
+

{{ config.get('home_title', 'JieXi Pro') }}

+

{{ config.get('home_subtitle', '新一代全能短视频去水印解析工具') }}

+
+ +
+
+ +
+ + +
+ +
+

+

+
+ + 下载视频 + + + +
+ +
+
+
+ + + + {% if config.get('site_footer') %} +
+ {{ config.get('site_footer') | safe }} +
+ {% endif %} + + + + + diff --git a/templates/login.html b/templates/login.html new file mode 100644 index 0000000..0406da7 --- /dev/null +++ b/templates/login.html @@ -0,0 +1,243 @@ + + + + + + 用户登录 - JieXi Pro + + + + +
+
+

欢迎回来

+

登录您的账号继续使用

+
+ +
+
+ + +
+ +
+ + +
+ + + + +
+ + +
+ + + + + diff --git a/templates/profile.html b/templates/profile.html new file mode 100644 index 0000000..0d48b6c --- /dev/null +++ b/templates/profile.html @@ -0,0 +1,447 @@ + + + + + + + 个人中心 - 短视频解析平台 + + + + + +
+ + + +
+
+
今日已用
+
-
+
+
+
今日剩余
+
-
+
+
+
每日限额
+
-
+
+
+
累计解析
+
-
+
+
+ + +
+

账户信息

+
+ 用户名 + - +
+
+ 邮箱 + - +
+
+ 用户组 + - +
+
+ 注册时间 + - +
+
+ 今日使用进度 + - +
+
+
+
+
+ + +
+
+

解析记录(最近20条)

+
+ + + + + + + + + + + + + + + +
平台视频链接状态耗时时间
加载中...
+
+
+ + + + + + diff --git a/templates/register.html b/templates/register.html new file mode 100644 index 0000000..d79c409 --- /dev/null +++ b/templates/register.html @@ -0,0 +1,305 @@ + + + + + + 用户注册 - JieXi Pro + + + + +
+
+

创建账号

+

注册后享受更多解析次数

+
+ +
+
+ + +
+ +
+ + +
+ +
+ +
+ + +
+
+ +
+ + +
+ +
+ + +
+ + +
+ + +
+ + + + + diff --git a/utils/__init__.py b/utils/__init__.py new file mode 100644 index 0000000..dd7ee44 --- /dev/null +++ b/utils/__init__.py @@ -0,0 +1 @@ +# Utils package diff --git a/utils/admin_auth.py b/utils/admin_auth.py new file mode 100644 index 0000000..63c5947 --- /dev/null +++ b/utils/admin_auth.py @@ -0,0 +1,37 @@ +from functools import wraps +from flask import session, jsonify, redirect, url_for, request +from models import Admin +import pyotp + +def admin_required(f): + """管理员权限装饰器""" + @wraps(f) + def decorated_function(*args, **kwargs): + if 'admin_id' not in session: + # 如果是API请求,返回JSON + if request.path.startswith('/admin/api/'): + return jsonify({'success': False, 'message': '请先登录'}), 401 + # 如果是页面请求,重定向到登录页 + return redirect(url_for('admin.login')) + return f(*args, **kwargs) + return decorated_function + +def verify_2fa(admin: Admin, code: str) -> bool: + """验证2FA代码""" + if not admin.is_2fa_enabled or not admin.totp_secret: + return True + + totp = pyotp.TOTP(admin.totp_secret) + return totp.verify(code, valid_window=1) + +def generate_2fa_secret() -> str: + """生成2FA密钥""" + return pyotp.random_base32() + +def get_2fa_qrcode_url(admin: Admin, secret: str) -> str: + """获取2FA二维码URL""" + totp = pyotp.TOTP(secret) + return totp.provisioning_uri( + name=admin.username, + issuer_name='短视频解析平台' + ) diff --git a/utils/email.py b/utils/email.py new file mode 100644 index 0000000..8d69e37 --- /dev/null +++ b/utils/email.py @@ -0,0 +1,152 @@ +import smtplib +import ssl +from email.mime.text import MIMEText +from email.mime.multipart import MIMEMultipart +from email.utils import formataddr +from email.header import Header +from models import SMTPConfig +import random + +class EmailService: + """邮件服务类""" + + @staticmethod + def get_smtp_config(): + """获取可用的SMTP配置(负载均衡)""" + configs = SMTPConfig.query.filter_by(is_enabled=True).all() + + if not configs: + raise Exception("没有可用的SMTP配置") + + # 加权随机选择 + total_weight = sum(c.weight for c in configs) + if total_weight == 0: + return random.choice(configs) + + rand = random.uniform(0, total_weight) + current = 0 + + for config in configs: + current += config.weight + if rand <= current: + return config + + return configs[-1] + + @staticmethod + def _do_send(smtp_config, to_email: str, subject: str, content: str, html: bool = True): + """实际发送邮件的内部方法""" + server = None + try: + print(f"[SMTP调试] 开始发送邮件") + print(f"[SMTP调试] 服务器: {smtp_config.host}:{smtp_config.port}") + print(f"[SMTP调试] 用户名: {smtp_config.username}") + print(f"[SMTP调试] 使用TLS: {smtp_config.use_tls}") + print(f"[SMTP调试] 发件人: {smtp_config.from_email}") + + msg = MIMEMultipart('alternative') + from_email = smtp_config.from_email or smtp_config.username + from_name = smtp_config.from_name or smtp_config.username + msg['From'] = formataddr((str(Header(from_name, 'utf-8')), from_email)) + msg['To'] = to_email + msg['Subject'] = Header(subject, 'utf-8') + + if html: + msg.attach(MIMEText(content, 'html', 'utf-8')) + else: + msg.attach(MIMEText(content, 'plain', 'utf-8')) + + print(f"[SMTP调试] 开始连接服务器...") + if smtp_config.port == 465: + print(f"[SMTP调试] 使用 SSL 模式(端口 465)") + server = smtplib.SMTP_SSL(smtp_config.host, smtp_config.port, timeout=30) + elif smtp_config.use_tls: + print(f"[SMTP调试] 使用 STARTTLS 模式(端口 {smtp_config.port})") + server = smtplib.SMTP(smtp_config.host, smtp_config.port, timeout=30) + server.ehlo() + context = ssl.create_default_context() + server.starttls(context=context) + server.ehlo() + else: + print(f"[SMTP调试] 使用普通 SMTP 模式(端口 {smtp_config.port})") + server = 
smtplib.SMTP(smtp_config.host, smtp_config.port, timeout=30) + + print(f"[SMTP调试] 连接成功,开始登录...") + server.login(smtp_config.username, smtp_config.password) + print(f"[SMTP调试] 登录成功,开始发送邮件...") + server.sendmail(from_email, [to_email], msg.as_string()) + print(f"[SMTP调试] 邮件发送成功!") + + # 更新发送统计 + smtp_config.send_count += 1 + from models import db + db.session.commit() + + return True + finally: + if server: + try: + server.quit() + except: + pass + + @staticmethod + def send_email(to_email: str, subject: str, content: str, html: bool = True): + """发送邮件(支持故障自动转移)""" + configs = SMTPConfig.query.filter_by(is_enabled=True).all() + + if not configs: + raise Exception("没有可用的SMTP配置") + + # 按权重排序,优先尝试高权重的配置 + configs.sort(key=lambda x: x.weight, reverse=True) + + last_error = None + tried_ids = [] + + for smtp_config in configs: + tried_ids.append(smtp_config.id) + try: + return EmailService._do_send(smtp_config, to_email, subject, content, html) + except Exception as e: + last_error = str(e) + print(f"[SMTP调试] {smtp_config.name} 发送失败: {last_error}") + # 更新失败统计 + smtp_config.fail_count += 1 + from models import db + db.session.commit() + + # 如果还有其他配置,继续尝试 + remaining = len(configs) - len(tried_ids) + if remaining > 0: + print(f"[SMTP调试] 尝试下一个SMTP配置,剩余 {remaining} 个") + continue + + # 所有配置都失败了 + raise Exception(f"所有SMTP配置均发送失败,最后错误: {last_error}") + + @staticmethod + def send_verification_code(to_email: str, code: str, purpose: str): + """发送验证码邮件""" + purpose_text = { + 'register': '注册账号', + 'reset_password': '重置密码', + 'forgot_password': '找回密码' + }.get(purpose, '验证') + + subject = f"【短视频解析平台】{purpose_text}验证码" + + content = f""" + + +

+ <h2>验证码</h2>
+ <p>您正在进行{purpose_text}操作,验证码为:</p>
+ <p><strong>{code}</strong></p>
+ <p>验证码有效期为10分钟,请勿泄露给他人。</p>
+ <hr>
+ <p>如果这不是您的操作,请忽略此邮件。</p>

+ + + """ + + return EmailService.send_email(to_email, subject, content, html=True) diff --git a/utils/health_check.py b/utils/health_check.py new file mode 100644 index 0000000..75214c4 --- /dev/null +++ b/utils/health_check.py @@ -0,0 +1,139 @@ +from models import ParserAPI, HealthCheckConfig, HealthCheckLog +from models import db +from parsers.factory import ParserFactory +from utils.email import EmailService +from datetime import datetime +import time + +class HealthChecker: + """健康检查器""" + + @staticmethod + def check_api(api: ParserAPI, test_url: str) -> dict: + """检查单个API""" + start_time = time.time() + + try: + # 创建解析器 + parser = ParserFactory.create_parser(api) + + # 执行解析 + result = parser.parse(test_url) + + # 计算响应时间 + response_time = int((time.time() - start_time) * 1000) + + # 检查结果是否有效 + if result and result.get('video_url'): + return { + 'success': True, + 'response_time': response_time, + 'error': None + } + else: + return { + 'success': False, + 'response_time': response_time, + 'error': '解析结果无效' + } + + except Exception as e: + response_time = int((time.time() - start_time) * 1000) + return { + 'success': False, + 'response_time': response_time, + 'error': str(e) + } + + @staticmethod + def check_platform(platform: str): + """检查指定平台的所有API""" + # 获取健康检查配置 + config = HealthCheckConfig.query.filter_by( + platform=platform, + is_enabled=True + ).first() + + if not config: + return + + # 获取该平台的所有API + apis = ParserAPI.query.filter_by(platform=platform).all() + + failed_apis = [] + + for api in apis: + # 执行健康检查 + result = HealthChecker.check_api(api, config.test_url) + + # 记录日志 + log = HealthCheckLog( + parser_api_id=api.id, + status='success' if result['success'] else 'failed', + response_time=result['response_time'], + error_message=result['error'] + ) + db.session.add(log) + + # 更新API状态 + api.last_check_at = datetime.utcnow() + + if result['success']: + api.health_status = True + api.fail_count = 0 + else: + api.fail_count += 1 + # 连续失败3次标记为不健康 + if api.fail_count >= 3: + api.health_status = False + failed_apis.append({ + 'name': api.name, + 'error': result['error'] + }) + + db.session.commit() + + # 发送告警邮件 + if failed_apis and config.alert_email: + HealthChecker.send_alert_email(platform, failed_apis, config.alert_email) + + @staticmethod + def send_alert_email(platform: str, failed_apis: list, alert_email: str): + """发送告警邮件""" + subject = f"【短视频解析平台】{platform}接口健康检查告警" + + content = f""" + + +

+ <h2>接口健康检查告警</h2>
+ <p>以下{platform}解析接口健康检查失败:</p>
+ <ul>
+ """
+
+ for api in failed_apis:
+ content += f"<li>• {api['name']}: {api['error']}</li>"
+
+ content += """
+ </ul>
+ <p>请及时检查并处理。</p>
+ <hr>
+ <p>此邮件由系统自动发送,请勿回复。</p>

+ + + """ + + try: + EmailService.send_email(alert_email, subject, content, html=True) + except Exception as e: + print(f"发送告警邮件失败: {str(e)}") + + @staticmethod + def check_all_platforms(): + """检查所有平台""" + configs = HealthCheckConfig.query.filter_by(is_enabled=True).all() + + for config in configs: + try: + HealthChecker.check_platform(config.platform) + except Exception as e: + print(f"检查{config.platform}平台失败: {str(e)}") diff --git a/utils/limiter.py b/utils/limiter.py new file mode 100644 index 0000000..b02e915 --- /dev/null +++ b/utils/limiter.py @@ -0,0 +1,89 @@ +from datetime import date +from models import DailyParseStat, UserGroup +from models import db + +class RateLimiter: + """限流器""" + + @staticmethod + def check_limit(user_id=None, ip_address=None): + """检查是否超过限制""" + today = date.today() + + if user_id: + # 已登录用户 + stat = DailyParseStat.query.filter_by( + user_id=user_id, + date=today + ).first() + + from models import User + user = User.query.get(user_id) + group = UserGroup.query.get(user.group_id) + limit = group.daily_limit + + current_count = stat.parse_count if stat else 0 + + return { + 'allowed': current_count < limit, + 'current': current_count, + 'limit': limit, + 'remaining': max(0, limit - current_count) + } + else: + # 游客 + stat = DailyParseStat.query.filter_by( + ip_address=ip_address, + date=today + ).first() + + from models import SiteConfig + config = SiteConfig.query.filter_by(config_key='guest_daily_limit').first() + limit = int(config.config_value) if config else 5 + + current_count = stat.parse_count if stat else 0 + + return { + 'allowed': current_count < limit, + 'current': current_count, + 'limit': limit, + 'remaining': max(0, limit - current_count) + } + + @staticmethod + def increment_count(user_id=None, ip_address=None, success=True): + """增加计数""" + today = date.today() + + if user_id: + stat = DailyParseStat.query.filter_by( + user_id=user_id, + date=today + ).first() + + if not stat: + stat = DailyParseStat(user_id=user_id, date=today) + db.session.add(stat) + + stat.parse_count = (stat.parse_count or 0) + 1 + if success: + stat.success_count = (stat.success_count or 0) + 1 + else: + stat.fail_count = (stat.fail_count or 0) + 1 + else: + stat = DailyParseStat.query.filter_by( + ip_address=ip_address, + date=today + ).first() + + if not stat: + stat = DailyParseStat(ip_address=ip_address, date=today) + db.session.add(stat) + + stat.parse_count = (stat.parse_count or 0) + 1 + if success: + stat.success_count = (stat.success_count or 0) + 1 + else: + stat.fail_count = (stat.fail_count or 0) + 1 + + db.session.commit() diff --git a/utils/queue.py b/utils/queue.py new file mode 100644 index 0000000..c1234b3 --- /dev/null +++ b/utils/queue.py @@ -0,0 +1,141 @@ +import json +import time +from datetime import datetime +from typing import Dict, Optional + +# 内存队列(当Redis不可用时使用) +_memory_queue = [] +_memory_processing = {} +_memory_results = {} + +def get_redis_client(): + """获取Redis客户端""" + try: + from app import redis_client + return redis_client + except: + return None + +class ParseQueue: + """解析队列管理器""" + + QUEUE_KEY = "parse_queue" + PROCESSING_KEY = "parse_processing" + RESULT_KEY_PREFIX = "parse_result:" + + @staticmethod + def add_task(task_id: str, video_url: str, user_id: Optional[int] = None, ip_address: str = ""): + """添加任务到队列""" + task = { + 'task_id': task_id, + 'video_url': video_url, + 'user_id': user_id, + 'ip_address': ip_address, + 'created_at': datetime.utcnow().isoformat(), + 'status': 'queued' + } + + redis_client = get_redis_client() + 
if redis_client: + redis_client.rpush(ParseQueue.QUEUE_KEY, json.dumps(task)) + else: + # 使用内存队列 + _memory_queue.append(task) + return task_id + + @staticmethod + def get_task() -> Optional[Dict]: + """从队列获取任务""" + redis_client = get_redis_client() + if redis_client: + task_json = redis_client.lpop(ParseQueue.QUEUE_KEY) + if task_json: + task = json.loads(task_json) + redis_client.hset(ParseQueue.PROCESSING_KEY, task['task_id'], json.dumps(task)) + return task + else: + # 使用内存队列 + if _memory_queue: + task = _memory_queue.pop(0) + _memory_processing[task['task_id']] = task + return task + return None + + @staticmethod + def complete_task(task_id: str, result: Dict): + """完成任务""" + redis_client = get_redis_client() + if redis_client: + redis_client.hdel(ParseQueue.PROCESSING_KEY, task_id) + redis_client.setex( + f"{ParseQueue.RESULT_KEY_PREFIX}{task_id}", + 3600, + json.dumps(result) + ) + else: + # 使用内存 + _memory_processing.pop(task_id, None) + _memory_results[task_id] = result + + @staticmethod + def get_result(task_id: str) -> Optional[Dict]: + """获取任务结果""" + redis_client = get_redis_client() + if redis_client: + result_json = redis_client.get(f"{ParseQueue.RESULT_KEY_PREFIX}{task_id}") + if result_json: + return json.loads(result_json) + else: + # 使用内存 + return _memory_results.get(task_id) + return None + + @staticmethod + def get_queue_length() -> int: + """获取队列长度""" + redis_client = get_redis_client() + if redis_client: + return redis_client.llen(ParseQueue.QUEUE_KEY) + else: + return len(_memory_queue) + + @staticmethod + def get_processing_count() -> int: + """获取正在处理的任务数""" + redis_client = get_redis_client() + if redis_client: + return redis_client.hlen(ParseQueue.PROCESSING_KEY) + else: + return len(_memory_processing) + + @staticmethod + def get_queue_status() -> Dict: + """获取队列状态""" + return { + 'queued': ParseQueue.get_queue_length(), + 'processing': ParseQueue.get_processing_count() + } + + +class ConcurrencyController: + """并发控制器""" + + @staticmethod + def can_process() -> bool: + """检查是否可以处理新任务""" + from models import SiteConfig + config = SiteConfig.query.filter_by(config_key='max_concurrent').first() + max_concurrent = int(config.config_value) if config else 3 + + processing_count = ParseQueue.get_processing_count() + return processing_count < max_concurrent + + @staticmethod + def wait_for_slot(timeout: int = 60) -> bool: + """等待可用槽位""" + start_time = time.time() + while time.time() - start_time < timeout: + if ConcurrencyController.can_process(): + return True + time.sleep(0.5) + return False diff --git a/utils/security.py b/utils/security.py new file mode 100644 index 0000000..dccc0a2 --- /dev/null +++ b/utils/security.py @@ -0,0 +1,23 @@ +import bcrypt +import random +import string +from datetime import datetime, timedelta + +def hash_password(password: str) -> str: + """加密密码""" + salt = bcrypt.gensalt() + return bcrypt.hashpw(password.encode('utf-8'), salt).decode('utf-8') + +def verify_password(password: str, hashed: str) -> bool: + """验证密码""" + return bcrypt.checkpw(password.encode('utf-8'), hashed.encode('utf-8')) + +def generate_verification_code(length: int = 6) -> str: + """生成验证码""" + return ''.join(random.choices(string.digits, k=length)) + +def get_client_ip(request): + """获取客户端IP""" + if request.headers.get('X-Forwarded-For'): + return request.headers.get('X-Forwarded-For').split(',')[0] + return request.remote_addr diff --git a/开发文档/参考网站.jpg b/开发文档/参考网站.jpg new file mode 100644 index 0000000..e6478a5 Binary files /dev/null and b/开发文档/参考网站.jpg differ diff 
--git a/开发文档/哔哩哔哩解析.postman_collection.json b/开发文档/哔哩哔哩解析.postman_collection.json new file mode 100644 index 0000000..c380a65 --- /dev/null +++ b/开发文档/哔哩哔哩解析.postman_collection.json @@ -0,0 +1,98 @@ +{ + "info": { + "_postman_id": "8813fbd0-cdcf-443e-9b29-d62a8a7f891f", + "name": "哔哩哔哩解析", + "schema": "https://schema.getpostman.com/json/collection/v2.1.0/collection.json", + "_exporter_id": "49242368", + "_collection_link": "https://lixin0229-2646365.postman.co/workspace/shihao's-Workspace~249f47cc-12a0-4152-8c64-d21cf5552a6c/collection/49242368-8813fbd0-cdcf-443e-9b29-d62a8a7f891f?action=share&source=collection_link&creator=49242368" + }, + "item": [ + { + "name": "米人API", + "request": { + "method": "GET", + "header": [], + "url": { + "raw": "https://api.mir6.com/api/bzjiexi?url=https://www.bilibili.com/video/BV1vrU6B4ELQ/?share_source=copy_web%26vd_source=8977adbddf938cc18f327c3c21c5120c", + "protocol": "https", + "host": [ + "api", + "mir6", + "com" + ], + "path": [ + "api", + "bzjiexi" + ], + "query": [ + { + "key": "url", + "value": "https://www.bilibili.com/video/BV1vrU6B4ELQ/?share_source=copy_web%26vd_source=8977adbddf938cc18f327c3c21c5120c" + } + ] + } + }, + "response": [] + }, + { + "name": "BugPKAPI", + "request": { + "method": "GET", + "header": [], + "url": { + "raw": "https://api.bugpk.com/api/bilibili?url=https://www.bilibili.com/video/BV1vrU6B4ELQ/?share_source=copy_web%26vd_source=8977adbddf938cc18f327c3c21c5120c", + "protocol": "https", + "host": [ + "api", + "bugpk", + "com" + ], + "path": [ + "api", + "bilibili" + ], + "query": [ + { + "key": "url", + "value": "https://www.bilibili.com/video/BV1vrU6B4ELQ/?share_source=copy_web&vd_source=8977adbddf938cc18f327c3c21c5120c" + } + ] + } + }, + "response": [] + }, + { + "name": "妖狐API", + "request": { + "method": "GET", + "header": [], + "url": { + "raw": "https://api.yaohud.cn/api/v6/video/bili?key=SM227DLC0ZgJ6DXJhAx&url=https://www.bilibili.com/video/BV1vrU6B4ELQ/?share_source=copy_web%26vd_source=8977adbddf938cc18f327c3c21c5120c", + "protocol": "https", + "host": [ + "api", + "yaohud", + "cn" + ], + "path": [ + "api", + "v6", + "video", + "bili" + ], + "query": [ + { + "key": "key", + "value": "SM227DLC0ZgJ6DXJhAx" + }, + { + "key": "url", + "value": "https://www.bilibili.com/video/BV1vrU6B4ELQ/?share_source=copy_web&vd_source=8977adbddf938cc18f327c3c21c5120c" + } + ] + } + }, + "response": [] + } + ] +} \ No newline at end of file diff --git a/开发文档/开发需求.md b/开发文档/开发需求.md new file mode 100644 index 0000000..cbf935d --- /dev/null +++ b/开发文档/开发需求.md @@ -0,0 +1,173 @@ +# 短视频解析平台 —— 开发需求文档 + +## 1. 项目概述 + +本项目是一个聚合多家短视频解析接口的 Web 平台,对外提供统一的视频解析服务。 +后台支持管理员管理解析接口、用户、站点配置等功能,并支持基础的限流与队列控制。 + +## 2. 技术栈与基础要求 + +- 后端语言:**Python** +- 数据库:**MySQL** +- 前端:Web 页面,需**适配移动端(响应式布局)** +- 部署形态:Web 应用(具体框架、部署方式可由开发时确定) + +## 3. 主要功能模块 + +### 3.1 前台解析平台 + +1. 用户可在前台输入短视频链接,平台调用内部解析接口,返回统一格式的数据。 +2. 平台内部对接多个解析接口(含多个哔哩哔哩解析接口),通过“适配器模式”进行统一封装。 +3. 前台展示解析结果时,只需要以下字段: + - 封面(cover) + - 视频 URL(video_url) + - 标题(title) + - 简介(description) +4. 前台页面需要适配移动端,保证在手机端有良好显示与操作体验。 + +### 3.2 用户系统 + +1. **注册登录** + - 用户通过**邮箱注册**,注册时需要邮箱验证码。 + - 支持登录、退出登录。 +2. **邮箱验证码场景** + - 用户注册 + - 重置密码 + - 找回密码 + 上述操作均需要发送邮箱验证码并进行校验。 +3. **用户信息存储(MySQL)** + - 用户名(名称) + - 邮箱 + - 密码(需加密存储) + - 注册 / 最近登录 IP + - 使用解析的次数(可按总次数和/或按日统计) + - 用户分组(如:游客、普通用户、VIP 用户、SVIP 用户) +4. **解析次数限制** + - 未登录用户:每天最多解析 **5 次** + - 已登录用户:每天最多解析 **10 次** + - 管理员后台可根据用户所属分组自定义每日解析次数(见「用户分组管理」)。 + +### 3.3 用户分组与限流策略 + +1. 默认用户分组:**普通用户** +2. 
预设分组类型(可在后台配置每日解析次数): + - 游客 + - 普通用户 + - VIP 用户 + - SVIP 用户 +3. 功能需求: + - 管理员可在后台给用户分配 / 修改所属分组。 + - 每个分组可单独配置“每日最大解析次数”。 + - 平台在用户发起解析时,需要根据分组和当日已用次数进行校验与限制。 + +### 3.4 队列与并发控制 + +1. 平台采用**队列模式**处理解析请求: + - 多用户同时访问时,请求需要进入队列排队。 +2. **并发上限控制**: + - 默认最大并发解析数为 **3**。 + - 并发上限值需支持在后台由管理员配置。 +3. 队列中的请求按照先进先出(FIFO)处理,并在前台给出简要提示(例如:排队中、预计等待等,可以简单实现)。 + +## 4. 解析接口与适配器设计 + +### 4.1 多解析接口接入 + +1. 在“开发文档文件夹”中有多个解析接口的 API 文档。 +2. 平台需要为**每一个解析接口**开发独立的**适配器(Adapter)**: + - 负责调用对应的第三方解析 API。 + - 将第三方接口返回的数据**统一映射**为平台标准数据结构: + ```json + { + "cover": "...", + "video_url": "...", + "title": "...", + "description": "..." + } + ``` +3. 若某些 API **没有开发文档或示例返回值**: + - 使用其自带的测试链接进行调用。 + - 开发过程中自行记录返回值结构和字段含义,并在代码或内部文档中进行整理记录。 + +### 4.2 哔哩哔哩多接口负载均衡与健康检查 + +1. 哔哩哔哩解析接口将接入 **3 个不同的接口**。 +2. 平台需要对这 3 个接口做**负载均衡**,例如: + - 轮询 + - 权重 + - 或按健康状态优先级选择 +3. 管理员后台需提供任务配置: + - 可以设置**定时任务**,定期对这三个 B 站接口执行健康检查。 + - 检查使用的视频链接由管理员在后台自行配置(如健康检查用的固定测试视频链接)。 +4. **健康检查逻辑**: + - 定时任务按设定时间间隔调用 3 个接口,对测试视频链接进行解析。 + - 如解析失败、超时或返回异常等,视为健康检查不通过。 + - 健康检查不通过时,需给管理员发送邮件告警(可简要说明是哪个接口异常)。 + +## 5. 管理员后台功能 + +### 5.1 管理员账户与安全 + +1. 默认管理员账号: + - 用户名:`shihao` + - 密码:`80012029Lz` +2. 管理员登录系统后,可以为自己的账号开启**二次验证(2FA)**: + - 支持基于 TOTP(如 Google Authenticator)或其他实现方式(具体实现可由开发决定)。 +3. 后台需要基本的权限控制,非管理员账号无法访问后台管理页面。 + +### 5.2 站点配置管理 + +管理员可以在后台管理以下站点级配置: + +1. 网站基础信息 + - 网站 Logo(上传或填 URL) + - 网站标题 + - 网站公告(可显示在前台) + - 网站底部信息(如备案号、版权信息等) +2. 并发与队列配置 + - 最大并发解析数(默认 3,可修改) +3. 用户分组配置 + - 各分组每日解析次数限制 +4. 解析接口管理 + - 各解析接口启用 / 禁用 + - 各接口权重(如用于简单负载均衡) + - B 站接口健康检查测试链接配置及定时任务配置参数 + +### 5.3 SMTP 邮件配置与负载均衡 + +1. 系统初始使用一套**默认 SMTP 配置**。 +2. 管理员可以在后台: + - 修改默认 SMTP 配置 + - 新增多个 SMTP 配置 + - 对多个 SMTP 进行**负载均衡**(如轮询发送或故障切换)。 +3. 提供**发送测试邮件**功能,方便管理员验证 SMTP 配置是否可用。 +4. SMTP 配置将用于: + - 用户注册验证码邮件 + - 找回密码 / 重置密码验证码邮件 + - 管理员通知邮件(例如接口健康检查失败告警) + +### 5.4 统计与日志 + +管理员后台需要提供基础统计信息展示,包括但不限于: + +1. 访问 / 解析统计 + - 按 IP 统计使用次数 + - 总解析次数、成功/失败次数 + - 按时间维度的分布(例如每日/每小时解析次数趋势) +2. 接口使用情况 + - 各解析接口被调用次数 + - 失败率、平均响应时间(可选,视实现复杂度) +3. 用户使用情况 + - 各用户解析次数 + - 各用户分组下的使用量对比 + +(统计精度可以根据实际实现情况简化,核心是能看到基本的 IP 使用、时间分布和接口使用情况) + +--- + +## 6. 安全与其他要求 + +1. 用户密码必须进行加密存储(如使用 PBKDF2、bcrypt 等方式)。 +2. 接口调用需做好基础的异常处理、超时控制及错误日志记录。 +3. 对解析次数限制、队列和并发控制要在服务端进行校验,避免被前端绕过。 +4. 所有配置项(如 SMTP、站点信息、分组限制、并发数等)需持久化存储在数据库或配置表中,并在后台可视化管理。 diff --git a/开发文档/抖音API接口文档.md b/开发文档/抖音API接口文档.md new file mode 100644 index 0000000..982f34b --- /dev/null +++ b/开发文档/抖音API接口文档.md @@ -0,0 +1,619 @@ +# 抖音视频解析 API 接口文档 + +## 概述 + +本文档描述了自部署的抖音视频解析 API 接口,用于从抖音分享链接中提取视频信息和下载视频文件。 + +**API 基础地址**: `https://dy.zoomcaronline.net` + +--- + +## 接口列表 + +### 1. 
提取视频 ID + +从抖音分享链接中提取 `aweme_id`(视频唯一标识符)。 + +#### 请求信息 + +- **接口地址**: `/api/douyin/web/get_aweme_id` +- **请求方法**: `GET` +- **请求参数**: + +| 参数名 | 类型 | 必填 | 说明 | 示例 | +|--------|------|------|------|------| +| url | string | 是 | 抖音分享链接 | `https://v.douyin.com/FLNGa7kXvMY/` | + +#### 请求示例 + +```bash +GET https://dy.zoomcaronline.net/api/douyin/web/get_aweme_id?url=https://v.douyin.com/FLNGa7kXvMY/ +``` + +```javascript +// JavaScript 示例 +const response = await fetch('https://dy.zoomcaronline.net/api/douyin/web/get_aweme_id?url=https://v.douyin.com/FLNGa7kXvMY/'); +const data = await response.json(); +``` + +```python +# Python 示例 +import requests + +url = "https://dy.zoomcaronline.net/api/douyin/web/get_aweme_id" +params = {"url": "https://v.douyin.com/FLNGa7kXvMY/"} +response = requests.get(url, params=params) +data = response.json() +``` + +#### 响应示例 + +```json +{ + "code": 200, + "msg": "success", + "data": "7576474555553706981" +} +``` + +#### 响应字段说明 + +| 字段名 | 类型 | 说明 | +|--------|------|------| +| code | integer | 状态码,200 表示成功 | +| msg | string | 响应消息 | +| data | string | 视频 ID (aweme_id) | + +--- + +### 2. 获取视频详细信息 + +根据 `aweme_id` 获取视频的完整信息,包括作者、描述、封面图、视频地址等。 + +#### 请求信息 + +- **接口地址**: `/api/douyin/web/fetch_one_video` +- **请求方法**: `GET` +- **请求参数**: + +| 参数名 | 类型 | 必填 | 说明 | 示例 | +|--------|------|------|------|------| +| aweme_id | string | 是 | 视频 ID | `7576474555553706981` | + +#### 请求示例 + +```bash +GET https://dy.zoomcaronline.net/api/douyin/web/fetch_one_video?aweme_id=7576474555553706981 +``` + +```javascript +// JavaScript 示例 +const awemeId = "7576474555553706981"; +const response = await fetch(`https://dy.zoomcaronline.net/api/douyin/web/fetch_one_video?aweme_id=${awemeId}`); +const data = await response.json(); +``` + +```python +# Python 示例 +import requests + +url = "https://dy.zoomcaronline.net/api/douyin/web/fetch_one_video" +params = {"aweme_id": "7576474555553706981"} +response = requests.get(url, params=params) +data = response.json() +``` + +#### 响应示例(精简版) + +```json +{ + "code": 200, + "router": "/api/douyin/web/fetch_one_video", + "data": { + "aweme_detail": { + "aweme_id": "7576474555553706981", + "desc": "#美女 #漫展 #cos #二次元 #正常穿搭无不良引导", + "create_time": 1764035452, + "duration": 7315, + "author": { + "uid": "7558808540429747259", + "nickname": "小鱼小鱼🐟", + "signature": "永做真我!\n点赞+关注 休闲娱乐不迷路", + "avatar_thumb": { + "url_list": ["https://p3-pc.douyinpic.com/aweme/100x100/..."] + }, + "follower_count": 2325, + "total_favorited": 22399, + "sec_uid": "MS4wLjABAAAAdl73D1NlE1ws4rViqBn9A_RKcKl3Sa-2LNi_SnO4a3w...", + "short_id": "24435946694", + "unique_id": "24435946694" + }, + "video": { + "cover_original_scale": { + "url_list": [ + "https://p3-pc-sign.douyinpic.com/...", + "https://p9-pc-sign.douyinpic.com/..." + ] + }, + "play_addr": { + "url_list": [ + "https://v5-hl-szyd-ov.zjcdn.com/...", + "https://v5-dy-o-abtest.zjcdn.com/...", + "https://www.douyin.com/aweme/v1/play/..." 
+ ] + }, + "duration": 7315, + "width": 1080, + "height": 1920 + }, + "statistics": { + "digg_count": 12345, + "comment_count": 678, + "share_count": 90, + "play_count": 567890 + }, + "music": { + "id": "7123456789", + "title": "原声", + "author": "小鱼小鱼🐟", + "play_url": { + "url_list": ["https://..."] + } + } + } + } +} +``` + +#### 响应字段说明 + +##### 根字段 + +| 字段名 | 类型 | 说明 | +|--------|------|------| +| code | integer | 状态码,200 表示成功 | +| router | string | 路由信息 | +| data | object | 视频详细数据 | + +##### data.aweme_detail 核心字段 + +| 字段路径 | 类型 | 说明 | 示例值 | +|----------|------|------|--------| +| **基本信息** | +| aweme_id | string | 视频唯一 ID | "7576474555553706981" | +| desc | string | 视频描述/标题 | "#美女 #漫展 #cos" | +| create_time | integer | 创建时间(Unix 时间戳) | 1764035452 | +| duration | integer | 视频时长(毫秒) | 7315 | +| **作者信息 (author)** | +| author.uid | string | 作者用户 ID | "7558808540429747259" | +| author.nickname | string | 作者昵称 | "小鱼小鱼🐟" | +| author.signature | string | 作者签名/简介 | "永做真我!" | +| author.avatar_thumb.url_list | array | 作者头像 URL 列表 | ["https://..."] | +| author.follower_count | integer | 粉丝数 | 2325 | +| author.total_favorited | integer | 获赞总数 | 22399 | +| author.sec_uid | string | 作者安全 ID(用于分享) | "MS4wLjABAAAA..." | +| author.short_id | string | 作者短 ID | "24435946694" | +| author.unique_id | string | 作者唯一 ID(抖音号) | "24435946694" | +| **视频信息 (video)** | +| video.play_addr.url_list | array | **视频播放地址列表(重要)** | ["https://v5-hl..."] | +| video.cover_original_scale.url_list | array | **封面图 URL 列表(原始尺寸)** | ["https://p3-pc..."] | +| video.duration | integer | 视频时长(毫秒) | 7315 | +| video.width | integer | 视频宽度(像素) | 1080 | +| video.height | integer | 视频高度(像素) | 1920 | +| **统计数据 (statistics)** | +| statistics.digg_count | integer | 点赞数 | 12345 | +| statistics.comment_count | integer | 评论数 | 678 | +| statistics.share_count | integer | 分享数 | 90 | +| statistics.play_count | integer | 播放数 | 567890 | +| **音乐信息 (music)** | +| music.id | string | 音乐 ID | "7123456789" | +| music.title | string | 音乐标题 | "原声" | +| music.author | string | 音乐作者 | "小鱼小鱼🐟" | +| music.play_url.url_list | array | 音乐播放地址列表 | ["https://..."] | + +##### 关键字段提取路径 + +**最常用的字段提取代码**: + +```python +# 提取视频信息 +aweme_detail = response.json()["data"]["aweme_detail"] + +# 作者信息 +author_name = aweme_detail["author"]["nickname"] +author_avatar = aweme_detail["author"]["avatar_thumb"]["url_list"][0] +author_uid = aweme_detail["author"]["uid"] + +# 视频描述 +video_desc = aweme_detail["desc"] + +# 封面图(取第一个) +cover_url = aweme_detail["video"]["cover_original_scale"]["url_list"][0] + +# 视频播放地址(取第一个,通常是最优质的) +video_url = aweme_detail["video"]["play_addr"]["url_list"][0] + +# 视频时长(毫秒) +duration_ms = aweme_detail["duration"] +duration_sec = duration_ms / 1000 # 转换为秒 + +# 统计数据 +likes = aweme_detail["statistics"]["digg_count"] +comments = aweme_detail["statistics"]["comment_count"] +shares = aweme_detail["statistics"]["share_count"] +``` + +--- + +### 3. 
下载视频文件 + +直接从抖音分享链接下载视频文件(无水印)。 + +#### 请求信息 + +- **接口地址**: `/api/download` +- **请求方法**: `GET` +- **请求参数**: + +| 参数名 | 类型 | 必填 | 说明 | 示例 | +|--------|------|------|------|------| +| url | string | 是 | 抖音分享链接 | `https://v.douyin.com/FLNGa7kXvMY/` | + +#### 请求示例 + +```bash +GET https://dy.zoomcaronline.net/api/download?url=https://v.douyin.com/FLNGa7kXvMY/ +``` + +```javascript +// JavaScript 示例 - 下载视频 +const response = await fetch('https://dy.zoomcaronline.net/api/download?url=https://v.douyin.com/FLNGa7kXvMY/'); +const blob = await response.blob(); +const url = window.URL.createObjectURL(blob); +const a = document.createElement('a'); +a.href = url; +a.download = 'douyin_video.mp4'; +a.click(); +``` + +```python +# Python 示例 - 下载视频 +import requests + +url = "https://dy.zoomcaronline.net/api/download" +params = {"url": "https://v.douyin.com/FLNGa7kXvMY/"} +response = requests.get(url, params=params) + +# 保存视频文件 +with open("douyin_video.mp4", "wb") as f: + f.write(response.content) +``` + +```php +// PHP 示例 - 下载视频 + +``` + +#### 响应信息 + +- **Content-Type**: `video/mp4` +- **响应体**: 视频文件的二进制数据 +- **文件格式**: MP4 +- **特点**: 无水印 + +--- + +## 完整使用流程 + +### 方式一:两步获取(推荐用于需要视频信息的场景) + +```python +import requests + +# 步骤 1: 提取视频 ID +share_url = "https://v.douyin.com/FLNGa7kXvMY/" +response = requests.get( + "https://dy.zoomcaronline.net/api/douyin/web/get_aweme_id", + params={"url": share_url} +) +aweme_id = response.json()["data"] + +# 步骤 2: 获取视频详细信息 +response = requests.get( + "https://dy.zoomcaronline.net/api/douyin/web/fetch_one_video", + params={"aweme_id": aweme_id} +) +video_info = response.json() + +# 提取关键信息 +author_name = video_info["data"]["aweme_detail"]["author"]["nickname"] +video_desc = video_info["data"]["aweme_detail"]["desc"] +cover_url = video_info["data"]["aweme_detail"]["video"]["cover_original_scale"]["url_list"][0] +video_url = video_info["data"]["aweme_detail"]["video"]["play_addr"]["url_list"][0] + +print(f"作者: {author_name}") +print(f"描述: {video_desc}") +print(f"封面: {cover_url}") +print(f"视频: {video_url}") +``` + +### 方式二:直接下载(推荐用于只需要视频文件的场景) + +```python +import requests + +# 直接下载视频 +share_url = "https://v.douyin.com/FLNGa7kXvMY/" +response = requests.get( + "https://dy.zoomcaronline.net/api/download", + params={"url": share_url} +) + +# 保存视频 +with open("douyin_video.mp4", "wb") as f: + f.write(response.content) + +print("视频下载完成") +``` + +--- + +## 抖音分享链接格式 + +抖音分享链接通常有以下几种格式: + +1. **短链接格式**(最常见): + ``` + https://v.douyin.com/FLNGa7kXvMY/ + ``` + +2. **完整链接格式**: + ``` + https://www.douyin.com/video/7576474555553706981 + ``` + +3. **分享文本格式**(包含额外文字): + ``` + 2.05 10/31 F@H.VY 错也行 错过也行 过去就行 # 明日方舟 https://v.douyin.com/FLNGa7kXvMY/ 复制此链接,打开Dou音搜索,直接观看视频! 
+ ``` + +**提取建议**: 使用正则表达式提取链接部分: +```python +import re + +text = "2.05 10/31 F@H.VY https://v.douyin.com/FLNGa7kXvMY/ 复制此链接" +pattern = r'https?://v\.douyin\.com/[A-Za-z0-9]+' +match = re.search(pattern, text) +if match: + url = match.group(0) + print(url) # https://v.douyin.com/FLNGa7kXvMY/ +``` + +--- + +## 错误处理 + +### 常见错误码 + +| 错误码 | 说明 | 解决方案 | +|--------|------|----------| +| 200 | 成功 | - | +| 400 | 请求参数错误 | 检查 URL 参数是否正确 | +| 404 | 视频不存在 | 视频可能已被删除或链接无效 | +| 500 | 服务器错误 | 稍后重试 | + +### 错误处理示例 + +```python +import requests + +def get_douyin_video_info(share_url): + try: + # 提取视频 ID + response = requests.get( + "https://dy.zoomcaronline.net/api/douyin/web/get_aweme_id", + params={"url": share_url}, + timeout=30 + ) + + if response.status_code != 200: + return {"error": f"HTTP {response.status_code}"} + + result = response.json() + if result.get("code") != 200: + return {"error": result.get("msg", "未知错误")} + + aweme_id = result.get("data") + if not aweme_id: + return {"error": "未找到视频 ID"} + + # 获取视频信息 + response = requests.get( + "https://dy.zoomcaronline.net/api/douyin/web/fetch_one_video", + params={"aweme_id": aweme_id}, + timeout=30 + ) + + if response.status_code != 200: + return {"error": f"HTTP {response.status_code}"} + + return response.json() + + except requests.Timeout: + return {"error": "请求超时"} + except requests.RequestException as e: + return {"error": f"网络错误: {str(e)}"} + except Exception as e: + return {"error": f"未知错误: {str(e)}"} + +# 使用示例 +result = get_douyin_video_info("https://v.douyin.com/FLNGa7kXvMY/") +if "error" in result: + print(f"错误: {result['error']}") +else: + print("成功获取视频信息") +``` + +--- + +## 注意事项 + +1. **请求频率限制**: 建议控制请求频率,避免频繁请求导致 IP 被封禁 +2. **超时设置**: 下载视频时建议设置较长的超时时间(建议 60-600 秒) +3. **视频大小**: 下载的视频文件大小通常在 500KB - 50MB 之间 +4. **链接有效期**: 从 `play_addr.url_list` 获取的视频链接可能有时效性 +5. **无水印**: 通过 `/api/download` 接口下载的视频为无水印版本 +6. **SSL 证书**: 如果遇到 SSL 证书验证问题,可以临时禁用验证(不推荐生产环境) + +--- + +## 应用场景 + +1. **视频下载工具**: 开发抖音视频下载器 +2. **内容聚合平台**: 聚合抖音视频内容 +3. **数据分析**: 分析视频数据、作者信息等 +4. **自动化机器人**: 微信/QQ 机器人自动解析抖音链接 +5. **视频备份**: 批量备份抖音视频 +6. **内容审核**: 获取视频信息进行内容审核 + +--- + +## 技术支持 + +- **API 地址**: `https://dy.zoomcaronline.net` +- **更新日期**: 2025-11-26 +- **版本**: v1.0 + +--- + +## 附录:完整代码示例 + +### Node.js 示例 + +```javascript +const axios = require('axios'); +const fs = require('fs'); + +async function downloadDouyinVideo(shareUrl) { + try { + // 1. 提取视频 ID + const idResponse = await axios.get('https://dy.zoomcaronline.net/api/douyin/web/get_aweme_id', { + params: { url: shareUrl } + }); + + const awemeId = idResponse.data.data; + console.log('视频 ID:', awemeId); + + // 2. 获取视频信息 + const infoResponse = await axios.get('https://dy.zoomcaronline.net/api/douyin/web/fetch_one_video', { + params: { aweme_id: awemeId } + }); + + const videoInfo = infoResponse.data.data.aweme_detail; + console.log('作者:', videoInfo.author.nickname); + console.log('描述:', videoInfo.desc); + + // 3. 
下载视频 + const videoResponse = await axios.get('https://dy.zoomcaronline.net/api/download', { + params: { url: shareUrl }, + responseType: 'arraybuffer' + }); + + fs.writeFileSync('douyin_video.mp4', videoResponse.data); + console.log('视频下载完成'); + + } catch (error) { + console.error('错误:', error.message); + } +} + +// 使用示例 +downloadDouyinVideo('https://v.douyin.com/FLNGa7kXvMY/'); +``` + +### Java 示例 + +```java +import java.io.*; +import java.net.*; +import org.json.*; + +public class DouyinDownloader { + + public static void main(String[] args) throws Exception { + String shareUrl = "https://v.douyin.com/FLNGa7kXvMY/"; + downloadVideo(shareUrl); + } + + public static void downloadVideo(String shareUrl) throws Exception { + // 1. 提取视频 ID + String idUrl = "https://dy.zoomcaronline.net/api/douyin/web/get_aweme_id?url=" + + URLEncoder.encode(shareUrl, "UTF-8"); + String idResponse = sendGetRequest(idUrl); + JSONObject idJson = new JSONObject(idResponse); + String awemeId = idJson.getString("data"); + + System.out.println("视频 ID: " + awemeId); + + // 2. 获取视频信息 + String infoUrl = "https://dy.zoomcaronline.net/api/douyin/web/fetch_one_video?aweme_id=" + awemeId; + String infoResponse = sendGetRequest(infoUrl); + JSONObject infoJson = new JSONObject(infoResponse); + JSONObject videoInfo = infoJson.getJSONObject("data").getJSONObject("aweme_detail"); + + System.out.println("作者: " + videoInfo.getJSONObject("author").getString("nickname")); + System.out.println("描述: " + videoInfo.getString("desc")); + + // 3. 下载视频 + String downloadUrl = "https://dy.zoomcaronline.net/api/download?url=" + + URLEncoder.encode(shareUrl, "UTF-8"); + downloadFile(downloadUrl, "douyin_video.mp4"); + + System.out.println("视频下载完成"); + } + + private static String sendGetRequest(String urlString) throws Exception { + URL url = new URL(urlString); + HttpURLConnection conn = (HttpURLConnection) url.openConnection(); + conn.setRequestMethod("GET"); + + BufferedReader in = new BufferedReader(new InputStreamReader(conn.getInputStream())); + String inputLine; + StringBuilder response = new StringBuilder(); + + while ((inputLine = in.readLine()) != null) { + response.append(inputLine); + } + in.close(); + + return response.toString(); + } + + private static void downloadFile(String urlString, String fileName) throws Exception { + URL url = new URL(urlString); + HttpURLConnection conn = (HttpURLConnection) url.openConnection(); + + InputStream in = conn.getInputStream(); + FileOutputStream out = new FileOutputStream(fileName); + + byte[] buffer = new byte[4096]; + int bytesRead; + while ((bytesRead = in.read(buffer)) != -1) { + out.write(buffer, 0, bytesRead); + } + + out.close(); + in.close(); + } +} +``` + +--- + +**文档版本**: v1.0 +**最后更新**: 2025-11-26 +**维护者**: ShiHao diff --git a/开发文档/自部署解析.postman_collection.json b/开发文档/自部署解析.postman_collection.json new file mode 100644 index 0000000..cb979b4 --- /dev/null +++ b/开发文档/自部署解析.postman_collection.json @@ -0,0 +1,181 @@ +{ + "info": { + "_postman_id": "19126577-9e30-48ce-ad50-bea77c5f7266", + "name": "自部署解析", + "schema": "https://schema.getpostman.com/json/collection/v2.1.0/collection.json", + "_exporter_id": "49242368", + "_collection_link": "https://lixin0229-2646365.postman.co/workspace/shihao's-Workspace~249f47cc-12a0-4152-8c64-d21cf5552a6c/collection/49242368-19126577-9e30-48ce-ad50-bea77c5f7266?action=share&source=collection_link&creator=49242368" + }, + "item": [ + { + "name": "抖音自部署提取视频ID", + "request": { + "method": "GET", + "header": [], + "url": { + "raw": 
"https://dy.zoomcaronline.net/api/douyin/web/get_aweme_id?url=https://v.douyin.com/KOxslyaGzCY/ 07/10 ZZz:/ L@j.CH", + "protocol": "https", + "host": [ + "dy", + "zoomcaronline", + "net" + ], + "path": [ + "api", + "douyin", + "web", + "get_aweme_id" + ], + "query": [ + { + "key": "url", + "value": "https://v.douyin.com/KOxslyaGzCY/ 07/10 ZZz:/ L@j.CH" + } + ] + } + }, + "response": [] + }, + { + "name": "抖音自部署获取视频数据", + "request": { + "method": "GET", + "header": [], + "url": { + "raw": "https://dy.zoomcaronline.net/api/douyin/web/fetch_one_video?aweme_id=7576474555553706981", + "protocol": "https", + "host": [ + "dy", + "zoomcaronline", + "net" + ], + "path": [ + "api", + "douyin", + "web", + "fetch_one_video" + ], + "query": [ + { + "key": "aweme_id", + "value": "7576474555553706981" + } + ] + } + }, + "response": [] + }, + { + "name": "抖音自部署视频下载", + "request": { + "method": "GET", + "header": [], + "url": { + "raw": "https://dy.zoomcaronline.net/api/download?url=https://v.douyin.com/KOxslyaGzCY/ 07/10 ZZz:/ L@j.CH", + "protocol": "https", + "host": [ + "dy", + "zoomcaronline", + "net" + ], + "path": [ + "api", + "download" + ], + "query": [ + { + "key": "url", + "value": "https://v.douyin.com/KOxslyaGzCY/ 07/10 ZZz:/ L@j.CH" + } + ] + } + }, + "response": [] + }, + { + "name": "tiktok自部署提取视频ID", + "request": { + "method": "GET", + "header": [], + "url": { + "raw": "https://dy.zoomcaronline.net/api/tiktok/web/get_aweme_id?url", + "protocol": "https", + "host": [ + "dy", + "zoomcaronline", + "net" + ], + "path": [ + "api", + "tiktok", + "web", + "get_aweme_id" + ], + "query": [ + { + "key": "url", + "value": null + } + ] + } + }, + "response": [] + }, + { + "name": "tiktok自部署获取视频数据", + "request": { + "method": "GET", + "header": [], + "url": { + "raw": "https://dy.zoomcaronline.net/api/tiktok/app/fetch_one_video?aweme_id=7556964678446959879", + "protocol": "https", + "host": [ + "dy", + "zoomcaronline", + "net" + ], + "path": [ + "api", + "tiktok", + "app", + "fetch_one_video" + ], + "query": [ + { + "key": "aweme_id", + "value": "7556964678446959879" + } + ] + } + }, + "response": [] + }, + { + "name": "tiktok自部署视频下载", + "request": { + "method": "GET", + "header": [], + "url": { + "raw": "https://dy.zoomcaronline.net/api/download?url=https://www.tiktok.com/@janice_yoong/video/7556964678446959879?is_from_webapp=1%26sender_device=pc", + "protocol": "https", + "host": [ + "dy", + "zoomcaronline", + "net" + ], + "path": [ + "api", + "download" + ], + "query": [ + { + "key": "url", + "value": "https://www.tiktok.com/@janice_yoong/video/7556964678446959879?is_from_webapp=1&sender_device=pc" + } + ] + } + }, + "response": [] + } + ] +} \ No newline at end of file diff --git a/开发文档/默认SMPT.md b/开发文档/默认SMPT.md new file mode 100644 index 0000000..63b27d1 --- /dev/null +++ b/开发文档/默认SMPT.md @@ -0,0 +1,4 @@ +# Host: smtp.resend.com +# Port: 25, 465, 587, 2465, or 2587 +# Username: resend +# Password: re_TSgVYFuT_HG9g3Mu6P4i6LEdYgqVpW6S5 diff --git a/部署指南.md b/部署指南.md new file mode 100644 index 0000000..731c459 --- /dev/null +++ b/部署指南.md @@ -0,0 +1,201 @@ +# 短视频解析平台 - 部署指南 + +## 环境要求 + +- Python 3.8+ +- MySQL 5.7+ +- Redis 5.0+ + +## 部署步骤 + +### 1. 安装依赖 + +```bash +pip install -r requirements.txt +``` + +### 2. 
配置环境变量 + +复制 `.env.example` 为 `.env` 并修改配置: + +```bash +cp .env.example .env +``` + +编辑 `.env` 文件: + +```env +# Flask配置 +SECRET_KEY=your-secret-key-here + +# 数据库配置 +DB_HOST=localhost +DB_PORT=3306 +DB_USER=root +DB_PASSWORD=your-password +DB_NAME=video_parser + +# Redis配置 +REDIS_HOST=localhost +REDIS_PORT=6379 +REDIS_DB=0 +REDIS_PASSWORD= + +# 应用配置 +MAX_CONCURRENT=3 +SESSION_LIFETIME=7200 +``` + +### 3. 创建数据库 + +```bash +mysql -u root -p +``` + +```sql +CREATE DATABASE video_parser CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci; +``` + +### 4. 导入数据库表结构 + +```bash +mysql -u root -p video_parser < database/schema.sql +``` + +### 5. 初始化管理员账号 + +```bash +python init_admin.py +``` + +默认管理员账号: +- 用户名:`shihao` +- 密码:`80012029Lz` + +### 6. 初始化解析接口数据 + +```bash +python init_data.py +``` + +### 7. 启动应用 + +```bash +python app.py +``` + +应用将在 `http://localhost:5000` 启动。 + +### 8. 访问应用 + +- 前台首页:`http://localhost:5000` +- 管理后台:`http://localhost:5000/admin/login` + +## 生产环境部署 + +### 使用 Gunicorn + +```bash +pip install gunicorn +gunicorn -w 4 -b 0.0.0.0:5000 app:app +``` + +### 使用 Nginx 反向代理 + +Nginx 配置示例: + +```nginx +server { + listen 80; + server_name your-domain.com; + + location / { + proxy_pass http://127.0.0.1:5000; + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + } + + location /static { + alias /path/to/JieXi/static; + } +} +``` + +### 使用 Supervisor 管理进程 + +Supervisor 配置示例: + +```ini +[program:video_parser] +command=/path/to/venv/bin/gunicorn -w 4 -b 127.0.0.1:5000 app:app +directory=/path/to/JieXi +user=www-data +autostart=true +autorestart=true +redirect_stderr=true +stdout_logfile=/var/log/video_parser.log +``` + +## 定时任务 + +定时任务会在应用启动时自动启动,用于执行健康检查。 + +如果需要单独运行定时任务: + +```bash +python scheduler.py +``` + +## 常见问题 + +### 1. Redis 连接失败 + +确保 Redis 服务已启动: + +```bash +redis-server +``` + +### 2. 数据库连接失败 + +检查 `.env` 文件中的数据库配置是否正确。 + +### 3. 邮件发送失败 + +检查 SMTP 配置是否正确,可以在管理后台测试 SMTP 配置。 + +### 4. 解析接口调用失败 + +- 检查网络连接 +- 检查解析接口是否可用 +- 在管理后台查看接口健康状态 + +## 安全建议 + +1. 修改默认管理员密码 +2. 使用强密码策略 +3. 启用管理员 2FA 验证 +4. 定期备份数据库 +5. 使用 HTTPS 加密传输 +6. 限制管理后台访问 IP + +## 性能优化 + +1. 使用 Redis 缓存 +2. 调整并发数配置 +3. 使用 CDN 加速静态资源 +4. 数据库索引优化 +5. 定期清理过期日志 + +## 监控与维护 + +1. 定期查看健康检查日志 +2. 监控解析成功率 +3. 关注邮件告警 +4. 定期更新解析接口配置 +5. 备份重要数据 + +## 技术支持 + +如有问题,请查看项目文档或提交 Issue。
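+
+## 附录:连接自检脚本(示例)
+
+针对上文「常见问题」中的 Redis 连接失败与数据库连接失败两类问题,可以用下面的脚本快速自检 `.env` 中的配置是否可用。该脚本仅为参考示例(文件名 `check_env.py` 为假设,项目中并不包含此文件),并假设环境中已安装 pymysql、redis 与 python-dotenv;若项目实际使用其他 MySQL 驱动,请相应调整。
+
+```python
+# check_env.py:读取 .env 并分别测试 MySQL 与 Redis 连接(示例脚本)
+import os
+
+import pymysql
+import redis
+from dotenv import load_dotenv
+
+load_dotenv()
+
+# 测试 MySQL:使用 .env 中的 DB_* 配置建立一次连接
+conn = pymysql.connect(
+    host=os.getenv('DB_HOST', 'localhost'),
+    port=int(os.getenv('DB_PORT', '3306')),
+    user=os.getenv('DB_USER', 'root'),
+    password=os.getenv('DB_PASSWORD', ''),
+    database=os.getenv('DB_NAME', 'video_parser'),
+)
+print('MySQL 连接正常')
+conn.close()
+
+# 测试 Redis:使用 .env 中的 REDIS_* 配置执行 PING
+r = redis.Redis(
+    host=os.getenv('REDIS_HOST', 'localhost'),
+    port=int(os.getenv('REDIS_PORT', '6379')),
+    db=int(os.getenv('REDIS_DB', '0')),
+    password=os.getenv('REDIS_PASSWORD') or None,
+)
+r.ping()
+print('Redis 连接正常')
+```
+
+两项均输出“连接正常”后,再继续执行上文的初始化与启动步骤。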