feat: refactor UI
@@ -1,3 +1,4 @@
+import asyncio
 import hashlib
 import json
 import redis.asyncio as redis
@@ -11,7 +12,11 @@ class CacheService:
         self.redis: redis.Redis | None = None

     async def connect(self):
-        self.redis = redis.from_url(settings.redis_url)
+        try:
+            self.redis = redis.from_url(settings.redis_url)
+            await self.redis.ping()
+        except Exception:
+            self.redis = None

     async def disconnect(self):
         if self.redis:
@@ -38,7 +43,12 @@ class CacheService:
     async def get(self, key: str) -> dict | None:
         if not self.redis:
             return None
-        data = await self.redis.get(key)
+        try:
+            data = await self.redis.get(key)
+        except asyncio.CancelledError:
+            return None
+        except Exception:
+            return None
         if data:
             return json.loads(data)
         return None
@@ -47,7 +57,12 @@ class CacheService:
         if not self.redis:
             return
         ttl = ttl or settings.cache_ttl_seconds
-        await self.redis.set(key, json.dumps(value), ex=ttl)
+        try:
+            await self.redis.set(key, json.dumps(value), ex=ttl)
+        except asyncio.CancelledError:
+            return
+        except Exception:
+            return


 cache_service = CacheService()
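A minimal usage sketch of the degraded mode these hunks give CacheService: if Redis is unreachable, connect() swallows the error and leaves self.redis as None, after which get() returns None and set() does nothing instead of raising. The module path "app.services.cache" and the set() keyword usage are assumptions, not taken from the diff.

# Sketch, not part of the diff. "app.services.cache" is a hypothetical module path.
import asyncio

from app.services.cache import cache_service


async def main() -> None:
    await cache_service.connect()                         # connection errors are swallowed
    await cache_service.set("greeting", {"text": "hi"})   # no-op when self.redis is None
    print(await cache_service.get("greeting"))            # dict when Redis is up, otherwise None
    await cache_service.disconnect()


asyncio.run(main())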
@@ -1,6 +1,11 @@
 import httpx
+import json
 from typing import AsyncGenerator
+from sqlalchemy import select
+from sqlalchemy.ext.asyncio import AsyncSession
 from ..core import get_settings
+from ..core.database import AsyncSessionLocal
+from ..models import AIProvider

 settings = get_settings()

@@ -16,6 +21,29 @@ class LLMService:
         if self.client:
             await self.client.aclose()

+    async def _get_provider(self) -> dict:
+        """Fetch the default AI provider configuration from the database."""
+        async with AsyncSessionLocal() as session:
+            result = await session.execute(
+                select(AIProvider).where(
+                    AIProvider.is_active == True,
+                    AIProvider.is_default == True
+                )
+            )
+            provider = result.scalar_one_or_none()
+            if provider:
+                return {
+                    "api_key": provider.api_key,
+                    "base_url": provider.base_url or "https://api.openai.com/v1",
+                    "model": provider.model_id,
+                }
+        # Fall back to the environment-variable configuration
+        return {
+            "api_key": settings.llm_api_key,
+            "base_url": settings.llm_base_url or "https://api.openai.com/v1",
+            "model": settings.llm_model,
+        }
+
     def _build_prompt(
         self,
         source_text: str,
@@ -44,14 +72,14 @@ class LLMService:
         if not self.client:
             raise RuntimeError("LLM client not initialized")

+        provider = await self._get_provider()
         messages = self._build_prompt(source_text, source_lang, target_lang, style)
-        base_url = settings.llm_base_url or "https://api.openai.com/v1"

         response = await self.client.post(
-            f"{base_url}/chat/completions",
-            headers={"Authorization": f"Bearer {settings.llm_api_key}"},
+            f"{provider['base_url']}/chat/completions",
+            headers={"Authorization": f"Bearer {provider['api_key']}"},
             json={
-                "model": settings.llm_model,
+                "model": provider["model"],
                 "messages": messages,
                 "temperature": settings.default_temperature,
             },
@@ -70,15 +98,15 @@ class LLMService:
         if not self.client:
             raise RuntimeError("LLM client not initialized")

+        provider = await self._get_provider()
         messages = self._build_prompt(source_text, source_lang, target_lang, style)
-        base_url = settings.llm_base_url or "https://api.openai.com/v1"

         async with self.client.stream(
             "POST",
-            f"{base_url}/chat/completions",
-            headers={"Authorization": f"Bearer {settings.llm_api_key}"},
+            f"{provider['base_url']}/chat/completions",
+            headers={"Authorization": f"Bearer {provider['api_key']}"},
             json={
-                "model": settings.llm_model,
+                "model": provider["model"],
                 "messages": messages,
                 "temperature": settings.default_temperature,
                 "stream": True,
@@ -89,7 +117,6 @@ class LLMService:
                     data = line[6:]
                     if data == "[DONE]":
                         break
-                    import json
                     chunk = json.loads(data)
                     delta = chunk["choices"][0].get("delta", {})
                     if "content" in delta:
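For reference, a self-contained sketch of the SSE line format the streaming hunk above parses; the helper name extract_content is illustrative and not part of the codebase.

# Sketch, not part of the diff: OpenAI-compatible streaming responses arrive as
# "data: {json}" lines and end with "data: [DONE]"; the hunk strips the 6-character
# prefix and reads choices[0].delta.content from each chunk.
import json


def extract_content(line: str) -> str | None:
    """Return the text delta carried by one SSE line, or None."""
    if not line.startswith("data: "):
        return None
    data = line[6:]                      # same prefix stripping as line[6:] in the diff
    if data == "[DONE]":
        return None
    chunk = json.loads(data)
    delta = chunk["choices"][0].get("delta", {})
    return delta.get("content")


print(extract_content('data: {"choices": [{"delta": {"content": "Hi"}}]}'))  # -> Hi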
@@ -1,3 +1,4 @@
+import asyncio
 import time
 import redis.asyncio as redis
 from ..core import get_settings
@@ -10,7 +11,11 @@ class RateLimitService:
         self.redis: redis.Redis | None = None

     async def connect(self):
-        self.redis = redis.from_url(settings.redis_url)
+        try:
+            self.redis = redis.from_url(settings.redis_url)
+            await self.redis.ping()
+        except Exception:
+            self.redis = None

     async def disconnect(self):
         if self.redis:
@@ -23,9 +28,14 @@ class RateLimitService:
         now = int(time.time())
         window_key = f"rl:{key}:{now // 60}"

-        count = await self.redis.incr(window_key)
-        if count == 1:
-            await self.redis.expire(window_key, 60)
+        try:
+            count = await self.redis.incr(window_key)
+            if count == 1:
+                await self.redis.expire(window_key, 60)
+        except asyncio.CancelledError:
+            return True
+        except Exception:
+            return True

         return count <= limit

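The hunk above wraps a fixed-window limiter in try/except; as a standalone sketch (the function name is_allowed is illustrative), the scheme is: bucket requests per epoch minute, let the first INCR create the key, and attach a 60-second expiry so stale windows clean themselves up.

# Sketch, not part of the diff: the fixed-window counter used by RateLimitService.
import time

import redis.asyncio as redis


async def is_allowed(r: redis.Redis, key: str, limit: int) -> bool:
    window_key = f"rl:{key}:{int(time.time()) // 60}"   # same per-minute bucket as the diff
    count = await r.incr(window_key)
    if count == 1:                                      # first hit creates the key...
        await r.expire(window_key, 60)                  # ...so give it the 60s window TTL
    return count <= limit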
@@ -1,3 +1,4 @@
+import asyncio
 import json
 from datetime import datetime
 import redis.asyncio as redis
@@ -30,21 +31,26 @@ class StatsService:
     ):
         if not self.redis:
             return
-        now = datetime.utcnow()
-        date = now.strftime("%Y-%m-%d")
-        hour = now.hour
-        key = self._get_key(provider_id, date, hour)
+        try:
+            now = datetime.utcnow()
+            date = now.strftime("%Y-%m-%d")
+            hour = now.hour
+            key = self._get_key(provider_id, date, hour)

-        pipe = self.redis.pipeline()
-        pipe.hincrby(key, "request_count", 1)
-        pipe.hincrby(key, "input_tokens", input_tokens)
-        pipe.hincrby(key, "output_tokens", output_tokens)
-        if cached:
-            pipe.hincrby(key, "cached_count", 1)
-        if error:
-            pipe.hincrby(key, "error_count", 1)
-        pipe.expire(key, 86400 * 30)
-        await pipe.execute()
+            pipe = self.redis.pipeline()
+            pipe.hincrby(key, "request_count", 1)
+            pipe.hincrby(key, "input_tokens", input_tokens)
+            pipe.hincrby(key, "output_tokens", output_tokens)
+            if cached:
+                pipe.hincrby(key, "cached_count", 1)
+            if error:
+                pipe.hincrby(key, "error_count", 1)
+            pipe.expire(key, 86400 * 30)
+            await pipe.execute()
+        except asyncio.CancelledError:
+            return
+        except Exception:
+            return

     async def get_stats(self, provider_id: int, date: str) -> dict:
         if not self.redis:
@@ -61,7 +67,12 @@ class StatsService:

         for hour in range(24):
             key = self._get_key(provider_id, date, hour)
-            data = await self.redis.hgetall(key)
+            try:
+                data = await self.redis.hgetall(key)
+            except asyncio.CancelledError:
+                return result
+            except Exception:
+                data = {}
             hourly = {
                 "hour": hour,
                 "request_count": int(data.get(b"request_count", 0)),
@@ -79,27 +90,37 @@ class StatsService:
     async def get_rpm_tpm(self, provider_id: int) -> dict:
         if not self.redis:
             return {"rpm": 0, "tpm": 0}
-        now = datetime.utcnow()
-        minute_key = f"rpm:{provider_id}:{now.strftime('%Y-%m-%d-%H-%M')}"
-        rpm = int(await self.redis.get(minute_key) or 0)
+        try:
+            now = datetime.utcnow()
+            minute_key = f"rpm:{provider_id}:{now.strftime('%Y-%m-%d-%H-%M')}"
+            rpm = int(await self.redis.get(minute_key) or 0)

-        tpm_key = f"tpm:{provider_id}:{now.strftime('%Y-%m-%d-%H-%M')}"
-        tpm = int(await self.redis.get(tpm_key) or 0)
-        return {"rpm": rpm, "tpm": tpm}
+            tpm_key = f"tpm:{provider_id}:{now.strftime('%Y-%m-%d-%H-%M')}"
+            tpm = int(await self.redis.get(tpm_key) or 0)
+            return {"rpm": rpm, "tpm": tpm}
+        except asyncio.CancelledError:
+            return {"rpm": 0, "tpm": 0}
+        except Exception:
+            return {"rpm": 0, "tpm": 0}

     async def incr_rpm_tpm(self, provider_id: int, tokens: int):
         if not self.redis:
             return
-        now = datetime.utcnow()
-        minute_key = f"rpm:{provider_id}:{now.strftime('%Y-%m-%d-%H-%M')}"
-        tpm_key = f"tpm:{provider_id}:{now.strftime('%Y-%m-%d-%H-%M')}"
+        try:
+            now = datetime.utcnow()
+            minute_key = f"rpm:{provider_id}:{now.strftime('%Y-%m-%d-%H-%M')}"
+            tpm_key = f"tpm:{provider_id}:{now.strftime('%Y-%m-%d-%H-%M')}"

-            pipe = self.redis.pipeline()
-            pipe.incr(minute_key)
-            pipe.expire(minute_key, 120)
-            pipe.incrby(tpm_key, tokens)
-            pipe.expire(tpm_key, 120)
-            await pipe.execute()
+            pipe = self.redis.pipeline()
+            pipe.incr(minute_key)
+            pipe.expire(minute_key, 120)
+            pipe.incrby(tpm_key, tokens)
+            pipe.expire(tpm_key, 120)
+            await pipe.execute()
+        except asyncio.CancelledError:
+            return
+        except Exception:
+            return


 stats_service = StatsService()
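As a standalone sketch of the per-minute counters incr_rpm_tpm() maintains (the function name bump_minute_counters is illustrative, not part of the codebase): both keys are bucketed on the current UTC minute and expire after 120 seconds, so get_rpm_tpm() only ever reads the live window.

# Sketch, not part of the diff: RPM/TPM bookkeeping in the same key scheme as StatsService.
from datetime import datetime

import redis.asyncio as redis


async def bump_minute_counters(r: redis.Redis, provider_id: int, tokens: int) -> None:
    minute = datetime.utcnow().strftime("%Y-%m-%d-%H-%M")
    pipe = r.pipeline()
    pipe.incr(f"rpm:{provider_id}:{minute}")             # one more request this minute
    pipe.expire(f"rpm:{provider_id}:{minute}", 120)
    pipe.incrby(f"tpm:{provider_id}:{minute}", tokens)   # tokens consumed this minute
    pipe.expire(f"tpm:{provider_id}:{minute}", 120)
    await pipe.execute()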