|
import asyncio |
|
import gc |
|
import httpx |
|
import logging |
|
import os |
|
import psutil |
|
import random |
|
import threading |
|
import time |
|
import traceback |
|
import weakref |
|
from dataclasses import dataclass, field |
|
from typing import Any
|
|
|
import gradio as gr |
|
|
|
|
|
|
|
HTTP_STATUS_OK = 200 |
|
HTTP_STATUS_CENSORED = 451 |
|
|
|
|
|
MAX_SEED = 2147483647 |
|
MAX_IMAGE_SIZE = 2048 |
|
MIN_IMAGE_SIZE = 256 |
|
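# Memory monitor settings: sample every 300 seconds and trigger garbage collection when the
# process uses more than 80% of system memory (as reported by psutil.Process.memory_percent()).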
MEMORY_CHECK_INTERVAL = 300 |
|
MEMORY_THRESHOLD_PERCENT = 80 |
|
DEBUG_MODE = os.environ.get("DEBUG_MODE", "false").lower() == "true" |
|
|
|
|
|
API_TOKEN = os.environ.get("API_TOKEN", "") |
|
# An empty API_TOKEN is tolerated here so the UI can still start; the clients below raise
# when constructed without a token, and main() prints a prominent warning.
|
|
|
|
|
CHAT_WEBHOOK_URL = os.environ.get("CHAT_WEBHOOK_URL", "") |
|
|
|
|
|
DISCORD_LINK = os.environ.get("DISCORD_LINK", "https://discord.com/invite/AtRtbe9W8w") |
|
APP_INDEX_LINK = os.environ.get("APP_INDEX_LINK", "https://app.nieta.art/") |
|
APP_INDEX_ICON = "https://cdn-avatars.huggingface.co/v1/production/uploads/62be651a1e22ec8427aa7096/XQEUF5niIZXQbiOOxn8rQ.jpeg" |
|
|
|
|
|
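# Maps the UI-facing model key to the checkpoint filename that is forwarded to the backend
# as client_args["ckpt_name"] (see ImageClient._build_payload).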
MODEL_CONFIGS = { |
|
"ep6": "0622.pth", |
|
} |
|
|
|
|
|
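# Shared httpx connection limits and timeouts used by the image-generation and polish clients.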
HTTP_LIMITS = httpx.Limits( |
|
max_keepalive_connections=5, max_connections=10, keepalive_expiry=30.0 |
|
) |
|
HTTP_TIMEOUT = httpx.Timeout(connect=30.0, read=300.0, write=30.0, pool=10.0) |
|
|
|
|
|
logging.basicConfig( |
|
level=logging.INFO, |
|
format="%(asctime)s - %(name)s - %(levelname)s - %(message)s", |
|
handlers=[ |
|
logging.StreamHandler(), |
|
logging.FileHandler("app.log", mode="a", encoding="utf-8"), |
|
], |
|
) |
|
logger = logging.getLogger(__name__) |
|
|
|
|
|
|
|
@dataclass |
|
class LuminaConfig: |
|
"""Lumina模型配置""" |
|
|
|
model_name: str | None = None |
|
cfg: float | None = None |
|
step: int | None = None |
|
|
|
|
|
@dataclass |
|
class ImageGenerationConfig: |
|
"""图像生成配置""" |
|
|
|
prompts: list[dict[str, Any]] = field(default_factory=list) |
|
width: int = 1024 |
|
height: int = 1024 |
|
seed: int | None = None |
|
use_polish: bool = False |
|
is_lumina: bool = True |
|
lumina_config: LuminaConfig = field(default_factory=LuminaConfig) |
|
|
|
|
|
|
|
image_client = None |
|
polish_client = None
memory_monitor = None
|
|
|
|
|
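# Example prompts surfaced in the UI: example_titles are the button labels, and full_prompts
# maps each title to the full prompt text inserted into the prompt box when clicked.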
example_titles = [ |
|
"Emotionless figure stares upward in heavy rain, drenched and defeated, set against a monochrome grey background.", |
|
"Glamorous woman in vintage cheongsam on dim staircase, exuding 1930s Shanghai elegance and ethereal light", |
|
"薄明のサイバーパンク都市で、銀灰獣人少女が近未来装備を纏い振り返るクールなアニメイラスト", |
|
"都会の孤独な銀髪女性のアニメ風イラスト", |
|
"仰视视角下,翘腿而坐的西装大佬俯视着你,气场冷峻危险", |
|
"黄昏天台,白裙女子独望天空,惆怅静谧,风中飘扬", |
|
] |
|
|
|
full_prompts = { |
|
example_titles[ |
|
0 |
|
]: "looking_up, rain, heavy_rain, emotionless, high_angle, perspective, face_focus, from_above, from_side, grey_background, empty_eyes, defeated_look, limited_palette, partially_colored, monochrome_background, wet_hair, wet_clothes, messy_hair A figure stands motionless in torrential rain, their face captured from a high-angle perspective with vacant eyes staring upward into the downpour. Sodden strands of hair cling to pale skin, soaked clothing blending into the monochrome grey void surrounding them. The limited color palette isolates faint blue tones in their parted lips against the washed-out background, their expression of utter defeat frozen as raindrops streak vertically through the frame. Sharp focus on their emotionless features contrasts with the blurred chaos of falling water, the composition's oppressive atmosphere amplified by the overhead view's psychological distance.", |
|
example_titles[ |
|
1 |
|
]: "Elegant anime-style illustration of a glamorous woman standing on a dimly lit staircase, bathed in soft, ethereal light. Medium full-body shot. She has short, wavy black hair adorned with ornate golden hairpieces and a delicate feather. Her makeup is refined, with deep red lipstick and sharp eyeliner accentuating her poised expression. She wears a dark green high-collared cheongsam embroidered with a vividly colored crane and landscape motif, blending traditional Chinese elements with a touch of Art Deco. Her black lace gloves, beaded pearl chains draped across her body, and intricate earrings enhance her luxurious, vintage aura. A fur stole rests over her arms, adding texture and sophistication. The setting is subtly blurred, with falling sparkles and filtered light through windowpanes creating a dreamy, nostalgic atmosphere. The composition evokes 1930s Shanghai elegance, rendered in a painterly digital style with high fashion sensibility and dramatic lighting contrast.", |
|
example_titles[ |
|
2 |
|
]: "薄明のサイバーパンク都市を背景にした、ハイブリッドな獣人少女のアニメイラスト。構図はバストアップ〜ウエストまでの斜めアングルで、キャラクターが観覧者にやや振り返るような姿勢をとっている。彼女の顔は精巧な狼のマズルと人間的な表情が融合しており、瞳はグリッチ風の蛍光ブルーに輝くオッドアイ。毛皮は柔らかな銀灰色で、頬や耳の先端には淡い紫のグラデーションが入り、毛並みは丁寧に一本ずつ描写されている。衣装は近未来的なデザインのボディスーツで、胸元には蛍光回路のような文様が走り、左肩には機械義肢風のメタルアーマーが装着されている。耳にはピアスとデータ受信装置が取り付けられており、首には布製のスカーフと識別タグが結ばれている。背景にはネオンに照らされた廃墟ビルとホログラム広告が揺らめき、空にはサイバー・オーロラが流れている。カラーリングはダークトーンに蛍光アクセントが効いたネオ東京風配色(黒、紫、青、サイバーグリーン)。アートスタイルはセルルックアニメ+ハードエッジグラデーション、ディテール豊かで表情と毛の質感に特に重点を置く。キャラクターの雰囲気はクールで孤独だが、どこか繊細な感情を秘めた印象。", |
|
example_titles[ |
|
3 |
|
]: "銀白色の長い髪を持つ若い女性が黒いコートを羽織り、都会の街中に佇む、メランコリックな雰囲気のアニメ風イラスト。背景にはグレースケールで描かれた、顔のない人々の群衆がぼかされて描かれ、彼女だけがシャープな焦点で際立っている。彼女は長いダークカラーのオーバーコートを着用し、物憂げで遠くを見つめるような表情を浮かべている。シーンは現代の都市の街角で設定されており、背景には建築的な要素が描かれている。配色はモノクロームで、黒・白・灰色を基調としたフィルム・ノワール風の雰囲気を醸し出している。アートスタイルはリアルな都市描写とアニメ的キャラクターデザインを融合させており、主人公の細部まで描き込まれた描写と、幽霊のように描かれた背景の人々とのコントラストによって、強い孤独感と疎外感を表現している。どんよりとした曇り空のライティングと都市背景が、重苦しい空気感を一層引き立てている。キャラクターデザインは、暗い服装に映える銀髪により、どこか神秘的または超自然的な要素を感じさせる。プロフェッショナルなデジタルイラストで、映画のような質感と強い感情的トーンを備え、現代社会における孤独や断絶のテーマを描いている。雰囲気としては、心理的または超自然的な物語の一場面を想起させる構成となっている。", |
|
example_titles[ |
|
4 |
|
]: "戏剧感十足的数字插画,描绘一位黑社会风格的长发美男子大佬坐姿翘着二郎腿,从大仰视视角俯视观者,气场强烈。他身穿剪裁锋利的深色西装,带有低调的细条纹图案,外套敞开,内衬深色马甲与整洁的衬衫,显得既优雅又危险。他的长发黑亮顺滑,自肩膀垂落,几缕发丝在环境光中微微反光。他一只手漫不经心地夹着一根燃着的雪茄,袅袅烟雾在昏暗中盘旋上升。他俯视下方,眉头微蹙,唇角紧绷,脸上带有一丝明显的不悦,神情冷峻。仰视视角放大了他身上的压迫感与支配力,整个人散发出沉稳却不可侵犯的气势。另一只手或搭在椅扶或搁在膝上,动作自信从容。背景为昏暗豪华的私人会客室或办公室,可见金属饰条、皮革沙发或昏黄灯光等元素。画面风格精致、电影感强烈,通过明暗对比、表情细节与强光源制造紧张与权威氛围。", |
|
example_titles[ |
|
5 |
|
]: "惆怅氛围的数字插画,描绘一位身穿白裙的女子独自站在天台上,在黄昏时分凝望天空。她的长发与白裙的裙摆在风中轻轻飘扬,画面充满静谧与动感的交织。她仰望着染上橙粉色霞光的天空,脸上露出若有所思、带着忧郁的神情,仿佛沉浸在回忆或思念之中。夕阳的余晖洒落在她身上与天台上,为画面增添一层柔和的金色光晕。她的站姿略带沉重,肩膀微微下垂,双手自然垂落或轻轻拽住裙摆,展现出内敛的情绪张力。天台背景简洁,设有低矮护栏、通风设备等结构,远处是模糊的城市天际线,进一步强化孤独氛围。风吹起她的发丝与裙摆,为静止的场景注入一丝淡淡的动态诗意。画风柔和写意,色彩渐变细腻,注重光影、空气感与人物情绪的融合,营造出宁静而感伤的黄昏场景。", |
|
} |
|
|
|
|
|
|
|
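# Dimensions are clamped to [MIN_IMAGE_SIZE, MAX_IMAGE_SIZE] and rounded down to a multiple
# of 32; e.g. validate_dimensions(1000, 3000) returns (992, 2048).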
def validate_dimensions(width: int, height: int) -> tuple[int, int]: |
|
"""验证并调整图片尺寸""" |
|
width = max(MIN_IMAGE_SIZE, min(int(width), MAX_IMAGE_SIZE)) |
|
height = max(MIN_IMAGE_SIZE, min(int(height), MAX_IMAGE_SIZE)) |
|
width = (width // 32) * 32 |
|
height = (height // 32) * 32 |
|
return width, height |
|
|
|
|
|
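# Feishu (Lark) webhook notifications run on a separate thread so error reporting never
# blocks the caller; they are skipped when CHAT_WEBHOOK_URL is not configured, and any
# send failure is logged rather than raised.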
def feishu_notify(message: str): |
|
"""发送飞书通知(异步非阻塞)""" |
|
    if not CHAT_WEBHOOK_URL:
        return
    threading.Thread(target=_send_feishu_notify, args=(message,)).start()
|
|
|
|
|
def _send_feishu_notify(message: str): |
|
"""发送飞书通知的内部实现""" |
|
try: |
|
headers = { |
|
"Content-Type": "application/json", |
|
} |
|
content = { |
|
"msg_type": "text", |
|
"content": { |
|
"text": message, |
|
}, |
|
} |
|
|
|
with httpx.Client(timeout=10.0) as client: |
|
response = client.post(CHAT_WEBHOOK_URL, headers=headers, json=content) |
|
            if response.status_code == HTTP_STATUS_OK:
|
logger.info("飞书通知发送成功") |
|
else: |
|
logger.warning(f"飞书通知发送失败: {response.status_code}") |
|
except Exception as e: |
|
logger.error(f"发送飞书通知时出错: {str(e)}") |
|
|
|
|
|
def format_error_for_notification(error_type: str, error_message: str, traceback_str: str) -> str: |
|
"""格式化错误信息用于飞书通知""" |
|
return f""" |
|
🚨 应用错误警报 |
|
|
|
错误类型: {error_type} |
|
错误信息: {error_message} |
|
|
|
详细错误栈: |
|
{traceback_str} |
|
|
|
时间: {time.strftime('%Y-%m-%d %H:%M:%S')} |
|
""" |
|
|
|
|
|
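# PolishClient wraps the text-completion endpoint used to rewrite prompts. A non-200
# response returns the input unchanged; exceptions are handled by polish_prompt below.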
class PolishClient: |
|
"""提示词润色客户端""" |
|
|
|
def __init__(self): |
|
self.x_token = API_TOKEN |
|
if not self.x_token: |
|
raise ValueError("环境变量中未设置API_TOKEN") |
|
|
|
self.url = "https://api.talesofai.cn/v3/gpt/dify/text-complete" |
|
self.headers = { |
|
"x-token": self.x_token, |
|
"Content-Type": "application/json", |
|
"x-nieta-app-version": "5.14.0", |
|
"x-platform": "nieta-app/web", |
|
} |
|
|
|
async def polish_text(self, input_text: str) -> str: |
|
"""润色文本""" |
|
payload = { |
|
"query": "", |
|
"response_mode": "blocking", |
|
"preset_key": "latitude://28|live|running", |
|
"inputs": {"query": input_text}, |
|
} |
|
|
|
async with httpx.AsyncClient(timeout=HTTP_TIMEOUT) as client: |
|
response = await client.post(self.url, headers=self.headers, json=payload) |
|
if response.status_code == HTTP_STATUS_OK: |
|
response_data = response.json() |
|
polished_text = response_data.get("answer", input_text) |
|
return polished_text.strip() |
|
else: |
|
logger.warning(f"润色API调用失败: {response.status_code}") |
|
return input_text |
|
|
|
|
|
async def polish_prompt(prompt: str) -> str: |
|
"""提示词润色函数 - 使用外部API润色""" |
|
global polish_client |
|
if polish_client is None: |
|
polish_client = PolishClient() |
|
|
|
try: |
|
logger.info(f"润色提示词: {prompt[:50]}...") |
|
polished_prompt = await polish_client.polish_text(prompt) |
|
logger.info("提示词润色完成") |
|
return polished_prompt |
|
except Exception as e: |
|
error_message = f"提示词润色异常: {str(e)}" |
|
traceback_str = traceback.format_exc() |
|
logger.error(error_message) |
|
|
|
|
|
notification_message = format_error_for_notification("PolishPromptError", error_message, traceback_str) |
|
feishu_notify(notification_message) |
|
|
|
return prompt |
|
|
|
|
|
|
|
class ImageClient: |
|
"""图像生成客户端""" |
|
|
|
def __init__(self) -> None: |
|
self.x_token = API_TOKEN |
|
if not self.x_token: |
|
raise ValueError("环境变量中未设置API_TOKEN") |
|
|
|
self.lumina_api_url = "https://ops.api.talesofai.cn/v3/make_image" |
|
self.lumina_task_status_url = ( |
|
"https://ops.api.talesofai.cn/v1/artifact/task/{task_uuid}" |
|
) |
|
self.max_polling_attempts = 100 |
|
self.polling_interval = 3.0 |
|
self.default_headers = { |
|
"Content-Type": "application/json", |
|
"x-platform": "nieta-app/web", |
|
"X-Token": self.x_token, |
|
} |
|
|
|
self._client_config = { |
|
"limits": HTTP_LIMITS, |
|
"timeout": HTTP_TIMEOUT, |
|
"headers": self.default_headers, |
|
} |
|
|
|
self._active_tasks = weakref.WeakSet() |
|
|
|
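    # _prepare_prompt_data returns a list shaped like:
    #   [{"type": "freetext", "value": prompt, "weight": 1.0},
    #    {"type": "freetext", "value": negative_prompt, "weight": -1.0},  # only when a negative prompt is given
    #    {"type": "elementum", ...}]  # fixed Lumina element reference appended to every request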
def _prepare_prompt_data( |
|
self, prompt: str, negative_prompt: str = "" |
|
) -> list[dict[str, Any]]: |
|
"""准备提示词数据""" |
|
prompts_data = [{"type": "freetext", "value": prompt, "weight": 1.0}] |
|
if negative_prompt: |
|
prompts_data.append( |
|
{"type": "freetext", "value": negative_prompt, "weight": -1.0} |
|
) |
|
prompts_data.append( |
|
{ |
|
"type": "elementum", |
|
"value": "b5edccfe-46a2-4a14-a8ff-f4d430343805", |
|
"uuid": "b5edccfe-46a2-4a14-a8ff-f4d430343805", |
|
"weight": 1.0, |
|
"name": "lumina1", |
|
"img_url": "https://oss.talesofai.cn/picture_s/1y7f53e6itfn_0.jpeg", |
|
"domain": "", |
|
"parent": "", |
|
"label": None, |
|
"sort_index": 0, |
|
"status": "IN_USE", |
|
"polymorphi_values": {}, |
|
"sub_type": None, |
|
} |
|
) |
|
return prompts_data |
|
|
|
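    # _build_payload nests the prompt entries under "rawPrompt" and puts Lumina-specific
    # overrides (ckpt_name, cfg, steps) into client_args; cfg and steps are sent as strings,
    # presumably what the backend expects.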
def _build_payload(self, config: ImageGenerationConfig) -> dict[str, Any]: |
|
"""构建请求载荷""" |
|
payload = { |
|
"storyId": "", |
|
"jobType": "universal", |
|
"width": config.width, |
|
"height": config.height, |
|
"rawPrompt": config.prompts, |
|
"seed": config.seed, |
|
"meta": {"entrance": "PICTURE,PURE"}, |
|
"context_model_series": None, |
|
"negative_freetext": "", |
|
"advanced_translator": config.use_polish, |
|
} |
|
if config.is_lumina: |
|
client_args = {} |
|
client_args["seed"] = config.seed |
|
if config.lumina_config.model_name: |
|
client_args["ckpt_name"] = config.lumina_config.model_name |
|
if config.lumina_config.cfg is not None: |
|
client_args["cfg"] = str(config.lumina_config.cfg) |
|
if config.lumina_config.step is not None: |
|
client_args["steps"] = str(config.lumina_config.step) |
|
if client_args: |
|
payload["client_args"] = client_args |
|
return payload |
|
|
|
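    # Polling budget: max_polling_attempts (100) x polling_interval (3 s) is roughly 5 minutes,
    # which matches the timeout message returned when the loop exhausts its attempts.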
async def _poll_task_status(self, task_uuid: str) -> dict[str, Any]: |
|
"""轮询任务状态 - 优化内存使用和连接管理""" |
|
status_url = self.lumina_task_status_url.format(task_uuid=task_uuid) |
|
poll_timeout = httpx.Timeout(connect=10.0, read=30.0, write=10.0, pool=5.0) |
|
|
|
try: |
|
async with httpx.AsyncClient( |
|
limits=HTTP_LIMITS, timeout=poll_timeout, headers=self.default_headers |
|
) as client: |
|
for attempt in range(self.max_polling_attempts): |
|
try: |
|
response = await client.get(status_url) |
|
if response.status_code != HTTP_STATUS_OK: |
|
logger.warning(f"轮询失败 - 状态码: {response.status_code}") |
|
return { |
|
"success": False, |
|
"error": f"获取任务状态失败: {response.status_code} - {response.text[:200]}", |
|
} |
|
|
|
try: |
|
result = response.json() |
|
except Exception as e: |
|
logger.warning(f"JSON解析失败: {str(e)}") |
|
return { |
|
"success": False, |
|
"error": f"任务状态响应解析失败: {response.text[:500]}", |
|
} |
|
|
|
task_status = result.get("task_status") |
|
if task_status == "SUCCESS": |
|
artifacts = result.get("artifacts", []) |
|
if artifacts and len(artifacts) > 0: |
|
image_url = artifacts[0].get("url") |
|
if image_url: |
|
return {"success": True, "image_url": image_url} |
|
return { |
|
"success": False, |
|
"error": "无法从结果中提取图像URL", |
|
} |
|
elif task_status == "FAILURE": |
|
return { |
|
"success": False, |
|
"error": result.get("error", "任务执行失败"), |
|
} |
|
elif task_status == "ILLEGAL_IMAGE": |
|
return {"success": False, "error": "图片不合规"} |
|
elif task_status == "TIMEOUT": |
|
return {"success": False, "error": "任务超时"} |
|
|
|
await asyncio.sleep(self.polling_interval) |
|
if attempt % 10 == 0 and attempt > 0: |
|
gc.collect() |
|
logger.debug(f"轮询第{attempt}次,执行内存清理") |
|
|
|
                    except (httpx.TimeoutException, asyncio.TimeoutError):
|
logger.warning(f"轮询超时 - 尝试 {attempt}") |
|
if attempt >= 3: |
|
return { |
|
"success": False, |
|
"error": "连接超时,请检查网络状况", |
|
} |
|
await asyncio.sleep(self.polling_interval * 2) |
|
continue |
|
except Exception as e: |
|
logger.warning(f"轮询异常 - 尝试 {attempt}: {str(e)}") |
|
if attempt >= 3: |
|
return {"success": False, "error": f"网络异常: {str(e)}"} |
|
await asyncio.sleep(self.polling_interval) |
|
continue |
|
|
|
except Exception as e: |
|
logger.error(f"轮询过程异常: {str(e)}") |
|
return {"success": False, "error": f"轮询过程异常: {str(e)}"} |
|
finally: |
|
gc.collect() |
|
|
|
return { |
|
"success": False, |
|
"error": "⏳ 生图任务超时(5分钟),服务器可能正在处理大量请求,请稍后重试", |
|
} |
|
|
|
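    # generate_image flow: POST the payload to the make_image endpoint, read the task UUID
    # from the plain-text response body, then poll the task status endpoint until the task
    # succeeds, fails, or the polling budget runs out.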
async def generate_image( |
|
self, |
|
prompt: str, |
|
negative_prompt: str, |
|
seed: int, |
|
width: int, |
|
height: int, |
|
cfg: float, |
|
steps: int, |
|
model_name: str = "ep6", |
|
use_polish: bool = False, |
|
) -> tuple[str | None, str | None]: |
|
"""生成图片""" |
|
try: |
|
model_path = MODEL_CONFIGS.get(model_name, MODEL_CONFIGS["ep6"]) |
|
config = ImageGenerationConfig( |
|
prompts=self._prepare_prompt_data(prompt, negative_prompt), |
|
width=width, |
|
height=height, |
|
seed=seed, |
|
use_polish=use_polish, |
|
is_lumina=True, |
|
lumina_config=LuminaConfig(model_name=model_path, cfg=cfg, step=steps), |
|
) |
|
|
|
async with httpx.AsyncClient(**self._client_config) as client: |
|
payload = self._build_payload(config) |
|
if DEBUG_MODE: |
|
logger.debug(f"发送API请求到 {self.lumina_api_url}") |
|
logger.info(f"请求载荷: {payload}") |
|
|
|
try: |
|
response = await client.post(self.lumina_api_url, json=payload) |
|
            except (httpx.TimeoutException, asyncio.TimeoutError):
|
return None, "请求超时,请稍后重试" |
|
except Exception as e: |
|
logger.error(f"API请求异常: {str(e)}") |
|
return None, f"网络请求失败: {str(e)}" |
|
|
|
if DEBUG_MODE: |
|
logger.debug(f"API响应状态码: {response.status_code}") |
|
logger.debug(f"API响应内容: {response.text[:1000]}") |
|
|
|
if response.status_code == HTTP_STATUS_CENSORED: |
|
return None, "内容不合规" |
|
if response.status_code == 433: |
|
return None, "⏳ 服务器正忙,同时生成的图片数量已达上限,请稍后重试" |
|
if response.status_code != HTTP_STATUS_OK: |
|
return ( |
|
None, |
|
f"API请求失败: {response.status_code} - {response.text[:200]}", |
|
) |
|
|
|
content = response.text.strip() |
|
task_uuid = content.replace('"', "") |
|
|
|
if DEBUG_MODE: |
|
logger.debug(f"API返回UUID: {task_uuid}") |
|
|
|
if not task_uuid: |
|
return None, f"未获取到任务ID,API响应: {response.text[:200]}" |
|
|
|
result = await self._poll_task_status(task_uuid) |
|
if result["success"]: |
|
return result["image_url"], None |
|
else: |
|
return None, result["error"] |
|
|
|
except Exception as e: |
|
error_message = f"生成图片异常: {str(e)}" |
|
traceback_str = traceback.format_exc() |
|
logger.error(error_message) |
|
|
|
|
|
notification_message = format_error_for_notification("ImageGenerationError", error_message, traceback_str) |
|
feishu_notify(notification_message) |
|
|
|
return None, f"生成图片时发生错误: {str(e)}" |
|
finally: |
|
gc.collect() |
|
|
|
def cleanup(self): |
|
"""清理资源""" |
|
try: |
|
self._active_tasks.clear() |
|
gc.collect() |
|
logger.info("ImageClient资源清理完成") |
|
except Exception as e: |
|
logger.error(f"资源清理异常: {str(e)}") |
|
|
|
|
|
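# Background thread that periodically samples process memory via psutil and forces a
# gc.collect() pass whenever usage exceeds MEMORY_THRESHOLD_PERCENT.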
class MemoryMonitor: |
|
"""内存监控器""" |
|
|
|
def __init__(self): |
|
self.running = False |
|
self.monitor_thread = None |
|
|
|
def start_monitoring(self): |
|
"""启动内存监控""" |
|
if not self.running: |
|
self.running = True |
|
self.monitor_thread = threading.Thread( |
|
target=self._monitor_loop, daemon=True |
|
) |
|
self.monitor_thread.start() |
|
logger.info("内存监控已启动") |
|
|
|
def stop_monitoring(self): |
|
"""停止内存监控""" |
|
self.running = False |
|
if self.monitor_thread: |
|
self.monitor_thread.join(timeout=5) |
|
logger.info("内存监控已停止") |
|
|
|
def _monitor_loop(self): |
|
"""内存监控循环""" |
|
while self.running: |
|
try: |
|
process = psutil.Process() |
|
memory_info = process.memory_info() |
|
memory_mb = memory_info.rss / 1024 / 1024 |
|
memory_percent = process.memory_percent() |
|
|
|
if memory_percent > MEMORY_THRESHOLD_PERCENT: |
|
logger.warning( |
|
f"内存使用量过高: {memory_mb:.2f} MB ({memory_percent:.1f}%)" |
|
) |
|
gc.collect() |
|
logger.info("执行垃圾回收") |
|
else: |
|
logger.debug( |
|
f"当前内存使用: {memory_mb:.2f} MB ({memory_percent:.1f}%)" |
|
) |
|
|
|
time.sleep(MEMORY_CHECK_INTERVAL) |
|
except Exception as e: |
|
logger.error(f"内存监控异常: {str(e)}") |
|
time.sleep(MEMORY_CHECK_INTERVAL) |
|
|
|
|
|
|
|
def get_image_client() -> ImageClient: |
|
"""获取图片生成客户端实例 - 单例模式""" |
|
global image_client |
|
if image_client is None: |
|
try: |
|
image_client = ImageClient() |
|
logger.info("ImageClient初始化成功") |
|
except Exception as e: |
|
logger.error(f"ImageClient初始化失败: {e}") |
|
raise |
|
return image_client |
|
|
|
|
|
async def cleanup_resources(): |
|
"""清理应用资源""" |
|
global image_client, memory_monitor, polish_client |
|
try: |
|
        if memory_monitor is not None:
            memory_monitor.stop_monitoring()
|
if image_client is not None: |
|
image_client.cleanup() |
|
image_client = None |
|
if polish_client is not None: |
|
polish_client = None |
|
gc.collect() |
|
try: |
|
process = psutil.Process() |
|
memory_mb = process.memory_info().rss / 1024 / 1024 |
|
logger.info(f"清理后内存使用: {memory_mb:.2f} MB") |
|
except Exception: |
|
pass |
|
logger.info("应用资源清理完成") |
|
except Exception as e: |
|
logger.error(f"资源清理异常: {str(e)}") |
|
|
|
|
|
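# infer() is the Gradio handler: it validates the prompt, optionally polishes it, prepends
# the Lumina system prefix, resolves the seed and dimensions, validates CFG/steps, and then
# delegates to ImageClient.generate_image.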
async def infer( |
|
prompt_text, |
|
use_polish_val, |
|
seed_val, |
|
randomize_seed_val, |
|
width_val, |
|
height_val, |
|
cfg_val, |
|
steps_val, |
|
model_name_val, |
|
): |
|
"""推理函数 - 优化内存管理和错误处理""" |
|
try: |
|
logger.info(f"开始生成图像: {prompt_text[:50]}...") |
|
try: |
|
client = get_image_client() |
|
except Exception as e: |
|
logger.error(f"获取ImageClient失败: {str(e)}") |
|
raise gr.Error("ImageClient 未正确初始化。请检查应用日志和API_TOKEN配置。") |
|
|
|
if not prompt_text.strip(): |
|
raise gr.Error("提示词不能为空") |
|
|
|
final_prompt = prompt_text |
|
if use_polish_val: |
|
final_prompt = await polish_prompt(prompt_text) |
|
|
|
|
|
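        # Prepend the fixed system prefix used for Lumina generation before handing the
        # prompt to the backend.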
system_prefix = "You are an assistant designed to generate anime images with the highest degree of image-text alignment based on danbooru tags. <Prompt Start>" |
|
generation_prompt = f"{system_prefix} {final_prompt}" |
|
|
|
current_seed = int(seed_val) |
|
if randomize_seed_val: |
|
current_seed = random.randint(0, MAX_SEED) |
|
|
|
width_val, height_val = validate_dimensions(width_val, height_val) |
|
|
|
if not (1.0 <= float(cfg_val) <= 20.0): |
|
raise gr.Error("CFG Scale 必须在 1.0 到 20.0 之间") |
|
if not (1 <= int(steps_val) <= 50): |
|
raise gr.Error("Steps 必须在 1 到 50 之间") |
|
|
|
image_url, error = await client.generate_image( |
|
prompt=generation_prompt, |
|
negative_prompt="", |
|
seed=current_seed, |
|
width=width_val, |
|
height=height_val, |
|
cfg=float(cfg_val), |
|
steps=int(steps_val), |
|
model_name=model_name_val, |
|
use_polish=False, |
|
) |
|
|
|
if error: |
|
logger.error(f"图像生成失败: {error}") |
|
raise gr.Error(error) |
|
|
|
logger.info("图像生成成功") |
|
return image_url, final_prompt, current_seed |
|
|
|
except gr.Error: |
|
raise |
|
except Exception as e: |
|
error_message = f"推理过程异常: {str(e)}" |
|
traceback_str = traceback.format_exc() |
|
logger.error(error_message) |
|
|
|
|
|
notification_message = format_error_for_notification("InferenceError", error_message, traceback_str) |
|
feishu_notify(notification_message) |
|
|
|
raise gr.Error(f"生成图像时发生意外错误: {str(e)}") |
|
finally: |
|
gc.collect() |
|
|
|
|
|
def setup_signal_handlers(): |
|
"""设置信号处理器以实现优雅关闭""" |
|
import signal |
|
import atexit |
|
|
|
def cleanup_handler(signum=None, frame=None): |
|
"""清理处理器""" |
|
logger.info("接收到关闭信号,开始清理资源...") |
|
try: |
|
asyncio.run(cleanup_resources()) |
|
except Exception as e: |
|
logger.error(f"清理资源时出错: {str(e)}") |
|
logger.info("应用已安全关闭") |
|
|
|
signal.signal(signal.SIGINT, cleanup_handler) |
|
signal.signal(signal.SIGTERM, cleanup_handler) |
|
atexit.register(cleanup_handler) |
|
|
|
|
|
|
|
def build_ui(): |
|
"""构建Gradio UI""" |
|
with gr.Blocks(theme=gr.themes.Soft(), title="Lumina Image Playground") as demo: |
|
gr.Markdown("<h1>🎨 NetaLumina_T2I_Playground | 捏Ta Lumina</h1>") |
|
gr.Markdown( |
|
"Fine-tuned Lumina model specialized for anime/manga style generation! Supports Chinese, English, and Japanese prompts. Model under active development - more exciting features coming soon!" |
|
) |
|
gr.Markdown( |
|
"🌸 专为二次元风格优化的Lumina模型!支持中文、英文、日文三语提示词,让您的创意无界限!模型持续优化中,敬请期待。" |
|
) |
|
|
|
gr.HTML(f""" |
|
<div style="display: flex; justify-content: flex-start; align-items: center; gap: 15px; margin-bottom: 20px; padding: 10px;"> |
|
<a href="{DISCORD_LINK}" target="_blank" style="text-decoration: none; color: #5865F2; font-weight: 500; display: inline-flex; align-items: center; gap: 5px;"> |
|
<img src="https://assets-global.website-files.com/6257adef93867e50d84d30e2/636e0a69f118df70ad7828d4_icon_clyde_blurple_RGB.svg" alt="Discord" style="height: 20px;"> |
|
Join Discord | 加入Discord |
|
</a> |
|
<a href="{APP_INDEX_LINK}" target="_blank" style="text-decoration: none; color: #333; font-weight: 500; display: inline-flex; align-items: center; gap: 5px;"> |
|
<img src="{APP_INDEX_ICON}" alt="App Index" style="height: 20px; border-radius: 3px;"> |
|
Nieta Home | 捏Ta主页 |
|
</a> |
|
</div> |
|
""") |
|
|
|
with gr.Row(variant="panel"): |
|
with gr.Column(scale=2): |
|
gr.Markdown("## ⚙️ Generation Controls | 生成控制") |
|
prompt = gr.Textbox( |
|
label="Prompt | 提示词", |
|
lines=5, |
|
placeholder="e.g., A majestic dragon soaring through a cyberpunk city skyline, neon lights reflecting off its scales, intricate details. | 例如:一条威武的巨龙翱翔在赛博朋克城市天际线,霓虹灯映照在它的鳞片上,细节精美。", |
|
info="Describe the image you want to create. | 描述您想要创建的图像。", |
|
) |
|
|
|
use_polish = gr.Checkbox( |
|
label="✨ Auto Polish Prompt | 自动润色提示词", |
|
value=True, |
|
info="Automatically optimize and enhance your prompt for better results. | 自动优化和增强您的提示词以获得更好的效果。", |
|
) |
|
|
|
run_button = gr.Button( |
|
"🚀 Generate Image | 生成图像", variant="primary", scale=0 |
|
) |
|
|
|
with gr.Accordion("🔧 Advanced Settings | 高级设置", open=False): |
|
model_name = gr.Dropdown( |
|
label="Model Version | 模型版本", |
|
choices=list(MODEL_CONFIGS.keys()), |
|
value="ep6", |
|
info="Select the generation model. | 选择生成模型。", |
|
) |
|
with gr.Row(): |
|
cfg = gr.Slider( |
|
label="CFG Scale | CFG缩放", |
|
minimum=1.0, |
|
maximum=20.0, |
|
step=0.1, |
|
value=5.5, |
|
info="Guidance strength. Higher values adhere more to prompt. | 引导强度,更高的值更贴近提示词。", |
|
) |
|
steps = gr.Slider( |
|
label="Sampling Steps | 采样步数", |
|
minimum=1, |
|
maximum=50, |
|
step=1, |
|
value=30, |
|
info="Number of steps. More steps can improve quality but take longer. | 步数,更多步数可提高质量但耗时更长。", |
|
) |
|
|
|
with gr.Row(): |
|
width = gr.Slider( |
|
label="Width | 宽度", |
|
minimum=MIN_IMAGE_SIZE, |
|
maximum=MAX_IMAGE_SIZE, |
|
step=32, |
|
value=1024, |
|
) |
|
height = gr.Slider( |
|
label="Height | 高度", |
|
minimum=MIN_IMAGE_SIZE, |
|
maximum=MAX_IMAGE_SIZE, |
|
step=32, |
|
value=1024, |
|
) |
|
|
|
with gr.Row(): |
|
seed = gr.Slider( |
|
label="Seed | 种子", |
|
minimum=0, |
|
maximum=MAX_SEED, |
|
step=1, |
|
value=random.randint(0, MAX_SEED), |
|
) |
|
randomize_seed = gr.Checkbox( |
|
label="Randomize Seed | 随机种子", |
|
value=True, |
|
info="Use a new random seed for each generation if checked. | 勾选后每次生成使用新的随机种子。", |
|
) |
|
|
|
with gr.Group(): |
|
gr.Markdown("### ✨ Example Prompts | 示例提示词") |
|
                    for title in example_titles:
|
btn = gr.Button(title) |
|
btn.click(lambda t=title: full_prompts[t], outputs=[prompt]) |
|
|
|
with gr.Column(scale=3): |
|
gr.Markdown("## 🖼️ Generated Image | 生成图像") |
|
result_image = gr.Image( |
|
label="Output Image | 输出图像", |
|
show_label=False, |
|
type="filepath", |
|
width=1024, |
|
height=576, |
|
show_download_button=True, |
|
interactive=False, |
|
elem_id="result_image_display", |
|
container=True, |
|
show_fullscreen_button=True, |
|
) |
|
used_prompt_info = gr.Textbox( |
|
label="Used Prompt | 使用的提示词", |
|
interactive=False, |
|
lines=3, |
|
placeholder="The actual prompt used for generation will appear here. | 生成时实际使用的提示词将显示在此处。", |
|
) |
|
generated_seed_info = gr.Textbox( |
|
label="Seed Used | 使用的种子", |
|
interactive=False, |
|
placeholder="The seed for the generated image will appear here. | 生成图像所使用的种子值将显示在此处。", |
|
) |
|
|
|
|
|
inputs_list = [ |
|
prompt, |
|
use_polish, |
|
seed, |
|
randomize_seed, |
|
width, |
|
height, |
|
cfg, |
|
steps, |
|
model_name, |
|
] |
|
outputs_list = [result_image, used_prompt_info, generated_seed_info] |
|
|
|
run_button.click( |
|
fn=infer, |
|
inputs=inputs_list, |
|
outputs=outputs_list, |
|
api_name="generate_image", |
|
) |
|
prompt.submit( |
|
fn=infer, |
|
inputs=inputs_list, |
|
outputs=outputs_list, |
|
api_name="generate_image_submit", |
|
) |
|
|
|
return demo |
|
|
|
|
|
|
|
def main(): |
|
"""主函数""" |
|
global memory_monitor |
|
|
|
try: |
|
|
|
setup_signal_handlers() |
|
|
|
|
|
memory_monitor = MemoryMonitor() |
|
memory_monitor.start_monitoring() |
|
|
|
if DEBUG_MODE: |
|
logger.info("调试模式已启用") |
|
|
|
if not os.environ.get("API_TOKEN"): |
|
logger.warning("API_TOKEN环境变量未设置") |
|
print( |
|
"**************************************************************************************" |
|
) |
|
print("WARNING: API_TOKEN environment variable is not set locally.") |
|
print( |
|
"The application will run, but image generation will fail until API_TOKEN is provided." |
|
) |
|
print( |
|
"You can set it by running: export API_TOKEN='your_actual_token_here'" |
|
) |
|
print( |
|
"Or if using a .env file, ensure it's loaded or API_TOKEN is set in your run config." |
|
) |
|
print( |
|
"**************************************************************************************" |
|
) |
|
|
|
|
|
try: |
|
process = psutil.Process() |
|
startup_memory = process.memory_info().rss / 1024 / 1024 |
|
logger.info(f"启动时内存使用: {startup_memory:.2f} MB") |
|
except Exception: |
|
pass |
|
|
|
logger.info("启动Gradio应用...") |
|
demo = build_ui() |
|
demo.launch( |
|
debug=DEBUG_MODE, |
|
show_error=True, |
|
server_name="0.0.0.0", |
|
server_port=7860, |
|
share=False, |
|
max_threads=40, |
|
favicon_path=None, |
|
) |
|
|
|
except KeyboardInterrupt: |
|
logger.info("接收到键盘中断,正在关闭...") |
|
except Exception as e: |
|
error_message = f"应用启动失败: {str(e)}" |
|
traceback_str = traceback.format_exc() |
|
logger.error(error_message) |
|
|
|
|
|
notification_message = format_error_for_notification("ApplicationStartupError", error_message, traceback_str) |
|
feishu_notify(notification_message) |
|
|
|
raise |
|
finally: |
|
|
|
try: |
|
asyncio.run(cleanup_resources()) |
|
except Exception as e: |
|
error_message = f"最终清理时出错: {str(e)}" |
|
traceback_str = traceback.format_exc() |
|
logger.error(error_message) |
|
|
|
|
|
notification_message = format_error_for_notification("CleanupError", error_message, traceback_str) |
|
feishu_notify(notification_message) |
|
logger.info("应用已退出") |
|
|
|
|
|
if __name__ == "__main__": |
|
main() |
|
|