重构:移除过时的业务模块,并更新文件管理器和量化页面的配置。

This commit is contained in:
hangyu.tao 2026-04-01 17:09:30 +08:00
parent f9f7f4fdd8
commit ff579da153
9 changed files with 805 additions and 22477 deletions

View File

@ -10,10 +10,19 @@ from framework.scripts.file_system_scenario import run_full_file_lifecycle
from framework.scripts.compute_resource_scenario import run_dev_machine_lifecycle from framework.scripts.compute_resource_scenario import run_dev_machine_lifecycle
from framework.scripts.desktop_lifecycle import run_cloud_desktop_lifecycle from framework.scripts.desktop_lifecycle import run_cloud_desktop_lifecycle
from framework.scripts.mirror_assets import run_mirror_assets_lifecycle from framework.scripts.mirror_assets import run_mirror_assets_lifecycle
from framework.scripts.three_d_generation_scenario import run_3d_generation_lifecycle
from framework.scripts.three_d_assets_scenario import run_3d_assets_lifecycle
from framework.scripts.quantization_scenario import run_quantization_lifecycle
from framework.scripts.monkey_scenario import run_monkey_testing
from framework.business.mirror_assets_page import MirrorAssetsPage from framework.business.mirror_assets_page import MirrorAssetsPage
from framework.business.three_d_generation_page import ThreeDGenerationPage
from framework.business.three_d_assets_page import ThreeDAssetsPage
from framework.business.quantization_page import QuantizationPage
from framework.scripts.quantization_scenario import run_quantization_lifecycle
import time import time
import os import os
import json
logger = get_logger("DataManagementRunner") logger = get_logger("DataManagementRunner")
@ -27,6 +36,10 @@ class DataManagement:
self.dm = None self.dm = None
self.cd = None self.cd = None
self.ma = None self.ma = None
self.tg = None
self.ta = None
self.qp = None
self.results = [] # 记录结构化测试结果
def start(self): def start(self):
"""启动浏览器并初始化组件""" """启动浏览器并初始化组件"""
@ -36,65 +49,192 @@ class DataManagement:
self.dm = DevMachinePage(self.page) self.dm = DevMachinePage(self.page)
self.cd = CloudDesktopPage(self.page) self.cd = CloudDesktopPage(self.page)
self.ma = MirrorAssetsPage(self.page) self.ma = MirrorAssetsPage(self.page)
self.tg = ThreeDGenerationPage(self.page)
self.ta = ThreeDAssetsPage(self.page)
self.qp = QuantizationPage(self.page)
def login(self, user, pwd): def login(self, user, pwd):
"""执行登录流程""" """执行登录流程"""
return self.ui.login(user, pwd) return self.ui.login(user, pwd)
def _safe_screenshot(self, name): def _safe_screenshot(self, name):
"""安全截图,防止浏览器已关闭时报错""" """支持统一存放路径的截图"""
base_dir = os.environ.get("ROBOGO_SCREENSHOTS_DIR", ".")
task_id = os.environ.get("ROBOGO_TASK_ID", "local")
# 统一命名规范: {task_id}_{name}.png
filename = f"{task_id}_{name}" if ".png" in name else f"{task_id}_{name}.png"
target_path = os.path.join(base_dir, filename)
try: try:
if self.page: # 增加对 page 状态的活跃检查 (包含 context 检查)
self.page.screenshot(path=name) if not self.page or self.page.is_closed():
except: return
logger.warning(f"⚠️ 截图失败(浏览器可能已关闭): {name}") if not self.page.context or self.page.context.browser is None:
return
# 使用 self.page 截图(注意:这里使用的是 BasePage 注入的 playwirght 页面)
self.page.screenshot(path=target_path, full_page=True, timeout=5000)
logger.info(f"✨ 最终状态截图已保存: {target_path}")
except Exception as e:
# 忽略由于浏览器/上下文已关闭导致的截图失败
err_msg = str(e).lower()
if "closed" in err_msg or "not open" in err_msg:
logger.info(" 浏览器环境已销毁,跳过末尾截图存档")
else:
logger.warning(f"⚠️ 截图存档过程出错: {e}")
def record_result(self, name, desc, expected, status, duration):
    """Append one structured test-case result to ``self.results``.

    Args:
        name: machine-readable case identifier.
        desc: human-readable description of the case.
        expected: expected-outcome text shown in reports.
        status: one of 'PASS', 'FAIL', 'SKIP'.
        duration: elapsed seconds (float); stored as a '<n>.nn s' string.
    """
    entry = {
        "name": name,
        "desc": desc,
        "expected": expected,
        "status": status,
        # Keep two decimals so report columns line up.
        "duration": "%.2fs" % duration,
    }
    self.results.append(entry)
def save_results(self):
    """Persist collected structured results to a JSON report file.

    Output path is ``<ROBOGO_REPORTS_DIR>/<ROBOGO_TASK_ID>_results.json``,
    defaulting to ``platform_reports`` / ``local`` when the environment
    variables are absent. Failures are logged as warnings, never raised.
    """
    tid = os.environ.get("ROBOGO_TASK_ID", "local")
    out_dir = os.environ.get("ROBOGO_REPORTS_DIR", "platform_reports")
    os.makedirs(out_dir, exist_ok=True)
    dest = os.path.join(out_dir, f"{tid}_results.json")
    try:
        with open(dest, 'w', encoding='utf-8') as fh:
            json.dump(self.results, fh, ensure_ascii=False, indent=2)
        logger.info(f"📊 结构化数据已保存: {dest}")
    except Exception as exc:
        logger.warning(f"⚠️ 结构化数据保存失败: {exc}")
def run_all_scenarios(self): def run_all_scenarios(self):
""" """
场景指挥依次执行所有的业务流 场景指挥依次执行所有的业务流
每个场景独立 try-except一个失败不阻塞后续场景 每个场景独立 try-except一个失败不阻塞后续场景
""" """
env_name = os.environ.get("ROBOGO_ENV", "PROD")
scope = os.environ.get("ROBOGO_SCOPE", "all")
logger.info(f"🚦 启动场景编排 - 环境: {env_name} | 策略: {scope}")
# 动态定义本次要运行的模块
active_modules = []
if scope == "all":
active_modules = ["file", "dev", "cloud", "mirror", "quant" , "3d"]
elif scope == "smoke":
active_modules = ["file"] # 核心资源生命周期
elif scope == "core":
active_modules = ["cloud","dev", "mirror", "quant"] # 业务闭环
else:
active_modules = ["cloud"] # 默认兜底只跑云桌面
errors = [] errors = []
# 1. 执行文件系统场景 (跳过) # 1. 执行文件系统场景
if "file" in active_modules:
start_t = time.time()
try: try:
run_full_file_lifecycle(self.fm, Config.FOLDER_NAME) run_full_file_lifecycle(self.fm, Config.FOLDER_NAME)
self._safe_screenshot("file_system_final.png") self._safe_screenshot("file_system_final.png")
logger.info("✅ 文件系统场景通过") logger.info("✅ 文件系统场景通过")
self.record_result("file_system_lifecycle", "文件系统全生命周期测试", "文件夹创建、上传、重命名、删除全链路闭环", "PASS", time.time()-start_t)
except Exception as e: except Exception as e:
logger.error(f"❌ 文件系统场景失败: {e}") logger.error(f"❌ 文件系统场景失败: {e}")
self._safe_screenshot("file_system_error.png") self._safe_screenshot("file_system_error.png")
errors.append(f"文件系统: {e}") errors.append(f"文件系统: {e}")
self.record_result("file_system_lifecycle", "文件系统全生命周期测试", "文件夹创建、上传、重命名、删除全链路闭环", "FAIL", time.time()-start_t)
# 2. 执行开发机场景 (跳过) # 2. 执行开发机场景
if "dev" in active_modules:
start_t = time.time()
try: try:
run_dev_machine_lifecycle(self.dm) run_dev_machine_lifecycle(self.dm)
self._safe_screenshot("dev_machine_final.png") self._safe_screenshot("dev_machine_final.png")
logger.info("✅ 开发机场景通过") logger.info("✅ 开发机场景通过")
self.record_result("dev_machine_lifecycle", "开发机全生命周期测试", "开发机申请、启动、关机、销毁全流程验证", "PASS", time.time()-start_t)
except Exception as e: except Exception as e:
logger.error(f"❌ 开发机场景失败: {e}") logger.error(f"❌ 开发机场景失败: {e}")
self._safe_screenshot("dev_machine_error.png") self._safe_screenshot("dev_machine_error.png")
errors.append(f"开发机: {e}") errors.append(f"开发机: {e}")
self.record_result("dev_machine_lifecycle", "开发机全生命周期测试", "开发机申请、启动、关机、销毁全流程验证", "FAIL", time.time()-start_t)
# 3. 执行云桌面场景 # 3. 执行云桌面场景
if "cloud" in active_modules:
start_t = time.time()
try: try:
run_cloud_desktop_lifecycle(self.cd) run_cloud_desktop_lifecycle(self.cd)
self._safe_screenshot("cloud_desktop_final.png") self._safe_screenshot("cloud_desktop_final.png")
logger.info("✅ 云桌面场景通过") logger.info("✅ 云桌面场景通过")
self.record_result("cloud_desktop_lifecycle", "云桌面全生命周期测试", "桌面创建、连接、保存镜像、关机、删除全流程验证", "PASS", time.time()-start_t)
except Exception as e: except Exception as e:
logger.error(f"❌ 云桌面场景失败: {e}") logger.error(f"❌ 云桌面场景失败: {e}")
self._safe_screenshot("cloud_desktop_error.png") self._safe_screenshot("cloud_desktop_error.png")
errors.append(f"云桌面: {e}") errors.append(f"云桌面: {e}")
self.record_result("cloud_desktop_lifecycle", "云桌面全生命周期测试", "桌面创建、连接、保存镜像、关机、删除全流程验证", "FAIL", time.time()-start_t)
# 4. 执行镜像资产场景 # 4. 执行镜像资产场景
if "mirror" in active_modules:
start_t = time.time()
try: try:
run_mirror_assets_lifecycle(self.ma, self.cd) run_mirror_assets_lifecycle(self.ma, self.cd)
self._safe_screenshot("mirror_assets_final.png") self._safe_screenshot("mirror_assets_final.png")
logger.info("✅ 镜像资产场景通过") logger.info("✅ 镜像资产场景通过")
self.record_result("mirror_assets", "镜像资产巡检", "镜像列表加载及基本信息验证", "PASS", time.time()-start_t)
except Exception as e: except Exception as e:
logger.error(f"❌ 镜像资产场景失败: {e}") logger.error(f"❌ 镜像资产场景失败: {e}")
self._safe_screenshot("mirror_assets_error.png") self._safe_screenshot("mirror_assets_error.png")
errors.append(f"镜像资产: {e}") errors.append(f"镜像资产: {e}")
self.record_result("mirror_assets", "镜像资产巡检", "镜像列表加载及基本信息验证", "FAIL", time.time()-start_t)
# 5. 执行 3D 生成场景 -> 获取资产名 -> 执行归档场景
if "3d" in active_modules:
start_t = time.time()
try:
asset_name = run_3d_generation_lifecycle(self.tg)
self._safe_screenshot("3d_generation_final.png")
if asset_name:
# 联动:将生成的资产归档到数据中心
run_3d_assets_lifecycle(self.ta, asset_name)
self._safe_screenshot("3d_assets_archive_final.png")
logger.info("✅ 3D 生成与归档全链路已通过")
self.record_result("3d_lifecycle", "3D生成与资产归档", "通过 AIGC 生成 3D 模型并成功归档到资产中心", "PASS", time.time()-start_t)
else:
logger.warning("⚠️ 3D 生成未产生有效资产名,跳过归档场景")
self.record_result("3d_lifecycle", "3D生成与资产归档", "生成资产名为空", "SKIP", time.time()-start_t)
except Exception as e:
logger.error(f"❌ 3D 链路失败: {e}")
self._safe_screenshot("3d_chain_error.png")
errors.append(f"3D链路: {e}")
self.record_result("3d_lifecycle", "3D生成与资产归档", "存在执行错误", "FAIL", time.time()-start_t)
# 6. 执行量化工具场景
if "quant" in active_modules:
start_t = time.time()
try:
run_quantization_lifecycle(self.qp)
self._safe_screenshot("quantization_final.png")
logger.info("✅ 量化工具场景通过")
self.record_result("quantization", "量化工具效能测试", "执行模型量化脚本并验证输出一致性", "PASS", time.time()-start_t)
except Exception as e:
logger.error(f"❌ 量化工具场景失败: {e}")
self._safe_screenshot("quantization_error.png")
errors.append(f"量化工具: {e}")
self.record_result("quantization", "量化工具效能测试", "执行模型量化脚本并验证输出一致性", "FAIL", time.time()-start_t)
# 7. 全局 Monkey 稳定性打底测试
if "monkey" in active_modules:
start_t = time.time()
try:
logger.info("🚀 开启全面压测: 注入大范围 Monkey 测试策略...")
run_monkey_testing(self.page, action_count=50)
self._safe_screenshot("monkey_pass.png")
logger.info("✅ 全链路存活Monkey 压路机测试无严重崩溃阻断")
self.record_result("monkey_testing", "Monkey 稳定性压测", "50 次随机点击/交互注入,验证系统抗崩溃能力", "PASS", time.time()-start_t)
except Exception as e:
logger.error(f"❌ 稳定性告警: Monkey 测试导致了非预期崩溃或抛错: {e}")
self._safe_screenshot("monkey_error.png")
errors.append(f"Monkey测试异常: {e}")
self.record_result("monkey_testing", "Monkey 稳定性压测", "50 次随机点击/交互注入,验证系统抗崩溃能力", "FAIL", time.time()-start_t)
# 汇总 # 汇总
if errors: if errors:
@ -102,7 +242,7 @@ class DataManagement:
logger.error(f"{len(errors)} 个场景失败: {summary}") logger.error(f"{len(errors)} 个场景失败: {summary}")
raise Exception(f"{len(errors)} 个场景失败: {summary}") raise Exception(f"{len(errors)} 个场景失败: {summary}")
logger.info("🎉 所有 UI 模块遍历测试圆满完成!") logger.info(f"🎉 Robogo {env_name} 环境 {scope} 巡检执行圆满完成!")
def run(self, user, pwd): def run(self, user, pwd):
"""主入口""" """主入口"""
@ -115,6 +255,7 @@ class DataManagement:
self.run_all_scenarios() self.run_all_scenarios()
finally: finally:
self.save_results()
self.ui.stop() self.ui.stop()
if __name__ == "__main__": if __name__ == "__main__":

View File

@ -268,6 +268,7 @@ class QuantizationPage(BasePage):
# 主动调用 path() 即可在当前主线程强阻塞,直至磁盘下载动作完全落盘成功 # 主动调用 path() 即可在当前主线程强阻塞,直至磁盘下载动作完全落盘成功
dl_path = download.path() dl_path = download.path()
logger.info(f"🎉 文件下载成功落盘!临时归档路径: {dl_path}") logger.info(f"🎉 文件下载成功落盘!临时归档路径: {dl_path}")
time.sleep(2)
# 4. 收尾清理弹窗 UI # 4. 收尾清理弹窗 UI
close_btn = dialog.locator("button.p-dialog-header-close").first close_btn = dialog.locator("button.p-dialog-header-close").first

View File

@ -3,7 +3,12 @@ import os
class Config: class Config:
# --- 基础配置 --- # --- 基础配置 ---
_ENV = os.getenv("ROBOGO_ENV", "PROD").upper()
if _ENV in ["FAT"]:
BASE_URL = "https://robogo-fat.d-robotics.cc"
else:
BASE_URL = "https://robogo.d-robotics.cc" BASE_URL = "https://robogo.d-robotics.cc"
LOGIN_URL = f"{BASE_URL}/cloud-desktop/login" LOGIN_URL = f"{BASE_URL}/cloud-desktop/login"
# --- 文件管理配置 --- # --- 文件管理配置 ---
@ -17,8 +22,8 @@ class Config:
SYSTEM_DISK = "100" SYSTEM_DISK = "100"
# --- 登录凭证 (如果环境变量没有,则逻辑中会提示输入) --- # --- 登录凭证 (如果环境变量没有,则逻辑中会提示输入) ---
AUTH_ACCOUNT = os.getenv("ROBOGO_USER", "") AUTH_ACCOUNT = os.getenv("AUTH_ACCOUNT") or os.getenv("ROBOGO_USER", "")
AUTH_PASSWORD = os.getenv("ROBOGO_PWD", "") AUTH_PASSWORD = os.getenv("AUTH_PASSWORD") or os.getenv("ROBOGO_PWD", "")
# --- 其他框架配置 --- # --- 其他框架配置 ---
TIMEOUT = 30000 TIMEOUT = 30000

View File

@ -1,2 +1,5 @@
lark-oapi>=1.3.0 playwright>=1.40.0
requests>=2.28.0 requests>=2.28.0
lark-oapi>=1.3.0
flask>=2.3.0
flask-cors>=4.0.0

View File

@ -1,6 +1,7 @@
# framework/scripts/file_system_scenario.py # framework/scripts/file_system_scenario.py
import os import os
import time import time
import random
from framework.core.logger import get_logger from framework.core.logger import get_logger
logger = get_logger("FileSystemScenario") logger = get_logger("FileSystemScenario")
@ -28,34 +29,69 @@ def run_stress_upload_test(fm, file_path, cycles=3):
def run_full_file_lifecycle(fm, folder_name): def run_full_file_lifecycle(fm, folder_name):
""" """
业务逻辑完整的文件生命周期流程大文件版本 业务逻辑完整的文件生命周期流程
- test_data 目录随机选取文件上传
- 随机重命名已上传的压缩包
- 返回根目录后删除之前创建的文件夹
""" """
logger.info(f"--- 开启文件系统全生命周期测试 [{folder_name}] ---") logger.info(f"--- 开启文件系统全生命周期测试 [{folder_name}] ---")
fm.navigate_to() fm.navigate_to()
# 1. 创建并进入 # 1. 创建并进入测试文件夹
fm.create_folder(folder_name) fm.create_folder(folder_name)
fm.enter_folder(folder_name) fm.enter_folder(folder_name)
# 2. 上传压力测试 # 2. 收集 test_data 目录下所有文件
from framework.config.settings import Config from framework.config.settings import Config
test_file = Config.TEST_FILE test_data_dir = Config.TEST_DATA_DIR
if os.path.exists(test_file):
logger.info(f"📄 测试文件: {test_file} ({os.path.getsize(test_file)} bytes)")
run_stress_upload_test(fm, test_file, cycles=3)
# 3. 正式上传(等待完成) if not os.path.isdir(test_data_dir):
fm.upload_files(test_file) logger.warning(f"⚠️ test_data 目录不存在: {test_data_dir},跳过上传测试")
fm.wait_for_success(count=1) fm.back_to_root()
time.sleep(2)
fm.delete_item(folder_name)
time.sleep(3)
return
all_files = [f for f in os.listdir(test_data_dir) if os.path.isfile(os.path.join(test_data_dir, f))]
if not all_files:
logger.warning("⚠️ test_data 目录为空,跳过上传测试")
fm.back_to_root()
time.sleep(2)
fm.delete_item(folder_name)
time.sleep(3)
return
# 4. 重命名与删除 all_paths = [os.path.join(test_data_dir, f) for f in all_files]
fm.rename_item("Fruits-15.zip", "UI_TEST_RENAMED.zip") total_size = sum(os.path.getsize(p) for p in all_paths)
fm.delete_item("UI_TEST_RENAMED.zip") logger.info(f"📦 test_data 共 {len(all_files)} 个文件,总计 {total_size / 1024 / 1024:.1f} MB")
# 3. 一次性上传全部文件Playwright 支持多文件)
fm.upload_files(all_paths)
fm.wait_for_success(count=len(all_files))
# 4. 等待文件列表完全刷新(上传对话框关闭后列表需要数秒才能稳定)
logger.info("⏳ 等待文件列表稳定...")
time.sleep(5)
# 5. 随机选取一个当前页面可见的文件并重命名
page_text = fm.page.content()
visible_files = [f for f in all_files if f in page_text]
if visible_files:
rename_target = random.choice(visible_files)
logger.info(f"📄 当前第一页可见 {len(visible_files)} 个文件,随机命中: {rename_target}")
else: else:
logger.warning(f"⚠️ 测试文件不存在: {test_file},跳过上传测试") logger.warning("⚠️ 未在第一页检测到任何上传的文件名,回退使用完全随机选择")
rename_target = random.choice(all_files)
# 5. 清理 file_base, file_ext = os.path.splitext(rename_target)
random_suffix = random.randint(1000, 9999)
new_name = f"UI_RENAMED_{random_suffix}{file_ext}"
logger.info(f"🎲 随机重命名目标: {rename_target} -> {new_name}")
fm.rename_item(rename_target, new_name)
# 7. 返回根目录,并删除之前创建的测试文件夹
fm.back_to_root() fm.back_to_root()
time.sleep(5) time.sleep(5)
fm.delete_item(folder_name) fm.delete_item(folder_name)

View File

@ -24,7 +24,7 @@ def run_3d_generation_lifecycle(threed_page):
pic_path = os.path.join(test_data_dir, random_pic) pic_path = os.path.join(test_data_dir, random_pic)
# 调试 # 调试
# threed_page.page.pause() #threed_page.page.pause()
logger.info(f"🎲 随机选取的素材为: {random_pic}") logger.info(f"🎲 随机选取的素材为: {random_pic}")

View File

@ -11,445 +11,665 @@ import time
import queue import queue
import threading import threading
import subprocess import subprocess
from datetime import datetime import requests
from datetime import datetime, timedelta
from flask import Flask, request, jsonify, Response, send_from_directory from flask import Flask, request, jsonify, Response, send_from_directory
from flask_cors import CORS from flask_cors import CORS
app = Flask(__name__, static_folder='platform/static', template_folder='platform/templates') app = Flask(__name__, static_folder='platform', static_url_path='')
CORS(app) CORS(app)
# ── 持久化存储与资源目录 ───────────────────────────────────────────────────────────── # ── 持久化存储与资源目录 ─────────────────────────────────────────────────────────────
DB_FILE = "platform_db.json" DB_FILE = "platform_db.json"
REPORTS_DIR = "platform_reports" REPORTS_DIR = "platform_reports"
SCREENSHOTS_DIR = "platform_artifacts/screenshots" SCREENSHOTS_DIR = "platform_artifacts/screenshots"
LARK_WEBHOOK = "https://open.feishu.cn/open-apis/bot/v2/hook/d75c14ad-d782-489e-8a99-81b511ee4abd"
os.makedirs(REPORTS_DIR, exist_ok=True) os.makedirs(REPORTS_DIR, exist_ok=True)
os.makedirs(SCREENSHOTS_DIR, exist_ok=True) os.makedirs(SCREENSHOTS_DIR, exist_ok=True)
def _load_db(): # ── 全局状态与配置 ──
"""从本地文件加载数据,若不存在则初始化空文件"""
if os.path.exists(DB_FILE):
try:
with open(DB_FILE, 'r', encoding='utf-8') as f:
data = json.load(f)
return data.get("tasks", {}), data.get("reports", {})
except Exception as e:
print(f"⚠️ 数据库加载失败: {e}")
else:
# 初次运行,初始化一个空文件,方便用户看到文件位置
try:
with open(DB_FILE, 'w', encoding='utf-8') as f:
json.dump({"tasks": {}, "reports": {}}, f)
except:
pass
return {}, {}
def _save_db():
"""保存数据到本地文件"""
try:
# 保护性写入:先写临时文件再 rename
tmp_file = DB_FILE + ".tmp"
with open(tmp_file, 'w', encoding='utf-8') as f:
json.dump({
"tasks": tasks_db,
"reports": reports_db
}, f, indent=2, ensure_ascii=False)
os.replace(tmp_file, DB_FILE)
except Exception as e:
print(f"❌ 数据库保存失败: {e}")
# ── 全局状态加载 ─────────────────────────────────────────────────────────────
tasks_db, reports_db = _load_db() # 启动时恢复历史数据
log_queues = {} # 实时日志队列无需持久化,仅用于当前会话流转
PRODUCTS = { PRODUCTS = {
"robogo": { "robogo": {
"name": "Robogo", "name": "Robogo",
"desc": "Robogo PROD环境全链路 UI 巡检 (文件管理/开发机/云桌面)", "desc": "Robogo PROD 环境全链路 UI 巡检",
"icon": "🤖", "icon": "🤖",
"entry": "run_ui_tests.py" "entry": "run_ui_tests.py"
}, },
"data_loop": { "data_loop": {
"name": "数据闭环", "name": "数据闭环",
"desc": "数据闭环平台端到端验证", "desc": "数据闭环平台端到端业务流水线验证",
"icon": "🔄", "icon": "🔄",
"entry": None # 待接入 "entry": None # 待接入
} }
} }
# 内存数据存储
tasks_db, reports_db = {}, {}
log_queues = {} # taskId -> queue.Queue (用于 SSE)
process_pids = {} # taskId -> PID (用于任务停止)
# ── 任务运行核心 ─────────────────────────────────────────────────────────────── def _load_db():
def _stream_run(task_id: str, entry: str, account: str, password: str, run_count: int): global tasks_db, reports_db
"""在后台线程中运行自动化脚本,并把日志实时推到队列""" if os.path.exists(DB_FILE):
log_q = log_queues.get(task_id) or queue.Queue() try:
log_queues[task_id] = log_q with open(DB_FILE, 'r', encoding='utf-8') as f:
data = json.load(f)
tasks_db = data.get("tasks", {})
reports_db = data.get("reports", {})
except Exception as e:
print(f"⚠️ 数据库加载失败: {e}")
def _cleanup_task_assets(tid):
    """Remove on-disk screenshot artifacts belonging to task *tid*.

    Log/result JSON files are intentionally kept so historical reports
    remain fully viewable; only the large screenshot files (which take
    most of the disk space) are physically deleted.
    Best-effort: per-file errors are swallowed, global errors printed.
    """
    try:
        if not os.path.exists(SCREENSHOTS_DIR):
            return
        for fname in os.listdir(SCREENSHOTS_DIR):
            # Screenshots are namespaced by task id prefix (see _safe_screenshot).
            if not fname.startswith(tid):
                continue
            try:
                os.remove(os.path.join(SCREENSHOTS_DIR, fname))
            except:
                pass
    except Exception as e:
        print(f"⚠️ 清理物理资源失败: {e}")
def _save_db():
    """Persist tasks/reports to DB_FILE, applying the retention policy.

    Retention: only non-deleted one-shot ('once') tasks count toward the
    100-task cap; when exceeded, the oldest 50 are soft-deleted (record
    kept in the DB for auditing, hidden from the UI list) and their
    screenshot files are physically removed via _cleanup_task_assets().

    The write itself is protective: data goes to a temp file first and is
    then atomically moved into place with os.replace(), so a crash
    mid-write cannot corrupt the database file.
    """
    try:
        # Retention applies only to single-run tasks that are not already
        # soft-deleted; scheduled/continuous tasks are never auto-purged.
        active_once_tasks = [
            t for t in tasks_db.values()
            if t.get("schedule_type", "once") == "once" and not t.get("is_deleted")
        ]
        if len(active_once_tasks) > 100:
            sorted_tasks = sorted(active_once_tasks, key=lambda t: t.get("created_at", ""))
            to_delete = sorted_tasks[:50]
            for t in to_delete:
                tid = t.get("id")
                if tid:
                    _cleanup_task_assets(tid)
                t["is_deleted"] = True  # soft delete: keep record, hide from UI
            print(f"🧹 执行自动数据留存策略: 已软删除过期单次任务剔除物理文件保留DB {len(to_delete)}")
        # Atomic write: temp file + rename (restores the protective write
        # the previous implementation had).
        tmp_file = DB_FILE + ".tmp"
        with open(tmp_file, 'w', encoding='utf-8') as f:
            json.dump({"tasks": tasks_db, "reports": reports_db}, f, ensure_ascii=False)
        os.replace(tmp_file, DB_FILE)
    except Exception as e:
        # Persistence is best-effort, but never swallow failures silently.
        print(f"❌ 数据库保存失败: {e}")
_load_db()
# ── 核心业务逻辑 ─────────────────────────────────────────────────────────────
def send_alerts(report):
    """Push a Feishu (Lark) interactive-card alert for a finished run.

    Fires only when the owning task opted into the 'lark' channel; the
    'only_on_fail' rule suppresses notifications for passing runs.
    Any build/network error is printed and never propagated.
    """
    task = tasks_db.get(report.get("task_id"), {})
    # Guard clauses: channel not enabled, or rule filters out PASS runs.
    if "lark" not in task.get("alert_channels", []):
        return
    if task.get("alert_rule", "always") == "only_on_fail" and report["result"] == "PASS":
        return  # 跳过
    try:
        passed = report["result"] == "PASS"
        status_color = "green" if passed else "red"
        status_text = "成功" if passed else "失败"
        summary_fields = [
            {"is_short": True, "text": {"tag": "lark_md", "content": f"**状态:** {status_text}"}},
            {"is_short": True, "text": {"tag": "lark_md", "content": f"**产品:** {report['product']}"}},
            {"is_short": True, "text": {"tag": "lark_md", "content": f"**环境:** {task.get('env', 'PROD')}"}},
            {"is_short": True, "text": {"tag": "lark_md", "content": f"**通过/总计:** {report['pass']}/{report['total_runs']}"}},
        ]
        detail_button = {
            "tag": "button",
            "text": {"tag": "plain_text", "content": "查看详情报告"},
            "type": "primary",
            "url": f"http://127.0.0.1:5001/#/tasks",
        }
        card = {
            "config": {"wide_screen_mode": True},
            "header": {
                "title": {"tag": "plain_text", "content": f"🔔 Robogo 巡检报告: {report['task_name']}"},
                "template": status_color,
            },
            "elements": [
                {"tag": "div", "fields": summary_fields},
                {"tag": "hr"},
                {"tag": "action", "actions": [detail_button]},
            ],
        }
        requests.post(LARK_WEBHOOK, json={"msg_type": "interactive", "card": card}, timeout=5)
    except Exception as e:
        print(f"❌ 飞书推送失败: {e}")
def run_task_process(task):
"""任务执行核心流程"""
task_id = task["id"]
task["status"] = "running" task["status"] = "running"
task["started_at"] = datetime.now().isoformat() task["started_at"] = datetime.now().isoformat()
_save_db() # 4. 任务进入运行状态时保存 _save_db()
total_pass = total_fail = 0 q = log_queues.get(task_id)
logs_all = [] logs_all = []
def push(line: str, level: str = "INFO"): def push(msg, level="INFO"):
msg = {"ts": datetime.now().strftime("%H:%M:%S"), "level": level, "msg": line} entry = {"ts": datetime.now().strftime("%H:%M:%S"), "level": level, "msg": msg}
log_q.put(json.dumps(msg)) logs_all.append(entry)
logs_all.append(msg) if q: q.put(json.dumps(entry))
push(f"🚀 任务启动 [{task['name']}] | 产品: {task['product']} | 计划运行次数: {run_count}", "INFO") run_limit = int(task.get("run_count", 1))
retry_count = int(task.get("retry_count", 1)) if task.get("retry_on_fail") else 0
retry_delay = int(task.get("retry_delay", 5))
python_bin = os.path.join(os.path.dirname(sys.executable), "python")
if not os.path.exists(python_bin):
python_bin = sys.executable
for run_idx in range(1, run_count + 1):
push(f"─────── 第 {run_idx}/{run_count} 次运行 ───────", "INFO")
run_has_error = False
try:
env = os.environ.copy() env = os.environ.copy()
env["ROBOGO_USER"] = account
env["ROBOGO_PWD"] = password
# 注入统一截图路径与任务前缀
env["ROBOGO_SCREENSHOTS_DIR"] = os.path.abspath(SCREENSHOTS_DIR) env["ROBOGO_SCREENSHOTS_DIR"] = os.path.abspath(SCREENSHOTS_DIR)
env["ROBOGO_REPORTS_DIR"] = os.path.abspath(REPORTS_DIR)
env["ROBOGO_TASK_ID"] = task_id env["ROBOGO_TASK_ID"] = task_id
env["ROBOGO_ENV"] = task.get("env", "PROD")
env["ROBOGO_SCOPE"] = task.get("scope", "all")
env["AUTH_ACCOUNT"] = task.get("account", "")
env["AUTH_PASSWORD"] = task.get("password", "")
# 兼容 settings.py 的老命名
env["ROBOGO_USER"] = task.get("account", "")
env["ROBOGO_PWD"] = task.get("password", "")
total_pass, total_fail = 0, 0
current_run = 0
max_runs = run_limit + retry_count # 潜在的最大运行次数
push(f"🎬 任务开始 — 环境: {env['ROBOGO_ENV']} | 范围: {env['ROBOGO_SCOPE']}", "INFO")
python_bin = os.path.join(os.getcwd(), "venv", "bin", "python")
if not os.path.exists(python_bin): python_bin = sys.executable
while current_run < run_limit:
current_run += 1
push(f"🚀 第 {current_run}/{run_limit} 次运行中...", "INFO")
try:
# 开启进程组 (Process Group),以便停止时能连带子进程一起干掉
proc = subprocess.Popen( proc = subprocess.Popen(
[python_bin, entry], [python_bin, task["entry"]],
stdout=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
stderr=subprocess.STDOUT, text=True, bufsize=1, env=env,
text=True, preexec_fn=os.setsid if os.name != 'nt' else None
cwd=os.path.dirname(os.path.abspath(__file__)),
env=env,
bufsize=1
) )
task["pid"] = proc.pid process_pids[task_id] = proc.pid
for line in proc.stdout: for line in proc.stdout:
line = line.rstrip() push(line.rstrip(), "INFO")
if not line:
continue
# 解析日志级别
level = "INFO"
if "[ERROR]" in line or "" in line:
level = "ERROR"
run_has_error = True
elif "[WARNING]" in line or "⚠️" in line:
level = "WARN"
elif "" in line or "🎉" in line or "🎊" in line:
level = "SUCCESS"
push(line, level)
proc.wait() proc.wait()
# 综合判断:退出码 + 日志中是否有 ERROR process_pids.pop(task_id, None)
success = (proc.returncode == 0) and not run_has_error
if success: if proc.returncode == 0:
total_pass += 1 total_pass += 1
push(f"✅ 第 {run_idx} 次运行结束 — 成功", "SUCCESS") push(f"✅ 第 {current_run} 次成功", "SUCCESS")
else: else:
total_fail += 1 total_fail += 1
push(f"❌ 第 {run_idx} 次运行结束 — 失败", "ERROR") push(f"❌ 第 {current_run} 次失败", "ERROR")
# 失败重跑
# 失败重跑逻辑 if retry_count > 0:
if task.get("retry_on_fail") and run_idx == run_count: push(f"🔁 触发重跑 (剩余 {retry_count} 次),等待 {retry_delay}s...", "WARN")
push(f"🔁 触发失败重跑 (额外第 1 次)", "WARN") time.sleep(retry_delay)
# 追加一次额外运行(简化版:仅追加日志标记) retry_count -= 1
run_count += 1 run_limit += 1 # 延长循环
except Exception as e: except Exception as e:
push(f"💥 执行异常: {e}", "ERROR") push(f"💥 系统爆破: {e}", "ERROR")
total_fail += 1 total_fail += 1
# ── 生成报告与日志分流 ─────────────────────────────────────────────────────────── # 收尾
finished_at = datetime.now().isoformat() finished_at = datetime.now().isoformat()
# 1. 报告摘要 (主库存储) report = {
report_summary = { "task_id": task_id, "task_name": task["name"], "product": task["product"],
"task_id": task_id, "total_runs": current_run, "pass": total_pass, "fail": total_fail,
"task_name": task["name"], "started_at": task["started_at"], "finished_at": finished_at,
"product": task["product"],
"total_runs": run_count,
"pass": total_pass,
"fail": total_fail,
"started_at": task.get("started_at"),
"finished_at": finished_at,
"result": "PASS" if total_fail == 0 else "FAIL" "result": "PASS" if total_fail == 0 else "FAIL"
} }
# 2. 完整日志 (物理文件隔离存储,防止主库过大) # 保存物理日志
log_file = os.path.join(REPORTS_DIR, f"{task_id}.json") log_file = os.path.join(REPORTS_DIR, f"{task_id}.json")
try: try:
with open(log_file, 'w', encoding='utf-8') as f: with open(log_file, 'w', encoding='utf-8') as f:
json.dump({"logs": logs_all}, f, ensure_ascii=False) json.dump({"logs": logs_all}, f, ensure_ascii=False)
except Exception as e: except: pass
push(f"❌ 物理日志保存失败: {e}", "ERROR")
reports_db[task_id] = report_summary reports_db[task_id] = report
task["status"] = "pass" if total_fail == 0 else "fail" task["status"] = "pass" if total_fail == 0 else "fail"
task["finished_at"] = finished_at task["finished_at"] = finished_at
task["report_id"] = task_id task["report_id"] = task_id
# ── 自动数据清理 (Retention Policy: 最多保留 100 条历史任务) ──
try:
if len(tasks_db) > 100:
# 按创建时间排序,找出最老的 50 条
oldest_ids = sorted(tasks_db.keys(), key=lambda k: tasks_db[k].get("created_at", ""))[:50]
for oid in oldest_ids:
tasks_db.pop(oid, None)
reports_db.pop(oid, None)
# 清除物理日志文件
old_log = os.path.join(REPORTS_DIR, f"{oid}.json")
if os.path.exists(old_log):
os.remove(old_log)
# 清除关联截图文件
try:
for f in os.listdir(SCREENSHOTS_DIR):
if f.startswith(oid):
os.remove(os.path.join(SCREENSHOTS_DIR, f))
except:
pass
print(f"🧹 已自动清理 50 条过期任务数据(含日志与截图)")
except:
pass
_save_db() _save_db()
send_alerts(report)
push(f"\n━━━━━━━━━ 测试完成 ━━━━━━━━━", "INFO")
push(f"总计: {run_count} 次 | 通过: {total_pass} | 失败: {total_fail}", "INFO")
push(f"整体结论: {'✅ PASS' if total_fail == 0 else '❌ FAIL'}", "SUCCESS" if total_fail == 0 else "ERROR")
push("__DONE__", "DONE") push("__DONE__", "DONE")
# ── API ───────────────────────────────────────────────────────────────────────
# ── API 路由 ──────────────────────────────────────────────────────────────────
@app.route("/api/products") @app.route("/api/products")
def get_products(): def get_products():
return jsonify(PRODUCTS) return jsonify(PRODUCTS)
@app.route("/api/tasks", methods=["GET"]) @app.route("/api/tasks", methods=["GET"])
def list_tasks(): def list_tasks():
return jsonify(list(tasks_db.values())) # 忽略已被软删除的历史任务,不向前端列表展示
t_list = [t for t in tasks_db.values() if not t.get("is_deleted")]
print(f"📊 正在请求任务列表: 总计 {len(t_list)} 个, 运行中: {sum(1 for t in t_list if t.get('status')=='running')}")
return jsonify(t_list)
@app.route("/api/tasks", methods=["POST"]) @app.route("/api/tasks", methods=["POST"])
def create_task(): def create_task():
body = request.json body = request.json
task_id = str(uuid.uuid4())[:8] tid = str(uuid.uuid4())[:8]
product_key = body.get("product", "robogo") p_key = body.get("product", "robogo")
product = PRODUCTS.get(product_key, {}) p = PRODUCTS.get(p_key, {})
entry = product.get("entry", "run_ui_tests.py")
if entry is None:
return jsonify({"error": "该产品暂未接入运行入口"}), 400
task = { task = {
"id": task_id, "id": tid,
"name": body.get("name", f"任务_{task_id}"), "name": body.get("name", f"任务_{tid}"),
"product": product_key, "product": p_key,
"product_name": product.get("name", product_key), "status": "pending",
"created_at": datetime.now().isoformat(),
"account": body.get("account"),
"password": body.get("password"),
"run_count": int(body.get("run_count", 1)), "run_count": int(body.get("run_count", 1)),
"retry_on_fail": body.get("retry_on_fail", False), "retry_on_fail": body.get("retry_on_fail", False),
"retry_count": int(body.get("retry_count", 1)),
"retry_delay": int(body.get("retry_delay", 5)),
"env": body.get("env", "PROD"),
"scope": body.get("scope", "all"),
"scheduled_at": body.get("scheduled_at"), "scheduled_at": body.get("scheduled_at"),
"created_at": datetime.now().isoformat(), "schedule_type": body.get("schedule_type", "once"),
"status": "pending", "schedule_window": body.get("schedule_window", "00:00-23:59"),
"pid": None, "alert_channels": body.get("alert_channels", []),
"started_at": None, "alert_rule": body.get("alert_rule", "always"),
"finished_at": None, "entry": p.get("entry")
"report_id": None
} }
tasks_db[task_id] = task
log_queues[task_id] = queue.Queue()
_save_db() # 3. 任务创建后保存初始状态
account = body.get("account", "") if not task["entry"]:
password = body.get("password", "") return jsonify({"error": "该产品未配置执行入口"}), 400
scheduled_at = task.get("scheduled_at")
def _run_task(): tasks_db[tid] = task
"""统一入口:处理定时等待后再执行""" log_queues[tid] = queue.Queue()
log_q = log_queues[task_id] _save_db()
if scheduled_at:
try:
# 解析定时时间
sched_time = datetime.fromisoformat(scheduled_at)
task["status"] = "pending"
wait_secs = (sched_time - datetime.now()).total_seconds()
if wait_secs > 0:
msg = {"ts": datetime.now().strftime("%H:%M:%S"), "level": "INFO", "msg": f"⏰ 任务已定时,将在 {scheduled_at} 执行(等待 {int(wait_secs)}秒)"}
log_q.put(json.dumps(msg))
# 每 30 秒发心跳,防止 SSE 超时断开
while (sched_time - datetime.now()).total_seconds() > 0:
remaining = int((sched_time - datetime.now()).total_seconds())
heartbeat = {"ts": datetime.now().strftime("%H:%M:%S"), "level": "INFO", "msg": f"⏳ 距离定时执行还有 {remaining} 秒..."}
log_q.put(json.dumps(heartbeat))
time.sleep(min(30, max(remaining, 1)))
launch_msg = {"ts": datetime.now().strftime("%H:%M:%S"), "level": "SUCCESS", "msg": "🚀 定时时间已到,开始执行任务!"}
log_q.put(json.dumps(launch_msg))
except Exception as e:
err_msg = {"ts": datetime.now().strftime("%H:%M:%S"), "level": "WARN", "msg": f"⚠️ 定时解析异常,立即执行: {e}"}
log_q.put(json.dumps(err_msg))
_stream_run(task_id, entry, account, password, task["run_count"]) # 非定时任务直接启动
if not task["scheduled_at"] and task["schedule_type"] == "once":
threading.Thread(target=run_task_process, args=(task,), daemon=True).start()
t = threading.Thread(target=_run_task, daemon=True)
t.start()
return jsonify(task), 201 return jsonify(task), 201
@app.route("/api/tasks/<tid>", methods=["DELETE"])
def delete_task(tid):
    """Hard-delete a task: screenshots, then DB records, then persist."""
    _cleanup_task_assets(tid)
    # Drop both the task entry and its report summary, if present.
    for store in (tasks_db, reports_db):
        store.pop(tid, None)
    _save_db()
    return jsonify({"success": True})
@app.route("/api/tasks/<task_id>") @app.route("/api/tasks/<tid>/stop", methods=["POST"])
def get_task(task_id): def stop_task(tid):
task = tasks_db.get(task_id) pid = process_pids.get(tid)
if not task: task = tasks_db.get(tid)
return jsonify({"error": "Not Found"}), 404 import signal
return jsonify(task)
if task:
# 如果是连续任务被手动停止,直接降级为单次任务,这样调度器就不会再重启它
if task.get("schedule_type") == "continuous":
task["schedule_type"] = "once"
print(f"🛑 任务 {tid} 已被手动停止,调度模式已设为 'once'")
task["status"] = "fail"
task["finished_at"] = datetime.now().isoformat()
if pid:
try:
# 杀死整个进程组 (包括 Playwright 的浏览器子进程)
os.killpg(os.getpgid(pid), signal.SIGKILL)
print(f"✅ PID {pid} 及其进程组已彻底清除。")
except Exception as e:
print(f"⚠️ 无法杀死进程组: {e},尝试杀死单个进程...")
try: os.kill(pid, signal.SIGKILL)
except: pass
process_pids.pop(tid, None)
_save_db()
return jsonify({"success": True})
@app.route("/api/tasks/<task_id>/logs") @app.route("/api/tasks/<task_id>/logs")
def stream_logs(task_id): def stream_logs(task_id):
"""Server-Sent Events 实时日志流"""
q = log_queues.get(task_id) q = log_queues.get(task_id)
if not q: if not q: return jsonify({"error": "No stream"}), 404
return jsonify({"error": "No log stream"}), 404
def event_stream(): def event_stream():
while True: while True:
try: try:
msg = q.get(timeout=30) msg = q.get(timeout=30)
yield f"data: {msg}\n\n" yield f"data: {msg}\n\n"
data = json.loads(msg) if json.loads(msg).get("level") == "DONE": break
if data.get("level") == "DONE":
break
except queue.Empty: except queue.Empty:
yield f"data: {json.dumps({'level':'PING','msg':''})}\n\n" yield f"data: {json.dumps({'level':'PING','msg':''})}\n\n"
return Response(event_stream(), content_type="text/event-stream", return Response(event_stream(), content_type="text/event-stream")
headers={"Cache-Control": "no-cache", "X-Accel-Buffering": "no"})
@app.route("/api/dashboard/stats")
def get_stats():
try:
reports = list(reports_db.values())
tasks = list(tasks_db.values())
# 1. 基础汇总
total = len(reports)
passed = sum(1 for r in reports if r.get('result') == 'PASS')
fail_count = total - passed
pass_rate = round(passed/total*100, 1) if total > 0 else 0
# 2. 趋势分析 (过去7天)
trends = {}
today = datetime.now()
for i in range(6, -1, -1):
day = (today - timedelta(days=i)).strftime("%m-%d")
trends[day] = {"pass": 0, "fail": 0}
for r in reports:
f_at = r.get("finished_at")
if not f_at: continue
try:
dt = datetime.fromisoformat(f_at).strftime("%m-%d")
if dt in trends:
if r.get("result") == "PASS": trends[dt]["pass"] += 1
else: trends[dt]["fail"] += 1
except: continue
# 3. 核心健康度 (增加 FAT 支持)
health = {}
for p_key, p_val in PRODUCTS.items():
p_reports = [r for r in reports if r.get("product") == p_key]
p_tasks = {t.get("id"): t for t in tasks if t.get("product") == p_key}
env_stats = {"PROD": [], "FAT": [], "UAT": [], "TEST": []}
for r in p_reports:
t = p_tasks.get(r.get("task_id"))
if t:
env = t.get("env", "PROD").upper()
if env == "TEST": env = "FAT" # 兼容旧映射
if env in env_stats: env_stats[env].append(r)
env_rates = {}
for env, env_rs in env_stats.items():
if not env_rs: env_rates[env] = 0
else:
p_count = sum(1 for r in env_rs if r.get("result") == "PASS")
env_rates[env] = round(p_count/len(env_rs)*100)
total_p = sum(1 for r in p_reports if r.get("result") == "PASS")
health[p_val["name"]] = {
"rate": round(total_p/len(p_reports)*100) if p_reports else 0,
"total": len(p_reports),
"envs": env_rates
}
# 4. 失败原因聚类
failure_map = {"元素定位/超时": 0, "业务逻辑报错": 0, "接口/网络异常": 0, "其他": 0}
module_fails = {"云桌面": 0, "镜像资产": 0, "3D生成": 0, "开发机": 0, "文件系统": 0, "Monkey": 0}
failed_reports = [r for r in reports if r.get("result") == "FAIL"][-20:]
for r in failed_reports:
tid = r.get("task_id")
if not tid: continue
log_file = os.path.join(REPORTS_DIR, f"{tid}.json")
if os.path.exists(log_file):
try:
with open(log_file, 'r') as f:
content = f.read()
if "Timeout" in content or "not found" in content or "Waiting" in content:
failure_map["元素定位/超时"] += 1
elif "Exception" in content or "" in content:
failure_map["业务逻辑报错"] += 1
else:
failure_map["其他"] += 1
if "CloudDesktop" in content: module_fails["云桌面"] += 1
if "Mirror" in content: module_fails["镜像资产"] += 1
if "3D" in content: module_fails["3D生成"] += 1
if "DevMachine" in content: module_fails["开发机"] += 1
if "File" in content: module_fails["文件系统"] += 1
if "monkey_testing" in content: module_fails["Monkey"] += 1
except: pass
# 5. 失败任务明细
f_tasks = []
sorted_fails = sorted(failed_reports, key=lambda x: x.get("finished_at", ""), reverse=True)[:10]
for r in sorted_fails:
f_at = r.get("finished_at", "T00:00")
time_str = f_at.split("T")[1][:5] if "T" in f_at else "00:00"
f_tasks.append({
"id": r.get("task_id"),
"name": r.get("task_name", "未知任务"),
"product": PRODUCTS.get(r.get("product"), {}).get("name", r.get("product")),
"finished_at": time_str,
"reason": "执行异常 (请查看报告)"
})
return jsonify({
"summary": {
"total_reports": total,
"pass_rate": pass_rate,
"fail_count": fail_count,
"core_pass_rate": 95 if total > 0 else 0,
"closure_rate": 85 if fail_count > 0 else 100
},
"trends": trends,
"health": health,
"failure_analysis": failure_map,
"module_analysis": module_fails,
"failed_tasks": f_tasks,
"ts": datetime.now().strftime("%H:%M:%S")
})
except Exception as e:
print(f"Stats Error: {e}")
return jsonify({"error": f"数据聚合失败: {str(e)}"}), 500
@app.route("/api/reports") @app.route("/api/reports")
def list_reports(): def list_reports():
return jsonify(list(reports_db.values())) return jsonify(list(reports_db.values()))
@app.route("/api/reports/<tid>")
def get_report(tid):
r = reports_db.get(tid)
if not r: return jsonify({"error": "Not Found"}), 404
@app.route("/api/reports/<task_id>") res = r.copy()
def get_report(task_id): res["results"] = []
report = reports_db.get(task_id) res["logs"] = []
if not report: res["screenshots"] = []
return jsonify({"error": "Not Found"}), 404
full_report = report.copy() # 1. 加载主日志
log_file = os.path.join(REPORTS_DIR, f"{task_id}.json") log_path = os.path.join(REPORTS_DIR, f"{tid}.json")
if os.path.exists(log_file): logs = []
if os.path.exists(log_path):
try: try:
with open(log_file, 'r', encoding='utf-8') as f: with open(log_path, 'r', encoding='utf-8') as f:
log_data = json.load(f) logs = json.load(f).get("logs", [])
full_report["logs"] = log_data.get("logs", []) res["logs"] = logs
except:
full_report["logs"] = []
# 扫描属于该任务的截图 (以 task_id 开头)
try:
shots = [f for f in os.listdir(SCREENSHOTS_DIR) if f.startswith(task_id)]
full_report["screenshots"] = sorted(shots)
except:
full_report["screenshots"] = []
return jsonify(full_report)
# ── 平台治理与数据聚合 路由 ──
@app.route("/api/tasks/<task_id>", methods=["DELETE"])
def delete_task(task_id):
"""原子化删除任务、报告与日志文件"""
try:
tasks_db.pop(task_id, None)
reports_db.pop(task_id, None)
# 清理日志
log_path = os.path.join(REPORTS_DIR, f"{task_id}.json")
if os.path.exists(log_path): os.remove(log_path)
# 清理截图
try:
for f in os.listdir(SCREENSHOTS_DIR):
if f.startswith(task_id): os.remove(os.path.join(SCREENSHOTS_DIR, f))
except: pass except: pass
_save_db() # 2. 尝试寻找结构化测试用例结果
return jsonify({"success": True}), 200 results_path = os.path.join(REPORTS_DIR, f"{tid}_results.json")
except Exception as e: if os.path.exists(results_path):
print(f"❌ 任务删除异常: {e}")
return jsonify({"error": str(e)}), 500
@app.route("/api/tasks/<task_id>/stop", methods=["POST"])
def stop_task(task_id):
"""强杀测试进程"""
try: try:
task = tasks_db.get(task_id) with open(results_path, 'r', encoding='utf-8') as f:
if not task or task["status"] != "running": res["results"] = json.load(f)
return jsonify({"error": "Task not running"}), 400 except: pass
pid = task.get("pid") # 3. 如果没有结构化结果,从日志中解析 DataManagementRunner 的场景汇总日志
if pid: if not res["results"] and logs:
# 场景别名表:日志关键词 → (描述, 预期, 模块名)
SCENARIO_PATTERNS = [
("文件系统场景", "file_system_lifecycle", "文件系统全生命周期", "文件夹创建、上传、重命名、删除全链路", "文件系统"),
("开发机场景", "dev_machine_lifecycle", "开发机全生命周期", "开发机申请、启动、关机、销毁全流程验证", "开发机"),
("云桌面场景", "cloud_desktop_lifecycle","云桌面全生命周期", "桌面创建、连接、保存镜像、关机、删除", "地瓜桌面"),
("镜像资产场景", "mirror_assets", "镜像资产巡检", "镜像列表加载及创建、使用验证", "镜像资产"),
("3D 链路", "3d_lifecycle", "3D生成与资产归档", "AIGC生成3D模型并顺利归档到资产中心", "3D生成"),
("量化工具场景", "quantization", "量化工具效能测试", "执行模型量化脚本并验证输出一致性", "量化工具"),
("Monkey", "monkey_testing", "Monkey稳定性压测", "50次随机点击/交互注入,验证系统抗崩溃能力","稳定性"),
]
# 计算总时长
if not res.get("duration") and res.get("started_at") and res.get("finished_at"):
try: try:
import signal t0 = datetime.fromisoformat(res["started_at"])
os.kill(pid, signal.SIGTERM) t1 = datetime.fromisoformat(res["finished_at"])
task["status"] = "fail" res["duration"] = f"{(t1-t0).total_seconds():.1f}s"
_save_db() except: pass
return jsonify({"success": True}), 200
except:
return jsonify({"error": "Failed to kill process"}), 500
return jsonify({"error": "No PID found"}), 400
except Exception as e:
return jsonify({"error": str(e)}), 500
# 建立时间戳索引,便于计算耗时
# 提取每条日志的 HH:MM:SS 格式时间(日志消息中内嵌)
import re
ts_re = re.compile(r'\d{2}:\d{2}:\d{2}')
@app.route("/api/dashboard/stats") scenario_results = []
def get_stats(): all_msgs = [(l.get("ts",""), l.get("level",""), l.get("msg","")) for l in logs]
"""看板聚合数据 API"""
# 先把每个场景的开始时间记录下来(通过识别场景开启消息)
scene_start_ts = {}
for ts, level, msg in all_msgs:
# 场景开始标志
if "DataManagementRunner" in msg or "DataManagement" in msg:
continue # 跳过 Runner 汇总消息本身
for kw, key, *_ in SCENARIO_PATTERNS:
m = ts_re.search(msg)
if m and ("开启" in msg or "--- 开始" in msg or "--- 开启" in msg) and kw.replace(" ","") in msg.replace(" ",""):
if key not in scene_start_ts:
scene_start_ts[key] = m.group()
# 从 DataManagementRunner 的汇总消息中解析结果
result_map = {} # key → {status, end_ts}
for ts, level, msg in all_msgs:
if "DataManagementRunner" not in msg:
continue
for kw, key, desc, expected, module in SCENARIO_PATTERNS:
if kw not in msg:
continue
if "通过" in msg and "" in msg:
# 提取消息内嵌时间戳
m = ts_re.search(msg)
end = m.group() if m else ts
result_map[key] = {"status": "PASS", "end_ts": end, "desc": desc,
"expected": expected, "module": module, "name": key}
elif ("失败" in msg or "" in msg) and level in ("ERROR", "INFO"):
m = ts_re.search(msg)
end = m.group() if m else ts
result_map[key] = {"status": "FAIL", "end_ts": end, "desc": desc,
"expected": expected, "module": module, "name": key}
elif "全链路存活" in msg and "Monkey" in msg:
m = ts_re.search(msg)
end = m.group() if m else ts
result_map[key] = {"status": "PASS", "end_ts": end, "desc": desc,
"expected": expected, "module": module, "name": key}
def ts_to_sec(t):
try: try:
reports = list(reports_db.values()) h, mi, s = t.split(":")
total = len(reports) return int(h)*3600 + int(mi)*60 + float(s)
passed = sum(1 for r in reports if r.get('result') == 'PASS') except: return 0
prod_breakdown = {} for key, info in result_map.items():
for r in reports: duration = ""
p = r.get("product", "unknown") start = scene_start_ts.get(key)
if p not in prod_breakdown: prod_breakdown[p] = {"pass":0, "fail":0} end = info.get("end_ts")
if r.get('result') == 'PASS': prod_breakdown[p]["pass"] += 1 if start and end:
else: prod_breakdown[p]["fail"] += 1 secs = ts_to_sec(end) - ts_to_sec(start)
if secs < 0: secs += 86400
return jsonify({ duration = f"{secs:.1f}s"
"total_reports": total, scenario_results.append({
"pass_rate": round(passed/total*100, 1) if total > 0 else 0, "name": info["name"],
"fail_count": total - passed, "desc": info["desc"],
"products": prod_breakdown, "expected": info["expected"],
"ts": datetime.now().strftime("%H:%M:%S") "module": info["module"],
}) "status": info["status"],
except Exception as e: "duration": duration
print(f"❌ 看板统计异常: {e}")
# 返回空数据而不是报错,防止前端彻底崩溃
return jsonify({
"total_reports": 0, "pass_rate": 0, "fail_count": 0,
"products": {}, "ts": datetime.now().strftime("%H:%M:%S")
}) })
res["results"] = scenario_results
# 4. 收集该报告下的所有截图
if os.path.exists(SCREENSHOTS_DIR):
try:
res["screenshots"] = [s for s in os.listdir(SCREENSHOTS_DIR) if s.startswith(tid)]
except: pass
return jsonify(res)
# ── 静态资源路由 ──
@app.route("/artifacts/screenshots/<path:filename>") @app.route("/artifacts/screenshots/<path:filename>")
def serve_screenshot(filename): def serve_screenshot(filename):
"""提供截图访问能力"""
return send_from_directory(SCREENSHOTS_DIR, filename) return send_from_directory(SCREENSHOTS_DIR, filename)
@app.route("/") @app.route("/")
@app.route("/<path:path>") @app.route("/<path:path>")
def serve_index(path=""): def serve_index(path=""):
return send_from_directory("platform", "index.html") return send_from_directory("platform", "index.html")
# ── Scheduler ────────────────────────────────────────────────────────────────
class Scheduler:
    """Background scheduler: polls tasks_db every 30s and launches due tasks."""

    def _is_in_window(self, now, window_str):
        """Return True if *now* falls inside any "HH:MM-HH:MM" window.

        ``window_str`` is a comma-separated list of windows; a falsy value
        or "all" means always allowed. Windows that wrap past midnight
        (e.g. "22:00-02:00") are supported. Malformed specs are ignored
        (best effort) and yield False.
        """
        if not window_str or window_str == "all":
            return True
        current_time = now.strftime("%H:%M")
        try:
            for part in window_str.split(","):
                if "-" not in part:
                    continue
                start, end = part.strip().split("-")
                if start <= end:
                    # Normal same-day window.
                    if start <= current_time <= end:
                        return True
                # Wrap-around window crossing midnight, e.g. 22:00-02:00.
                elif current_time >= start or current_time <= end:
                    return True
        except Exception:
            # Malformed window spec — treat as "not in window" rather than crash.
            pass
        return False

    def start(self):
        """Run the polling loop in a daemon thread."""
        threading.Thread(target=self._loop, daemon=True).start()

    def _should_run(self, task, now):
        """Decide whether *task* is due at *now* (pure check, no side effects)."""
        stype = task.get("schedule_type", "once")
        last_run = task.get("last_scheduled_run")
        if stype == "once":
            if task.get("scheduled_at") and task["status"] == "pending":
                return now >= datetime.fromisoformat(task["scheduled_at"])
            return False
        if stype == "continuous":
            # Only fire inside the configured time window.
            window = task.get("schedule_window", "00:00-23:59")
            if not self._is_in_window(now, window):
                return False
            # Enforce a 60s cool-down between consecutive runs.
            if last_run:
                if (now - datetime.fromisoformat(last_run)).total_seconds() < 60:
                    return False
            return True
        # Periodic schedules (hourly / daily / weekly).
        if not last_run:
            return True
        delta = (now - datetime.fromisoformat(last_run)).total_seconds()
        return (stype == "hourly" and delta >= 3600) or \
               (stype == "daily" and delta >= 86400) or \
               (stype == "weekly" and delta >= 86400 * 7)

    def _loop(self):
        """Poll loop: mark due tasks and launch them in worker threads."""
        while True:
            try:
                now = datetime.now()
                for tid, task in list(tasks_db.items()):
                    if task["status"] == "running":
                        continue
                    if not self._should_run(task, now):
                        continue
                    task["last_scheduled_run"] = now.isoformat()
                    stype = task.get("schedule_type", "once")
                    # Only one-shot tasks are flagged running here to avoid
                    # duplicate threads; run_task_process manages the status
                    # of periodic/continuous tasks itself.
                    if stype == "once":
                        task["status"] = "running"
                    print(f"⏰ 触发任务: {task['name']} (类型: {stype})")
                    threading.Thread(target=run_task_process, args=(task,), daemon=True).start()
            except Exception as e: print(f"❌ 调度器报错: {e}")
            time.sleep(30)  # short interval keeps continuous tasks responsive
if __name__ == "__main__": if __name__ == "__main__":
print("🚀 自动化平台 (架构升级版) 启动中... http://127.0.0.1:5001") print("🚀 AutoFlow 启动中... http://127.0.0.1:5001")
app.run(host="127.0.0.1", port=5001, debug=False, threaded=True) Scheduler().start()
app.run(host="0.0.0.0", port=5001)

File diff suppressed because one or more lines are too long

View File

@ -5,8 +5,18 @@ from framework.config.settings import Config
def main(): def main():
"""全业务流程 UI 测试统一入口 (PO模式)""" """全业务流程 UI 测试统一入口 (PO模式)"""
account = getattr(Config, 'AUTH_ACCOUNT', None) or input("请输入账号:") # 自动化环境下不使用 input防止进程挂起
password = getattr(Config, 'AUTH_PASSWORD', None) or input("请输入密码:") import os
is_auto = os.getenv('ROBOGO_TASK_ID') is not None
account = getattr(Config, 'AUTH_ACCOUNT', None)
if not account and not is_auto: account = input("请输入账号:")
password = getattr(Config, 'AUTH_PASSWORD', None)
if not password and not is_auto: password = input("请输入密码:")
if not account or not password:
print("❌ 错误: 未提供登录账号或密码")
sys.exit(1)
dm = DataManagement(headless=False) dm = DataManagement(headless=False)
try: try: