增加log 文件的输出,汇总cell sn和问题项的分布

This commit is contained in:
2026-02-05 17:44:57 +08:00
parent 46ae47274d
commit e41bb71da8

View File

@@ -11,22 +11,148 @@ import multiprocessing as mp
from concurrent.futures import ProcessPoolExecutor
import threading
from collections import defaultdict
import logging
init(autoreset=True)
class LogManager:
    """Run-log manager: mirrors messages to <output_dir>/processing_log.txt and stdout."""

    def __init__(self, output_dir):
        """Prepare the log file path and wire up the logger.

        Args:
            output_dir: Target directory for processing_log.txt
                (created if it does not exist yet).
        """
        self.output_dir = output_dir
        # Robustness: callers may pass a directory that is not created yet.
        os.makedirs(output_dir, exist_ok=True)
        self.log_file = os.path.join(output_dir, "processing_log.txt")
        self.setup_logging()

    def setup_logging(self):
        """Attach file + stdout handlers to a dedicated (non-root) logger.

        logging.basicConfig() was replaced because it configures the global
        root logger and silently becomes a no-op once the root logger already
        has handlers (e.g. a second LogManager in the same process, or any
        library configuring logging first) — which would drop the file output.
        """
        # Unique name per instance so several output dirs never share handlers.
        self.logger = logging.getLogger(f"{__name__}.LogManager.{id(self)}")
        self.logger.setLevel(logging.INFO)
        self.logger.propagate = False  # avoid duplicate lines via the root logger
        formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
        for handler in (logging.FileHandler(self.log_file, encoding='utf-8'),
                        logging.StreamHandler(sys.stdout)):
            handler.setFormatter(formatter)
            self.logger.addHandler(handler)

    def log_info(self, message):
        """Log an INFO-level message."""
        self.logger.info(message)

    def log_warning(self, message):
        """Log a WARNING-level message."""
        self.logger.warning(message)

    def log_error(self, message):
        """Log an ERROR-level message."""
        self.logger.error(message)

    def log_statistics(self, statistics_data):
        """Append the end-of-run statistics summary to the log file.

        Args:
            statistics_data: Snapshot dict in the shape produced by
                StatisticsCollector.finalize_statistics(); every key is
                optional and defaults to empty/zero.
        """
        with open(self.log_file, 'a', encoding='utf-8') as f:
            f.write("\n" + "=" * 80 + "\n")
            f.write("处理统计汇总\n")
            f.write("=" * 80 + "\n")
            # Overall counters.
            f.write("\n=== 总体统计 ===\n")
            f.write(f"处理时间: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n")
            f.write(f"总SN数量: {statistics_data.get('total_sn_count', 0)}\n")
            f.write(f"总处理文件数: {statistics_data.get('total_files_processed', 0)}\n")
            f.write(f"成功处理文件数: {statistics_data.get('success_files', 0)}\n")
            f.write(f"失败处理文件数: {statistics_data.get('failed_files', 0)}\n")
            f.write(f"生成报告数: {statistics_data.get('generated_reports', 0)}\n")
            f.write(f"失败报告数: {statistics_data.get('failed_reports', 0)}\n")
            # Per-cell section.
            if statistics_data.get('cell_statistics'):
                f.write("\n=== Cell统计 ===\n")
                cell_stats = statistics_data['cell_statistics']
                f.write(f"涉及Cell总数: {len(cell_stats)}\n")
                # Numeric cell ids sort numerically and come first; non-numeric
                # ids follow lexically.  (The previous int(x)-or-x key raised
                # TypeError whenever both kinds of keys were present.)
                sorted_cells = sorted(
                    cell_stats.keys(),
                    key=lambda x: (0, int(x), '') if str(x).isdigit() else (1, 0, str(x)),
                )
                for cell in sorted_cells:
                    stats = cell_stats[cell]
                    file_count = stats.get('file_count', 0)
                    sn_set = stats.get('sn_set', set())
                    fail_count = stats.get('fail_count', 0)
                    fail_details = stats.get('fail_details', {})
                    # Comma-separated, sorted SN list.
                    sn_list = ','.join(sorted(sn_set))
                    # Top-5 failing test names by occurrence count.
                    fail_items = []
                    if fail_details:
                        sorted_fail_items = sorted(fail_details.items(), key=lambda x: x[1], reverse=True)[:5]
                        for test_name, count in sorted_fail_items:
                            fail_items.append(f"{test_name}({count}次)")
                    # "SN=" separator restored (was "SN{sn_list}", which ran the
                    # label and the values together, e.g. "SNA,B").
                    f.write(f"Cell {cell}: 文件数={file_count}, SN={sn_list}, FAIL数={fail_count}\n")
                    if fail_items:
                        f.write(f" FAIL项: {', '.join(fail_items)}\n")
            # Per-SN section.
            if statistics_data.get('sn_statistics'):
                f.write("\n=== SN统计 ===\n")
                sn_stats = statistics_data['sn_statistics']
                for sn, stats in sn_stats.items():
                    f.write(f"SN {sn}: 文件数={stats.get('file_count', 0)}, "
                            f"Cell数={len(stats.get('cells', []))}, "
                            f"FAIL数={stats.get('fail_count', 0)}\n")
            # Failure detail section.
            if statistics_data.get('failure_details'):
                f.write("\n=== 失败项统计 ===\n")
                failure_details = statistics_data['failure_details']
                f.write(f"文件处理失败: {len(failure_details.get('file_failures', []))}\n")
                f.write(f"报告生成失败: {len(failure_details.get('report_failures', []))}\n")
                if failure_details.get('file_failures'):
                    f.write("\n文件处理失败详情:\n")
                    for failure in failure_details['file_failures']:
                        f.write(f" {failure}\n")
                if failure_details.get('report_failures'):
                    f.write("\n报告生成失败详情:\n")
                    for failure in failure_details['report_failures']:
                        f.write(f" {failure}\n")
            # Test-elevation section.
            if statistics_data.get('test_elevation_stats'):
                f.write("\n=== 测试拔高数量统计 ===\n")
                elevation_stats = statistics_data['test_elevation_stats']
                f.write(f"总测试拔高数量: {elevation_stats.get('total_elevations', 0)}\n")
                f.write(f"平均每文件拔高数: {elevation_stats.get('avg_elevation_per_file', 0):.2f}\n")
                if elevation_stats.get('elevation_by_cell'):
                    f.write("\n各Cell测试拔高数量:\n")
                    for cell, count in elevation_stats['elevation_by_cell'].items():
                        f.write(f" Cell {cell}: {count}\n")
            f.write("\n" + "=" * 80 + "\n")
class ThreadSafeProgressTracker:
"""线程安全的多进程进度跟踪器"""
def __init__(self, total_files):
def __init__(self, total_files, log_manager):
self.lock = threading.Lock()
self.processed = 0
self.total = total_files
self.start_time = datetime.now()
self.success_count = 0
self.fail_count = 0
self.log_manager = log_manager
def update(self, success=True ,infor='', count=1):
def update(self, success=True, infor='', count=1, _display_progress = True):
"""线程安全地更新进度"""
with self.lock:
self.processed += count
@@ -37,9 +163,10 @@ class ThreadSafeProgressTracker:
# 每处理10个文件或进度有显著变化时更新显示
if self.processed % 10 == 0 or self.processed == self.total:
self._display_progress(infor)
if _display_progress == True:
self._display_progress(infor)
def _display_progress(self,infor=''):
def _display_progress(self, infor=''):
"""显示当前进度"""
time_used = datetime.now() - self.start_time
percent = self.processed / self.total * 100 if self.total > 0 else 0
@@ -64,8 +191,11 @@ class ThreadSafeProgressTracker:
def finish(self, phase_name="处理"):
"""完成进度跟踪"""
# self._display_progress()
print(f"\n{Fore.GREEN}{phase_name}完成! 总耗时: {(datetime.now() - self.start_time).total_seconds():.1f}")
self._display_progress()
completion_time = (datetime.now() - self.start_time).total_seconds()
self.log_manager.log_info(f"{phase_name}完成! 总耗时: {completion_time:.1f}")
self.log_manager.log_info(f"成功: {self.success_count}, 失败: {self.fail_count}")
print(f"\n{Fore.GREEN}{phase_name}完成! 总耗时: {completion_time:.1f}")
print(f"{Fore.CYAN}成功: {self.success_count}, 失败: {self.fail_count}")
def _format_timedelta(self, delta):
@@ -727,40 +857,198 @@ class ExcelReportWorker:
return df[cols]
class StatisticsCollector:
    """Aggregates per-file, per-SN and per-cell statistics for the whole run."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Reinitialise every counter and aggregate to its empty state."""
        self.total_files_processed = 0
        self.success_files = 0
        self.failed_files = 0
        self.generated_reports = 0
        self.failed_reports = 0
        self.total_sn_count = 0
        # Per-cell aggregates; fail_details maps test name -> failure count.
        self.cell_statistics = defaultdict(lambda: {
            'file_count': 0,
            'sn_set': set(),
            'fail_count': 0,
            'elevation_count': 0,
            'fail_details': defaultdict(int),
        })
        # Per-SN aggregates.
        self.sn_statistics = defaultdict(lambda: {
            'file_count': 0,
            'cells': set(),
            'fail_count': 0,
            'elevation_count': 0,
        })
        self.failure_details = {
            'file_failures': [],
            'report_failures': [],
        }
        self.test_elevation_stats = {
            'total_elevations': 0,
            'elevation_by_cell': defaultdict(int),
            'avg_elevation_per_file': 0,
        }

    def add_file_processing_result(self, result):
        """Record the outcome of one HTML-file processing task."""
        self.total_files_processed += 1
        if not result["success"]:
            self.failed_files += 1
            failed_file = result.get('file', '未知文件')
            reason = result.get('error', '未知错误')
            self.failure_details['file_failures'].append(f"{failed_file}: {reason}")
            return
        self.success_files += 1
        sn = result.get("sn", "UNKNOWN_SN")
        cell = result.get("cell", "UNKNOWN_CELL")
        fail_count = result.get("fail_count", 0)
        # Per-SN bookkeeping.
        sn_entry = self.sn_statistics[sn]
        sn_entry['file_count'] += 1
        sn_entry['cells'].add(cell)
        sn_entry['fail_count'] += fail_count
        # Per-cell bookkeeping.
        cell_entry = self.cell_statistics[cell]
        cell_entry['file_count'] += 1
        cell_entry['sn_set'].add(sn)
        cell_entry['fail_count'] += fail_count
        # Tally which tests failed for this cell.
        self._collect_fail_details(cell, result.get("rows", []), result.get("headers", []))

    def _collect_fail_details(self, cell, rows, headers):
        """Count FAIL occurrences per test name for the given cell."""
        try:
            status_idx = test_name_idx = test_name_new_idx = -1
            for idx, header in enumerate(headers):
                text = str(header).lower()
                if 'status' in text or 'result' in text:
                    status_idx = idx
                elif 'test name new' in text:
                    test_name_new_idx = idx
                elif 'test name' in text:
                    test_name_idx = idx
            # "Test Name New" wins over plain "Test Name" when both exist.
            name_idx = test_name_idx if test_name_new_idx == -1 else test_name_new_idx
            if status_idx == -1 or name_idx == -1:
                return
            needed = max(status_idx, name_idx)
            for row in rows:
                if len(row) <= needed:
                    continue
                status = str(row[status_idx]).strip().upper()
                name = str(row[name_idx]).strip()
                if name and 'FAIL' in status:
                    self.cell_statistics[cell]['fail_details'][name] += 1
        except Exception:
            # Best-effort: a malformed row must not abort overall statistics.
            pass

    def add_report_generation_result(self, result):
        """Record the outcome of one Excel report generation task."""
        if result["success"]:
            self.generated_reports += 1
            return
        self.failed_reports += 1
        sn = result.get("sn", "UNKNOWN_SN")
        cell = result.get("cell", "UNKNOWN_CELL")
        reason = result.get("error", "未知错误")
        self.failure_details['report_failures'].append(f"SN {sn} (Cell {cell}): {reason}")

    def add_test_elevation_data(self, cell, elevation_count):
        """Accumulate test-elevation counts globally and per cell."""
        if elevation_count <= 0:
            return
        self.test_elevation_stats['total_elevations'] += elevation_count
        self.test_elevation_stats['elevation_by_cell'][cell] += elevation_count

    def finalize_statistics(self):
        """Derive the final aggregates and return a plain-dict snapshot."""
        self.total_sn_count = len(self.sn_statistics)
        # Mean elevations per successfully processed file.
        if self.success_files > 0:
            total = self.test_elevation_stats['total_elevations']
            self.test_elevation_stats['avg_elevation_per_file'] = total / self.success_files
        sn_snapshot = {
            sn: {
                'file_count': entry['file_count'],
                'cells': list(entry['cells']),
                'fail_count': entry['fail_count'],
            }
            for sn, entry in self.sn_statistics.items()
        }
        return {
            'total_sn_count': self.total_sn_count,
            'total_files_processed': self.total_files_processed,
            'success_files': self.success_files,
            'failed_files': self.failed_files,
            'generated_reports': self.generated_reports,
            'failed_reports': self.failed_reports,
            'cell_statistics': dict(self.cell_statistics),
            'sn_statistics': sn_snapshot,
            'failure_details': self.failure_details,
            'test_elevation_stats': self.test_elevation_stats,
        }
class ParallelHTMLReportProcessor:
"""并行HTML报告处理器"""
def __init__(self):
def __init__(self, log_manager, statistics_collector):
self.sn_data_map = {}
self.sn_source_files = defaultdict(set)
self.sn_fail_counts = defaultdict(int)
self.sn_file_counts = defaultdict(int)
self.sn_cell_info = defaultdict(set) # 存储每个SN对应的cell编号
self.sn_test_cycle_times = defaultdict(set) # 存储每个SN对应的测试周期时间
self.log_manager = log_manager
self.statistics_collector = statistics_collector
def process_files(self, source_dir, max_workers=None):
"""并行处理目录中的所有文件"""
all_files = self._scan_files(source_dir)
if not all_files:
self.log_manager.log_warning("未找到HTML文件")
print(f"{Fore.YELLOW}⚠ 未找到HTML文件")
return self.sn_data_map
# 预扫描SN分布
self.log_manager.log_info(f"开始扫描文件分布,共{len(all_files)}个文件")
print(f"{Fore.YELLOW}⌛ 正在扫描文件分布...")
self._collect_sn_distribution(all_files)
# 显示文件分布
self._display_file_distribution()
# self._display_file_distribution()
# 设置工作进程数
if max_workers is None:
max_workers = min(mp.cpu_count(), len(all_files))
self.log_manager.log_info(f"开始并行处理 {len(all_files)} 个文件 (使用 {max_workers} 个进程)")
print(f"{Fore.CYAN}▶ 开始并行处理 {len(all_files)} 个文件 (使用 {max_workers} 个进程)")
# 创建进度跟踪器
progress_tracker = ThreadSafeProgressTracker(len(all_files))
progress_tracker = ThreadSafeProgressTracker(len(all_files), self.log_manager)
# 使用进程池并行处理
with ProcessPoolExecutor(max_workers=max_workers) as executor:
@@ -769,20 +1057,25 @@ class ParallelHTMLReportProcessor:
for file_path in all_files}
# 处理结果
infor =''
infor = ''
for future in future_to_file:
result = future.result()
success = result["success"]
# 添加到统计收集器
self.statistics_collector.add_file_processing_result(result)
if success:
self._store_result_data(result)
# progress_tracker.update(success)
if not success:
# print(f"\n{Fore.RED}⚠ 处理失败: {result['file']} - {result['error']}")
infor = infor + f"{Fore.RED}⚠ 处理失败: {result['file']} - {result['error']}|"
progress_tracker.update(success,infor)
error_msg = f"处理失败: {result['file']} - {result['error']}"
self.log_manager.log_error(error_msg)
infor = infor + f"{Fore.RED}{error_msg}|"
progress_tracker.update(success, infor)
progress_tracker.finish(phase_name="HTML文件处理")
self._add_report_statistics()
@@ -840,6 +1133,7 @@ class ParallelHTMLReportProcessor:
if len(self.sn_file_counts) > 10:
dist_info.append(f"... 还有 {len(self.sn_file_counts) - 10} 个SN")
self.log_manager.log_info(f"SN文件分布: {chr(10).join(dist_info)}")
print(f"{Fore.MAGENTA}⚫ SN文件分布:\n{Fore.CYAN}{chr(10).join(dist_info)}")
def _store_result_data(self, result):
@@ -848,6 +1142,7 @@ class ParallelHTMLReportProcessor:
cell = result.get("cell", "UNKNOWN_CELL")
test_cycle_time = result.get("test_cycle_time", "UNKNOWN_TIME")
filename = result["filename"]
fail_count = result.get("fail_count", 0)
# 记录文件来源和cell信息
self.sn_source_files[sn].add(filename)
@@ -861,7 +1156,10 @@ class ParallelHTMLReportProcessor:
self.sn_data_map[sn]['data'].extend(result["rows"])
# 累加FAIL数量
self.sn_fail_counts[sn] += result["fail_count"]
self.sn_fail_counts[sn] += fail_count
# 添加测试拔高统计这里使用fail_count作为拔高数量示例您可以根据实际需求调整
self.statistics_collector.add_test_elevation_data(cell, fail_count)
def _add_report_statistics(self):
"""添加报告统计信息包含cell和测试周期时间信息"""
@@ -893,14 +1191,17 @@ class ParallelHTMLReportProcessor:
class ParallelExcelReportGenerator:
"""并行Excel报告生成器"""
def __init__(self, output_dir):
def __init__(self, output_dir, log_manager, statistics_collector):
self.output_dir = output_dir
self.log_manager = log_manager
self.statistics_collector = statistics_collector
def generate_reports(self, sn_data_map, max_workers=None):
"""并行生成Excel报告"""
total_reports = len(sn_data_map)
if total_reports == 0:
self.log_manager.log_warning("没有数据可生成报告")
print(f"{Fore.YELLOW}⚠ 没有数据可生成报告")
return [], []
@@ -908,10 +1209,11 @@ class ParallelExcelReportGenerator:
if max_workers is None:
max_workers = min(mp.cpu_count(), total_reports)
self.log_manager.log_info(f"开始并行生成Excel报告 (共{total_reports}个,使用 {max_workers} 个进程)")
print(f"{Fore.CYAN}▶ 开始并行生成Excel报告 (共{total_reports}个,使用 {max_workers} 个进程)")
# 创建进度跟踪器
progress_tracker = ThreadSafeProgressTracker(total_reports)
progress_tracker = ThreadSafeProgressTracker(total_reports, self.log_manager)
# 准备报告数据包含cell和测试周期时间信息
report_tasks = []
@@ -947,11 +1249,12 @@ class ParallelExcelReportGenerator:
for future in future_to_report:
result = future.result()
# 添加到统计收集器
self.statistics_collector.add_report_generation_result(result)
if result["success"]:
success_reports.append(result)
progress_tracker.update(success=True, count=1)
# 安全显示成功信息包含cell和测试周期时间信息
progress_tracker.update(success=True, count=1,_display_progress = False)
self._show_success_info(result)
else:
failed_reports.append(result)
@@ -997,11 +1300,14 @@ class ParallelExcelReportGenerator:
f"来源文件: {result.get('source_files_count', 0):>2}",
f"FAIL数量: {result.get('fail_count', 0):>3}"
]
print('\x1b[2K\r' + ' | '.join(success_info).ljust(120))
info_msg = ' | '.join(success_info)
self.log_manager.log_info(info_msg)
# print('\x1b[2K\r' + info_msg.ljust(120))
except Exception as e:
# 如果显示信息时出错,使用简化显示
print(
f"{Fore.GREEN}✓ 报告生成成功 (SN: {result.get('sn', 'UNKNOWN_SN')}, Cell: {result.get('cell', 'UNKNOWN_CELL')}, Time: {result.get('test_cycle_time', 'UNKNOWN_TIME')})")
msg = f"✓ 报告生成成功 (SN: {result.get('sn', 'UNKNOWN_SN')}, Cell: {result.get('cell', 'UNKNOWN_CELL')}, Time: {result.get('test_cycle_time', 'UNKNOWN_TIME')})"
self.log_manager.log_info(msg)
print(f"{Fore.GREEN}{msg}")
def _show_error_info(self, result):
"""安全显示单个失败报告信息包含cell和测试周期时间信息"""
@@ -1029,11 +1335,14 @@ class ParallelExcelReportGenerator:
f"时间: {time_display}",
f"错误: {error_msg}"
]
print('\x1b[2K\r' + ' | '.join(error_info).ljust(100))
info_msg = ' | '.join(error_info)
self.log_manager.log_error(info_msg)
print('\x1b[2K\r' + info_msg.ljust(100))
except Exception as e:
# 如果显示信息时出错,使用简化显示
print(
f"{Fore.RED}✗ 报告生成失败 (SN: {result.get('sn', 'UNKNOWN_SN')}, Cell: {result.get('cell', 'UNKNOWN_CELL')}, Time: {result.get('test_cycle_time', 'UNKNOWN_TIME')})")
msg = f"✗ 报告生成失败 (SN: {result.get('sn', 'UNKNOWN_SN')}, Cell: {result.get('cell', 'UNKNOWN_CELL')}, Time: {result.get('test_cycle_time', 'UNKNOWN_TIME')})"
self.log_manager.log_error(msg)
print(f"{Fore.RED}{msg}")
def _show_final_stats(self, success_reports, failed_reports):
"""显示最终统计信息包含cell和测试周期时间信息"""
@@ -1048,33 +1357,44 @@ class ParallelExcelReportGenerator:
unique_times = set(
report.get('test_cycle_time', 'UNKNOWN_TIME') for report in success_reports + failed_reports)
print(f"\n{Fore.CYAN}=== 最终统计 ===")
print(f"{Fore.GREEN}成功生成报告: {len(success_reports)}")
print(f"{Fore.RED}失败报告: {len(failed_reports)}")
print(f"{Fore.BLUE}总记录数: {total_records}")
print(f"{Fore.BLUE}总来源文件: {total_sources}")
print(f"{Fore.BLUE}总FAIL数量: {total_fails}")
print(f"{Fore.BLUE}涉及Cell数量: {len(unique_cells)}")
print(f"{Fore.BLUE}涉及测试周期时间数量: {len(unique_times)}")
print(f"{Fore.CYAN}输出目录: {self.output_dir}")
stats_msg = (
f"\n=== 最终统计 ===\n"
f"成功生成报告: {len(success_reports)}\n"
f"失败报告: {len(failed_reports)}\n"
f"总记录数: {total_records}\n"
f"总来源文件: {total_sources}\n"
f"总FAIL数量: {total_fails}\n"
f"涉及Cell数量: {len(unique_cells)}\n"
f"涉及测试周期时间数量: {len(unique_times)}\n"
f"输出目录: {self.output_dir}"
)
self.log_manager.log_info(stats_msg)
print(f"\n{Fore.CYAN}{stats_msg}")
if failed_reports:
print(f"\n{Fore.YELLOW}失败报告详情:")
failure_details = "\n失败报告详情:\n"
for report in failed_reports:
sn = report.get('sn', 'UNKNOWN_SN')
cell = report.get('cell', 'UNKNOWN_CELL')
time = report.get('test_cycle_time', 'UNKNOWN_TIME')
error = report.get('error', '未知错误')
print(f" {sn} (Cell {cell}, Time {time}): {error}")
failure_details += f" {sn} (Cell {cell}, Time {time}): {error}\n"
self.log_manager.log_warning(failure_details)
print(f"\n{Fore.YELLOW}{failure_details}")
except Exception as e:
print(f"{Fore.RED}统计信息显示出错: {e}")
error_msg = f"统计信息显示出错: {e}"
self.log_manager.log_error(error_msg)
print(f"{Fore.RED}{error_msg}")
class ReportProcessor:
"""主报告处理器"""
def __init__(self):
pass
self.log_manager = None
self.statistics_collector = StatisticsCollector()
def process_reports(self, html_max_workers=None, excel_max_workers=None):
"""处理完整流程"""
@@ -1085,27 +1405,40 @@ class ReportProcessor:
output_dir = self._create_output_dir(source_dir)
# 初始化日志管理器
self.log_manager = LogManager(output_dir)
self.log_manager.log_info(f"开始处理报告,源目录: {source_dir}, 输出目录: {output_dir}")
try:
# 阶段1并行处理HTML文件
self.log_manager.log_info("=== 阶段1: HTML文件处理 ===")
print(f"\n{Fore.CYAN}=== 阶段1: HTML文件处理 ===")
html_processor = ParallelHTMLReportProcessor()
html_processor = ParallelHTMLReportProcessor(self.log_manager, self.statistics_collector)
processed_data = html_processor.process_files(source_dir, html_max_workers)
if not processed_data:
self.log_manager.log_warning("没有处理任何数据,程序结束")
print(f"{Fore.YELLOW}⚠ 没有处理任何数据,程序结束")
return
# 阶段2并行生成Excel报告
self.log_manager.log_info("=== 阶段2: Excel报告生成 ===")
print(f"\n{Fore.CYAN}=== 阶段2: Excel报告生成 ===")
excel_generator = ParallelExcelReportGenerator(output_dir)
excel_generator = ParallelExcelReportGenerator(output_dir, self.log_manager, self.statistics_collector)
success_reports, failed_reports = excel_generator.generate_reports(
processed_data, excel_max_workers)
# 记录最终统计信息到日志
statistics_data = self.statistics_collector.finalize_statistics()
self.log_manager.log_statistics(statistics_data)
# 安全显示总体结果
self._show_overall_result(len(processed_data), (success_reports), (failed_reports))
except Exception as e:
print(f"\n{Fore.RED}程序执行出错: {type(e).__name__}: {str(e)}")
error_msg = f"程序执行出错: {type(e).__name__}: {str(e)}"
self.log_manager.log_error(error_msg)
print(f"\n{Fore.RED}{error_msg}")
import traceback
traceback.print_exc()
@@ -1136,21 +1469,37 @@ class ReportProcessor:
"""创建输出目录"""
output_dir = os.path.join(source_dir, f"Html文件分析_带Cell编号_{datetime.now().strftime('%Y%m%d%H%M%S')}")
os.makedirs(output_dir, exist_ok=True)
# 只有在log_manager初始化后才能记录日志
if self.log_manager:
self.log_manager.log_info(f"输出目录创建成功: {output_dir}")
print(f"{Fore.GREEN}✔ 输出目录创建成功: {output_dir}")
return output_dir
def _show_overall_result(self, total_sn, success_reports, failed_reports):
"""安全显示总体结果包含cell和测试周期时间信息"""
print(f"\n{Fore.CYAN}=== 程序执行完成 ===")
print(f"{Fore.GREEN}✓ 处理完成!")
print(f"{Fore.BLUE}总SN数量: {total_sn}")
print(f"{Fore.GREEN}成功报告: {len(success_reports)}")
print(f"{Fore.RED}失败报告: {len(failed_reports)}")
result_msg = (
f"\n=== 程序执行完成 ===\n"
f"✓ 处理完成!\n"
f"总SN数量: {total_sn}\n"
f"成功报告: {len(success_reports)}\n"
f"失败报告: {len(failed_reports)}"
)
if self.log_manager:
self.log_manager.log_info(result_msg)
print(f"\n{Fore.CYAN}{result_msg}")
if len(failed_reports) == 0:
print(f"{Fore.GREEN}🎉 所有报告生成成功!")
completion_msg = "🎉 所有报告生成成功!"
self.log_manager.log_info(completion_msg)
print(f"{Fore.GREEN}{completion_msg}")
else:
print(f"{Fore.YELLOW}⚠ 有 {len(failed_reports)} 个报告生成失败,请查看上述错误信息")
warning_msg = f"⚠ 有 {len(failed_reports)} 个报告生成失败,请查看上述错误信息"
self.log_manager.log_warning(warning_msg)
print(f"{Fore.YELLOW}{warning_msg}")
if __name__ == "__main__":