Python脚本开发文件初始化
This commit is contained in:
475
dataProcess/dataProcessMerge_V1.py
Normal file
475
dataProcess/dataProcessMerge_V1.py
Normal file
@@ -0,0 +1,475 @@
|
||||
import os
|
||||
import pandas as pd
|
||||
from tkinter import filedialog, Tk
|
||||
import logging
|
||||
import datetime
|
||||
# --- 新增导入 ---
|
||||
from colorama import init, Fore, Style
|
||||
import sys
|
||||
|
||||
# 初始化 colorama,autoreset=True 使得每次打印后自动恢复默认颜色
|
||||
init(autoreset=True)
|
||||
|
||||
# --- 自定义日志格式化器 ---
|
||||
class ColoredFormatter(logging.Formatter):
    """Formatter that colors console output according to the log level.

    Colors are applied to the ``levelname`` and ``msg`` fields only for the
    duration of a single ``format()`` call; the record is restored afterwards
    so other handlers attached to the same logger (e.g. a plain file handler)
    do not receive ANSI escape codes.
    """

    # Level name -> colorama color sequence.
    COLORS = {
        'DEBUG': Fore.CYAN,
        'INFO': Fore.GREEN,
        'WARNING': Fore.YELLOW,
        'ERROR': Fore.RED,
        'CRITICAL': Fore.RED + Style.BRIGHT,
    }

    def format(self, record):
        """Format *record*, wrapping level name and message in ANSI colors."""
        log_color = self.COLORS.get(record.levelname, '')
        # Fix: the original mutated the record permanently, so every other
        # handler sharing this record would see the color codes. Save the
        # original values and restore them after formatting.
        original_levelname = record.levelname
        original_msg = record.msg
        try:
            record.levelname = f"{log_color}{record.levelname}{Style.RESET_ALL}"
            record.msg = f"{log_color}{record.msg}{Style.RESET_ALL}"
            return super().format(record)
        finally:
            record.levelname = original_levelname
            record.msg = original_msg
|
||||
|
||||
# --- Logging configuration ---
# Use the root logger so every plain `logging.*` call in this module is covered.
logger = logging.getLogger()  # root logger
logger.setLevel(logging.INFO)

# Drop any pre-existing handlers to avoid duplicated console output.
if logger.handlers:
    logger.handlers.clear()

# Console handler.
console_handler = logging.StreamHandler(sys.stdout)  # sys.stdout is usually preferable
console_handler.setLevel(logging.INFO)

# Attach the colored formatter.
formatter = ColoredFormatter('%(asctime)s - %(levelname)s - %(message)s')
console_handler.setFormatter(formatter)

# Register the handler on the root logger.
logger.addHandler(console_handler)
# --- End of logging configuration ---
|
||||
|
||||
|
||||
class DataProcessor:
|
||||
def __init__(self):
|
||||
self.spec_file = None
|
||||
self.data_folder = None
|
||||
self.spec_data = None
|
||||
self.data_files = []
|
||||
self.merged_data = pd.DataFrame()
|
||||
|
||||
def select_spec_file(self):
|
||||
"""选择上限和下限规格要求文件"""
|
||||
root = Tk()
|
||||
root.withdraw()
|
||||
self.spec_file = filedialog.askopenfilename(
|
||||
title="选择上限和下限规格要求文件",
|
||||
filetypes=[("CSV files", "*.csv"), ("All files", "*.*")]
|
||||
)
|
||||
if not self.spec_file:
|
||||
logging.error("未选择规格文件")
|
||||
return False
|
||||
logging.info(f"已选择规格文件: {self.spec_file}")
|
||||
return True
|
||||
|
||||
def select_data_folder(self):
|
||||
"""选择实际数据文件所在的文件夹"""
|
||||
root = Tk()
|
||||
root.withdraw()
|
||||
self.data_folder = filedialog.askdirectory(title="选择实际数据文件所在的文件夹")
|
||||
if not self.data_folder:
|
||||
logging.error("未选择数据文件夹")
|
||||
return False
|
||||
logging.info(f"已选择数据文件夹: {self.data_folder}")
|
||||
return True
|
||||
|
||||
def clean_column_names(self, df):
|
||||
"""清理列名,去除前后空格和特殊字符"""
|
||||
df.columns = [col.strip() for col in df.columns]
|
||||
return df
|
||||
|
||||
    def load_spec_data(self):
        """Load the spec CSV; its header sits on the 3rd row (``header=2``).

        Cleans column names, coerces 'PAD ID' to stripped strings (it is the
        merge key), and logs warnings about missing or near-miss columns.

        Returns:
            bool: True on success, False when reading/parsing the file fails.
        """
        try:
            # Skip the first 2 rows; the 3rd row carries the column names.
            self.spec_data = pd.read_csv(self.spec_file, header=2)

            # Strip whitespace from the column names.
            self.spec_data = self.clean_column_names(self.spec_data)

            # The merge key must be a stripped string.
            if 'PAD ID' in self.spec_data.columns:
                self.spec_data['PAD ID'] = self.spec_data['PAD ID'].astype(str).str.strip()

            # Columns the downstream merge step expects to find here.
            required_columns = ["PAD ID", "Component ID", "Vol_Min(%)", "Vol_Max(%)",
                                "Height_Low(mil)", "Height_High(mil)", "Area_Min(%)", "Area_Max(%)"]

            missing_columns = [col for col in required_columns if col not in self.spec_data.columns]
            if missing_columns:
                logging.warning(f"规格文件中缺少以下列: {missing_columns}")
                # Point out near-miss column names to help diagnose the file.
                for missing_col in missing_columns:
                    similar_cols = [col for col in self.spec_data.columns if missing_col.lower() in col.lower()]
                    if similar_cols:
                        logging.info(f"可能匹配的列: {similar_cols}")

            # Component ID is singled out because its absence propagates into the output file.
            if "Component ID" not in self.spec_data.columns:
                logging.warning("'Component ID' 列在规格文件中缺失,这可能导致输出文件中也缺少该列。")

            logging.info(f"规格数据加载成功,共 {len(self.spec_data)} 行")
            logging.info(f"规格文件列名: {list(self.spec_data.columns)}")
            logging.info(
                f"规格文件PAD ID数据类型: {self.spec_data['PAD ID'].dtype if 'PAD ID' in self.spec_data.columns else 'N/A'}")

        except Exception as e:
            logging.error(f"加载规格数据失败: {e}")
            return False
        return True
|
||||
|
||||
def scan_data_files(self):
|
||||
"""扫描数据文件夹中的CSV文件,并检查标题行是否包含有效字段"""
|
||||
try:
|
||||
# 定义有效的字段名称(去除前后空格)
|
||||
required_fields = [
|
||||
"PAD ID", "Component ID", "Height(mil)", "Volume(%)",
|
||||
"Area(%)", "Volume(mil3)", "Area(mil2)"
|
||||
]
|
||||
|
||||
# 可选:定义字段匹配的宽松程度
|
||||
field_match_threshold = 0.8 # 80%的字段匹配即认为有效
|
||||
|
||||
# 扫描CSV文件
|
||||
valid_files = []
|
||||
for file in os.listdir(self.data_folder):
|
||||
if file.endswith(".csv") and "F27140015X3K" in file:
|
||||
file_path = os.path.join(self.data_folder, file)
|
||||
|
||||
# 检查文件是否可读且包含有效字段
|
||||
if self._is_valid_csv_file(file_path, required_fields, field_match_threshold):
|
||||
valid_files.append(file_path)
|
||||
|
||||
self.data_files = valid_files
|
||||
logging.info(
|
||||
f"找到 {len(self.data_files)} 个有效数据文件: {[os.path.basename(f) for f in self.data_files]}")
|
||||
|
||||
except Exception as e:
|
||||
logging.error(f"扫描数据文件失败: {e}")
|
||||
return False
|
||||
|
||||
return True if self.data_files else False
|
||||
|
||||
def _is_valid_csv_file(self, file_path, required_fields, threshold=1.0):
|
||||
"""检查CSV文件是否包含必需的字段"""
|
||||
try:
|
||||
# 尝试不同的编码
|
||||
encodings = ['utf-8', 'gbk', 'latin-1']
|
||||
|
||||
for encoding in encodings:
|
||||
try:
|
||||
with open(file_path, 'r', encoding=encoding) as f:
|
||||
first_line = f.readline().strip()
|
||||
|
||||
# 解析CSV标题行
|
||||
headers = [header.strip() for header in first_line.split(',')]
|
||||
|
||||
# 计算匹配的字段数量
|
||||
matched_fields = 0
|
||||
missing_fields = []
|
||||
|
||||
for required_field in required_fields:
|
||||
if required_field in headers:
|
||||
matched_fields += 1
|
||||
else:
|
||||
missing_fields.append(required_field)
|
||||
|
||||
# 计算匹配比例
|
||||
match_ratio = matched_fields / len(required_fields)
|
||||
|
||||
if match_ratio >= threshold:
|
||||
if missing_fields:
|
||||
logging.warning(
|
||||
f"文件 {os.path.basename(file_path)} 部分字段缺失: {missing_fields},但满足阈值要求")
|
||||
else:
|
||||
logging.info(f"文件 {os.path.basename(file_path)} 所有字段完整")
|
||||
return True
|
||||
else:
|
||||
logging.warning(
|
||||
f"文件 {os.path.basename(file_path)} 字段匹配率不足: {match_ratio:.1%},缺失字段: {missing_fields}")
|
||||
return False
|
||||
|
||||
except UnicodeDecodeError:
|
||||
continue # 尝试下一个编码
|
||||
|
||||
logging.error(f"无法读取文件 {os.path.basename(file_path)},尝试了所有编码")
|
||||
return False
|
||||
|
||||
except Exception as e:
|
||||
logging.error(f"检查文件 {os.path.basename(file_path)} 时发生错误: {e}")
|
||||
return False
|
||||
|
||||
    def load_and_clean_data_file(self, data_file):
        """Load one measurement CSV and normalize it for merging.

        Reads *data_file* (header on the first row), falling back through
        utf-8 -> gbk -> latin-1 on decode errors. Column names are stripped,
        'PAD ID' is coerced to a stripped string, and close variants of the
        required column names are renamed onto the canonical ones.

        Returns:
            The cleaned DataFrame (a copy), or None when loading fails or
            required columns are still missing after the rename pass.
        """
        try:
            # Read with the first row as header; try several encodings
            # because source files may not be UTF-8.
            try:
                data_df = pd.read_csv(data_file, header=0, encoding='utf-8')
            except UnicodeDecodeError:
                try:
                    data_df = pd.read_csv(data_file, header=0, encoding='gbk')
                except UnicodeDecodeError:
                    data_df = pd.read_csv(data_file, header=0, encoding='latin-1')

            # Strip whitespace from the column names.
            data_df = self.clean_column_names(data_df)

            logging.info(f"数据文件列名: {list(data_df.columns)}")

            # Work on an explicit copy to avoid SettingWithCopyWarning.
            data_df = data_df.copy()

            # The merge key must be a stripped string.
            if 'PAD ID' in data_df.columns:
                data_df['PAD ID'] = data_df['PAD ID'].astype(str).str.strip()
                logging.info(f"数据文件PAD ID数据类型: {data_df['PAD ID'].dtype}")

            # Columns the merge step depends on.
            required_columns = ["PAD ID", "Component ID", "Height(mil)", "Volume(%)", "Area(%)"]

            # Map close variants of the required names onto the canonical ones.
            column_mapping = {}
            for required_col in required_columns:
                if required_col not in data_df.columns:
                    # Loose match: ignore spaces and case when comparing names.
                    similar_cols = [col for col in data_df.columns if
                                    required_col.lower().replace(" ", "") in col.lower().replace(" ", "")]
                    if similar_cols:
                        column_mapping[required_col] = similar_cols[0]
                        logging.info(f"映射列: {required_col} -> {similar_cols[0]}")

            # Apply the renames, if any were found.
            if column_mapping:
                data_df = data_df.rename(columns=column_mapping)

            missing_columns = [col for col in required_columns if col not in data_df.columns]
            if missing_columns:
                logging.error(f"数据文件中缺少以下列: {missing_columns}")
                logging.info(f"数据文件所有列: {list(data_df.columns)}")
                return None

            return data_df  # the cleaned copy

        except Exception as e:
            logging.error(f"加载数据文件失败: {e}")
            return None
|
||||
|
||||
    def process_data(self):
        """Merge every scanned data file with the spec table on 'PAD ID'.

        For each file in ``self.data_files``: load and clean it, keep the
        measurement columns, inner-join against the spec columns, prefer the
        data file's 'Component ID', convert the spec height limits from mil
        to another unit by dividing by 25.4 (mil -> mm-scale — TODO confirm
        the intended target unit), and collect the selected output columns.
        The concatenated result lands in ``self.merged_data``.

        Returns:
            bool: True when at least one file produced merged rows.
        """
        all_data = []
        total_files = len(self.data_files)

        if total_files == 0:
            logging.error("未找到任何数据文件")
            return False

        for idx, data_file in enumerate(self.data_files, 1):
            logging.info(f"处理数据文件 {idx}/{total_files}: {os.path.basename(data_file)}")
            try:
                # Load and normalize this data file.
                data_df = self.load_and_clean_data_file(data_file)
                if data_df is None:
                    logging.error(f"无法加载文件: {os.path.basename(data_file)}")
                    continue

                # Measurement columns we want to carry into the merge.
                required_columns = ["PAD ID", "Component ID", "Height(mil)", "Volume(%)", "Area(%)"]

                # Keep whatever subset of the required columns exists.
                available_columns = [col for col in required_columns if col in data_df.columns]
                if len(available_columns) != len(required_columns):
                    missing = set(required_columns) - set(available_columns)
                    logging.warning(f"文件 {os.path.basename(data_file)} 缺少列: {missing}")
                    logging.info(f"可用的列: {available_columns}")
                    # Continue with the available columns (copy avoids
                    # SettingWithCopyWarning on the assignments below).
                    data_df = data_df[available_columns].copy()
                else:
                    # Select the required columns (as a copy).
                    data_df = data_df[required_columns].copy()

                # Record provenance: which data file and which spec file.
                data_df["数据来源"] = os.path.basename(data_file)
                data_df["限制来源"] = os.path.basename(self.spec_file)

                # Debug: sample the merge keys on both sides before merging.
                logging.info(
                    f"合并前 - 数据文件PAD ID示例: {data_df['PAD ID'].head(3).tolist() if 'PAD ID' in data_df.columns else 'N/A'}")
                logging.info(
                    f"合并前 - 规格文件PAD ID示例: {self.spec_data['PAD ID'].head(3).tolist() if 'PAD ID' in self.spec_data.columns else 'N/A'}")

                # Spec columns to bring into the merge.
                spec_columns = ["PAD ID", "Component ID", "Vol_Min(%)", "Vol_Max(%)",
                                "Height_Low(mil)", "Height_High(mil)", "Area_Min(%)", "Area_Max(%)"]

                # Only take the spec columns that actually exist.
                available_spec_columns = [col for col in spec_columns if col in self.spec_data.columns]
                # .copy() keeps this frame independent of self.spec_data.
                spec_df = self.spec_data[available_spec_columns].copy()

                # The spec side of the merge key must also be a stripped string.
                if 'PAD ID' in spec_df.columns:
                    spec_df['PAD ID'] = spec_df['PAD ID'].astype(str).str.strip()

                # Inner join on the pad identifier; duplicate column names get
                # '_data' / '_spec' suffixes.
                merged_df = pd.merge(data_df, spec_df, on="PAD ID", how="inner", suffixes=('_data', '_spec'))

                if merged_df.empty:
                    logging.warning(f"文件 {os.path.basename(data_file)} 与规格数据无匹配项")
                    # Debug info to explain why nothing matched.
                    data_pad_ids = set(data_df['PAD ID'].unique()) if 'PAD ID' in data_df.columns else set()
                    spec_pad_ids = set(spec_df['PAD ID'].unique()) if 'PAD ID' in spec_df.columns else set()
                    common_ids = data_pad_ids.intersection(spec_pad_ids)
                    logging.info(
                        f"数据文件PAD ID数量: {len(data_pad_ids)}, 规格文件PAD ID数量: {len(spec_pad_ids)}, 共同ID数量: {len(common_ids)}")
                    logging.info(f"数据文件前5个PAD ID: {list(data_pad_ids)[:5] if data_pad_ids else 'N/A'}")
                    logging.info(f"规格文件前5个PAD ID: {list(spec_pad_ids)[:5] if spec_pad_ids else 'N/A'}")
                    continue

                # Prefer the Component ID coming from the data file: when the
                # merge produced both suffixed columns, copy the '_data' one
                # back to the canonical name ('_spec' is kept for comparison).
                if 'Component ID_data' in merged_df.columns:
                    merged_df['Component ID'] = merged_df['Component ID_data']

                # Unit conversion of the spec height limits (divide by 25.4).
                # Values are cleaned of non-numeric characters first so stray
                # text does not break the conversion.
                convert_cols = ["Height_Low(mil)", "Height_High(mil)"]
                for col in convert_cols:
                    if col in merged_df.columns:
                        before_non_null = merged_df[col].notna().sum()
                        # Keep digits, decimal point and minus sign only.
                        cleaned = merged_df[col].astype(str).str.replace(r'[^\d\.\-]+', '', regex=True)
                        merged_df[col] = pd.to_numeric(cleaned, errors='coerce') / 25.4
                        after_non_null = merged_df[col].notna().sum()
                        logging.info(
                            f"字段 {col} 已除以 25.4 完成单位转换,非空值数: 转换前 {before_non_null} -> 转换后 {after_non_null}"
                        )
                    else:
                        logging.warning(f"规格高度字段缺失,无法进行单位转换: {col}")

                # Final output columns, in the required order.
                output_columns = [
                    "PAD ID", "Component ID", "Vol_Min(%)", "Vol_Max(%)", "Height_Low(mil)",
                    "Height_High(mil)", "Area_Min(%)", "Area_Max(%)", "Height(mil)", "Volume(%)", "Area(%)",
                    "数据来源", "限制来源"
                ]

                # Only keep the output columns that actually exist.
                available_output_columns = [col for col in output_columns if col in merged_df.columns]

                # Warn about any expected output columns that are absent.
                missing_output_columns = [col for col in output_columns if col not in merged_df.columns]
                if missing_output_columns:
                    logging.warning(
                        f"文件 {os.path.basename(data_file)} 的最终输出中缺少以下预期列: {missing_output_columns}")

                # Nothing usable -> skip this file entirely.
                if not available_output_columns:
                    logging.error(f"文件 {os.path.basename(data_file)} 没有任何预期的输出列,将跳过此文件。")
                    continue

                merged_df = merged_df[available_output_columns].copy()  # copy again for safety

                all_data.append(merged_df)
                logging.info(f"文件 {os.path.basename(data_file)} 处理成功,匹配 {len(merged_df)} 行")

            except Exception as e:
                logging.error(f"处理文件 {os.path.basename(data_file)} 时出错: {e}")
                # Extra debug info about the merge keys on both sides.
                if 'data_df' in locals() and 'PAD ID' in data_df.columns:
                    logging.info(f"数据文件PAD ID数据类型: {data_df['PAD ID'].dtype}")
                    logging.info(f"数据文件PAD ID示例: {data_df['PAD ID'].head(3).tolist()}")
                if hasattr(self, 'spec_data') and 'PAD ID' in self.spec_data.columns:
                    logging.info(f"规格文件PAD ID数据类型: {self.spec_data['PAD ID'].dtype}")
                    logging.info(f"规格文件PAD ID示例: {self.spec_data['PAD ID'].head(3).tolist()}")
                continue

        if all_data:
            self.merged_data = pd.concat(all_data, ignore_index=True)
            logging.info(f"数据处理完成,共合并 {len(self.merged_data)} 行数据")
            logging.info(f"最终数据列名: {list(self.merged_data.columns)}")
        else:
            logging.error("未成功处理任何数据文件")
            return False
        return True
|
||||
|
||||
def save_to_excel(self):
|
||||
"""保存合并后的数据到Excel文件"""
|
||||
try:
|
||||
# 生成时间戳
|
||||
timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
|
||||
output_filename = f"dataProcess_out_{timestamp}.xlsx"
|
||||
output_file = os.path.join(self.data_folder, output_filename)
|
||||
|
||||
self.merged_data.to_excel(output_file, index=False)
|
||||
logging.info(f"数据已保存到: {output_file}")
|
||||
|
||||
# 显示统计信息
|
||||
stats = f"处理统计:\n"
|
||||
stats += f"- 规格文件: {os.path.basename(self.spec_file)}\n"
|
||||
stats += f"- 处理的数据文件数: {len(self.data_files)}\n"
|
||||
stats += f"- 合并的总行数: {len(self.merged_data)}\n"
|
||||
stats += f"- 输出文件: {output_file}\n"
|
||||
stats += f"- 包含的列: {list(self.merged_data.columns)}"
|
||||
|
||||
logging.info(stats)
|
||||
# 原来的 message box 提示已移除,改为日志输出
|
||||
logging.info("处理完成。\n" + stats)
|
||||
|
||||
except Exception as e:
|
||||
logging.error(f"保存数据失败: {e}")
|
||||
# 原来的 error message box 已移除,改为日志输出
|
||||
logging.error(f"保存数据失败: {e}")
|
||||
|
||||
def run(self):
|
||||
"""运行整个数据处理流程"""
|
||||
logging.info("开始数据处理流程")
|
||||
|
||||
try:
|
||||
if not self.select_spec_file():
|
||||
return
|
||||
if not self.select_data_folder():
|
||||
return
|
||||
if not self.load_spec_data():
|
||||
return
|
||||
if not self.scan_data_files():
|
||||
return
|
||||
if not self.process_data():
|
||||
# 原来的 error message box 已移除,改为日志输出
|
||||
logging.error("数据处理失败,请检查日志信息")
|
||||
return
|
||||
self.save_to_excel()
|
||||
|
||||
except Exception as e:
|
||||
logging.error(f"处理流程出错: {e}")
|
||||
# 原来的 error message box 已移除,改为日志输出
|
||||
logging.error(f"处理过程中出现错误:\n{e}")
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
processor = DataProcessor()
|
||||
processor.run()
|
||||
Reference in New Issue
Block a user