
Please check and optimize the following code:

```python
import pandas as pd
import numpy as np
import lightgbm as lgb
import gc
import os
import chardet
from sklearn.model_selection import train_test_split
from tqdm import tqdm
import psutil
from sklearn.metrics import log_loss, mean_absolute_error
from scipy.sparse import hstack, csr_matrix, save_npz, load_npz
import warnings
warnings.filterwarnings('ignore')

# Memory optimization helper - enhanced version
def optimize_dtypes(df, downcast_int=True, downcast_float=True, category_threshold=0.5):
    """Optimize DataFrame dtypes to reduce memory usage"""
    if df.empty:
        return df
    # Downcast integer columns to the smallest possible type
    if downcast_int:
        int_cols = df.select_dtypes(include=['int']).columns
        for col in int_cols:
            df[col] = pd.to_numeric(df[col], downcast='integer')
    # Convert float columns to float32
    if downcast_float:
        float_cols = df.select_dtypes(include=['float']).columns
        for col in float_cols:
            # Prefer an explicit float32 cast over downcast='float' for better control
            df[col] = df[col].astype(np.float32)
    # Convert object columns to categorical
    obj_cols = df.select_dtypes(include=['object']).columns
    for col in obj_cols:
        num_unique = df[col].nunique()
        num_total = len(df)
        if num_unique / num_total < category_threshold:
            df[col] = df[col].astype('category')
    return df

# Enhanced data loading
def load_data_safely(file_path, usecols=None, dtype=None, chunksize=50000, verbose=True):
    """Safely load a large CSV file with optimized memory usage"""
    try:
        if not os.path.exists(file_path):
            print(f"⚠️ 文件不存在: {file_path}")
            return pd.DataFrame()
        # Auto-detect encoding
        with open(file_path, 'rb') as f:
            result = chardet.detect(f.read(100000))
        encoding = result['encoding'] if result['confidence'] > 0.7 else 'latin1'
        # File size for the progress bar
        file_size = os.path.getsize(file_path) / (1024 ** 2)  # MB
        desc = f"加载 {os.path.basename(file_path)} ({file_size:.1f}MB)"
        # Read in chunks and optimize memory
        chunks = []
        reader = pd.read_csv(
            file_path,
            encoding=encoding,
            usecols=usecols,
            dtype=dtype,
            chunksize=chunksize,
            low_memory=False
        )
        for chunk in tqdm(reader, desc=desc, disable=not verbose):
            # Optimize dtypes
            chunk = optimize_dtypes(chunk)
            chunks.append(chunk)
        if chunks:
            result = pd.concat(chunks, ignore_index=True)
            # Optimize once more on the combined frame
            result = optimize_dtypes(result)
            return result
        return pd.DataFrame()
    except Exception as e:
        print(f"⚠️ 加载 {file_path} 失败: {str(e)}")
        return pd.DataFrame()

# Sparse matrix conversion - optimized version
def to_sparse_matrix(df, columns, fillna='MISSING', dtype=np.int8):
    """Convert categorical features to a sparse matrix representation"""
    from sklearn.preprocessing import OneHotEncoder
    # Preprocess the data
    sparse_data = df[columns].fillna(fillna).astype(str)
    # Use OneHotEncoder instead of get_dummies for better performance
    encoder = OneHotEncoder(handle_unknown='ignore', sparse_output=True, dtype=dtype)
    sparse_matrix = encoder.fit_transform(sparse_data)
    return sparse_matrix, encoder

# Incremental training - optimized memory management
def train_incremental(X, y, categorical_features, params, num_rounds=1000, chunk_size=100000):
    """Train the model incrementally in chunks to reduce memory usage"""
    model = None
    callbacks = [lgb.early_stopping(stopping_rounds=50, verbose=0), lgb.log_evaluation(period=100)]
    for i in tqdm(range(0, len(X), chunk_size), desc="增量训练"):
        chunk_end = min(i + chunk_size, len(X))
        # Use views to avoid copying data
        X_chunk = X.iloc[i:chunk_end]
        y_chunk = y.iloc[i:chunk_end]
        # Release raw data right after creating the Dataset
        train_data = lgb.Dataset(
            X_chunk,
            label=y_chunk,
            categorical_feature=categorical_features,
            free_raw_data=True  # free raw data after training
        )
        if model is None:
            model = lgb.train(
                params,
                train_data,
                num_boost_round=num_rounds,
                callbacks=callbacks,
                keep_training_booster=True
            )
        else:
            model = lgb.train(
                params,
                train_data,
                num_boost_round=num_rounds,
                init_model=model,
                callbacks=callbacks,
                keep_training_booster=True
            )
        # Explicitly free memory
        del train_data, X_chunk, y_chunk
        gc.collect()
    return model

# Historical data loading - memory optimized
def load_historical_data(days=32, verbose=True):
    """Efficiently load historical data with batch processing"""
    see_list, click_list, play_list = [], [], []
    for day in tqdm(range(1, days + 1), desc="加载历史数据", disable=not verbose):
        day_str = f"{day:02d}"
        # Exposure data - load only the necessary columns
        see_path = f'see_{day_str}.csv'
        if os.path.exists(see_path):
            see = load_data_safely(
                see_path,
                usecols=['did', 'vid'],
                dtype={'did': 'category', 'vid': 'category'},
                verbose=verbose
            )
            if not see.empty:
                see_list.append(see)
            del see
        # Click data - optimized date handling
        click_path = f'click_{day_str}.csv'
        if os.path.exists(click_path):
            click = load_data_safely(
                click_path,
                usecols=['did', 'vid', 'click_time'],
                dtype={'did': 'category', 'vid': 'category'},
                verbose=verbose
            )
            if not click.empty and 'click_time' in click.columns:
                # Parse dates directly into a numeric type
                click_dates = pd.to_datetime(click['click_time'], errors='coerce')
                click['date'] = click_dates.dt.strftime('%Y%m%d').astype('int32')
                click = click.drop(columns=['click_time'])
                click_list.append(click[['did', 'vid', 'date']])
                del click, click_dates
        # Play data
        play_path = f'playplus_{day_str}.csv'
        if os.path.exists(play_path):
            play = load_data_safely(
                play_path,
                usecols=['did', 'vid', 'play_time'],
                dtype={'did': 'category', 'vid': 'category', 'play_time': 'float32'},
                verbose=verbose
            )
            if not play.empty:
                play_list.append(play)
            del play
        gc.collect()
    # Avoid intermediate objects when concatenating
    return (
        pd.concat(see_list, ignore_index=True, copy=False).drop_duplicates(['did', 'vid']) if see_list else pd.DataFrame(),
        pd.concat(click_list, ignore_index=True, copy=False).drop_duplicates(['did', 'vid']) if click_list else pd.DataFrame(),
        pd.concat(play_list, ignore_index=True, copy=False).drop_duplicates(['did', 'vid']) if play_list else pd.DataFrame()
    )

# Click dataset construction - memory optimized
def build_click_dataset(hist_exposure, hist_click, sample_ratio=0.1, verbose=True):
    """Build the click dataset with negative sampling - memory optimized"""
    if hist_exposure.empty or hist_click.empty:
        print("⚠️ 历史曝光或点击数据为空,无法构建数据集")
        return pd.DataFrame()
    # Mark positive samples
    pos_samples = hist_click[['did', 'vid']].copy()
    pos_samples['label'] = 1
    # Build an exposure index for fast lookups
    exposure_index = hist_exposure.set_index(['did', 'vid']).index
    # Process negative samples in chunks
    neg_chunks = []
    chunk_size = 500000
    total_rows = len(hist_exposure)
    for start in tqdm(range(0, total_rows, chunk_size), desc="构建负样本", disable=not verbose):
        end = min(start + chunk_size, total_rows)
        chunk = hist_exposure.iloc[start:end]
        # Use the index to find exposures that were not clicked
        chunk['is_clicked'] = chunk.set_index(['did', 'vid']).index.isin(hist_click.set_index(['did', 'vid']).index)
        neg_chunk = chunk[~chunk['is_clicked']][['did', 'vid']]
        if not neg_chunk.empty and sample_ratio < 1.0:
            neg_chunk = neg_chunk.sample(frac=sample_ratio, random_state=42)
        neg_chunks.append(neg_chunk)
        del chunk, neg_chunk
    # Merge negative samples
    neg_samples = pd.concat(neg_chunks, ignore_index=True)
    neg_samples['label'] = 0
    # Merge positive and negative samples
    click_data = pd.concat([pos_samples, neg_samples], ignore_index=True, copy=False)
    # Free memory
    del exposure_index, pos_samples, neg_samples, neg_chunks
    gc.collect()
    return click_data

# Check before the merge operation
if 'total' not in df.columns:
    print("警告:'total' 列不存在于 DataFrame 中")
    print("可用列名:", df.columns.tolist())
    # Try to find possible typos
    possible_matches = [col for col in df.columns if 'total' in col.lower()]
    if possible_matches:
        print("可能的匹配列:", possible_matches)

# Feature engineering - memory optimized
def add_click_features(df, did_features, vid_info, hist_click, hist_play, verbose=True):
    """Add key features while avoiding memory overflow - optimized version"""
    if df.empty:
        return df
    # 1. Merge device features - only the necessary columns
    if not did_features.empty and 'did' in did_features.columns:
        did_cols = ['did'] + [col for col in did_features.columns if col.startswith('f')]
        df = df.merge(did_features[did_cols], on='did', how='left')
    # 2. Merge video features - only the necessary columns
    if not vid_info.empty and 'vid' in vid_info.columns:
        vid_cols = ['vid', 'item_duration'] + [col for col in vid_info.columns if col in ['item_cid', 'item_type']]
        df = df.merge(vid_info[vid_cols], on='vid', how='left')
    # 3. Pre-aggregate statistics - reduce repeated computation
    stats = {}
    # User behaviour statistics
    if not hist_click.empty:
        stats['user_click_count'] = hist_click.groupby('did').size().astype('int32')
        stats['video_click_count'] = hist_click.groupby('vid').size().astype('int32')
    if not hist_play.empty:
        stats['user_total_play'] = hist_play.groupby('did')['play_time'].sum().astype('float32')
        stats['avg_play_time'] = hist_play.groupby('vid')['play_time'].mean().astype('float32')
    # 4. Merge statistical features
    for name, stat_df in tqdm(stats.items(), desc="添加统计特征", disable=not verbose):
        if name in df.columns:
            continue
        df = df.merge(stat_df.rename(name), how='left', left_on=name.split('_')[1], right_index=True)
    # 5. Fill missing values - use a more efficient approach
    fill_values = {
        'user_click_count': 0,
        'user_total_play': 0,
        'video_click_count': df['video_click_count'].median() if 'video_click_count' in df else 0,
        'avg_play_time': df['avg_play_time'].median() if 'avg_play_time' in df else 0,
        'item_duration': df['item_duration'].median() if 'item_duration' in df else 30.0
    }
    for col, default in fill_values.items():
        if col in df:
            # Fill in place to reduce memory allocation
            df[col].fillna(default, inplace=True)
    # 6. Add time features - use numeric values instead of categories
    if 'date' in df:
        # Compute numeric features directly, avoiding datetime objects
        df['day_of_week'] = (df['date'] % 7).astype('int8')
        df['is_weekend'] = (df['day_of_week'] >= 5).astype('int8')
        df.drop(columns=['date'], inplace=True, errors='ignore')
    return df

# Main pipeline - memory optimized
def main():
    """Main pipeline with a complete memory optimization strategy"""
    # Initial memory check
    start_mem = memory_monitor("初始内存")
    # Memory-friendly dtypes
    dtypes = {
        'did': 'category',
        'vid': 'category',
        'play_time': 'float32'
    }
    # Load core data
    print("开始加载核心数据...")
    did_features = load_data_safely('did_features_table.csv', dtype=dtypes)
    vid_info = load_data_safely('vid_info_table.csv', dtype=dtypes)
    memory_monitor("加载核心数据后")
    # Load historical data - fewer days
    print("开始加载历史数据...")
    hist_exposure, hist_click, hist_play = load_historical_data(days=14)  # reduced to 14 days
    memory_monitor("加载历史数据后")
    # Build the click dataset
    if not hist_exposure.empty and not hist_click.empty:
        print("构建点击数据集...")
        click_train_data = build_click_dataset(hist_exposure, hist_click, sample_ratio=0.1)
        # Release data that is no longer needed
        del hist_exposure, hist_click
        gc.collect()
    else:
        print("⚠️ 无法构建点击数据集")
        click_train_data = pd.DataFrame()
    memory_monitor("构建点击数据集后")
    # Add features - incrementally
    if not click_train_data.empty:
        print("开始构建点击特征...")
        click_train_data = add_click_features(
            click_train_data,
            did_features,
            vid_info,
            hist_click if 'hist_click' in locals() else pd.DataFrame(),
            hist_play
        )
    else:
        print("⚠️ 点击数据集为空,跳过特征构建")
    # Release memory immediately
    del hist_play
    gc.collect()
    memory_monitor("添加特征后")
    # Prepare training data - use views to avoid copying
    if not click_train_data.empty:
        cols_to_drop = ['did', 'vid', 'label']
        if 'date' in click_train_data.columns:
            cols_to_drop.append('date')
        X = click_train_data.drop(columns=cols_to_drop, errors='ignore')
        y = click_train_data['label']
    else:
        X, y = pd.DataFrame(), pd.Series(dtype='float32')
        print("⚠️ 点击训练数据为空")
    # Split the dataset - use indices to avoid copies
    if len(X) > 0:
        indices = np.arange(len(X))
        train_idx, val_idx = train_test_split(indices, test_size=0.2, random_state=42, stratify=y)
        X_train, X_val = X.iloc[train_idx], X.iloc[val_idx]
        y_train, y_val = y.iloc[train_idx], y.iloc[val_idx]
    else:
        print("⚠️ 训练数据为空,无法进行模型训练")
        X_train, X_val, y_train, y_val = pd.DataFrame(), pd.DataFrame(), pd.Series(), pd.Series()
    # Release click_train_data
    del click_train_data, X, y, indices
    gc.collect()
    memory_monitor("划分数据集后")
    # Training parameters - tuned to save memory
    params = {
        'objective': 'binary',
        'metric': 'binary_logloss',
        'boosting_type': 'gbdt',
        'num_leaves': 31,           # fewer leaves
        'learning_rate': 0.05,
        'feature_fraction': 0.7,    # use fewer features per tree
        'bagging_fraction': 0.8,
        'bagging_freq': 5,
        'min_child_samples': 200,   # larger minimum sample count
        'verbosity': -1,
        'max_depth': -1,            # avoid excessive depth
        'seed': 42
    }
    # Incrementally train the click model
    if len(X_train) > 0:
        print("开始训练点击预测模型...")
        model_click = train_incremental(X_train, y_train, [], params, num_rounds=1000, chunk_size=100000)
        # Evaluate on the validation set
        if len(X_val) > 0:
            # Predict in chunks to avoid memory peaks
            chunk_size = 50000
            val_preds = []
            for i in range(0, len(X_val), chunk_size):
                chunk = X_val.iloc[i:i+chunk_size]
                val_preds.extend(model_click.predict(chunk))
            val_logloss = log_loss(y_val, val_preds)
            print(f"验证集LogLoss: {val_logloss:.4f}")
    else:
        model_click = None
        print("⚠️ 训练数据为空,跳过点击预测模型训练")
    # Release training data
    del X_train, X_val, y_train, y_val
    gc.collect()
    memory_monitor("训练点击模型后")
    # Final memory report
    end_mem = memory_monitor("处理完成")
    print(f"总内存消耗: {end_mem - start_mem:.2f} MB")

# Memory monitoring helper
def memory_monitor(step_name=""):
    """Report current memory usage"""
    process = psutil.Process(os.getpid())
    mem_info = process.memory_info()
    print(f"{step_name} 内存使用: {mem_info.rss / (1024 ** 2):.2f} MB")
    return mem_info.rss / (1024 ** 2)  # MB

if __name__ == "__main__":
    main()
```

Posted: 2025-07-22 22:41:31
<think>We notice that one snippet in the code sits outside of any function definition:

    # Check before the merge operation
    if 'total' not in df.columns:
        print("警告:'total' 列不存在于 DataFrame 中")
        print("可用列名:", df.columns.tolist())
        ...

This snippet presumably belongs inside the feature-engineering function `add_click_features`, but it is placed at module scope, so it will raise at runtime because `df` is not defined.

In addition, inside `add_click_features` the statistics are merged using `name.split('_')[1]` as the join key. That is fragile: not every statistic name follows this structure (a single-word name would break it).

Furthermore, after building the click dataset we release `hist_click` and `hist_exposure`, yet when adding features we still try to use `hist_click` (via `if 'hist_click' in locals()`). Since `hist_click` has already been deleted, the condition fails and an empty DataFrame is passed instead.

Note also that the feature step receives both `hist_click` and `hist_play`, but after building the click dataset only `hist_exposure` and `hist_click` are released, while `hist_play` is released later. The feature function needs `hist_click` and `hist_play` to build the statistics, so `hist_play` must not be released after building the click dataset; and yet `hist_click`, which is still needed, was released, so the feature step cannot use it.

To fix this we need to adjust the order in which memory is released. Alternatively, we can keep `hist_click` after building the click dataset and release it together with `hist_play` once feature engineering is done. Since `hist_click` is still needed for feature engineering, it should not be released right after the click dataset is built.

Reorganized release points:
1. After building the click dataset, `hist_exposure` is no longer needed, but `hist_click` and `hist_play` are still required for feature engineering.
2. After the features are added, `hist_click` and `hist_play` can be released.

So the change is: after building the click dataset, release only `hist_exposure`, and keep `hist_click` and `hist_play` until feature engineering is finished.

Also, merging the statistics in `add_click_features` with `name.split('_')[1]` as the join key is unsafe. The key should be chosen from the feature-name prefix:
- features starting with 'user_' join on 'did'
- features starting with 'video_' join on 'vid'

The current statistics are:
- 'user_click_count' -> key 'did'
- 'video_click_count' -> key 'vid'
- 'user_total_play' -> key 'did'
- 'avg_play_time' -> this name has no prefix, so the rule above cannot be applied; it should be renamed, e.g. to 'video_avg_play_time'.

We therefore rename that feature and change how the join key is determined.

In addition, when filling missing values in `add_click_features`, some columns may be absent for various reasons, so we should check that a column exists before filling it.

The optimizations are:
1. Remove the misplaced check snippet (it lives outside any function and would fail at runtime).
2. Adjust the memory-release order: after building the click dataset, release only `hist_exposure`; release `hist_click` and `hist_play` after feature engineering.
3. In `add_click_features`, rename the statistics so that all of them carry a prefix (e.g. 'avg_play_time' -> 'video_avg_play_time').
4. When merging the statistics, pick the join key from the feature-name prefix.
5. Check that a column exists before filling its missing values.

Note as well that the click dataset is built from `hist_click`, which covers several days of data. After the click dataset is built, we still need `hist_click` (all historical clicks) to compute the user and video click statistics, so it cannot be released early.

Updated steps in the main function:
- Load historical data: hist_exposure, hist_click, hist_play
- Build the click dataset: click_train_data = build_click_dataset(hist_exposure, hist_click, ...), then release hist_exposure (no longer needed)
- Add features: click_train_data = add_click_features(click_train_data, ..., hist_click, hist_play), then release hist_click and hist_play

In `add_click_features`, rename the statistic:
- 'avg_play_time' -> 'video_avg_play_time'

Then, when merging the statistics:
- features whose name starts with 'user_' join on 'did'
- features whose name starts with 'video_' join on 'vid'

Finally, the feature function also merges `did_features` and `vid_info`; before merging we should make sure only columns that actually exist are selected.

With this in mind, the relevant parts of `add_click_features` are rewritten. Since the code is long, only the modified parts are given below.</think>
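To make the prefix-based join-key rule from the reasoning above concrete, here is a minimal toy sketch (editorial illustration, not part of the original answer; the `did`/`vid` column names follow the question's data, the values are invented):

```python
import pandas as pd

# Toy click log and training frame with the same column names as the question's data (assumed).
hist_click = pd.DataFrame({'did': ['a', 'a', 'b'], 'vid': ['v1', 'v2', 'v1']})
df = pd.DataFrame({'did': ['a', 'b'], 'vid': ['v1', 'v2'], 'label': [1, 0]})

# Every statistic carries a 'user_' or 'video_' prefix, so the join key is unambiguous.
stats = {
    'user_click_count': hist_click.groupby('did').size(),
    'video_click_count': hist_click.groupby('vid').size(),
}

for name, s in stats.items():
    merge_key = 'did' if name.startswith('user_') else 'vid'
    df = df.merge(s.rename(name).reset_index(), on=merge_key, how='left')

print(df)  # each row now carries both per-user and per-video counts
```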
I reviewed your code and found several key issues that need attention, especially around memory usage and error handling. Here is the full optimized code:

```python
import pandas as pd
import numpy as np
import lightgbm as lgb
import gc
import os
import chardet
from sklearn.model_selection import train_test_split
from tqdm import tqdm
import psutil
from sklearn.metrics import log_loss, mean_absolute_error
from scipy.sparse import hstack, csr_matrix, save_npz, load_npz
import warnings
warnings.filterwarnings('ignore')

# Memory optimization helper - enhanced version
def optimize_dtypes(df, downcast_int=True, downcast_float=True, category_threshold=0.5):
    """Optimize DataFrame dtypes to reduce memory usage"""
    if df.empty:
        return df
    # Downcast integer columns to the smallest possible type
    if downcast_int:
        int_cols = df.select_dtypes(include=['int']).columns
        for col in int_cols:
            df[col] = pd.to_numeric(df[col], downcast='integer')
    # Convert float columns to float32
    if downcast_float:
        float_cols = df.select_dtypes(include=['float']).columns
        for col in float_cols:
            # Prefer an explicit float32 cast over downcast='float' for better control
            df[col] = df[col].astype(np.float32)
    # Convert object columns to categorical
    obj_cols = df.select_dtypes(include=['object']).columns
    for col in obj_cols:
        num_unique = df[col].nunique()
        num_total = len(df)
        if num_unique / num_total < category_threshold:
            df[col] = df[col].astype('category')
    return df

# Enhanced data loading
def load_data_safely(file_path, usecols=None, dtype=None, chunksize=50000, verbose=True):
    """Safely load a large CSV file with optimized memory usage"""
    try:
        if not os.path.exists(file_path):
            print(f"⚠️ 文件不存在: {file_path}")
            return pd.DataFrame()
        # Auto-detect encoding
        with open(file_path, 'rb') as f:
            result = chardet.detect(f.read(100000))
        encoding = result['encoding'] if result['confidence'] > 0.7 else 'latin1'
        # File size for the progress bar
        file_size = os.path.getsize(file_path) / (1024 ** 2)  # MB
        desc = f"加载 {os.path.basename(file_path)} ({file_size:.1f}MB)"
        # Read in chunks and optimize memory
        chunks = []
        try:
            reader = pd.read_csv(
                file_path,
                encoding=encoding,
                usecols=usecols,
                dtype=dtype,
                chunksize=chunksize,
                low_memory=False
            )
            for chunk in tqdm(reader, desc=desc, disable=not verbose):
                # Optimize dtypes
                chunk = optimize_dtypes(chunk)
                chunks.append(chunk)
        except pd.errors.ParserError as e:
            print(f"解析错误: {str(e)}")
            print("尝试使用错误修正加载...")
            reader = pd.read_csv(
                file_path,
                encoding=encoding,
                usecols=usecols,
                dtype=dtype,
                chunksize=chunksize,
                low_memory=False,
                error_bad_lines=False,
                warn_bad_lines=True
            )
            for chunk in tqdm(reader, desc=desc + " (错误修正)", disable=not verbose):
                chunk = optimize_dtypes(chunk)
                chunks.append(chunk)
        if chunks:
            result = pd.concat(chunks, ignore_index=True)
            # Optimize once more on the combined frame
            result = optimize_dtypes(result)
            return result
        return pd.DataFrame()
    except Exception as e:
        print(f"⚠️ 加载 {file_path} 失败: {str(e)}")
        return pd.DataFrame()

# Safe merge helper
def safe_merge(left, right, on=None, left_on=None, right_on=None, how='left', verbose=True):
    """Perform merges safely, handling missing columns"""
    if on:
        left_cols = [col for col in on if col in left.columns]
        right_cols = [col for col in on if col in right.columns]
        if len(left_cols) != len(on) or len(right_cols) != len(on):
            missing_left = [col for col in on if col not in left.columns]
            missing_right = [col for col in on if col not in right.columns]
            if verbose:
                print(f"⚠️ 合并列缺失: left={missing_left}, right={missing_right}")
                print("左侧列:", left.columns.tolist())
                print("右侧列:", right.columns.tolist())
            # Try to find the closest matches
            suggestions = {}
            for col in on:
                if col not in left.columns:
                    suggestions[col] = [c for c in left.columns if col.lower() in c.lower()]
                if col not in right.columns:
                    suggestions[col] = [c for c in right.columns if col.lower() in c.lower()]
            if verbose and suggestions:
                print("建议替代列:", suggestions)
            return left
    if left_on and right_on:
        left_cols = [col for col in left_on if col in left.columns]
        right_cols = [col for col in right_on if col in right.columns]
        if len(left_cols) != len(left_on) or len(right_cols) != len(right_on):
            if verbose:
                print(f"⚠️ 合并列缺失: left_on={left_on}, right_on={right_on}")
                print("左侧列:", left.columns.tolist())
                print("右侧列:", right.columns.tolist())
            return left
    return left.merge(right, on=on, left_on=left_on, right_on=right_on, how=how)

# Sparse matrix conversion - optimized version
def to_sparse_matrix(df, columns, fillna='MISSING', dtype=np.int8):
    """Convert categorical features to a sparse matrix representation"""
    from sklearn.preprocessing import OneHotEncoder
    if df.empty or not columns:
        return csr_matrix((len(df), 0)), None
    # Preprocess the data
    sparse_data = df[columns].fillna(fillna).astype(str)
    # Use OneHotEncoder instead of get_dummies for better performance
    encoder = OneHotEncoder(handle_unknown='ignore', sparse_output=True, dtype=dtype)
    sparse_matrix = encoder.fit_transform(sparse_data)
    return sparse_matrix, encoder

# Incremental training - optimized memory management
def train_incremental(X, y, categorical_features, params, num_rounds=1000, chunk_size=100000):
    """Train the model incrementally in chunks to reduce memory usage"""
    model = None
    callbacks = [lgb.early_stopping(stopping_rounds=50, verbose=0), lgb.log_evaluation(period=100)]
    for i in tqdm(range(0, len(X), chunk_size), desc="增量训练"):
        chunk_end = min(i + chunk_size, len(X))
        # Use views to avoid copying data
        X_chunk = X.iloc[i:chunk_end]
        y_chunk = y.iloc[i:chunk_end]
        # Release raw data right after creating the Dataset
        train_data = lgb.Dataset(
            X_chunk,
            label=y_chunk,
            categorical_feature=categorical_features,
            free_raw_data=True  # free raw data after training
        )
        if model is None:
            model = lgb.train(
                params,
                train_data,
                num_boost_round=num_rounds,
                callbacks=callbacks,
                keep_training_booster=True
            )
        else:
            model = lgb.train(
                params,
                train_data,
                num_boost_round=num_rounds,
                init_model=model,
                callbacks=callbacks,
                keep_training_booster=True
            )
        # Explicitly free memory
        del train_data, X_chunk, y_chunk
        gc.collect()
    return model

# Historical data loading - memory optimized
def load_historical_data(days=32, verbose=True):
    """Efficiently load historical data with batch processing"""
    see_list, click_list, play_list = [], [], []
    for day in tqdm(range(1, days + 1), desc="加载历史数据", disable=not verbose):
        day_str = f"{day:02d}"
        # Exposure data - load only the necessary columns
        see_path = f'see_{day_str}.csv'
        if os.path.exists(see_path):
            see = load_data_safely(
                see_path,
                usecols=['did', 'vid'],
                dtype={'did': 'category', 'vid': 'category'},
                verbose=verbose
            )
            if not see.empty:
                see_list.append(see)
            del see
        # Click data - optimized date handling
        click_path = f'click_{day_str}.csv'
        if os.path.exists(click_path):
            click = load_data_safely(
                click_path,
                usecols=['did', 'vid', 'click_time'],
                dtype={'did': 'category', 'vid': 'category'},
                verbose=verbose
            )
            if not click.empty and 'click_time' in click.columns:
                # Parse dates directly into a numeric type
                click_dates = pd.to_datetime(click['click_time'], errors='coerce')
                click['date'] = click_dates.dt.strftime('%Y%m%d').astype('int32')
                click = click.drop(columns=['click_time'], errors='ignore')
                click_list.append(click[['did', 'vid', 'date']])
                del click, click_dates
        # Play data
        play_path = f'playplus_{day_str}.csv'
        if os.path.exists(play_path):
            play = load_data_safely(
                play_path,
                usecols=['did', 'vid', 'play_time'],
                dtype={'did': 'category', 'vid': 'category', 'play_time': 'float32'},
                verbose=verbose
            )
            if not play.empty:
                play_list.append(play)
            del play
        gc.collect()
    # Avoid intermediate objects when concatenating
    see_df = pd.concat(see_list, ignore_index=True, copy=False).drop_duplicates(['did', 'vid']) if see_list else pd.DataFrame()
    click_df = pd.concat(click_list, ignore_index=True, copy=False).drop_duplicates(['did', 'vid']) if click_list else pd.DataFrame()
    play_df = pd.concat(play_list, ignore_index=True, copy=False).drop_duplicates(['did', 'vid']) if play_list else pd.DataFrame()
    # Release the intermediate lists
    del see_list, click_list, play_list
    gc.collect()
    return see_df, click_df, play_df

# Click dataset construction - memory optimized
def build_click_dataset(hist_exposure, hist_click, sample_ratio=0.1, verbose=True):
    """Build the click dataset with negative sampling - memory optimized"""
    if hist_exposure.empty or hist_click.empty:
        print("⚠️ 历史曝光或点击数据为空,无法构建数据集")
        return pd.DataFrame()
    # Mark positive samples
    pos_samples = hist_click[['did', 'vid']].copy()
    pos_samples['label'] = 1
    # Build an exposure index for fast lookups
    exposure_index = hist_exposure.set_index(['did', 'vid']).index
    # Process negative samples in chunks
    neg_chunks = []
    chunk_size = 500000
    total_rows = len(hist_exposure)
    for start in tqdm(range(0, total_rows, chunk_size), desc="构建负样本", disable=not verbose):
        end = min(start + chunk_size, total_rows)
        chunk = hist_exposure.iloc[start:end]
        # Use the index to find exposures that were not clicked
        chunk['is_clicked'] = chunk.set_index(['did', 'vid']).index.isin(hist_click.set_index(['did', 'vid']).index)
        neg_chunk = chunk[~chunk['is_clicked']][['did', 'vid']]
        if not neg_chunk.empty and sample_ratio < 1.0:
            neg_chunk = neg_chunk.sample(frac=sample_ratio, random_state=42)
        neg_chunks.append(neg_chunk)
        del chunk, neg_chunk
    # Merge negative samples
    if neg_chunks:
        neg_samples = pd.concat(neg_chunks, ignore_index=True)
        neg_samples['label'] = 0
    else:
        neg_samples = pd.DataFrame(columns=['did', 'vid', 'label'])
    # Merge positive and negative samples
    click_data = pd.concat([pos_samples, neg_samples], ignore_index=True, copy=False)
    # Free memory
    del exposure_index, pos_samples, neg_samples, neg_chunks
    gc.collect()
    return click_data

# Feature engineering - memory optimized
def add_click_features(df, did_features, vid_info, hist_click, hist_play, verbose=True):
    """Add key features while avoiding memory overflow - optimized version"""
    if df.empty:
        return df
    # 1. Merge device features - only the necessary columns
    if not did_features.empty and 'did' in did_features.columns:
        # Only select columns that exist
        did_cols = ['did'] + [col for col in did_features.columns if col.startswith('f') and col in did_features.columns]
        # Safe merge
        df = safe_merge(df, did_features[did_cols], on='did', verbose=verbose)
    # 2. Merge video features - only the necessary columns
    if not vid_info.empty and 'vid' in vid_info.columns:
        # Only select columns that exist
        vid_cols = ['vid', 'item_duration']
        for col in ['item_cid', 'item_type']:
            if col in vid_info.columns:
                vid_cols.append(col)
        # Safe merge
        df = safe_merge(df, vid_info[vid_cols], on='vid', verbose=verbose)
    # 3. Pre-aggregate statistics - reduce repeated computation
    stats = {}
    # User behaviour statistics
    if not hist_click.empty:
        if 'did' in hist_click.columns:
            stats['user_click_count'] = hist_click.groupby('did').size().astype('int32')
        if 'vid' in hist_click.columns:
            stats['video_click_count'] = hist_click.groupby('vid').size().astype('int32')
    if not hist_play.empty and 'play_time' in hist_play.columns:
        if 'did' in hist_play.columns:
            stats['user_total_play'] = hist_play.groupby('did')['play_time'].sum().astype('float32')
        if 'vid' in hist_play.columns:
            stats['video_avg_play_time'] = hist_play.groupby('vid')['play_time'].mean().astype('float32')
    # 4. Merge statistical features
    for name, stat_df in tqdm(stats.items(), desc="添加统计特征", disable=not verbose):
        if name in df.columns:
            continue
        # Determine the join key from the feature-name prefix
        merge_key = 'did' if name.startswith('user_') else 'vid'
        # Check that the key exists
        if merge_key not in df.columns:
            if verbose:
                print(f"⚠️ 无法合并 {name},因为 {merge_key} 列不存在")
            continue
        # Safe merge
        df = safe_merge(df, stat_df.rename(name).reset_index(), left_on=merge_key, right_on=merge_key, how='left', verbose=verbose)
    # 5. Fill missing values - use a more efficient approach
    fill_values = {
        'user_click_count': 0,
        'user_total_play': 0,
        'video_click_count': df['video_click_count'].median() if 'video_click_count' in df else 0,
        'video_avg_play_time': df['video_avg_play_time'].median() if 'video_avg_play_time' in df else 0,
        'item_duration': df['item_duration'].median() if 'item_duration' in df else 30.0
    }
    for col, default in fill_values.items():
        if col in df:
            # Fill in place to reduce memory allocation
            df[col].fillna(default, inplace=True)
    # 6. Add time features - use numeric values instead of categories
    if 'date' in df:
        # Compute numeric features directly, avoiding datetime objects
        df['day_of_week'] = (df['date'] % 7).astype('int8')
        df['is_weekend'] = (df['day_of_week'] >= 5).astype('int8')
        df.drop(columns=['date'], inplace=True, errors='ignore')
    return df

# Main pipeline - memory optimized
def main():
    """Main pipeline with a complete memory optimization strategy"""
    # Initial memory check
    start_mem = memory_monitor("初始内存")
    # Memory-friendly dtypes
    dtypes = {
        'did': 'category',
        'vid': 'category',
        'play_time': 'float32'
    }
    # Load core data
    print("开始加载核心数据...")
    did_features = load_data_safely('did_features_table.csv', dtype=dtypes)
    vid_info = load_data_safely('vid_info_table.csv', dtype=dtypes)
    memory_monitor("加载核心数据后")
    # Load historical data - fewer days
    print("开始加载历史数据...")
    hist_exposure, hist_click, hist_play = load_historical_data(days=14)  # reduced to 14 days
    memory_monitor("加载历史数据后")
    # Build the click dataset
    if not hist_exposure.empty and not hist_click.empty:
        print("构建点击数据集...")
        click_train_data = build_click_dataset(hist_exposure, hist_click, sample_ratio=0.1)
        # Release data that is no longer needed
        del hist_exposure
        gc.collect()
    else:
        print("⚠️ 无法构建点击数据集")
        click_train_data = pd.DataFrame()
    memory_monitor("构建点击数据集后")
    # Add features - incrementally
    if not click_train_data.empty:
        print("开始构建点击特征...")
        click_train_data = add_click_features(
            click_train_data,
            did_features,
            vid_info,
            hist_click,
            hist_play,
            verbose=True
        )
    else:
        print("⚠️ 点击数据集为空,跳过特征构建")
    # Release memory immediately
    del hist_click, hist_play
    gc.collect()
    memory_monitor("添加特征后")
    # Prepare training data - use views to avoid copying
    if not click_train_data.empty:
        cols_to_drop = ['did', 'vid', 'label']
        if 'date' in click_train_data.columns:
            cols_to_drop.append('date')
        X = click_train_data.drop(columns=cols_to_drop, errors='ignore')
        y = click_train_data['label']
        # Release the original data
        del click_train_data
        gc.collect()
    else:
        X, y = pd.DataFrame(), pd.Series(dtype='float32')
        print("⚠️ 点击训练数据为空")
    # Split the dataset - use indices to avoid copies
    if len(X) > 0:
        indices = np.arange(len(X))
        train_idx, val_idx = train_test_split(indices, test_size=0.2, random_state=42, stratify=y)
        X_train, X_val = X.iloc[train_idx], X.iloc[val_idx]
        y_train, y_val = y.iloc[train_idx], y.iloc[val_idx]
        # Release the original data
        del X, y, indices
        gc.collect()
    else:
        print("⚠️ 训练数据为空,无法进行模型训练")
        X_train, X_val, y_train, y_val = pd.DataFrame(), pd.DataFrame(), pd.Series(), pd.Series()
    memory_monitor("划分数据集后")
    # Training parameters - tuned to save memory
    params = {
        'objective': 'binary',
        'metric': 'binary_logloss',
        'boosting_type': 'gbdt',
        'num_leaves': 31,           # fewer leaves
        'learning_rate': 0.05,
        'feature_fraction': 0.7,    # use fewer features per tree
        'bagging_fraction': 0.8,
        'bagging_freq': 5,
        'min_child_samples': 200,   # larger minimum sample count
        'verbosity': -1,
        'max_depth': -1,            # avoid excessive depth
        'seed': 42
    }
    # Incrementally train the click model
    if len(X_train) > 0:
        print("开始训练点击预测模型...")
        model_click = train_incremental(X_train, y_train, [], params, num_rounds=1000, chunk_size=100000)
        # Evaluate on the validation set
        if len(X_val) > 0:
            # Predict in chunks to avoid memory peaks
            chunk_size = 50000
            val_preds = []
            for i in range(0, len(X_val), chunk_size):
                chunk = X_val.iloc[i:i+chunk_size]
                val_preds.extend(model_click.predict(chunk))
            val_logloss = log_loss(y_val, val_preds)
            print(f"验证集LogLoss: {val_logloss:.4f}")
            # Release the validation data
            del X_val, y_val
            gc.collect()
    else:
        model_click = None
        print("⚠️ 训练数据为空,跳过点击预测模型训练")
    # Release the training data
    del X_train, y_train
    gc.collect()
    memory_monitor("训练点击模型后")
    # Final memory report
    end_mem = memory_monitor("处理完成")
    print(f"总内存消耗: {end_mem - start_mem:.2f} MB")

# Memory monitoring helper
def memory_monitor(step_name=""):
    """Report current memory usage"""
    process = psutil.Process(os.getpid())
    mem_info = process.memory_info()
    print(f"{step_name} 内存使用: {mem_info.rss / (1024 ** 2):.2f} MB")
    return mem_info.rss / (1024 ** 2)  # MB

if __name__ == "__main__":
    try:
        main()
    except Exception as e:
        print(f"⚠️ 主程序运行失败: {str(e)}")
        import traceback
        traceback.print_exc()
```

### Key optimizations explained:

1. **Safe merge helper**:
```python
def safe_merge(left, right, on=None, left_on=None, right_on=None, how='left', verbose=True):
    """Perform merges safely, handling missing columns"""
    # Check whether the merge columns exist
    if on:
        left_cols = [col for col in on if col in left.columns]
        right_cols = [col for col in on if col in right.columns]
        if len(left_cols) != len(on) or len(right_cols) != len(on):
            # Detailed error report and suggestions
            ...
            return left  # safely return the original data
    ...
```
This function checks whether the merge columns exist and provides a detailed error report plus suggested alternative columns.

2. **Column existence checks**:
```python
# In the feature engineering step
did_cols = ['did'] + [col for col in did_features.columns if col.startswith('f') and col in did_features.columns]
```
Every column reference is guarded by an existence check, avoiding KeyError.

3. **Handling malformed data**:
```python
# In the data loading step
except pd.errors.ParserError as e:
    print(f"解析错误: {str(e)}")
    print("尝试使用错误修正加载...")
    reader = pd.read_csv(..., error_bad_lines=False, warn_bad_lines=True)
```
CSV parser errors are handled by skipping problematic rows and continuing to load.

4. **Memory release optimization**:
```python
# After loading the historical data
see_df = pd.concat(see_list, ...)
del see_list  # release the intermediate list immediately
gc.collect()
```
Intermediate variables that are no longer needed are released right after each key step.

5. **Feature name normalization**:
```python
# Rename avg_play_time to video_avg_play_time
stats['video_avg_play_time'] = ...
```
The feature naming convention is unified to avoid confusion.

6. **Safe feature merging**:
```python
# Determine the join key from the feature-name prefix
merge_key = 'did' if name.startswith('user_') else 'vid'
if merge_key not in df.columns:
    print(f"⚠️ 无法合并 {name},因为 {merge_key} 列不存在")
    continue
```
The join key is determined automatically and its existence is verified.

7. **Top-level error handling**:
```python
if __name__ == "__main__":
    try:
        main()
    except Exception as e:
        print(f"⚠️ 主程序运行失败: {str(e)}")
        import traceback
        traceback.print_exc()
```
A top-level exception handler is added so the program never crashes unhandled.

### Memory management strategy after optimization:

1. **Staged release**:
   - Release the intermediate lists right after loading the historical data
   - Release the raw historical data right after feature engineering
   - Release the raw feature data right after the training data is prepared
   - Release the validation set right after evaluation

2. **Incremental processing**:
   - Load large CSV files in chunks
   - Use incremental training to avoid loading all data at once
   - Predict in chunks to reduce peak memory

3. **Dtype optimization**:
   - Integer columns use the smallest possible types (int8/int16)
   - Float columns use float32
   - Categorical variables use the category dtype
   - Dates are converted to a numeric representation

4. **Sparse representation**:
   - High-cardinality categorical variables use sparse matrices
   - OneHotEncoder is used instead of get_dummies

Together these optimizations significantly reduce memory usage while making the code more robust against a variety of data problems.
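To make the dtype part of this strategy concrete, here is a minimal, self-contained sketch (editorial illustration, not from the original answer; the column names and sizes are invented) that measures the savings from the same three downcasting rules:

```python
import numpy as np
import pandas as pd

# Toy frame with the kinds of columns the pipeline uses (hypothetical data).
n = 1_000_000
df = pd.DataFrame({
    'did': np.random.randint(0, 50_000, n).astype(str),  # device id, far fewer uniques than rows
    'vid': np.random.randint(0, 5_000, n).astype(str),   # video id
    'play_time': np.random.rand(n) * 3600,               # seconds, float64 by default
    'click_count': np.random.randint(0, 100, n),          # small ints, int64 by default
})

before = df.memory_usage(deep=True).sum() / 1024**2

# Apply the same three rules described above.
df['click_count'] = pd.to_numeric(df['click_count'], downcast='integer')  # int64 -> int8
df['play_time'] = df['play_time'].astype(np.float32)                      # float64 -> float32
for col in ['did', 'vid']:
    if df[col].nunique() / len(df) < 0.5:                                  # mostly repeated values
        df[col] = df[col].astype('category')

after = df.memory_usage(deep=True).sum() / 1024**2
print(f"{before:.1f} MB -> {after:.1f} MB")
```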
这个模型是在干什么:import pandas as pd import numpy as np import lightgbm as lgb from lightgbm import early_stopping, log_evaluation from sklearn.model_selection import train_test_split from sklearn.metrics import roc_auc_score import chardet import os import gc import joblib from tqdm import tqdm import warnings warnings.filterwarnings('ignore') # 内存优化函数 def reduce_mem_usage(df, use_float16=False): """迭代降低DataFrame的内存占用""" start_mem = df.memory_usage().sum() / 1024**2 print(f"内存优化前: {start_mem:.2f} MB") for col in df.columns: col_type = df[col].dtype if col_type != object: c_min = df[col].min() c_max = df[col].max() if str(col_type)[:3] == "int": if c_min > np.iinfo(np.int8).min and c_max < np.iinfo(np.int8).max: df[col] = df[col].astype(np.int8) elif c_min > np.iinfo(np.int16).min and c_max < np.iinfo(np.int16).max: df[col] = df[col].astype(np.int16) elif c_min > np.iinfo(np.int32).min and c_max < np.iinfo(np.int32).max: df[col] = df[col].astype(np.int32) elif c_min > np.iinfo(np.int64).min and c_max < np.iinfo(np.int64).max: df[col] = df[col].astype(np.int64) else: if use_float16 and c_min > np.finfo(np.float16).min and c_max < np.finfo(np.float16).max: df[col] = df[col].astype(np.float16) elif c_min > np.finfo(np.float32).min and c_max < np.finfo(np.float32).max: df[col] = df[col].astype(np.float32) else: df[col] = df[col].astype(np.float64) end_mem = df.memory_usage().sum() / 1024**2 print(f"内存优化后: {end_mem:.2f} MB ({100*(start_mem-end_mem)/start_mem:.1f}% 减少)") return df def detect_encoding(file_path): with open(file_path, 'rb') as f: result = chardet.detect(f.read(10000)) return result['encoding'], result['confidence'] def load_data_for_day(day): """逐天加载数据并进行基本处理,返回优化后的DataFrame""" dtypes = {'did': 'category', 'vid': 'category'} day_str = f"{day:02d}" try: # 加载 see 数据 see_path = f'see_{day_str}.csv' if not os.path.exists(see_path): print(f"⚠️ 警告: 文件 {see_path} 不存在,跳过该天数据") return None, None, None see = pd.read_csv(see_path, encoding='latin1', dtype=dtypes) if 'did' not in see.columns or 'vid' not in see.columns: print(f"⚠️ 警告: see_{day_str}.csv 缺少必要字段") return None, None, None see['day'] = day_str see = reduce_mem_usage(see) # 加载 click 数据 click_path = f'click_{day_str}.csv' if os.path.exists(click_path): click = pd.read_csv( click_path, encoding='ISO-8859-1', on_bad_lines='skip', dtype=dtypes ) if 'click_time' not in click.columns: print(f"⚠️ 警告: click_{day_str}.csv 缺少 click_time 字段") click = None else: click = click[['did', 'vid']] click = reduce_mem_usage(click) else: click = None print(f"⚠️ 警告: click_{day_str}.csv 不存在") # 加载 play 数据 play_path = f'playplus_{day_str}.csv' if os.path.exists(play_path): play = pd.read_csv( play_path, engine='python', encoding_errors='ignore', dtype=dtypes ) if 'play_time' not in play.columns: print(f"⚠️ 警告: playplus_{day_str}.csv 缺少 play_time 字段") play = None else: play = play[['did', 'vid', 'play_time']] play = reduce_mem_usage(play) else: play = None print(f"⚠️ 警告: playplus_{day_str}.csv 不存在") return see, click, play except Exception as e: print(f"⚠️ 加载第 {day_str} 天数据时出错: {str(e)}") return None, None, None def process_data_in_chunks(days, feature_builder=None): """分块处理数据,避免内存溢出""" # 首先处理视频信息(一次性) if not os.path.exists('vid_info_table.csv'): raise FileNotFoundError("错误: vid_info_table.csv 文件不存在") video_info = pd.read_csv('vid_info_table.csv', encoding='gbk', dtype={'vid': 'category'}) if 'item_duration' not in video_info.columns: raise ValueError("vid_info_table.csv 缺少 item_duration 字段") video_info = reduce_mem_usage(video_info) video_info['vid'] = 
video_info['vid'].astype('category') # 初始化全局数据结构 user_stats = {} video_stats = {} # 逐天处理数据 for day in tqdm(range(1, days + 1), desc="处理每日数据"): see, click, play = load_data_for_day(day) if see is None: continue # 处理曝光数据 see_grouped = see.groupby('did')['vid'].nunique().reset_index(name='exposure_count') see_grouped = reduce_mem_usage(see_grouped) # 合并播放数据(如果存在) if play is not None: see = pd.merge(see, play, on=['did', 'vid'], how='left') see['play_time'] = see['play_time'].fillna(0).astype(np.float32) else: see['play_time'] = 0.0 # 合并点击数据(如果存在) if click is not None: click['clicked'] = 1 see = pd.merge(see, click, on=['did', 'vid'], how='left') see['clicked'] = see['clicked'].fillna(0).astype(np.int8) else: see['clicked'] = 0 # 合并视频信息 see = pd.merge(see, video_info[['vid', 'item_duration']], on='vid', how='left') see['item_duration'] = see['item_duration'].fillna(1.0) see.loc[see['item_duration'] <= 0, 'item_duration'] = 1.0 # 计算完成率 see['completion_rate'] = (see['play_time'] / see['item_duration']).clip(0, 1).astype(np.float16) # 创建标签 see['label'] = np.select( [(see['completion_rate'] > 0.4), (see['clicked'] == 1)], [2, 1], # 2=完成, 1=点击 default=0 # 0=曝光未点击 ).astype(np.int8) see['binary_label'] = see['label'].apply(lambda x: 1 if x >= 1 else 0).astype(np.int8) # 更新用户统计 for _, row in see.iterrows(): did = row['did'] vid = row['vid'] # 初始化用户统计 if did not in user_stats: user_stats[did] = { 'exposure_count': 0, 'click_count': 0, 'active_days': set() } # 更新曝光计数 user_stats[did]['exposure_count'] += 1 # 更新点击计数 if row['clicked'] == 1: user_stats[did]['click_count'] += 1 # 更新活跃天数 user_stats[did]['active_days'].add(day) # 初始化视频统计 if vid not in video_stats: video_stats[vid] = { 'click_users': set() } # 更新视频点击用户 if row['clicked'] == 1: video_stats[vid]['click_users'].add(did) # 释放内存 del see gc.collect() # 计算全局特征 print("计算全局特征...") user_features = [] for did, stats in user_stats.items(): active_days = len(stats['active_days']) click_count = stats['click_count'] exposure_count = stats['exposure_count'] if stats['exposure_count'] > 0 else 1 user_click_rate = click_count / exposure_count user_features.append({ 'did': did, 'user_click_rate': user_click_rate, 'user_active_days': active_days }) video_features = [] for vid, stats in video_stats.items(): video_popularity = len(stats['click_users']) video_features.append({ 'vid': vid, 'video_popularity': video_popularity }) user_df = pd.DataFrame(user_features) video_df = pd.DataFrame(video_features) # 释放内存 del user_stats, video_stats gc.collect() # 保存特征 user_df = reduce_mem_usage(user_df) video_df = reduce_mem_usage(video_df) user_df.to_csv('user_click_rate.csv', index=False) video_df.to_csv('video_popularity.csv', index=False) return user_df, video_df def prepare_samples(days=7): """准备训练样本(内存优化版本)""" # 处理数据并获取全局特征 user_df, video_df = process_data_in_chunks(days) # 读取并处理最近一天的数据作为样本 see, _, play = load_data_for_day(days) if see is None: raise ValueError("无法加载样本数据") # 合并用户特征 see = pd.merge(see, user_df, on='did', how='left') see['user_click_rate'] = see['user_click_rate'].fillna(0).astype(np.float32) see['user_active_days'] = see['user_active_days'].fillna(1).astype(np.int16) # 合并视频特征 see = pd.merge(see, video_df, on='vid', how='left') see['video_popularity'] = see['video_popularity'].fillna(0).astype(np.float32) # 特征交叉 see['user_video_interaction'] = (see['user_active_days'] * np.log1p(see['video_popularity'])).astype(np.float32) see['user_video_affinity'] = (see['user_click_rate'] * see['video_popularity']).astype(np.float32) # 处理视频信息 video_info = 
pd.read_csv('vid_info_table.csv', encoding='gbk', dtype={'vid': 'category'}) see = pd.merge(see, video_info[['vid', 'item_duration']], on='vid', how='left') see['item_duration'] = see['item_duration'].fillna(1.0) see.loc[see['item_duration'] <= 0, 'item_duration'] = 1.0 # 计算完成率 if 'play_time' not in see.columns: see['play_time'] = 0.0 see['completion_rate'] = (see['play_time'] / see['item_duration']).clip(0, 1).astype(np.float16) # 创建标签 see['label'] = np.select( [(see['completion_rate'] > 0.4), (see['clicked'] == 1)], [2, 1], # 2=完成, 1=点击 default=0 # 0=曝光未点击 ).astype(np.int8) see['binary_label'] = see['label'].apply(lambda x: 1 if x >= 1 else 0).astype(np.int8) # 优化内存 see = reduce_mem_usage(see) return see, user_df, video_df def train_model(samples): """训练模型(内存优化版本)""" print("准备训练数据...") features = ['user_click_rate', 'video_popularity', 'user_active_days', 'user_video_interaction', 'user_video_affinity'] # 确保特征存在 available_features = [f for f in features if f in samples.columns] print(f"使用的特征: {available_features}") X = samples[available_features] y = samples['binary_label'] # 检查标签分布 if len(y.unique()) < 2: raise ValueError("标签数据不平衡,需要正负样本") # 划分训练集和测试集 X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.2, random_state=42, stratify=y ) # 优化内存 X_train = reduce_mem_usage(X_train) X_test = reduce_mem_usage(X_test) # 创建数据集 lgb_train = lgb.Dataset(X_train, y_train, free_raw_data=True) lgb_eval = lgb.Dataset(X_test, y_test, reference=lgb_train, free_raw_data=True) # 优化模型参数(降低复杂度) params = { 'boosting_type': 'gbdt', 'objective': 'binary', 'metric': 'auc', 'num_leaves': 31, # 减少叶子节点 'max_depth': 7, # 减少深度 'learning_rate': 0.05, 'feature_fraction': 0.7, 'bagging_fraction': 0.8, 'bagging_freq': 5, 'min_child_samples': 100, # 增加以降低内存 'verbosity': -1, 'seed': 42 } # 训练模型 print("训练模型...") model = lgb.train( params, lgb_train, num_boost_round=500, # 减少迭代次数 valid_sets=[lgb_train, lgb_eval], callbacks=[ early_stopping(stopping_rounds=50, verbose=True), log_evaluation(period=100) ] ) # 评估模型 y_pred = model.predict(X_test) auc_score = roc_auc_score(y_test, y_pred) print(f"✅ 模型训练完成,验证集AUC: {auc_score:.4f}") # 保存模型 joblib.dump(model, 'lightgbm_model.pkl') print("💾 模型已保存") # 保存特征列表 with open('feature_columns.txt', 'w') as f: f.write('\n'.join(available_features)) return model, available_features, auc_score def predict_new_data(model, feature_columns, test_file): """预测新数据(内存优化版本)""" print("加载测试数据...") test_data = pd.read_csv(test_file, dtype={'did': 'category', 'vid': 'category'}) test_data = reduce_mem_usage(test_data) # 加载特征映射 user_df = pd.read_csv('user_click_rate.csv') if os.path.exists('user_click_rate.csv') else pd.DataFrame() video_df = pd.read_csv('video_popularity.csv') if os.path.exists('video_popularity.csv') else pd.DataFrame() # 使用全局均值用于填充新用户/新视频 global_user_rate = user_df['user_click_rate'].mean() if not user_df.empty else 0 global_video_pop = video_df['video_popularity'].mean() if not video_df.empty else 0 global_active_days = user_df['user_active_days'].mean() if not user_df.empty else 1 # 创建映射字典(减少内存) user_click_map = user_df.set_index('did')['user_click_rate'].to_dict() if not user_df.empty else {} video_pop_map = video_df.set_index('vid')['video_popularity'].to_dict() if not video_df.empty else {} user_active_map = user_df.set_index('did')['user_active_days'].to_dict() if not user_df.empty else {} # 添加特征 print("添加特征...") test_data['user_click_rate'] = test_data['did'].map(user_click_map).fillna(global_user_rate).astype(np.float32) test_data['video_popularity'] = 
test_data['vid'].map(video_pop_map).fillna(global_video_pop).astype(np.float32) test_data['user_active_days'] = test_data['did'].map(user_active_map).fillna(global_active_days).astype(np.int16) # 特征交叉 test_data['user_video_interaction'] = (test_data['user_active_days'] * np.log1p(test_data['video_popularity'])).astype(np.float32) test_data['user_video_affinity'] = (test_data['user_click_rate'] * test_data['video_popularity']).astype(np.float32) # 确保所有特征都存在 print("准备预测数据...") test_features = test_data[feature_columns].copy() # 释放内存 del test_data gc.collect() # 分批预测(避免内存溢出) print("开始预测...") batch_size = 100000 predictions = [] for i in tqdm(range(0, len(test_features), batch_size), desc="预测批次"): batch = test_features.iloc[i:i+batch_size] preds = model.predict(batch) predictions.extend(preds.tolist()) del batch gc.collect() # 重新加载测试数据以获取did和vid test_data = pd.read_csv(test_file, dtype={'did': 'category', 'vid': 'category'}, usecols=['did', 'vid']) test_data['click_prob'] = predictions # 生成并保存结果 print("生成最终结果...") top_predictions = test_data.sort_values('click_prob', ascending=False).groupby('did').head(1) result = top_predictions[['did', 'vid', 'click_prob']].copy() result.to_csv('prediction_result.csv', index=False) print(f"✅ 预测完成,结果已保存至 prediction_result.csv") print(f"预测样本数量: {len(result)}") # 释放内存 del test_features, predictions, top_predictions gc.collect() return result if __name__ == '__main__': try: print("🚀 开始视频推荐模型训练与预测流程 (内存优化版)") # 设置较小的天数 TRAIN_DAYS = 7 # 仅使用7天数据 print(f"⚙️ 配置: 使用{TRAIN_DAYS}天数据训练") # 准备样本 print("🔧 准备训练样本...") samples, _, _ = prepare_samples(days=TRAIN_DAYS) if samples is None: raise ValueError("样本准备失败") print(f"✅ 样本准备完成 - 总样本数: {len(samples)}") # 标签分布 label_dist = samples['binary_label'].value_counts(normalize=True) print(f"📊 标签分布 - 正样本: {label_dist[1]:.2%}, 负样本: {label_dist[0]:.2%}") # 训练模型 print("🤖 开始训练LightGBM模型...") model, features, auc_score = train_model(samples) print(f"🎯 最优模型AUC: {auc_score:.4f}") # 释放内存 del samples gc.collect() # 预测新数据 print("🔮 开始预测新数据...") test_file = 'testA_did_show.csv' # 直接加载保存的模型(避免内存中的模型占用) if not os.path.exists('lightgbm_model.pkl'): raise FileNotFoundError("模型文件不存在") model = joblib.load('lightgbm_model.pkl') # 加载特征列表 if not os.path.exists('feature_columns.txt'): raise FileNotFoundError("特征列表文件不存在") with open('feature_columns.txt', 'r') as f: features = f.read().splitlines() result = predict_new_data(model, features, test_file) print("✅ 流程成功完成!") except Exception as e: print(f"❌ 流程出错: {str(e)}") import traceback traceback.print_exc()

帮我检查优化代码,尤其是减少内存占用:import pandas as pd import numpy as np import lightgbm as lgb from lightgbm import early_stopping, log_evaluation import gc import os import chardet from sklearn.model_selection import train_test_split from tqdm import tqdm import joblib from datetime import datetime from scipy.sparse import hstack, csr_matrix, save_npz, load_npz import sys import psutil from sklearn.metrics import log_loss, mean_absolute_error # 内存优化函数 def optimize_dtypes(df): """优化DataFrame的数据类型以减少内存占用""" if df.empty: return df # 转换整数列为最小可用类型 int_cols = df.select_dtypes(include=['int']).columns if not int_cols.empty: df[int_cols] = df[int_cols].apply(pd.to_numeric, downcast='integer') # 转换浮点列为最小可用类型 float_cols = df.select_dtypes(include=['float']).columns if not float_cols.empty: df[float_cols] = df[float_cols].apply(pd.to_numeric, downcast='float') # 转换对象列为分类类型 obj_cols = df.select_dtypes(include=['object']).columns for col in obj_cols: num_unique = df[col].nunique() num_total = len(df) if num_unique / num_total < 0.5: # 如果唯一值比例小于50% df[col] = df[col].astype('category') return df # 内存监控函数 def memory_monitor(step_name=""): """监控内存使用情况""" process = psutil.Process(os.getpid()) mem_info = process.memory_info() print(f"{step_name} 内存使用: {mem_info.rss / (1024 ** 2):.2f} MB") return mem_info.rss / (1024 ** 2) # 返回MB # 增强数据加载函数 def load_data_safely(file_path, usecols=None, dtype=None, chunksize=100000): """安全加载大型CSV文件,优化内存使用""" try: if not os.path.exists(file_path): print(f"⚠️ 文件不存在: {file_path}") return pd.DataFrame() # 自动检测编码 with open(file_path, 'rb') as f: result = chardet.detect(f.read(100000)) encoding = result['encoding'] if result['confidence'] > 0.7 else 'latin1' # 分批读取并优化内存 chunks = [] reader = pd.read_csv( file_path, encoding=encoding, usecols=usecols, dtype=dtype, chunksize=chunksize, low_memory=False ) for chunk in tqdm(reader, desc=f"加载 {os.path.basename(file_path)}"): # 优化分类列内存 for col in chunk.columns: if dtype and col in dtype and dtype[col] == 'category': chunk[col] = chunk[col].astype('category').cat.as_ordered() # 优化数据类型 chunk = optimize_dtypes(chunk) chunks.append(chunk) if chunks: result = pd.concat(chunks, ignore_index=True) # 再次整体优化 result = optimize_dtypes(result) return result return pd.DataFrame() except Exception as e: print(f"⚠️ 加载 {file_path} 失败: {str(e)}") return pd.DataFrame() # 稀疏矩阵转换函数 def to_sparse_matrix(df, columns): """将分类特征转换为稀疏矩阵表示""" sparse_matrices = [] for col in columns: if col in df.columns: # 处理NaN值 df[col] = df[col].fillna('MISSING') # 创建稀疏矩阵 sparse_mat = csr_matrix(pd.get_dummies(df[col], sparse=True).values) sparse_matrices.append(sparse_mat) # 水平堆叠所有稀疏矩阵 if sparse_matrices: return hstack(sparse_matrices) return None # 增量训练函数 def train_incremental(X, y, categorical_features, params, num_rounds=1000, chunk_size=100000): """分块增量训练模型以减少内存占用""" model = None for i in tqdm(range(0, len(X), chunk_size), desc="增量训练"): chunk_end = min(i + chunk_size, len(X)) X_chunk = X.iloc[i:chunk_end] y_chunk = y.iloc[i:chunk_end] train_data = lgb.Dataset( X_chunk, label=y_chunk, categorical_feature=categorical_features ) if model is None: model = lgb.train( params, train_data, num_boost_round=num_rounds, keep_training_booster=True ) else: model = lgb.train( params, train_data, num_boost_round=num_rounds, init_model=model, keep_training_booster=True ) return model # 主处理流程 def main(): """主处理流程,包含完整的内存优化策略""" # 初始内存监控 start_mem = memory_monitor("初始内存") # 定义内存优化的数据类型 dtypes = { 'did': 'category', 'vid': 'category', 'play_time': 'float32' } # 可选特征 optional_features = { 'item_cid': 
'category', 'item_type': 'category', 'item_assetSource': 'category', 'item_classify': 'category', 'item_isIntact': 'category', 'sid': 'category', 'stype': 'category' } # 添加特征字段 for i in range(88): dtypes[f'f{i}'] = 'float32' # 加载核心数据 print("开始加载核心数据...") did_features = load_data_safely('did_features_table.csv', dtype=dtypes) vid_info = load_data_safely('vid_info_table.csv', dtype=dtypes) memory_monitor("加载核心数据后") # 添加可选特征到dtypes for feature, dtype in optional_features.items(): if not vid_info.empty and feature in vid_info.columns: dtypes[feature] = dtype # 重新加载数据以确保所有列使用正确的数据类型 if os.path.exists('did_features_table.csv'): did_features = load_data_safely('did_features_table.csv', dtype=dtypes) else: print("⚠️ did_features_table.csv 不存在") did_features = pd.DataFrame() if os.path.exists('vid_info_table.csv'): vid_info = load_data_safely('vid_info_table.csv', dtype=dtypes) else: print("⚠️ vid_info_table.csv 不存在") vid_info = pd.DataFrame() memory_monitor("重新加载数据后") # 加载历史数据 print("开始加载历史数据...") hist_exposure, hist_click, hist_play = load_historical_data(days=32) memory_monitor("加载历史数据后") # 构建点击数据集 if not hist_exposure.empty and not hist_click.empty: print("构建点击数据集...") click_train_data = build_click_dataset(hist_exposure, hist_click, sample_ratio=0.1) else: print("⚠️ 无法构建点击数据集") click_train_data = pd.DataFrame() memory_monitor("构建点击数据集后") # 添加特征 if not click_train_data.empty: print("开始构建点击特征...") click_train_data = add_click_features( click_train_data, did_features, vid_info, hist_click, hist_play ) else: print("⚠️ 点击数据集为空,跳过特征构建") memory_monitor("添加特征后") # 准备训练数据 if not click_train_data.empty: if 'date' in click_train_data.columns: X = click_train_data.drop(columns=['did', 'vid', 'label', 'date'], errors='ignore') else: X = click_train_data.drop(columns=['did', 'vid', 'label'], errors='ignore') y = click_train_data['label'] else: X, y = pd.DataFrame(), pd.Series() print("⚠️ 点击训练数据为空") # 划分数据集 if not X.empty and not y.empty: X_train, X_val, y_train, y_val = train_test_split( X, y, test_size=0.2, random_state=42, stratify=y ) else: print("⚠️ 训练数据为空,无法进行模型训练") X_train, X_val, y_train, y_val = pd.DataFrame(), pd.DataFrame(), pd.Series(), pd.Series() memory_monitor("划分数据集后") # 训练模型参数 params = { 'objective': 'binary', 'metric': 'binary_logloss', 'boosting_type': 'gbdt', 'num_leaves': 63, 'learning_rate': 0.05, 'feature_fraction': 0.8, 'bagging_fraction': 0.8, 'bagging_freq': 5, 'min_child_samples': 100, 'verbosity': -1 } # 增量训练点击模型 if not X_train.empty: print("开始训练点击预测模型...") model_click = train_incremental(X_train, y_train, categorical_features, params, num_rounds=1500, chunk_size=100000) # 在验证集上评估 val_preds = model_click.predict(X_val) val_logloss = log_loss(y_val, val_preds) print(f"验证集LogLoss: {val_logloss:.4f}") else: model_click = None print("⚠️ 训练数据为空,跳过点击预测模型训练") memory_monitor("训练点击模型后") # 构建完播率数据集 print("开始构建完播率数据集...") play_train_data = build_play_dataset(hist_play, vid_info, did_features, hist_click) memory_monitor("构建完播率数据集后") # 训练完播率模型 if not play_train_data.empty: X_play = play_train_data.drop(columns=['did', 'vid', 'play_time', 'item_duration', 'completion_rate'], errors='ignore') y_play = play_train_data['completion_rate'] else: X_play, y_play = pd.DataFrame(), pd.Series() print("⚠️ 完播率训练数据为空") if not X_play.empty and not y_play.empty: X_train_play, X_val_play, y_train_play, y_val_play = train_test_split( X_play, y_play, test_size=0.2, random_state=42 ) else: print("⚠️ 完播率训练数据为空,无法进行模型训练") X_train_play, X_val_play, y_train_play, y_val_play = pd.DataFrame(), pd.DataFrame(), 
pd.Series(), pd.Series() # 训练参数 params_reg = { 'objective': 'regression', 'metric': 'mae', 'boosting_type': 'gbdt', 'num_leaves': 63, 'learning_rate': 0.03, 'feature_fraction': 0.8, 'bagging_fraction': 0.8, 'bagging_freq': 5, 'lambda_l1': 0.1, 'lambda_l2': 0.1, 'min_data_in_leaf': 50, 'verbosity': -1 } # 增量训练完播率模型 if not X_train_play.empty: print("开始训练完播率模型...") model_play = train_incremental(X_train_play, y_train_play, play_categorical_features, params_reg, num_rounds=2000, chunk_size=100000) # 在验证集上评估 val_preds = model_play.predict(X_val_play) val_mae = mean_absolute_error(y_val_play, val_preds) print(f"验证集MAE: {val_mae:.4f}") else: model_play = None print("⚠️ 训练数据为空,跳过完播率模型训练") memory_monitor("训练完播率模型后") # 保存模型 if model_click: model_click.save_model('click_model.txt') print("点击预测模型已保存") if model_play: model_play.save_model('play_model.txt') print("完播率预测模型已保存") # 预测流程 print("开始加载预测数据...") to_predict_users = load_data_safely('testA_pred_did.csv', dtype={'did': 'category'}) to_predict_exposure = load_data_safely('testA_did_show.csv', dtype={'did': 'category', 'vid': 'category'}) # 执行预测 if not to_predict_users.empty and not to_predict_exposure.empty: print("开始生成预测结果...") submission = predict_for_test_data(to_predict_users, to_predict_exposure, did_features, vid_info) # 保存结果 if not submission.empty: timestamp = datetime.now().strftime("%Y%m%d_%H%M%S") output_file = f'submission_{timestamp}.csv' submission.to_csv(output_file, index=False) print(f"预测结果已保存至: {output_file}") else: print("⚠️ 预测结果为空,未保存文件") else: print("⚠️ 预测数据加载失败,无法生成结果") # 最终内存报告 end_mem = memory_monitor("处理完成") print(f"总内存消耗: {end_mem - start_mem:.2f} MB") # 历史数据加载函数 def load_historical_data(days=32): """高效加载历史数据,支持分批处理""" see_list, click_list, play_list = [], [], [] for day in tqdm(range(1, days + 1), desc="加载历史数据"): day_str = f"{day:02d}" # 加载曝光数据 see_path = f'see_{day_str}.csv' if os.path.exists(see_path): see = load_data_safely(see_path, usecols=['did', 'vid'], dtype={'did': 'category', 'vid': 'category'}) if not see.empty and 'did' in see.columns and 'vid' in see.columns: see_list.append(see) del see gc.collect() # 加载点击数据 click_path = f'click_{day_str}.csv' if os.path.exists(click_path): click = load_data_safely(click_path, usecols=['did', 'vid', 'click_time'], dtype={'did': 'category', 'vid': 'category'}) if not click.empty and 'click_time' in click.columns and 'did' in click.columns and 'vid' in click.columns: # 优化日期处理 click['date'] = pd.to_datetime(click['click_time'], errors='coerce').dt.date click = click.drop(columns=['click_time'], errors='ignore') click_list.append(click[['did', 'vid', 'date']]) del click gc.collect() # 加载播放数据 play_path = f'playplus_{day_str}.csv' if os.path.exists(play_path): play = load_data_safely(play_path, usecols=['did', 'vid', 'play_time'], dtype={'did': 'category', 'vid': 'category'}) if not play.empty and 'play_time' in play.columns and 'did' in play.columns and 'vid' in play.columns: play_list.append(play) del play gc.collect() gc.collect() # 确保返回三个DataFrame return ( pd.concat(see_list).drop_duplicates(['did', 'vid']) if see_list else pd.DataFrame(), pd.concat(click_list).drop_duplicates(['did', 'vid']) if click_list else pd.DataFrame(), pd.concat(play_list).drop_duplicates(['did', 'vid']) if play_list else pd.DataFrame() ) # 点击数据集构建 def build_click_dataset(hist_exposure, hist_click, sample_ratio=0.1): """构建点击数据集,包含负样本采样""" if hist_exposure.empty or hist_click.empty: print("⚠️ 历史曝光或点击数据为空,无法构建数据集") return pd.DataFrame() # 标记正样本 hist_click = hist_click.copy() hist_click['label'] = 1 # 
高效标记负样本 exposure_set = set(zip(hist_exposure['did'], hist_exposure['vid'])) click_set = set(zip(hist_click['did'], hist_click['vid'])) # 找出未点击的曝光 negative_set = exposure_set - click_set # 创建负样本DataFrame if negative_set: negative_dids, negative_vids = zip(*negative_set) negative_samples = pd.DataFrame({ 'did': list(negative_dids), 'vid': list(negative_vids), 'label': 0 }) # 采样负样本 if sample_ratio < 1.0: negative_samples = negative_samples.sample(frac=sample_ratio, random_state=42) else: negative_samples = pd.DataFrame(columns=['did', 'vid', 'label']) # 合并数据集 click_data = pd.concat([ hist_click[['did', 'vid', 'label']], negative_samples ], ignore_index=True) # 释放内存 del exposure_set, click_set, negative_set, negative_samples gc.collect() return click_data # 特征工程函数 def add_click_features(df, did_features, vid_info, hist_click, hist_play): """添加关键特征,避免内存溢出""" if df.empty: return df # 基础特征 if not did_features.empty and 'did' in did_features.columns: # 只取需要的列 did_cols = [col for col in did_features.columns if col not in ['did'] or col == 'did'] df = df.merge(did_features[did_cols], on='did', how='left') if not vid_info.empty and 'vid' in vid_info.columns: vid_cols = [col for col in vid_info.columns if col not in ['vid'] or col == 'vid'] df = df.merge(vid_info[vid_cols], on='vid', how='left') # 用户行为统计 if not hist_click.empty and 'did' in hist_click.columns: user_click_count = hist_click.groupby('did').size().rename('user_click_count') df = df.merge(user_click_count, on='did', how='left') else: df['user_click_count'] = 0 if not hist_play.empty and 'did' in hist_play.columns and 'play_time' in hist_play.columns: user_total_play = hist_play.groupby('did')['play_time'].sum().rename('user_total_play') df = df.merge(user_total_play, on='did', how='left') else: df['user_total_play'] = 0 if not hist_click.empty and 'vid' in hist_click.columns: video_click_count = hist_click.groupby('vid').size().rename('video_click_count') df = df.merge(video_click_count, on='vid', how='left') else: df['video_click_count'] = 0 if not hist_play.empty and 'vid' in hist_play.columns and 'play_time' in hist_play.columns: avg_play_time = hist_play.groupby('vid')['play_time'].mean().rename('avg_play_time') df = df.merge(avg_play_time, on='vid', how='left') else: df['avg_play_time'] = 0 # 填充缺失值 fill_values = { 'user_click_count': 0, 'user_total_play': 0, 'video_click_count': df['video_click_count'].median() if 'video_click_count' in df else 0, 'avg_play_time': df['avg_play_time'].median() if 'avg_play_time' in df else 0 } for col, value in fill_values.items(): if col in df: df[col] = df[col].fillna(value) # 添加时间相关特征 if 'date' in df: df['day_of_week'] = pd.to_datetime(df['date']).dt.dayofweek.astype('int8') df['hour'] = pd.to_datetime(df['date']).dt.hour.astype('int8') return df # 预测函数 def predict_for_test_data(test_users, test_exposure, did_features, vid_info): """为测试数据生成预测结果""" if test_users.empty or test_exposure.empty: print("⚠️ 测试数据为空,无法进行预测") return pd.DataFrame() # 合并测试数据 test_data = test_exposure.merge(test_users, on='did', how='left') # 添加特征 test_data = add_click_features( test_data, did_features, vid_info, pd.DataFrame(), # 无历史点击 pd.DataFrame() # 无历史播放 ) # 预测点击率 X_test = test_data.drop(columns=['did', 'vid', 'date'], errors='ignore') click_probs = [] if model_click and not X_test.empty: # 分块预测避免内存问题 click_probs = [] chunk_size = 100000 for i in range(0, len(X_test), chunk_size): chunk = X_test.iloc[i:i+chunk_size] click_probs.extend(model_click.predict(chunk)) else: click_probs = [0.5] * len(test_data) # 默认值 # 预测完播率 
    completion_rates = []
    if model_play and not X_test.empty:
        # 添加视频时长信息
        if not vid_info.empty and 'vid' in vid_info.columns and 'item_duration' in vid_info.columns:
            test_data = test_data.merge(vid_info[['vid', 'item_duration']], on='vid', how='left')
        else:
            test_data['item_duration'] = 1.0

        # 分块预测(chunk_size 在本分支内定义,即使点击模型缺失也不会出现未定义变量)
        chunk_size = 100000
        for i in range(0, len(X_test), chunk_size):
            chunk = X_test.iloc[i:i + chunk_size]
            completion_rates.extend(model_play.predict(chunk))
    else:
        completion_rates = [0.7] * len(test_data)  # 默认值

    # 计算综合得分
    test_data['click_prob'] = click_probs
    test_data['completion_rate'] = completion_rates
    test_data['score'] = test_data['click_prob'] * test_data['completion_rate']

    # 为每个用户选择得分最高的视频
    submission = test_data.sort_values('score', ascending=False).groupby('did').head(1)

    # 选择需要的列(列名已经是 did / vid / completion_rate,无需再重命名)
    submission = submission[['did', 'vid', 'completion_rate']].copy()

    # 确保数据格式正确
    submission['did'] = submission['did'].astype(str)
    submission['vid'] = submission['vid'].astype(str)
    submission['completion_rate'] = submission['completion_rate'].round(4)

    return submission


# 主程序入口
if __name__ == "__main__":
    main()
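The flow above reports memory usage through memory_monitor(...) at several checkpoints and subtracts its return values to report total consumption, but the helper itself does not appear in this paste. A minimal sketch of what such a helper could look like, assuming psutil is installed; the implementation is hypothetical, only the name and the MB return value follow the calls above.

import os
import psutil

def memory_monitor(stage: str = "") -> float:
    """Print and return the current process RSS in MB (hypothetical helper)."""
    rss_mb = psutil.Process(os.getpid()).memory_info().rss / (1024 ** 2)
    print(f"[{stage}] current memory usage: {rss_mb:.2f} MB")
    return rss_mb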

请帮我检查优化代码,并完整输出结果:import pandas as pd import numpy as np import lightgbm as lgb from lightgbm import early_stopping, log_evaluation from sklearn.model_selection import train_test_split from sklearn.metrics import roc_auc_score import chardet def detect_encoding(file_path): with open(file_path, 'rb') as f: result = chardet.detect(f.read(10000)) return result['encoding'], result['confidence'] def load_all_data(days=32): see_list, click_list, play_list = [], [], [] dtypes = {'did': 'category', 'vid': 'category'} for i in range(1, days + 1): day = f"{i:02d}" # 加载 see 数据 see = pd.read_csv(f'see_{day}.csv', encoding='latin1', dtype=dtypes) if 'did' not in see.columns or 'vid' not in see.columns: raise ValueError(f"see_{day}.csv 缺少必要字段") see['day'] = day see_list.append(see) # 加载 click 数据 click = pd.read_csv( f'click_{day}.csv', encoding='ISO-8859-1', on_bad_lines='skip', dtype=dtypes ) if 'click_time' not in click.columns: raise ValueError(f"click_{day}.csv 缺少 click_time 字段") click['date'] = pd.to_datetime(click['click_time']).dt.date click_list.append(click[['did', 'vid', 'date']]) # 加载 play 数据 play = pd.read_csv( f'playplus_{day}.csv', engine='python', encoding_errors='ignore', dtype=dtypes ) if 'play_time' not in play.columns: raise ValueError(f"playplus_{day}.csv 缺少 play_time 字段") play_list.append(play[['did', 'vid', 'play_time']]) all_see = pd.concat(see_list).drop_duplicates(['did', 'vid']) all_click = pd.concat(click_list).drop_duplicates(['did', 'vid']) all_play = pd.concat(play_list).groupby(['did', 'vid'], observed=True).sum().reset_index() return all_see, all_click, all_play def prepare_samples(all_see, all_click, all_play): video_info = pd.read_csv('vid_info_table.csv', encoding='gbk', dtype={'vid': 'category'}) # 合并基础数据 samples = all_see.merge(all_play, on=['did', 'vid'], how='left').fillna({'play_time': 0}) samples = samples.merge(video_info, on='vid', how='left') # 计算完成率(仅用于分析,不用于预测) samples['completion_rate'] = (samples['play_time'] / samples['item_duration']).clip(0, 1).astype(np.float32) # 点击标记 click_flag = all_click.groupby(['did', 'vid']).size().reset_index(name='clicked') click_flag['clicked'] = 1 samples = samples.merge(click_flag, on=['did', 'vid'], how='left').fillna({'clicked': 0}) samples['clicked'] = samples['clicked'].astype(np.int8) # 标签定义 samples['label'] = np.select( [ (samples['completion_rate'] > 0.9), (samples['clicked'] == 1) ], [2, 1], # 2=完成, 1=点击 default=0 # 0=曝光未点击 ) # 二分类目标(点击或完成为正类) samples['binary_label'] = samples['label'].apply(lambda x: 1 if x >= 1 else 0).astype(int) # 计算用户点击率(修正版) user_exposure = all_see.groupby('did').size().rename('exposure_count') user_click_count = all_click.groupby('did').size().rename('click_count') user_click_rate = (user_click_count / user_exposure).fillna(0).astype(np.float32) # 视频流行度 video_popularity = all_click.groupby('vid').size().rename('video_popularity') # 映射特征 samples['user_click_rate'] = samples['did'].map(user_click_rate).fillna(0) samples['video_popularity'] = samples['vid'].map(video_popularity).fillna(0) # 修复:保存唯一用户点击率(关键修复点) user_click_rate_df = pd.DataFrame({ 'did': user_click_rate.index, 'user_click_rate': user_click_rate.values }).drop_duplicates('did') # 修复:保存唯一视频流行度 video_popularity_df = pd.DataFrame({ 'vid': video_popularity.index, 'video_popularity': video_popularity.values }).drop_duplicates('vid') # 保存特征 user_click_rate_df.to_csv('user_click_rate.csv', index=False) video_popularity_df.to_csv('video_popularity.csv', index=False) return samples, user_click_rate, video_popularity def 
train_model(samples): # 仅使用可复现的特征 features = ['user_click_rate', 'video_popularity'] X = samples[features] y = samples['binary_label'] X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.2, random_state=42, stratify=y ) lgb_train = lgb.Dataset(X_train, y_train) lgb_eval = lgb.Dataset(X_test, y_test, reference=lgb_train) params = { 'boosting_type': 'gbdt', 'objective': 'binary', 'metric': 'auc', 'num_leaves': 31, 'learning_rate': 0.05, 'feature_fraction': 0.9, 'bagging_fraction': 0.8, 'bagging_freq': 5, 'verbose': -1 } model = lgb.train( params, lgb_train, num_boost_round=100, valid_sets=[lgb_train, lgb_eval], callbacks=[ early_stopping(stopping_rounds=20), log_evaluation(period=50) ] ) y_pred = model.predict(X_test) auc_score = roc_auc_score(y_test, y_pred) print(f"Validation AUC: {auc_score:.4f}") return model, features, auc_score def predict_new_data(model, feature_columns, test_file): # 读取测试数据 test_data = pd.read_csv(test_file, dtype={'did': 'category', 'vid': 'category'}) # 修复:正确读取特征映射 user_click_rate_df = pd.read_csv('user_click_rate.csv') video_popularity_df = pd.read_csv('video_popularity.csv') # 计算全局均值用于填充新用户/新视频 global_user_rate = user_click_rate_df['user_click_rate'].mean() global_video_pop = video_popularity_df['video_popularity'].mean() # 创建映射字典 user_click_map = user_click_rate_df.set_index('did')['user_click_rate'].to_dict() video_pop_map = video_popularity_df.set_index('vid')['video_popularity'].to_dict() # 映射特征 test_data['user_click_rate'] = test_data['did'].map(user_click_map).fillna(global_user_rate) test_data['video_popularity'] = test_data['vid'].map(video_pop_map).fillna(global_video_pop) # 预测 test_data['click_prob'] = model.predict(test_data[feature_columns]) # 生成结果 top_predictions = test_data.sort_values('click_prob', ascending=False).groupby('did').head(1) result = top_predictions[['did', 'vid', 'click_prob']].copy() result.columns = ['did', 'vid', 'click_prob'] result.to_csv('prediction_result.csv', index=False) return result if __name__ == '__main__': encoding, confidence = detect_encoding('see_01.csv') print(f"编码: {encoding}, 置信度: {confidence:.2f}") all_see, all_click, all_play = load_all_data() samples, _, _ = prepare_samples(all_see, all_click, all_play) model, features, auc_score = train_model(samples) result = predict_new_data(model, features, 'testA_did_show.csv')
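One pitfall worth noting in prepare_samples above: did and vid are loaded as category dtype, and a pandas groupby on categorical keys defaults to observed=False, which materializes a group for every category whether or not it occurs in that frame. A sketch of the user click-rate feature with observed=True and a zero-safe division; the column names follow the pasted code, the helper name is mine.

import numpy as np
import pandas as pd

def user_click_rate_map(all_see: pd.DataFrame, all_click: pd.DataFrame) -> pd.Series:
    # observed=True restricts the groupby to categories that actually occur,
    # which matters when did is a categorical column.
    exposure = all_see.groupby('did', observed=True).size()
    clicks = all_click.groupby('did', observed=True).size()
    # Align on the exposure index so users with exposure but no clicks get 0,
    # and guard the division defensively.
    rate = (clicks.reindex(exposure.index, fill_value=0)
            / exposure.replace(0, np.nan)).fillna(0)
    return rate.astype(np.float32).rename('user_click_rate')

The same observed=True argument applies to the click_flag and video_popularity groupbys in that function.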

帮我检查代码:import pandas as pd import numpy as np import lightgbm as lgb from lightgbm import early_stopping, log_evaluation from sklearn.model_selection import train_test_split from sklearn.metrics import roc_auc_score import chardet def detect_encoding(file_path): with open(file_path, 'rb') as f: result = chardet.detect(f.read(10000)) return result['encoding'], result['confidence'] def load_all_data(days=32): see_list, click_list, play_list = [], [], [] dtypes = {'did': 'category', 'vid': 'category'} for i in range(1, days + 1): day = f"{i:02d}" # 加载 see 数据 see = pd.read_csv(f'see_{day}.csv', encoding='latin1', dtype=dtypes) if 'did' not in see.columns or 'vid' not in see.columns: raise ValueError(f"see_{day}.csv 缺少必要字段") see['day'] = day see_list.append(see) # 加载 click 数据 click = pd.read_csv( f'click_{day}.csv', encoding='ISO-8859-1', on_bad_lines='skip', dtype=dtypes ) if 'click_time' not in click.columns: raise ValueError(f"click_{day}.csv 缺少 click_time 字段") click['date'] = pd.to_datetime(click['click_time']).dt.date click_list.append(click[['did', 'vid', 'date']]) # 加载 play 数据 play = pd.read_csv( f'playplus_{day}.csv', engine='python', encoding_errors='ignore', dtype=dtypes ) if 'play_time' not in play.columns: raise ValueError(f"playplus_{day}.csv 缺少 play_time 字段") play_list.append(play[['did', 'vid', 'play_time']]) all_see = pd.concat(see_list).drop_duplicates(['did', 'vid']) all_click = pd.concat(click_list).drop_duplicates(['did', 'vid']) all_play = pd.concat(play_list).groupby(['did', 'vid'], observed=True).sum().reset_index() return all_see, all_click, all_play def prepare_samples(all_see, all_click, all_play): video_info = pd.read_csv('vid_info_table.csv', encoding='gbk', dtype={'vid': 'category'}) # 合并基础数据 samples = all_see.merge(all_play, on=['did', 'vid'], how='left').fillna({'play_time': 0}) samples = samples.merge(video_info, on='vid', how='left') # 计算完成率(仅用于分析,不用于预测) samples['completion_rate'] = (samples['play_time'] / samples['item_duration']).clip(0, 1).astype(np.float32) # 点击标记 click_flag = all_click.groupby(['did', 'vid']).size().reset_index(name='clicked') click_flag['clicked'] = 1 samples = samples.merge(click_flag, on=['did', 'vid'], how='left').fillna({'clicked': 0}) samples['clicked'] = samples['clicked'].astype(np.int8) # 标签定义 samples['label'] = np.select( [ (samples['completion_rate'] > 0.9), (samples['clicked'] == 1) ], [2, 1], # 2=完成, 1=点击 default=0 # 0=曝光未点击 ) # 二分类目标(点击或完成为正类) samples['binary_label'] = samples['label'].apply(lambda x: 1 if x >= 1 else 0).astype(int) # 计算用户点击率(修正版) user_exposure = all_see.groupby('did').size().rename('exposure_count') user_click_count = all_click.groupby('did').size().rename('click_count') user_click_rate = (user_click_count / user_exposure).fillna(0).astype(np.float32) # 视频流行度 video_popularity = all_click.groupby('vid').size().rename('video_popularity') # 映射特征 samples['user_click_rate'] = samples['did'].map(user_click_rate).fillna(0) samples['video_popularity'] = samples['vid'].map(video_popularity).fillna(0) # 修复:保存唯一用户点击率(关键修复点) user_click_rate_df = pd.DataFrame({ 'did': user_click_rate.index, 'user_click_rate': user_click_rate.values }).drop_duplicates('did') # 修复:保存唯一视频流行度 video_popularity_df = pd.DataFrame({ 'vid': video_popularity.index, 'video_popularity': video_popularity.values }).drop_duplicates('vid') # 保存特征 user_click_rate_df.to_csv('user_click_rate.csv', index=False) video_popularity_df.to_csv('video_popularity.csv', index=False) return samples, user_click_rate, video_popularity def train_model(samples): # 
仅使用可复现的特征 features = ['user_click_rate', 'video_popularity'] X = samples[features] y = samples['binary_label'] X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.2, random_state=42, stratify=y ) lgb_train = lgb.Dataset(X_train, y_train) lgb_eval = lgb.Dataset(X_test, y_test, reference=lgb_train) params = { 'boosting_type': 'gbdt', 'objective': 'binary', 'metric': 'auc', 'num_leaves': 31, 'learning_rate': 0.05, 'feature_fraction': 0.9, 'bagging_fraction': 0.8, 'bagging_freq': 5, 'verbose': -1 } model = lgb.train( params, lgb_train, num_boost_round=100, valid_sets=[lgb_train, lgb_eval], callbacks=[ early_stopping(stopping_rounds=20), log_evaluation(period=50) ] ) y_pred = model.predict(X_test) auc_score = roc_auc_score(y_test, y_pred) print(f"Validation AUC: {auc_score:.4f}") return model, features, auc_score def predict_new_data(model, feature_columns, test_file): # 读取测试数据 test_data = pd.read_csv(test_file, dtype={'did': 'category', 'vid': 'category'}) # 修复:正确读取特征映射 user_click_rate_df = pd.read_csv('user_click_rate.csv') video_popularity_df = pd.read_csv('video_popularity.csv') # 计算全局均值用于填充新用户/新视频 global_user_rate = user_click_rate_df['user_click_rate'].mean() global_video_pop = video_popularity_df['video_popularity'].mean() # 创建映射字典 user_click_map = user_click_rate_df.set_index('did')['user_click_rate'].to_dict() video_pop_map = video_popularity_df.set_index('vid')['video_popularity'].to_dict() # 映射特征 test_data['user_click_rate'] = test_data['did'].map(user_click_map).fillna(global_user_rate) test_data['video_popularity'] = test_data['vid'].map(video_pop_map).fillna(global_video_pop) # 预测 test_data['click_prob'] = model.predict(test_data[feature_columns]) # 生成结果 top_predictions = test_data.sort_values('click_prob', ascending=False).groupby('did').head(1) result = top_predictions[['did', 'vid', 'click_prob']].copy() result.columns = ['did', 'vid', 'click_prob'] result.to_csv('prediction_result.csv', index=False) return result if __name__ == '__main__': encoding, confidence = detect_encoding('see_01.csv') print(f"编码: {encoding}, 置信度: {confidence:.2f}") all_see, all_click, all_play = load_all_data() samples, _, _ = prepare_samples(all_see, all_click, all_play) model, features, auc_score = train_model(samples) result = predict_new_data(model, features, 'testA_did_show.csv')
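In prepare_samples the three-class label is built with np.select and then binarized row by row with apply; when only the binary target is needed, a single vectorized boolean expression gives the same result and avoids the Python-level loop. A sketch using the column names from the code above:

import pandas as pd

def add_binary_label(samples: pd.DataFrame) -> pd.DataFrame:
    """Vectorized equivalent of the np.select + apply pair in prepare_samples."""
    # Positive class: the exposure was clicked, or more than 90% of the video was played.
    samples['binary_label'] = (
        (samples['clicked'] == 1) | (samples['completion_rate'] > 0.9)
    ).astype('int8')
    return samples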

帮我检查代码,并说明代码用到的表格、建模的特征、给出的结果分别有哪些:import pandas as pd import numpy as np import lightgbm as lgb from lightgbm import early_stopping, log_evaluation import gc import os import chardet from sklearn.model_selection import train_test_split from tqdm.auto import tqdm # 使用auto版本自动选择界面 import joblib from datetime import datetime import dask.dataframe as dd # 添加Dask支持大数据处理 from dask.diagnostics import ProgressBar # 1. 增强数据加载函数(优化内存和IO) def load_data_safely(file_path, usecols=None, dtype=None, chunksize=500000): """安全高效加载大型CSV文件,自动处理编码""" if not os.path.exists(file_path): print(f"⚠️ 文件不存在: {file_path}") return pd.DataFrame() try: # 高效检测编码 with open(file_path, 'rb') as f: detector = chardet.UniversalDetector() for line in f: detector.feed(line) if detector.done or f.tell() > 100000: break detector.close() encoding = detector.result['encoding'] if detector.result['confidence'] > 0.6 else 'utf-8' # 使用Dask处理大文件 ddf = dd.read_csv(file_path, encoding=encoding, usecols=usecols, dtype=dtype, blocksize=chunksize, low_memory=False) with ProgressBar(): df = ddf.compute() # 优化分类列内存 if dtype: for col, col_type in dtype.items(): if col in df.columns and col_type == 'category': df[col] = df[col].astype('category').cat.as_ordered() return df except Exception as e: print(f"⚠️ 加载 {file_path} 失败: {str(e)}") return pd.DataFrame() # 2. 优化历史数据加载(并行处理) def load_historical_data(days=32): """并行加载历史数据,自动跳过缺失文件""" from concurrent.futures import ThreadPoolExecutor def load_day(day): day_str = f"{day:02d}" results = {} # 曝光数据 see_path = f'see_{day_str}.csv' if os.path.exists(see_path): results['see'] = load_data_safely( see_path, usecols=['did', 'vid'], dtype={'did': 'category', 'vid': 'category'} ) # 点击数据 click_path = f'click_{day_str}.csv' if os.path.exists(click_path): click = load_data_safely( click_path, usecols=['did', 'vid', 'click_time'], dtype={'did': 'category', 'vid': 'category'} ) if not click.empty and 'click_time' in click: click['date'] = pd.to_datetime(click['click_time']).dt.date click.drop(columns=['click_time'], inplace=True, errors='ignore') results['click'] = click # 播放数据 play_path = f'play_{day_str}.csv' if os.path.exists(play_path): results['play'] = load_data_safely( play_path, usecols=['did', 'vid', 'play_time'], dtype={'did': 'category', 'vid': 'category', 'play_time': 'float32'} ) return results with ThreadPoolExecutor(max_workers=8) as executor: futures = [executor.submit(load_day, day) for day in range(1, days+1)] results = [f.result() for f in tqdm(futures, desc="加载历史数据", total=days)] # 合并结果 see_list = [r['see'] for r in results if 'see' in r and not r['see'].empty] click_list = [r['click'] for r in results if 'click' in r and not r['click'].empty] play_list = [r['play'] for r in results if 'play' in r and not r['play'].empty] hist_exposure = pd.concat(see_list).drop_duplicates(['did', 'vid']) if see_list else pd.DataFrame() hist_click = pd.concat(click_list).drop_duplicates(['did', 'vid']) if click_list else pd.DataFrame() hist_play = pd.concat(play_list).drop_duplicates(['did', 'vid']) if play_list else pd.DataFrame() return hist_exposure, hist_click, hist_play # 3. 
优化点击数据集构建(内存友好的负采样) def build_click_dataset(hist_exposure, hist_click, sample_ratio=0.1): """使用Bloom Filter进行高效负样本采样""" if hist_exposure.empty or hist_click.empty: print("⚠️ 历史曝光或点击数据为空,无法构建数据集") return pd.DataFrame() # 标记正样本 hist_click = hist_click[['did', 'vid']].copy() hist_click['label'] = 1 # 创建Bloom Filter存储正样本 from pybloom_live import ScalableBloomFilter bloom = ScalableBloomFilter(mode=ScalableBloomFilter.SMALL_SET_GROWTH) # 添加正样本 for _, row in tqdm(hist_click.iterrows(), total=len(hist_click), desc="构建布隆过滤器"): bloom.add((row['did'], row['vid'])) # 采样负样本 negative_samples = [] chunk_size = 500000 for i in range(0, len(hist_exposure), chunk_size): chunk = hist_exposure.iloc[i:i+chunk_size] for _, row in tqdm(chunk.iterrows(), total=len(chunk), desc="采样负样本"): if sample_ratio > np.random.random() and (row['did'], row['vid']) not in bloom: negative_samples.append([row['did'], row['vid'], 0]) # 构建负样本DataFrame negative_df = pd.DataFrame(negative_samples, columns=['did', 'vid', 'label']) click_data = pd.concat([hist_click, negative_df], ignore_index=True) return click_data # 4. 优化特征工程(延迟计算) def add_click_features(df, did_features, vid_info, hist_click, hist_play): """按需计算特征,避免中间大DataFrame""" if df.empty: return df # 基础特征 if not did_features.empty and 'did' in did_features.columns: # 优化内存合并 df = df.merge(did_features.add_suffix('_user'), left_on='did', right_index=True, how='left') if not vid_info.empty and 'vid' in vid_info.columns: vid_info_sub = vid_info[[c for c in vid_info.columns if c != 'item_duration']] df = df.merge(vid_info_sub.add_suffix('_item'), left_on='vid', right_index=True, how='left') # 按需计算统计特征 def calc_stat_feature(df, source, group_col, target_col, feature_name, agg_func='size'): if source.empty or group_col not in source or (agg_func != 'size' and target_col not in source): df[feature_name] = 0 return # 使用预聚合缓存 cache_file = f"{feature_name}_cache.pkl" if os.path.exists(cache_file): stats = joblib.load(cache_file) else: stats = source.groupby(group_col) if agg_func == 'size': stats = stats.size().rename(feature_name) else: stats = stats[target_col].agg(agg_func).rename(feature_name) joblib.dump(stats, cache_file) if group_col == 'did': df = df.merge(stats, left_on='did', right_index=True, how='left') else: df = df.merge(stats, left_on='vid', right_index=True, how='left') df[feature_name].fillna(0, inplace=True) return df # 计算用户特征 df = calc_stat_feature(df, hist_click, 'did', None, 'user_click_count') df = calc_stat_feature(df, hist_play, 'did', 'play_time', 'user_total_play', 'sum') # 计算物品特征 df = calc_stat_feature(df, hist_click, 'vid', None, 'video_click_count') df = calc_stat_feature(df, hist_play, 'vid', 'play_time', 'avg_play_time', 'mean') # 时间特征 if 'date' in df: df['day_of_week'] = pd.to_datetime(df['date']).dt.dayofweek.astype('int8') df['hour'] = pd.to_datetime(df['date']).dt.hour.astype('int8') # 释放内存 gc.collect() return df # 5. 
模型训练函数封装 def train_lgb_model(X, y, categorical_features, params, model_name="模型"): if X.empty or y.empty: print(f"⚠️ {model_name}训练数据为空") return None X_train, X_val, y_train, y_val = train_test_split( X, y, test_size=0.2, random_state=42, stratify=y if 'binary' in params['objective'] else None ) train_data = lgb.Dataset(X_train, label=y_train, categorical_feature=categorical_features, free_raw_data=False) val_data = lgb.Dataset(X_val, label=y_val, categorical_feature=categorical_features, free_raw_data=False) print(f"开始训练{model_name}...") model = lgb.train( params, train_data, num_boost_round=10000, valid_sets=[train_data, val_data], valid_names=['train', 'valid'], callbacks=[ early_stopping(stopping_rounds=100, verbose=True), log_evaluation(period=50), lgb.reset_parameter(learning_rate=lambda iter: params['learning_rate'] * (0.99 ** iter)) ] ) return model # 主程序优化 def main(): # 配置优化 #pd.set_option('future.no_silent_downcasting', True) gc.enable() # 核心数据类型 dtypes = {'did': 'category', 'vid': 'category', 'play_time': 'float32'} for i in range(88): dtypes[f'f{i}'] = 'float32' # 核心数据加载 print("高效加载核心数据...") did_features = load_data_safely('did_features_table.csv', dtype=dtypes) vid_info = load_data_safely('vid_info_table.csv', dtype=dtypes) # 历史数据加载 print("并行加载历史数据...") hist_exposure, hist_click, hist_play = load_historical_data(days=15) # 减少天数提高速度 # 点击模型训练 if not hist_exposure.empty and not hist_click.empty: print("构建点击数据集...") click_train_data = build_click_dataset(hist_exposure, hist_click, sample_ratio=0.05) # 降低采样率 print("构建点击特征...") click_train_data = add_click_features( click_train_data, did_features, vid_info, hist_click, hist_play ) # 获取分类特征 base_categorical = ['item_cid', 'item_type', 'item_assetSource', 'item_classify', 'item_isIntact', 'sid', 'stype', 'day_of_week', 'hour'] categorical_features = [c for c in base_categorical if c in click_train_data] # 训练模型 click_params = { 'objective': 'binary', 'metric': 'binary_logloss', 'boosting_type': 'gbdt', 'num_leaves': 127, 'learning_rate': 0.1, 'feature_fraction': 0.7, 'bagging_freq': 5, 'min_data_in_leaf': 100, 'verbosity': -1 } model_click = train_lgb_model( click_train_data.drop(columns=['label', 'did', 'vid', 'date'], errors='ignore'), click_train_data['label'], categorical_features, click_params, "点击预测模型" ) else: model_click = None # 完播率模型训练 if not hist_play.empty and not vid_info.empty: print("构建完播率数据集...") play_data = hist_play[['did', 'vid', 'play_time']].copy() play_data = play_data.merge( vid_info[['vid', 'item_duration']], on='vid', how='left' ) play_data['completion_rate'] = play_data['play_time'] / play_data['item_duration'] # 添加特征 play_data = add_click_features( play_data, did_features, vid_info, hist_click, hist_play ) # 训练模型 play_params = { 'objective': 'regression', 'metric': 'mae', 'boosting_type': 'gbdt', 'num_leaves': 63, 'learning_rate': 0.05, 'feature_fraction': 0.7, 'bagging_freq': 5, 'min_data_in_leaf': 50, 'verbosity': -1 } model_play = train_lgb_model( play_data.drop(columns=['completion_rate', 'did', 'vid', 'play_time', 'item_duration'], errors='ignore'), play_data['completion_rate'], categorical_features, play_params, "完播率预测模型" ) else: model_play = None # 保存模型 for model, name in zip([model_click, model_play], ['click_model.txt', 'play_model.txt']): if model: model.save_model(name) joblib.dump(base_categorical, 'categorical_features.pkl') # 测试预测 print("加载预测数据...") test_users = load_data_safely('testA_pred_did.csv', dtype={'did': 'category'}) test_exposure = load_data_safely('testA_did_show.csv', dtype={'did': 
                                       'category', 'vid': 'category'})

    if not test_users.empty and not test_exposure.empty:
        print("生成测试预测...")
        # 预测逻辑保持不变...
    else:
        print("⚠️ 预测数据加载失败")


if __name__ == "__main__":
    main()
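Two performance notes on the turn above. The Bloom-filter version of build_click_dataset walks hist_click and hist_exposure with iterrows, which is Python-level row iteration over potentially millions of pairs, and the merges inside add_click_features use right_index=True against did_features / vid_info, which are not indexed by did or vid after load_data_safely, so those joins look misaligned. The negative sampling itself can be done as one vectorized anti-join; a sketch under the same column names (not the author's exact method):

import pandas as pd

def build_click_dataset(hist_exposure: pd.DataFrame, hist_click: pd.DataFrame,
                        sample_ratio: float = 0.1, seed: int = 42) -> pd.DataFrame:
    """Exposures that were never clicked become negatives; clicked pairs are positives."""
    positives = hist_click[['did', 'vid']].drop_duplicates().assign(label=1)
    # Anti-join: keep exposed (did, vid) pairs with no matching click.
    merged = hist_exposure[['did', 'vid']].merge(
        positives[['did', 'vid']], on=['did', 'vid'], how='left', indicator=True
    )
    negatives = merged.loc[merged['_merge'] == 'left_only', ['did', 'vid']]
    if 0 < sample_ratio < 1.0:
        negatives = negatives.sample(frac=sample_ratio, random_state=seed)
    return pd.concat([positives, negatives.assign(label=0)], ignore_index=True)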

1.给出模型的auc;2.模型的预测结果,第一列是did,第二列只保留点击概率最高的vid,第三列是预计的完播率,按照上面三个要求调整代码并检查无误:import pandas as pd import numpy as np import lightgbm as lgb from lightgbm import early_stopping, log_evaluation import gc import os import chardet from sklearn.model_selection import train_test_split from tqdm import tqdm import joblib from datetime import datetime # 1. 增强数据加载函数(添加列存在性检查) def load_data_safely(file_path, usecols=None, dtype=None, chunksize=100000): """安全加载大型CSV文件,优化内存使用""" try: if not os.path.exists(file_path): print(f"⚠️ 文件不存在: {file_path}") return pd.DataFrame() # 自动检测编码 with open(file_path, 'rb') as f: result = chardet.detect(f.read(100000)) encoding = result['encoding'] if result['confidence'] > 0.7 else 'latin1' # 分批读取并优化内存 chunks = [] reader = pd.read_csv( file_path, encoding=encoding, usecols=usecols, dtype=dtype, chunksize=chunksize, low_memory=False ) for chunk in tqdm(reader, desc=f"加载 {os.path.basename(file_path)}"): # 优化分类列内存 for col in chunk.columns: if dtype and col in dtype and dtype[col] == 'category': chunk[col] = chunk[col].astype('category').cat.as_ordered() chunks.append(chunk) if chunks: return pd.concat(chunks, ignore_index=True) return pd.DataFrame() except Exception as e: print(f"⚠️ 加载 {file_path} 失败: {str(e)}") return pd.DataFrame() # 2. 优化历史数据加载(添加列存在性检查) def load_historical_data(days=32): """高效加载历史数据,支持分批处理""" see_list, click_list, play_list = [], [], [] for day in tqdm(range(1, days + 1), desc="加载历史数据"): day_str = f"{day:02d}" # 加载曝光数据 see_path = f'see_{day_str}.csv' if os.path.exists(see_path): see = load_data_safely(see_path, usecols=['did', 'vid'], dtype={'did': 'category', 'vid': 'category'}) if not see.empty and 'did' in see.columns and 'vid' in see.columns: see_list.append(see) del see gc.collect() # 加载点击数据 click_path = f'click_{day_str}.csv' if os.path.exists(click_path): click = load_data_safely(click_path, usecols=['did', 'vid', 'click_time'], dtype={'did': 'category', 'vid': 'category'}) if not click.empty and 'click_time' in click.columns and 'did' in click.columns and 'vid' in click.columns: # 优化日期处理 click['date'] = pd.to_datetime(click['click_time'], errors='coerce').dt.date click = click.drop(columns=['click_time'], errors='ignore') click_list.append(click[['did', 'vid', 'date']]) del click gc.collect() # 加载播放数据 play_path = f'playplus_{day_str}.csv' if os.path.exists(play_path): play = load_data_safely(play_path, usecols=['did', 'vid', 'play_time'], dtype={'did': 'category', 'vid': 'category'}) if not play.empty and 'play_time' in play.columns and 'did' in play.columns and 'vid' in play.columns: play_list.append(play) del play gc.collect() gc.collect() # 确保返回三个DataFrame,即使某些为空 return ( pd.concat(see_list).drop_duplicates(['did', 'vid']) if see_list else pd.DataFrame(), pd.concat(click_list).drop_duplicates(['did', 'vid']) if click_list else pd.DataFrame(), pd.concat(play_list).drop_duplicates(['did', 'vid']) if play_list else pd.DataFrame() ) # 定义内存优化的数据类型(添加列存在性检查) dtypes = { 'did': 'category', 'vid': 'category', 'play_time': 'float32' } # 可选特征 - 只有在数据中存在时才添加 optional_features = { 'item_cid': 'category', 'item_type': 'category', 'item_assetSource': 'category', 'item_classify': 'category', 'item_isIntact': 'category', 'sid': 'category', 'stype': 'category' } # 添加特征字段 for i in range(88): dtypes[f'f{i}'] = 'float32' # 加载核心数据 print("开始加载核心数据...") did_features = load_data_safely('did_features_table.csv', dtype=dtypes) vid_info = load_data_safely('vid_info_table.csv', dtype=dtypes) # 添加可选特征到dtypes(仅当列存在时) for feature, dtype in optional_features.items(): if not 
vid_info.empty and feature in vid_info.columns: dtypes[feature] = dtype # 重新加载数据以确保所有列使用正确的数据类型 if os.path.exists('did_features_table.csv'): did_features = load_data_safely('did_features_table.csv', dtype=dtypes) else: print("⚠️ did_features_table.csv 不存在") did_features = pd.DataFrame() if os.path.exists('vid_info_table.csv'): vid_info = load_data_safely('vid_info_table.csv', dtype=dtypes) else: print("⚠️ vid_info_table.csv 不存在") vid_info = pd.DataFrame() # 加载历史数据 - 确保所有变量都被定义 print("开始加载历史数据...") hist_exposure, hist_click, hist_play = load_historical_data(days=32) # 打印历史数据状态 print(f"历史曝光数据形状: {hist_exposure.shape if not hist_exposure.empty else '空'}") print(f"历史点击数据形状: {hist_click.shape if not hist_click.empty else '空'}") print(f"历史播放数据形状: {hist_play.shape if not hist_play.empty else '空'}") # 3. 优化点击数据集构建(添加空数据检查) def build_click_dataset(hist_exposure, hist_click, sample_ratio=0.1): """构建点击数据集,包含负样本采样""" if hist_exposure.empty or hist_click.empty: print("⚠️ 历史曝光或点击数据为空,无法构建数据集") return pd.DataFrame() # 标记正样本 hist_click = hist_click.copy() hist_click['label'] = 1 # 高效标记负样本(使用集合操作) exposure_set = set(zip(hist_exposure['did'], hist_exposure['vid'])) click_set = set(zip(hist_click['did'], hist_click['vid'])) # 找出未点击的曝光 negative_set = exposure_set - click_set # 创建负样本DataFrame if negative_set: negative_dids, negative_vids = zip(*negative_set) negative_samples = pd.DataFrame({ 'did': list(negative_dids), 'vid': list(negative_vids), 'label': 0 }) # 采样负样本 if sample_ratio < 1.0: negative_samples = negative_samples.sample(frac=sample_ratio, random_state=42) else: negative_samples = pd.DataFrame(columns=['did', 'vid', 'label']) # 合并数据集 click_data = pd.concat([ hist_click[['did', 'vid', 'label']], negative_samples ], ignore_index=True) # 释放内存 del exposure_set, click_set, negative_set, negative_samples gc.collect() return click_data # 使用10%负样本比例 - 确保hist_click已定义 if not hist_exposure.empty and not hist_click.empty: print("构建点击数据集...") click_train_data = build_click_dataset(hist_exposure, hist_click, sample_ratio=0.1) else: print("⚠️ 无法构建点击数据集,因为历史曝光或点击数据为空") click_train_data = pd.DataFrame() # 4. 
优化特征工程(解决分类特征问题) def add_click_features(df, did_features, vid_info, hist_click, hist_play): """添加关键特征,避免内存溢出""" if df.empty: return df # 基础特征(使用索引加速合并) if not did_features.empty and 'did' in did_features.columns: df = df.merge(did_features, on='did', how='left') if not vid_info.empty and 'vid' in vid_info.columns: df = df.merge(vid_info, on='vid', how='left') # 用户行为统计 user_click_count = pd.Series(dtype='int') if not hist_click.empty and 'did' in hist_click.columns: user_click_count = hist_click.groupby('did').size().rename('user_click_count') if not user_click_count.empty: df = df.merge(user_click_count, on='did', how='left') else: df['user_click_count'] = 0 user_total_play = pd.Series(dtype='float') if not hist_play.empty and 'did' in hist_play.columns and 'play_time' in hist_play.columns: user_total_play = hist_play.groupby('did')['play_time'].sum().rename('user_total_play') if not user_total_play.empty: df = df.merge(user_total_play, on='did', how='left') else: df['user_total_play'] = 0 # 视频热度统计 video_click_count = pd.Series(dtype='int') if not hist_click.empty and 'vid' in hist_click.columns: video_click_count = hist_click.groupby('vid').size().rename('video_click_count') if not video_click_count.empty: df = df.merge(video_click_count, on='vid', how='left') else: df['video_click_count'] = 0 avg_play_time = pd.Series(dtype='float') if not hist_play.empty and 'vid' in hist_play.columns and 'play_time' in hist_play.columns: avg_play_time = hist_play.groupby('vid')['play_time'].mean().rename('avg_play_time') if not avg_play_time.empty: df = df.merge(avg_play_time, on='vid', how='left') else: df['avg_play_time'] = 0 # 填充缺失值 fill_values = { 'user_click_count': 0, 'user_total_play': 0, 'video_click_count': df['video_click_count'].median() if 'video_click_count' in df else 0, 'avg_play_time': df['avg_play_time'].median() if 'avg_play_time' in df else 0 } for col, value in fill_values.items(): if col in df: df[col] = df[col].fillna(value) # 添加时间相关特征 if 'date' in df: df['day_of_week'] = pd.to_datetime(df['date']).dt.dayofweek.astype('int8') df['hour'] = pd.to_datetime(df['date']).dt.hour.astype('int8') return df # 添加特征 - 确保所有参数都已定义 if not click_train_data.empty: print("开始构建点击特征...") click_train_data = add_click_features( click_train_data, did_features, vid_info, hist_click, # 确保hist_click已定义 hist_play # 确保hist_play已定义 ) else: print("⚠️ 点击数据集为空,跳过特征构建") # 5. 
修复分类特征问题 def get_categorical_features(df, base_features): """动态获取存在的分类特征""" existing_features = [] for feature in base_features: if feature in df.columns: try: # 尝试转换为数值,如果是数值则跳过 pd.to_numeric(df[feature], errors='raise') except: existing_features.append(feature) # 确保转换为category类型 df[feature] = df[feature].astype('category').cat.as_ordered() return existing_features # 基础分类特征列表 base_categorical_features = [ 'item_cid', 'item_type', 'item_assetSource', 'item_classify', 'item_isIntact', 'sid', 'stype', 'day_of_week', 'hour' ] # 动态获取存在的分类特征 categorical_features = [] if not click_train_data.empty: categorical_features = get_categorical_features(click_train_data, base_categorical_features) print(f"使用的分类特征: {categorical_features}") else: print("⚠️ 点击训练数据为空,无法获取分类特征") # 准备训练数据 if not click_train_data.empty: if 'date' in click_train_data.columns: X = click_train_data.drop(columns=['did', 'vid', 'label', 'date'], errors='ignore') else: X = click_train_data.drop(columns=['did', 'vid', 'label'], errors='ignore') y = click_train_data['label'] else: X, y = pd.DataFrame(), pd.Series() print("⚠️ 点击训练数据为空") # 划分数据集 if not X.empty and not y.empty: X_train, X_val, y_train, y_val = train_test_split( X, y, test_size=0.2, random_state=42, stratify=y ) else: print("⚠️ 训练数据为空,无法进行模型训练") X_train, X_val, y_train, y_val = pd.DataFrame(), pd.DataFrame(), pd.Series(), pd.Series() # 训练模型(优化参数) params = { 'objective': 'binary', 'metric': 'binary_logloss', 'boosting_type': 'gbdt', 'num_leaves': 63, 'learning_rate': 0.05, 'feature_fraction': 0.8, 'bagging_fraction': 0.8, 'bagging_freq': 5, 'min_child_samples': 100, 'verbosity': -1 } if not X_train.empty: train_data = lgb.Dataset(X_train, label=y_train, categorical_feature=categorical_features) val_data = lgb.Dataset(X_val, label=y_val, categorical_feature=categorical_features) print("开始训练点击预测模型...") model_click = lgb.train( params, train_data, num_boost_round=1500, valid_sets=[val_data], callbacks=[ early_stopping(stopping_rounds=100, verbose=True), log_evaluation(period=50) ] ) else: model_click = None print("⚠️ 训练数据为空,跳过点击预测模型训练") # 6. 
优化完播率数据集构建 def build_play_dataset(hist_play, vid_info, did_features, hist_click): """构建完播率数据集,优化内存使用""" if hist_play.empty: print("⚠️ 历史播放数据为空,无法构建完播率数据集") return pd.DataFrame() # 基础数据 play_data = hist_play[['did', 'vid', 'play_time']].copy() # 添加视频时长信息 if not vid_info.empty and 'vid' in vid_info.columns and 'item_duration' in vid_info.columns: play_data = play_data.merge( vid_info[['vid', 'item_duration']], on='vid', how='left' ) else: play_data['item_duration'] = 1.0 # 默认值 # 计算完播率 play_data['completion_rate'] = play_data['play_time'] / play_data['item_duration'] play_data['completion_rate'] = play_data['completion_rate'].clip(upper=1.0) # 添加用户特征 if not did_features.empty and 'did' in did_features.columns: play_data = play_data.merge( did_features, on='did', how='left' ) # 添加视频特征 if not vid_info.empty and 'vid' in vid_info.columns: vid_cols = [col for col in vid_info.columns if col != 'item_duration'] play_data = play_data.merge( vid_info[vid_cols], on='vid', how='left' ) # 用户平均完播率 play_data['user_avg_completion'] = play_data.groupby('did')['completion_rate'].transform('mean') play_data['user_play_count'] = play_data.groupby('did')['completion_rate'].transform('count') # 视频平均完播率 play_data['video_avg_completion'] = play_data.groupby('vid')['completion_rate'].transform('mean') play_data['video_completion_std'] = play_data.groupby('vid')['completion_rate'].transform('std') # 用户-视频互动特征 if not hist_click.empty and 'did' in hist_click.columns and 'vid' in hist_click.columns: user_vid_clicks = hist_click.groupby(['did', 'vid']).size().reset_index(name='user_vid_clicks') play_data = play_data.merge(user_vid_clicks, on=['did', 'vid'], how='left') else: play_data['user_vid_clicks'] = 0 # 填充缺失值 play_data['user_avg_completion'].fillna(play_data['completion_rate'].mean(), inplace=True) play_data['user_play_count'].fillna(1, inplace=True) play_data['video_avg_completion'].fillna(play_data['completion_rate'].median(), inplace=True) play_data['video_completion_std'].fillna(0, inplace=True) play_data['user_vid_clicks'].fillna(0, inplace=True) return play_data print("开始构建完播率数据集...") # 确保所有参数都已定义 if 'hist_play' in globals() and 'vid_info' in globals() and 'did_features' in globals() and 'hist_click' in globals(): play_train_data = build_play_dataset(hist_play, vid_info, did_features, hist_click) else: print("⚠️ 无法构建完播率数据集,因为所需变量未定义") play_train_data = pd.DataFrame() # 7. 
训练完播率模型 if not play_train_data.empty: X_play = play_train_data.drop(columns=['did', 'vid', 'play_time', 'item_duration', 'completion_rate'], errors='ignore') y_play = play_train_data['completion_rate'] else: X_play, y_play = pd.DataFrame(), pd.Series() print("⚠️ 完播率训练数据为空") if not X_play.empty and not y_play.empty: X_train_play, X_val_play, y_train_play, y_val_play = train_test_split( X_play, y_play, test_size=0.2, random_state=42 ) else: print("⚠️ 完播率训练数据为空,无法进行模型训练") X_train_play, X_val_play, y_train_play, y_val_play = pd.DataFrame(), pd.DataFrame(), pd.Series(), pd.Series() # 获取完播率模型的分类特征 if not play_train_data.empty: play_categorical_features = get_categorical_features(play_train_data, base_categorical_features) print(f"完播率模型使用的分类特征: {play_categorical_features}") else: play_categorical_features = [] print("⚠️ 完播率训练数据为空,无法获取分类特征") # 训练参数 params_reg = { 'objective': 'regression', 'metric': 'mae', 'boosting_type': 'gbdt', 'num_leaves': 63, 'learning_rate': 0.03, 'feature_fraction': 0.8, 'bagging_fraction': 0.8, 'bagging_freq': 5, 'lambda_l1': 0.1, 'lambda_l2': 0.1, 'min_data_in_leaf': 50, 'verbosity': -1 } if not X_train_play.empty: train_data_play = lgb.Dataset(X_train_play, label=y_train_play, categorical_feature=play_categorical_features) val_data_play = lgb.Dataset(X_val_play, label=y_val_play, categorical_feature=play_categorical_features) print("开始训练完播率模型...") model_play = lgb.train( params_reg, train_data_play, num_boost_round=2000, valid_sets=[val_data_play], callbacks=[ early_stopping(stopping_rounds=100, verbose=True), log_evaluation(period=50) ] ) else: model_play = None print("⚠️ 训练数据为空,跳过完播率模型训练") # 保存模型 if model_click: model_click.save_model('click_model.txt') if model_play: model_play.save_model('play_model.txt') joblib.dump(base_categorical_features, 'categorical_features.pkl') # 8. 
添加预测流程 def predict_for_test_data(test_users, test_exposure, did_features, vid_info): """为测试数据生成预测结果""" if test_users.empty or test_exposure.empty: print("⚠️ 测试数据为空,无法进行预测") return pd.DataFrame() # 合并测试数据 test_data = test_exposure.merge(test_users, on='did', how='left') # 添加特征 test_data = add_click_features( test_data, did_features, vid_info, pd.DataFrame(), # 无历史点击 pd.DataFrame() # 无历史播放 ) # 动态获取分类特征 test_categorical_features = get_categorical_features(test_data, base_categorical_features) # 预测点击率 X_test = test_data.drop(columns=['did', 'vid', 'date'], errors='ignore') click_probs = [] if model_click and not X_test.empty: click_probs = model_click.predict(X_test) else: click_probs = [0.5] * len(test_data) # 默认值 # 预测完播率 completion_rates = [] if model_play and not X_test.empty: # 添加视频时长信息 if not vid_info.empty and 'vid' in vid_info.columns and 'item_duration' in vid_info.columns: test_data = test_data.merge(vid_info[['vid', 'item_duration']], on='vid', how='left') else: test_data['item_duration'] = 1.0 completion_rates = model_play.predict(X_test) else: completion_rates = [0.7] * len(test_data) # 默认值 # 计算综合得分 test_data['click_prob'] = click_probs test_data['completion_rate'] = completion_rates test_data['score'] = test_data['click_prob'] * test_data['completion_rate'] # 生成提交格式 submission = test_data.groupby('did').apply( lambda x: ' '.join(x.sort_values('score', ascending=False)['vid'].astype(str)[:100]) ).reset_index(name='vid_list') return submission # 加载预测数据 print("开始加载预测数据...") to_predict_users = load_data_safely('testA_pred_did.csv', dtype={'did': 'category'}) to_predict_exposure = load_data_safely('testA_did_show.csv', dtype={'did': 'category', 'vid': 'category'}) # 执行预测 if not to_predict_users.empty and not to_predict_exposure.empty: print("开始生成预测结果...") submission = predict_for_test_data(to_predict_users, to_predict_exposure, did_features, vid_info) # 保存结果 if not submission.empty: timestamp = datetime.now().strftime("%Y%m%d_%H%M%S") output_file = f'submission_{timestamp}.csv' submission.to_csv(output_file, index=False) print(f"预测结果已保存至: {output_file}") else: print("⚠️ 预测结果为空,未保存文件") else: print("⚠️ 预测数据加载失败,无法生成结果")
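The request at the top of this turn asks for one row per did: the vid with the highest predicted click probability plus its predicted completion rate. The predict_for_test_data above instead ends by joining up to 100 vids into a space-separated vid_list. A sketch of the per-did top-1 output format; it assumes test_data already carries the click_prob and completion_rate columns produced by the two models earlier in that function.

import pandas as pd

def build_submission(test_data: pd.DataFrame) -> pd.DataFrame:
    """Keep, for every did, the single vid with the highest predicted click probability."""
    top = (test_data
           .sort_values('click_prob', ascending=False)
           .drop_duplicates('did', keep='first'))   # one row per did
    out = top[['did', 'vid', 'completion_rate']].copy()
    out['completion_rate'] = out['completion_rate'].clip(0, 1).round(4)
    return out.reset_index(drop=True)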

帮我修改代码,一是原始数据只有30天,不是32天,二是完播率预测结果数值一样,需要调优模型;三是预测结果的行数应该与testA_pred_did.csv保持一致。import pandas as pd import numpy as np import lightgbm as lgb from lightgbm import early_stopping, log_evaluation import gc import os import chardet from sklearn.model_selection import train_test_split from tqdm import tqdm import joblib from datetime import datetime from sklearn.metrics import roc_auc_score # 添加AUC计算 # 修复:在函数定义后添加缩进的代码块 def load_data_safely(file_path, usecols=None, dtype=None, chunksize=100000): """安全加载大型CSV文件,优化内存使用""" try: if not os.path.exists(file_path): print(f"⚠️ 文件不存在: {file_path}") return pd.DataFrame() # 自动检测编码 with open(file_path, 'rb') as f: result = chardet.detect(f.read(100000)) encoding = result['encoding'] if result['confidence'] > 0.7 else 'latin1' # 分批读取并优化内存 chunks = [] reader = pd.read_csv( file_path, encoding=encoding, usecols=usecols, dtype=dtype, chunksize=chunksize, low_memory=False ) for chunk in tqdm(reader, desc=f"加载 {os.path.basename(file_path)}"): # 优化分类列内存 if dtype: # 确保dtype不为空 for col in chunk.columns: if col in dtype and dtype[col] == 'category': chunk[col] = chunk[col].astype('category').cat.as_ordered() chunks.append(chunk) if chunks: return pd.concat(chunks, ignore_index=True) return pd.DataFrame() except Exception as e: print(f"⚠️ 加载 {file_path} 失败: {str(e)}") return pd.DataFrame() # 修复:确保所有函数都有缩进的代码块 def load_historical_data(days=32): """高效加载历史数据,支持分批处理""" see_list, click_list, play_list = [], [], [] for day in tqdm(range(1, days + 1), desc="加载历史数据"): day_str = f"{day:02d}" # 加载曝光数据 see_path = f'see_{day_str}.csv' if os.path.exists(see_path): see = load_data_safely(see_path, usecols=['did', 'vid'], dtype={'did': 'category', 'vid': 'category'}) if not see.empty and 'did' in see.columns and 'vid' in see.columns: see_list.append(see) del see gc.collect() # 加载点击数据 click_path = f'click_{day_str}.csv' if os.path.exists(click_path): click = load_data_safely(click_path, usecols=['did', 'vid', 'click_time'], dtype={'did': 'category', 'vid': 'category'}) if not click.empty and 'click_time' in click.columns and 'did' in click.columns and 'vid' in click.columns: # 优化日期处理 click['date'] = pd.to_datetime(click['click_time'], errors='coerce').dt.date click = click.drop(columns=['click_time'], errors='ignore') click_list.append(click[['did', 'vid', 'date']]) del click gc.collect() # 加载播放数据 play_path = f'playplus_{day_str}.csv' if os.path.exists(play_path): play = load_data_safely(play_path, usecols=['did', 'vid', 'play_time'], dtype={'did': 'category', 'vid': 'category'}) if not play.empty and 'play_time' in play.columns and 'did' in play.columns and 'vid' in play.columns: play_list.append(play) del play gc.collect() gc.collect() # 确保返回三个DataFrame,即使某些为空 return ( pd.concat(see_list).drop_duplicates(['did', 'vid']) if see_list else pd.DataFrame(), pd.concat(click_list).drop_duplicates(['did', 'vid']) if click_list else pd.DataFrame(), pd.concat(play_list).drop_duplicates(['did', 'vid']) if play_list else pd.DataFrame() ) # 修复:添加缺失的函数定义 def build_click_dataset(hist_exposure, hist_click, sample_ratio=0.1): """构建点击数据集,包含负样本采样""" if hist_exposure.empty or hist_click.empty: print("⚠️ 历史曝光或点击数据为空,无法构建数据集") return pd.DataFrame() # 标记正样本 hist_click = hist_click.copy() hist_click['label'] = 1 # 高效标记负样本(使用集合操作) exposure_set = set(zip(hist_exposure['did'], hist_exposure['vid'])) click_set = set(zip(hist_click['did'], hist_click['vid'])) # 找出未点击的曝光 negative_set = exposure_set - click_set # 创建负样本DataFrame if negative_set: negative_dids, negative_vids = zip(*negative_set) 
negative_samples = pd.DataFrame({ 'did': list(negative_dids), 'vid': list(negative_vids), 'label': 0 }) # 采样负样本 if sample_ratio < 1.0: negative_samples = negative_samples.sample(frac=sample_ratio, random_state=42) else: negative_samples = pd.DataFrame(columns=['did', 'vid', 'label']) # 合并数据集 click_data = pd.concat([ hist_click[['did', 'vid', 'label']], negative_samples ], ignore_index=True) # 释放内存 del exposure_set, click_set, negative_set, negative_samples gc.collect() return click_data # 修复:添加缺失的函数定义 def add_click_features(df, did_features, vid_info, hist_click, hist_play): """添加关键特征,避免内存溢出""" if df.empty: return df # 基础特征(使用索引加速合并) if not did_features.empty and 'did' in did_features.columns: df = df.merge(did_features, on='did', how='left') if not vid_info.empty and 'vid' in vid_info.columns: df = df.merge(vid_info, on='vid', how='left') # 用户行为统计 user_click_count = pd.Series(dtype='int') if not hist_click.empty and 'did' in hist_click.columns: user_click_count = hist_click.groupby('did').size().rename('user_click_count') if not user_click_count.empty: df = df.merge(user_click_count, on='did', how='left') else: df['user_click_count'] = 0 user_total_play = pd.Series(dtype='float') if not hist_play.empty and 'did' in hist_play.columns and 'play_time' in hist_play.columns: user_total_play = hist_play.groupby('did')['play_time'].sum().rename('user_total_play') if not user_total_play.empty: df = df.merge(user_total_play, on='did', how='left') else: df['user_total_play'] = 0 # 视频热度统计 video_click_count = pd.Series(dtype='int') if not hist_click.empty and 'vid' in hist_click.columns: video_click_count = hist_click.groupby('vid').size().rename('video_click_count') if not video_click_count.empty: df = df.merge(video_click_count, on='vid', how='left') else: df['video_click_count'] = 0 avg_play_time = pd.Series(dtype='float') if not hist_play.empty and 'vid' in hist_play.columns and 'play_time' in hist_play.columns: avg_play_time = hist_play.groupby('vid')['play_time'].mean().rename('avg_play_time') if not avg_play_time.empty: df = df.merge(avg_play_time, on='vid', how='left') else: df['avg_play_time'] = 0 # 填充缺失值 fill_values = { 'user_click_count': 0, 'user_total_play': 0, 'video_click_count': df['video_click_count'].median() if 'video_click_count' in df else 0, 'avg_play_time': df['avg_play_time'].median() if 'avg_play_time' in df else 0 } for col, value in fill_values.items(): if col in df: df[col] = df[col].fillna(value) # 添加时间相关特征 if 'date' in df: df['day_of_week'] = pd.to_datetime(df['date']).dt.dayofweek.astype('int8') df['hour'] = pd.to_datetime(df['date']).dt.hour.astype('int8') return df # 修复:添加缺失的函数定义 def get_categorical_features(df, base_features): """动态获取存在的分类特征""" existing_features = [] for feature in base_features: if feature in df.columns: try: # 尝试转换为数值,如果是数值则跳过 pd.to_numeric(df[feature], errors='raise') except: existing_features.append(feature) # 确保转换为category类型 df[feature] = df[feature].astype('category').cat.as_ordered() return existing_features # 修复:添加缺失的函数定义 def build_play_dataset(hist_play, vid_info, did_features, hist_click): """构建完播率数据集,优化内存使用""" if hist_play.empty: print("⚠️ 历史播放数据为空,无法构建完播率数据集") return pd.DataFrame() # 基础数据 play_data = hist_play[['did', 'vid', 'play_time']].copy() # 添加视频时长信息 if not vid_info.empty and 'vid' in vid_info.columns and 'item_duration' in vid_info.columns: play_data = play_data.merge( vid_info[['vid', 'item_duration']], on='vid', how='left' ) else: play_data['item_duration'] = 1.0 # 默认值 # 计算完播率 play_data['completion_rate'] = play_data['play_time'] 
/ play_data['item_duration'] play_data['completion_rate'] = play_data['completion_rate'].clip(upper=1.0) # 添加用户特征 if not did_features.empty and 'did' in did_features.columns: play_data = play_data.merge( did_features, on='did', how='left' ) # 添加视频特征 if not vid_info.empty and 'vid' in vid_info.columns: vid_cols = [col for col in vid_info.columns if col != 'item_duration'] play_data = play_data.merge( vid_info[vid_cols], on='vid', how='left' ) # 用户平均完播率 play_data['user_avg_completion'] = play_data.groupby('did')['completion_rate'].transform('mean') play_data['user_play_count'] = play_data.groupby('did')['completion_rate'].transform('count') # 视频平均完播率 play_data['video_avg_completion'] = play_data.groupby('vid')['completion_rate'].transform('mean') play_data['video_completion_std'] = play_data.groupby('vid')['completion_rate'].transform('std') # 用户-视频互动特征 if not hist_click.empty and 'did' in hist_click.columns and 'vid' in hist_click.columns: user_vid_clicks = hist_click.groupby(['did', 'vid']).size().reset_index(name='user_vid_clicks') play_data = play_data.merge(user_vid_clicks, on=['did', 'vid'], how='left') else: play_data['user_vid_clicks'] = 0 # 填充缺失值 play_data['user_avg_completion'].fillna(play_data['completion_rate'].mean(), inplace=True) play_data['user_play_count'].fillna(1, inplace=True) play_data['video_avg_completion'].fillna(play_data['completion_rate'].median(), inplace=True) play_data['video_completion_std'].fillna(0, inplace=True) play_data['user_vid_clicks'].fillna(0, inplace=True) return play_data # 修复:添加缺失的函数定义 def predict_for_test_data(test_users, test_exposure, did_features, vid_info): """为测试数据生成预测结果 - 修改为只保留点击概率最高的vid""" if test_users.empty or test_exposure.empty: print("⚠️ 测试数据为空,无法进行预测") return pd.DataFrame() # 合并测试数据 test_data = test_exposure.merge(test_users, on='did', how='left') # 添加特征 test_data = add_click_features( test_data, did_features, vid_info, pd.DataFrame(), # 无历史点击 pd.DataFrame() # 无历史播放 ) # 动态获取分类特征 test_categorical_features = get_categorical_features(test_data, base_categorical_features) # 预测点击率 X_test = test_data.drop(columns=['did', 'vid', 'date'], errors='ignore') click_probs = [] if model_click and not X_test.empty: click_probs = model_click.predict(X_test) else: click_probs = [0.5] * len(test_data) # 默认值 # 预测完播率 completion_rates = [] if model_play and not X_test.empty: # 添加视频时长信息 if not vid_info.empty and 'vid' in vid_info.columns and 'item_duration' in vid_info.columns: test_data = test_data.merge(vid_info[['vid', 'item_duration']], on='vid', how='left') else: test_data['item_duration'] = 1.0 completion_rates = model_play.predict(X_test) else: completion_rates = [0.7] * len(test_data) # 默认值 # 存储预测结果 test_data['click_prob'] = click_probs test_data['completion_rate'] = completion_rates # 修改:只保留每个did点击概率最高的vid result = test_data.sort_values('click_prob', ascending=False).groupby('did').head(1) # 选择需要的列 result = result[['did', 'vid', 'completion_rate']].copy() # 重命名列 result.columns = ['did', 'vid', 'predicted_completion_rate'] return result # 主程序流程 if __name__ == "__main__": # 定义内存优化的数据类型 dtypes = { 'did': 'category', 'vid': 'category', 'play_time': 'float32' } # 可选特征 - 只有在数据中存在时才添加 optional_features = { 'item_cid': 'category', 'item_type': 'category', 'item_assetSource': 'category', 'item_classify': 'category', 'item_isIntact': 'category', 'sid': 'category', 'stype': 'category' } # 添加特征字段 for i in range(88): dtypes[f'f{i}'] = 'float32' # 加载核心数据 print("开始加载核心数据...") did_features = load_data_safely('did_features_table.csv', dtype=dtypes) vid_info = 
load_data_safely('vid_info_table.csv', dtype=dtypes) # 添加可选特征到dtypes(仅当列存在时) for feature, dtype in optional_features.items(): if not vid_info.empty and feature in vid_info.columns: dtypes[feature] = dtype # 重新加载数据以确保所有列使用正确的数据类型 if os.path.exists('did_features_table.csv'): did_features = load_data_safely('did_features_table.csv', dtype=dtypes) else: print("⚠️ did_features_table.csv 不存在") did_features = pd.DataFrame() if os.path.exists('vid_info_table.csv'): vid_info = load_data_safely('vid_info_table.csv', dtype=dtypes) else: print("⚠️ vid_info_table.csv 不存在") vid_info = pd.DataFrame() # 加载历史数据 - 确保所有变量都被定义 print("开始加载历史数据...") hist_exposure, hist_click, hist_play = load_historical_data(days=32) # 打印历史数据状态 print(f"历史曝光数据形状: {hist_exposure.shape if not hist_exposure.empty else '空'}") print(f"历史点击数据形状: {hist_click.shape if not hist_click.empty else '空'}") print(f"历史播放数据形状: {hist_play.shape if not hist_play.empty else '空'}") # 构建点击数据集 if not hist_exposure.empty and not hist_click.empty: print("构建点击数据集...") click_train_data = build_click_dataset(hist_exposure, hist_click, sample_ratio=0.1) else: print("⚠️ 无法构建点击数据集,因为历史曝光或点击数据为空") click_train_data = pd.DataFrame() # 添加特征 - 确保所有参数都已定义 if not click_train_data.empty: print("开始构建点击特征...") click_train_data = add_click_features( click_train_data, did_features, vid_info, hist_click, # 确保hist_click已定义 hist_play # 确保hist_play已定义 ) else: print("⚠️ 点击数据集为空,跳过特征构建") # 基础分类特征列表 base_categorical_features = [ 'item_cid', 'item_type', 'item_assetSource', 'item_classify', 'item_isIntact', 'sid', 'stype', 'day_of_week', 'hour' ] # 动态获取存在的分类特征 categorical_features = [] if not click_train_data.empty: categorical_features = get_categorical_features(click_train_data, base_categorical_features) print(f"使用的分类特征: {categorical_features}") else: print("⚠️ 点击训练数据为空,无法获取分类特征") # 准备训练数据 if not click_train_data.empty: if 'date' in click_train_data.columns: X = click_train_data.drop(columns=['did', 'vid', 'label', 'date'], errors='ignore') else: X = click_train_data.drop(columns=['did', 'vid', 'label'], errors='ignore') y = click_train_data['label'] else: X, y = pd.DataFrame(), pd.Series() print("⚠️ 点击训练数据为空") # 划分数据集 if not X.empty and not y.empty: X_train, X_val, y_train, y_val = train_test_split( X, y, test_size=0.2, random_state=42, stratify=y ) else: print("⚠️ 训练数据为空,无法进行模型训练") X_train, X_val, y_train, y_val = pd.DataFrame(), pd.DataFrame(), pd.Series(), pd.Series() # 训练模型(优化参数) params = { 'objective': 'binary', 'metric': 'binary_logloss', 'boosting_type': 'gbdt', 'num_leaves': 63, 'learning_rate': 0.05, 'feature_fraction': 0.8, 'bagging_fraction': 0.8, 'bagging_freq': 5, 'min_child_samples': 100, 'verbosity': -1 } model_click = None if not X_train.empty: train_data = lgb.Dataset(X_train, label=y_train, categorical_feature=categorical_features) val_data = lgb.Dataset(X_val, label=y_val, categorical_feature=categorical_features) print("开始训练点击预测模型...") model_click = lgb.train( params, train_data, num_boost_round=1500, valid_sets=[val_data], callbacks=[ early_stopping(stopping_rounds=100, verbose=True), log_evaluation(period=50) ] ) # 计算并输出AUC if not X_val.empty and not y_val.empty and model_click: y_val_pred = model_click.predict(X_val) auc_score = roc_auc_score(y_val, y_val_pred) print(f"📊 点击率模型在验证集上的AUC: {auc_score:.6f}") with open('model_metrics.txt', 'w') as f: f.write(f"点击率模型AUC: {auc_score:.6f}\n") else: print("⚠️ 训练数据为空,跳过点击预测模型训练") # 构建完播率数据集 print("开始构建完播率数据集...") if 'hist_play' in globals() and 'vid_info' in globals() and 'did_features' in globals() and 
'hist_click' in globals(): play_train_data = build_play_dataset(hist_play, vid_info, did_features, hist_click) else: print("⚠️ 无法构建完播率数据集,因为所需变量未定义") play_train_data = pd.DataFrame() # 训练完播率模型 model_play = None if not play_train_data.empty: X_play = play_train_data.drop(columns=['did', 'vid', 'play_time', 'item_duration', 'completion_rate'], errors='ignore') y_play = play_train_data['completion_rate'] else: X_play, y_play = pd.DataFrame(), pd.Series() print("⚠️ 完播率训练数据为空") if not X_play.empty and not y_play.empty: X_train_play, X_val_play, y_train_play, y_val_play = train_test_split( X_play, y_play, test_size=0.2, random_state=42 ) else: print("⚠️ 完播率训练数据为空,无法进行模型训练") X_train_play, X_val_play, y_train_play, y_val_play = pd.DataFrame(), pd.DataFrame(), pd.Series(), pd.Series() # 获取完播率模型的分类特征 play_categorical_features = [] if not play_train_data.empty: play_categorical_features = get_categorical_features(play_train_data, base_categorical_features) print(f"完播率模型使用的分类特征: {play_categorical_features}") else: print("⚠️ 完播率训练数据为空,无法获取分类特征") # 训练参数 params_reg = { 'objective': 'regression', 'metric': 'mae', 'boosting_type': 'gbdt', 'num_leaves': 63, 'learning_rate': 0.03, 'feature_fraction': 0.8, 'bagging_fraction': 0.8, 'bagging_freq': 5, 'lambda_l1': 0.1, 'lambda_l2': 0.1, 'min_data_in_leaf': 50, 'verbosity': -1 } if not X_train_play.empty: train_data_play = lgb.Dataset(X_train_play, label=y_train_play, categorical_feature=play_categorical_features) val_data_play = lgb.Dataset(X_val_play, label=y_val_play, categorical_feature=play_categorical_features) print("开始训练完播率模型...") model_play = lgb.train( params_reg, train_data_play, num_boost_round=2000, valid_sets=[val_data_play], callbacks=[ early_stopping(stopping_rounds=100, verbose=True), log_evaluation(period=50) ] ) else: print("⚠️ 训练数据为空,跳过完播率模型训练") # 保存模型 if model_click: model_click.save_model('click_model.txt') if model_play: model_play.save_model('play_model.txt') joblib.dump(base_categorical_features, 'categorical_features.pkl') # 加载预测数据 print("开始加载预测数据...") to_predict_users = load_data_safely('testA_pred_did.csv', dtype={'did': 'category'}) to_predict_exposure = load_data_safely('testA_did_show.csv', dtype={'did': 'category', 'vid': 'category'}) # 执行预测 if not to_predict_users.empty and not to_predict_exposure.empty: print("开始生成预测结果...") submission = predict_for_test_data(to_predict_users, to_predict_exposure, did_features, vid_info) # 保存结果 if not submission.empty: timestamp = datetime.now().strftime("%Y%m%d_%H%M%S") output_file = f'submission_{timestamp}.csv' # 修改:保存为无表头CSV submission.to_csv(output_file, index=False, header=False) print(f"预测结果已保存至: {output_file}") print(f"结果格式: 共 {len(submission)} 行") print(f"列信息: [did, vid, predicted_completion_rate]") else: print("⚠️ 预测结果为空,未保存文件") else: print("⚠️ 预测数据加载失败,无法生成结果")
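Two of the three requested changes can be isolated into small pieces. The output row count can be pinned to testA_pred_did.csv by left-joining the per-did result back onto the user list, and the near-identical completion-rate predictions are largely a consequence of calling add_click_features with empty hist_click / hist_play at prediction time, so every behavioral feature collapses to a constant and the regressor has almost nothing to separate rows with. A sketch under those assumptions (the fallback values are mine, not the author's); the first request is simply load_historical_data(days=30).

import pandas as pd

def align_submission(result: pd.DataFrame, to_predict_users: pd.DataFrame,
                     default_rate: float = 0.0) -> pd.DataFrame:
    """Guarantee exactly one output row per did listed in testA_pred_did.csv."""
    base = to_predict_users[['did']].drop_duplicates()
    out = base.merge(result, on='did', how='left')   # keep every required did
    # dids without any exposure row still get an output line
    out['vid'] = out['vid'].astype('object').fillna('')
    out['predicted_completion_rate'] = (
        out['predicted_completion_rate'].fillna(default_rate).round(4)
    )
    return out

# At prediction time, pass the real history so the features are not constant:
# test_data = add_click_features(test_data, did_features, vid_info, hist_click, hist_play)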

为什么完播率预测结果数据一样?需要如何调整?import pandas as pd import numpy as np import lightgbm as lgb from lightgbm import early_stopping, log_evaluation import gc import os import chardet from sklearn.model_selection import train_test_split from tqdm.auto import tqdm # 使用auto版本自动选择界面 import joblib from datetime import datetime import dask.dataframe as dd # 添加Dask支持大数据处理 from dask.diagnostics import ProgressBar # 1. 增强数据加载函数(优化内存和IO) def load_data_safely(file_path, usecols=None, dtype=None, chunksize=500000): """安全高效加载大型CSV文件,自动处理编码""" if not os.path.exists(file_path): print(f"⚠️ 文件不存在: {file_path}") return pd.DataFrame() try: # 高效检测编码 with open(file_path, 'rb') as f: detector = chardet.UniversalDetector() for line in f: detector.feed(line) if detector.done or f.tell() > 100000: break detector.close() encoding = detector.result['encoding'] if detector.result['confidence'] > 0.6 else 'utf-8' # 使用Dask处理大文件 ddf = dd.read_csv(file_path, encoding=encoding, usecols=usecols, dtype=dtype, blocksize=chunksize, low_memory=False) with ProgressBar(): df = ddf.compute() # 优化分类列内存 if dtype: for col, col_type in dtype.items(): if col in df.columns and col_type == 'category': df[col] = df[col].astype('category').cat.as_ordered() return df except Exception as e: print(f"⚠️ 加载 {file_path} 失败: {str(e)}") return pd.DataFrame() # 2. 优化历史数据加载(并行处理) def load_historical_data(days=32): """并行加载历史数据,自动跳过缺失文件""" from concurrent.futures import ThreadPoolExecutor def load_day(day): day_str = f"{day:02d}" results = {} # 曝光数据 see_path = f'see_{day_str}.csv' if os.path.exists(see_path): results['see'] = load_data_safely( see_path, usecols=['did', 'vid'], dtype={'did': 'category', 'vid': 'category'} ) # 点击数据 click_path = f'click_{day_str}.csv' if os.path.exists(click_path): click = load_data_safely( click_path, usecols=['did', 'vid', 'click_time'], dtype={'did': 'category', 'vid': 'category'} ) if not click.empty and 'click_time' in click: click['date'] = pd.to_datetime(click['click_time']).dt.date click.drop(columns=['click_time'], inplace=True, errors='ignore') results['click'] = click # 播放数据 play_path = f'play_{day_str}.csv' if os.path.exists(play_path): results['play'] = load_data_safely( play_path, usecols=['did', 'vid', 'play_time'], dtype={'did': 'category', 'vid': 'category', 'play_time': 'float32'} ) return results with ThreadPoolExecutor(max_workers=8) as executor: futures = [executor.submit(load_day, day) for day in range(1, days+1)] results = [f.result() for f in tqdm(futures, desc="加载历史数据", total=days)] # 合并结果 see_list = [r['see'] for r in results if 'see' in r and not r['see'].empty] click_list = [r['click'] for r in results if 'click' in r and not r['click'].empty] play_list = [r['play'] for r in results if 'play' in r and not r['play'].empty] hist_exposure = pd.concat(see_list).drop_duplicates(['did', 'vid']) if see_list else pd.DataFrame() hist_click = pd.concat(click_list).drop_duplicates(['did', 'vid']) if click_list else pd.DataFrame() hist_play = pd.concat(play_list).drop_duplicates(['did', 'vid']) if play_list else pd.DataFrame() return hist_exposure, hist_click, hist_play # 3. 
优化点击数据集构建(内存友好的负采样) def build_click_dataset(hist_exposure, hist_click, sample_ratio=0.1): """使用Bloom Filter进行高效负样本采样""" if hist_exposure.empty or hist_click.empty: print("⚠️ 历史曝光或点击数据为空,无法构建数据集") return pd.DataFrame() # 标记正样本 hist_click = hist_click[['did', 'vid']].copy() hist_click['label'] = 1 # 创建Bloom Filter存储正样本 from pybloom_live import ScalableBloomFilter bloom = ScalableBloomFilter(mode=ScalableBloomFilter.SMALL_SET_GROWTH) # 添加正样本 for _, row in tqdm(hist_click.iterrows(), total=len(hist_click), desc="构建布隆过滤器"): bloom.add((row['did'], row['vid'])) # 采样负样本 negative_samples = [] chunk_size = 500000 for i in range(0, len(hist_exposure), chunk_size): chunk = hist_exposure.iloc[i:i+chunk_size] for _, row in tqdm(chunk.iterrows(), total=len(chunk), desc="采样负样本"): if sample_ratio > np.random.random() and (row['did'], row['vid']) not in bloom: negative_samples.append([row['did'], row['vid'], 0]) # 构建负样本DataFrame negative_df = pd.DataFrame(negative_samples, columns=['did', 'vid', 'label']) click_data = pd.concat([hist_click, negative_df], ignore_index=True) return click_data # 4. 优化特征工程(延迟计算) def add_click_features(df, did_features, vid_info, hist_click, hist_play): """按需计算特征,避免中间大DataFrame""" if df.empty: return df # 基础特征 if not did_features.empty and 'did' in did_features.columns: # 优化内存合并 df = df.merge(did_features.add_suffix('_user'), left_on='did', right_index=True, how='left') if not vid_info.empty and 'vid' in vid_info.columns: vid_info_sub = vid_info[[c for c in vid_info.columns if c != 'item_duration']] df = df.merge(vid_info_sub.add_suffix('_item'), left_on='vid', right_index=True, how='left') # 按需计算统计特征 def calc_stat_feature(df, source, group_col, target_col, feature_name, agg_func='size'): if source.empty or group_col not in source or (agg_func != 'size' and target_col not in source): df[feature_name] = 0 return # 使用预聚合缓存 cache_file = f"{feature_name}_cache.pkl" if os.path.exists(cache_file): stats = joblib.load(cache_file) else: stats = source.groupby(group_col) if agg_func == 'size': stats = stats.size().rename(feature_name) else: stats = stats[target_col].agg(agg_func).rename(feature_name) joblib.dump(stats, cache_file) if group_col == 'did': df = df.merge(stats, left_on='did', right_index=True, how='left') else: df = df.merge(stats, left_on='vid', right_index=True, how='left') df[feature_name].fillna(0, inplace=True) return df # 计算用户特征 df = calc_stat_feature(df, hist_click, 'did', None, 'user_click_count') df = calc_stat_feature(df, hist_play, 'did', 'play_time', 'user_total_play', 'sum') # 计算物品特征 df = calc_stat_feature(df, hist_click, 'vid', None, 'video_click_count') df = calc_stat_feature(df, hist_play, 'vid', 'play_time', 'avg_play_time', 'mean') # 时间特征 if 'date' in df: df['day_of_week'] = pd.to_datetime(df['date']).dt.dayofweek.astype('int8') df['hour'] = pd.to_datetime(df['date']).dt.hour.astype('int8') # 释放内存 gc.collect() return df # 5. 
模型训练函数封装 def train_lgb_model(X, y, categorical_features, params, model_name="模型"): if X.empty or y.empty: print(f"⚠️ {model_name}训练数据为空") return None X_train, X_val, y_train, y_val = train_test_split( X, y, test_size=0.2, random_state=42, stratify=y if 'binary' in params['objective'] else None ) train_data = lgb.Dataset(X_train, label=y_train, categorical_feature=categorical_features, free_raw_data=False) val_data = lgb.Dataset(X_val, label=y_val, categorical_feature=categorical_features, free_raw_data=False) print(f"开始训练{model_name}...") model = lgb.train( params, train_data, num_boost_round=10000, valid_sets=[train_data, val_data], valid_names=['train', 'valid'], callbacks=[ early_stopping(stopping_rounds=100, verbose=True), log_evaluation(period=50), lgb.reset_parameter(learning_rate=lambda iter: params['learning_rate'] * (0.99 ** iter)) ] ) return model # 主程序优化 def main(): # 配置优化 #pd.set_option('future.no_silent_downcasting', True) gc.enable() # 核心数据类型 dtypes = {'did': 'category', 'vid': 'category', 'play_time': 'float32'} for i in range(88): dtypes[f'f{i}'] = 'float32' # 核心数据加载 print("高效加载核心数据...") did_features = load_data_safely('did_features_table.csv', dtype=dtypes) vid_info = load_data_safely('vid_info_table.csv', dtype=dtypes) # 历史数据加载 print("并行加载历史数据...") hist_exposure, hist_click, hist_play = load_historical_data(days=15) # 减少天数提高速度 # 点击模型训练 if not hist_exposure.empty and not hist_click.empty: print("构建点击数据集...") click_train_data = build_click_dataset(hist_exposure, hist_click, sample_ratio=0.05) # 降低采样率 print("构建点击特征...") click_train_data = add_click_features( click_train_data, did_features, vid_info, hist_click, hist_play ) # 获取分类特征 base_categorical = ['item_cid', 'item_type', 'item_assetSource', 'item_classify', 'item_isIntact', 'sid', 'stype', 'day_of_week', 'hour'] categorical_features = [c for c in base_categorical if c in click_train_data] # 训练模型 click_params = { 'objective': 'binary', 'metric': 'binary_logloss', 'boosting_type': 'gbdt', 'num_leaves': 127, 'learning_rate': 0.1, 'feature_fraction': 0.7, 'bagging_freq': 5, 'min_data_in_leaf': 100, 'verbosity': -1 } model_click = train_lgb_model( click_train_data.drop(columns=['label', 'did', 'vid', 'date'], errors='ignore'), click_train_data['label'], categorical_features, click_params, "点击预测模型" ) else: model_click = None # 完播率模型训练 if not hist_play.empty and not vid_info.empty: print("构建完播率数据集...") play_data = hist_play[['did', 'vid', 'play_time']].copy() play_data = play_data.merge( vid_info[['vid', 'item_duration']], on='vid', how='left' ) play_data['completion_rate'] = play_data['play_time'] / play_data['item_duration'] # 添加特征 play_data = add_click_features( play_data, did_features, vid_info, hist_click, hist_play ) # 训练模型 play_params = { 'objective': 'regression', 'metric': 'mae', 'boosting_type': 'gbdt', 'num_leaves': 63, 'learning_rate': 0.05, 'feature_fraction': 0.7, 'bagging_freq': 5, 'min_data_in_leaf': 50, 'verbosity': -1 } model_play = train_lgb_model( play_data.drop(columns=['completion_rate', 'did', 'vid', 'play_time', 'item_duration'], errors='ignore'), play_data['completion_rate'], categorical_features, play_params, "完播率预测模型" ) else: model_play = None # 保存模型 for model, name in zip([model_click, model_play], ['click_model.txt', 'play_model.txt']): if model: model.save_model(name) joblib.dump(base_categorical, 'categorical_features.pkl') # 测试预测 print("加载预测数据...") test_users = load_data_safely('testA_pred_did.csv', dtype={'did': 'category'}) test_exposure = load_data_safely('testA_did_show.csv', dtype={'did': 
'category', 'vid': 'category'}) if not test_users.empty and not test_exposure.empty: print("生成测试预测...") # 预测逻辑保持不变... else: print("⚠️ 预测数据加载失败") if __name__ == "__main__": main()
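
上面 build_click_dataset 里用 pybloom_live 布隆过滤器配合 iterrows 逐行判断负样本,行级 Python 循环在千万级曝光数据上会成为主要瓶颈。下面给出一个思路等价的向量化草稿(基于 merge 的反连接),假设 hist_exposure、hist_click 都只需要 did、vid 两列;这只是替代写法的示意,并非原方案本身:

import pandas as pd

def build_click_dataset_vectorized(hist_exposure, hist_click, sample_ratio=0.1, seed=42):
    # 正样本:出现过点击的 (did, vid)
    pos = hist_click[['did', 'vid']].drop_duplicates().copy()
    pos['label'] = 1

    # 反连接:曝光中去掉已点击的组合,得到候选负样本
    merged = hist_exposure[['did', 'vid']].merge(
        pos[['did', 'vid']], on=['did', 'vid'], how='left', indicator=True
    )
    neg = merged.loc[merged['_merge'] == 'left_only', ['did', 'vid']].copy()

    # 按比例随机抽取负样本
    if 0 < sample_ratio < 1.0:
        neg = neg.sample(frac=sample_ratio, random_state=seed)
    neg['label'] = 0

    return pd.concat([pos, neg], ignore_index=True)

整个负采样只剩一次 merge 加一次 sample,不再需要构建布隆过滤器,也避免了逐行哈希判断。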

帮我检查代码,如有可以加速计算可以优化:import pandas as pd import numpy as np import lightgbm as lgb from lightgbm import early_stopping, log_evaluation import gc import os import chardet from sklearn.model_selection import train_test_split from tqdm import tqdm import joblib from datetime import datetime from sklearn.metrics import roc_auc_score, mean_absolute_error # 全局变量存储特征列表 click_features = [] play_features = [] base_categorical_features = [] # 优化内存使用:减小chunksize,添加更多内存清理 def load_data_safely(file_path, usecols=None, dtype=None, chunksize=50000): """安全加载大型CSV文件,优化内存使用""" try: if not os.path.exists(file_path): print(f"⚠️ 文件不存在: {file_path}") return pd.DataFrame() # 自动检测编码 with open(file_path, 'rb') as f: result = chardet.detect(f.read(100000)) encoding = result['encoding'] if result['confidence'] > 0.7 else 'latin1' # 分批读取并优化内存 - 减小chunksize以降低内存峰值 chunks = [] reader = pd.read_csv( file_path, encoding=encoding, usecols=usecols, dtype=dtype, chunksize=chunksize, low_memory=False ) for chunk in tqdm(reader, desc=f"加载 {os.path.basename(file_path)}"): # 优化分类列内存 if dtype: # 确保dtype不为空 for col in chunk.columns: if col in dtype and dtype[col] == 'category': chunk[col] = chunk[col].astype('category').cat.as_ordered() chunks.append(chunk) if chunks: result_df = pd.concat(chunks, ignore_index=True) del chunks gc.collect() return result_df return pd.DataFrame() except Exception as e: print(f"⚠️ 加载 {file_path} 失败: {str(e)}") return pd.DataFrame() # 修复播放数据加载问题 def load_historical_data(days=30): """高效加载历史数据,支持分批处理""" see_list, click_list, play_list = [], [], [] for day in tqdm(range(1, days + 1), desc="加载历史数据"): day_str = f"{day:02d}" # 加载曝光数据 see_path = f'see_{day_str}.csv' if os.path.exists(see_path): see = load_data_safely(see_path, usecols=['did', 'vid'], dtype={'did': 'category', 'vid': 'category'}) if not see.empty and 'did' in see.columns and 'vid' in see.columns: see_list.append(see) del see gc.collect() else: print(f"⚠️ 曝光数据文件不存在: {see_path}") # 加载点击数据 click_path = f'click_{day_str}.csv' if os.path.exists(click_path): click = load_data_safely(click_path, usecols=['did', 'vid'], dtype={'did': 'category', 'vid': 'category'}) if not click.empty and 'did' in click.columns and 'vid' in click.columns: click_list.append(click[['did', 'vid']]) del click gc.collect() else: print(f"⚠️ 点击数据文件不存在: {click_path}") # 修复播放数据加载问题 - 尝试多种可能的文件名格式 play_paths = [ f'playplus_{day_str}.csv', # 原始文件名 f'play_{day_str}.csv', # 可能的其他格式 f'playplus_{day}.csv', # 无前导零 f'play_{day}.csv' # 无前导零 ] play_loaded = False for play_path in play_paths: if os.path.exists(play_path): play = load_data_safely( play_path, usecols=['did', 'vid', 'play_time'], dtype={'did': 'category', 'vid': 'category'} ) if not play.empty and 'play_time' in play.columns and 'did' in play.columns and 'vid' in play.columns: play_list.append(play) del play play_loaded = True print(f"✅ 成功加载播放数据: {play_path}") break if not play_loaded: print(f"⚠️ 播放数据文件不存在: 尝试了 {play_paths}") # 每处理3天数据清理一次内存 if day % 3 == 0: gc.collect() # 确保返回三个DataFrame,即使某些为空 return ( pd.concat(see_list).drop_duplicates(['did', 'vid']) if see_list else pd.DataFrame(), pd.concat(click_list).drop_duplicates(['did', 'vid']) if click_list else pd.DataFrame(), pd.concat(play_list).drop_duplicates(['did', 'vid']) if play_list else pd.DataFrame() ) # 优化内存:使用更高效的方法处理负样本 def build_click_dataset(hist_exposure, hist_click, sample_ratio=0.1): """构建点击数据集,包含负样本采样""" if hist_exposure.empty or hist_click.empty: print("⚠️ 历史曝光或点击数据为空,无法构建数据集") return pd.DataFrame() # 标记正样本 hist_click = hist_click.copy() 
hist_click['label'] = 1 # 高效标记负样本(使用索引加速操作) exposure_index = hist_exposure.set_index(['did', 'vid']).index click_index = hist_click.set_index(['did', 'vid']).index # 找出未点击的曝光 negative_index = exposure_index.difference(click_index) # 创建负样本DataFrame if not negative_index.empty: negative_samples = pd.DataFrame( list(negative_index), columns=['did', 'vid'] ) negative_samples['label'] = 0 # 采样负样本 if sample_ratio < 1.0: negative_samples = negative_samples.sample(frac=sample_ratio, random_state=42) else: negative_samples = pd.DataFrame(columns=['did', 'vid', 'label']) # 合并数据集 click_data = pd.concat([ hist_click[['did', 'vid', 'label']], negative_samples ], ignore_index=True) # 释放内存 del exposure_index, click_index, negative_index, negative_samples gc.collect() return click_data # 优化内存:减少合并操作,使用更高效的特征添加方法 def add_click_features(df, did_features, vid_info, hist_click, hist_play): """添加关键特征,避免内存溢出""" if df.empty: return df # 基础特征(使用索引加速合并) if not did_features.empty and 'did' in did_features.columns: # 只选择需要的列 did_cols = ['did'] + [col for col in did_features.columns if col.startswith('f')] df = df.merge(did_features[did_cols], on='did', how='left') if not vid_info.empty and 'vid' in vid_info.columns: # 只选择分类特征 vid_cols = ['vid'] + [col for col in vid_info.columns if col in ['item_cid', 'item_type', 'item_assetSource', 'item_classify', 'item_isIntact']] df = df.merge(vid_info[vid_cols], on='vid', how='left') # 确保始终创建 'user_click_count' 列 df['user_click_count'] = 0 # 用户行为统计 - 使用预计算的统计量 if not hist_click.empty and 'did' in hist_click.columns: # 计算用户点击次数 user_click_count = hist_click.groupby('did').size().rename('user_click_count') # 直接添加到df,避免创建中间变量 df = df.join(user_click_count, on='did', how='left') # 填充缺失值 df['user_click_count'] = df['user_click_count'].fillna(0) # 确保始终创建 'user_total_play' 列 df['user_total_play'] = 0 if not hist_play.empty and 'did' in hist_play.columns and 'play_time' in hist_play.columns: # 计算用户总播放时间 user_total_play = hist_play.groupby('did')['play_time'].sum().rename('user_total_play') df = df.join(user_total_play, on='did', how='left') # 填充缺失值 df['user_total_play'] = df['user_total_play'].fillna(0) # 确保始终创建 'video_click_count' 列 df['video_click_count'] = 0 # 视频热度统计 if not hist_click.empty and 'vid' in hist_click.columns: # 计算视频点击次数 video_click_count = hist_click.groupby('vid').size().rename('video_click_count') df = df.join(video_click_count, on='vid', how='left') # 填充缺失值 df['video_click_count'] = df['video_click_count'].fillna(0) # 确保始终创建 'avg_play_time' 列 df['avg_play_time'] = 0 if not hist_play.empty and 'vid' in hist_play.columns and 'play_time' in hist_play.columns: # 计算平均播放时间 avg_play_time = hist_play.groupby('vid')['play_time'].mean().rename('avg_play_time') df = df.join(avg_play_time, on='vid', how='left') # 填充缺失值 df['avg_play_time'] = df['avg_play_time'].fillna(0) # 填充缺失值 fill_values = { 'user_click_count': 0, 'user_total_play': 0, 'video_click_count': df['video_click_count'].median() if 'video_click_count' in df else 0, 'avg_play_time': df['avg_play_time'].median() if 'avg_play_time' in df else 0 } for col, value in fill_values.items(): if col in df: df[col] = df[col].fillna(value) # 移除日期相关特征 if 'date' in df: df = df.drop(columns=['date'], errors='ignore') return df # 优化内存:使用更小的数据类型 def get_categorical_features(df, base_features): """动态获取存在的分类特征""" existing_features = [] for feature in base_features: if feature in df.columns: try: # 尝试转换为数值,如果是数值则跳过 pd.to_numeric(df[feature], errors='raise') except: existing_features.append(feature) # 确保转换为category类型 df[feature] = 
df[feature].astype('category').cat.as_ordered() return existing_features # 优化内存:减少中间变量,使用transform避免创建大型临时DataFrame def build_play_dataset(hist_play, vid_info, did_features, hist_click): """构建完播率数据集,优化内存使用""" if hist_play.empty: print("⚠️ 历史播放数据为空,无法构建完播率数据集") return pd.DataFrame() # 基础数据 - 只选择需要的列 play_data = hist_play[['did', 'vid', 'play_time']].copy() # 添加视频时长信息 if not vid_info.empty and 'vid' in vid_info.columns and 'item_duration' in vid_info.columns: play_data = play_data.merge( vid_info[['vid', 'item_duration']], on='vid', how='left' ) else: play_data['item_duration'] = 1.0 # 默认值 # 计算完播率 play_data['completion_rate'] = play_data['play_time'] / play_data['item_duration'] play_data['completion_rate'] = play_data['completion_rate'].clip(upper=1.0) # 添加用户特征 - 只选择数值特征 if not did_features.empty and 'did' in did_features.columns: did_cols = ['did'] + [col for col in did_features.columns if col.startswith('f')] play_data = play_data.merge( did_features[did_cols], on='did', how='left' ) # 添加视频特征 - 只选择分类特征 if not vid_info.empty and 'vid' in vid_info.columns: vid_cols = ['vid'] + [col for col in vid_info.columns if col in ['item_cid', 'item_type', 'item_assetSource', 'item_classify', 'item_isIntact']] play_data = play_data.merge( vid_info[vid_cols], on='vid', how='left' ) # 用户平均完播率 - 使用transform避免创建大型临时DataFrame play_data['user_avg_completion'] = play_data.groupby('did')['completion_rate'].transform('mean') play_data['user_play_count'] = play_data.groupby('did')['completion_rate'].transform('count') # 视频平均完播率 play_data['video_avg_completion'] = play_data.groupby('vid')['completion_rate'].transform('mean') play_data['video_completion_std'] = play_data.groupby('vid')['completion_rate'].transform('std') # 用户-视频互动特征 if not hist_click.empty and 'did' in hist_click.columns and 'vid' in hist_click.columns: # 使用transform避免创建大型临时DataFrame user_vid_clicks = hist_click.groupby(['did', 'vid']).size().reset_index(name='user_vid_clicks') play_data = play_data.merge(user_vid_clicks, on=['did', 'vid'], how='left') else: play_data['user_vid_clicks'] = 0 # 添加交互特征 - 确保训练和预测时特征一致 play_data['interaction_feature'] = (play_data['user_click_count'] * play_data['video_click_count']).astype('float32') # 填充缺失值 play_data['user_avg_completion'].fillna(play_data['completion_rate'].mean(), inplace=True) play_data['user_play_count'].fillna(1, inplace=True) play_data['video_avg_completion'].fillna(play_data['completion_rate'].median(), inplace=True) play_data['video_completion_std'].fillna(0, inplace=True) play_data['user_vid_clicks'].fillna(0, inplace=True) return play_data # 优化内存:分批预测,避免一次性加载所有测试数据 def predict_for_test_data(test_users, test_exposure, did_features, vid_info): """为测试数据生成预测结果 - 确保结果行数与test_users一致""" if test_users.empty: print("⚠️ 测试用户数据为空,无法进行预测") return pd.DataFrame() # 确保每个测试用户都有记录 if test_exposure.empty: # 如果没有曝光数据,使用默认视频 print("⚠️ 测试曝光数据为空,使用默认视频") test_data = test_users.copy() test_data['vid'] = vid_info['vid'].iloc[0] if not vid_info.empty else 'default_vid' else: # 合并测试数据,确保包含所有测试用户 test_data = test_users.merge(test_exposure, on='did', how='left') # 处理可能缺失的vid most_common_vid = test_exposure['vid'].mode()[0] if not test_exposure.empty else 'default_vid' test_data['vid'] = test_data['vid'].fillna(most_common_vid) # 分批处理测试数据以避免内存溢出 chunk_size = 50000 # 每批处理5万行 results = [] for i in tqdm(range(0, len(test_data), chunk_size), desc="分批预测"): chunk = test_data.iloc[i:i+chunk_size].copy() # 添加特征 chunk = add_click_features( chunk, did_features, vid_info, pd.DataFrame(), # 无历史点击 pd.DataFrame() # 无历史播放 ) # 
动态获取分类特征 test_categorical_features = get_categorical_features(chunk, base_categorical_features) # 预测点击率 X_chunk = chunk.drop(columns=['did', 'vid'], errors='ignore') click_probs = [] if model_click and not X_chunk.empty: # 确保特征数量一致 if len(X_chunk.columns) != len(click_features): print(f"⚠️ 点击模型特征数量不一致: 训练时 {len(click_features)}, 预测时 {len(X_chunk.columns)}") # 对齐特征 missing_features = set(click_features) - set(X_chunk.columns) extra_features = set(X_chunk.columns) - set(click_features) # 添加缺失特征 for feature in missing_features: X_chunk[feature] = 0 # 移除多余特征 X_chunk = X_chunk[click_features] click_probs = model_click.predict(X_chunk) else: click_probs = [0.5] * len(chunk) # 默认值 # 预测完播率 completion_rates = [] if model_play and not X_chunk.empty: # 添加视频时长信息 if not vid_info.empty and 'vid' in vid_info.columns and 'item_duration' in vid_info.columns: chunk = chunk.merge(vid_info[['vid', 'item_duration']], on='vid', how='left') else: chunk['item_duration'] = 1.0 # 添加交互特征 - 确保与训练时一致 chunk['interaction_feature'] = (chunk['user_click_count'] * chunk['video_click_count']).astype('float32') # 准备预测数据 X_play_chunk = chunk.drop(columns=['did', 'vid'], errors='ignore') # 确保特征数量一致 if len(X_play_chunk.columns) != len(play_features): print(f"⚠️ 完播率模型特征数量不一致: 训练时 {len(play_features)}, 预测时 {len(X_play_chunk.columns)}") # 对齐特征 missing_features = set(play_features) - set(X_play_chunk.columns) extra_features = set(X_play_chunk.columns) - set(play_features) # 添加缺失特征 for feature in missing_features: X_play_chunk[feature] = 0 # 移除多余特征 X_play_chunk = X_play_chunk[play_features] completion_rates = model_play.predict(X_play_chunk) else: completion_rates = [0.7] * len(chunk) # 默认值 # 存储预测结果 chunk['click_prob'] = click_probs chunk['completion_rate'] = completion_rates # 修改:确保每个did只有一行结果,选取点击概率最高的vid chunk_result = chunk.sort_values('click_prob', ascending=False).groupby('did').head(1) # 选择需要的列 chunk_result = chunk_result[['did', 'vid', 'completion_rate']].copy() results.append(chunk_result) # 清理内存 del chunk, X_chunk, click_probs, completion_rates, chunk_result gc.collect() # 合并所有批次结果 if results: result = pd.concat(results, ignore_index=True) else: result = pd.DataFrame(columns=['did', 'vid', 'completion_rate']) # 重命名列 result.columns = ['did', 'vid', 'predicted_completion_rate'] # 确保结果行数与测试用户一致 if len(result) != len(test_users): missing_dids = set(test_users['did']) - set(result['did']) print(f"⚠️ 警告: {len(missing_dids)} 个用户缺失预测结果,使用默认值填充") default_df = pd.DataFrame({ 'did': list(missing_dids), 'vid': most_common_vid, 'predicted_completion_rate': np.mean(result['predicted_completion_rate']) if not result.empty else 0.7 }) result = pd.concat([result, default_df], ignore_index=True) return result # 主程序流程 if __name__ == "__main__": # 定义内存优化的数据类型 dtypes = { 'did': 'category', 'vid': 'category', 'play_time': 'float32' } # 可选特征 - 只有在数据中存在时才添加 optional_features = { 'item_cid': 'category', 'item_type': 'category', 'item_assetSource': 'category', 'item_classify': 'category', 'item_isIntact': 'category', 'sid': 'category', 'stype': 'category' } # 添加特征字段 for i in range(88): dtypes[f'f{i}'] = 'float32' # 加载核心数据 - 分批加载 print("开始加载核心数据...") did_features = load_data_safely('did_features_table.csv', dtype=dtypes) vid_info = load_data_safely('vid_info_table.csv', dtype=dtypes) # 添加可选特征到dtypes(仅当列存在时) for feature, dtype in optional_features.items(): if not vid_info.empty and feature in vid_info.columns: dtypes[feature] = dtype # 重新加载数据以确保所有列使用正确的数据类型 if os.path.exists('did_features_table.csv'): did_features = 
load_data_safely('did_features_table.csv', dtype=dtypes) else: print("⚠️ did_features_table.csv 不存在") did_features = pd.DataFrame() if os.path.exists('vid_info_table.csv'): vid_info = load_data_safely('vid_info_table.csv', dtype=dtypes) else: print("⚠️ vid_info_table.csv 不存在") vid_info = pd.DataFrame() # 加载历史数据 - 确保所有变量都被定义 print("开始加载历史数据...") hist_exposure, hist_click, hist_play = load_historical_data(days=30) # 打印历史数据状态 print(f"历史曝光数据形状: {hist_exposure.shape if not hist_exposure.empty else '空'}") print(f"历史点击数据形状: {hist_click.shape if not hist_click.empty else '空'}") print(f"历史播放数据形状: {hist_play.shape if not hist_play.empty else '空'}") # 如果播放数据为空,尝试替代方案 if hist_play.empty: print("⚠️ 警告: 历史播放数据为空,尝试使用点击数据作为替代") # 使用点击数据作为播放数据的替代 hist_play = hist_click.copy() hist_play['play_time'] = 1.0 # 添加默认播放时间 print(f"使用替代播放数据形状: {hist_play.shape}") # 构建点击数据集 if not hist_exposure.empty and not hist_click.empty: print("构建点击数据集...") click_train_data = build_click_dataset(hist_exposure, hist_click, sample_ratio=0.1) else: print("⚠️ 无法构建点击数据集,因为历史曝光或点击数据为空") click_train_data = pd.DataFrame() # 添加特征 - 确保所有参数都已定义 if not click_train_data.empty: print("开始构建点击特征...") click_train_data = add_click_features( click_train_data, did_features, vid_info, hist_click, # 确保hist_click已定义 hist_play # 确保hist_play已定义 ) else: print("⚠️ 点击数据集为空,跳过特征构建") # 基础分类特征列表 - 移除日期相关特征 base_categorical_features = [ 'item_cid', 'item_type', 'item_assetSource', 'item_classify', 'item_isIntact', 'sid', 'stype' ] # 动态获取存在的分类特征 categorical_features = [] if not click_train_data.empty: categorical_features = get_categorical_features(click_train_data, base_categorical_features) print(f"使用的分类特征: {categorical_features}") else: print("⚠️ 点击训练数据为空,无法获取分类特征") # 准备训练数据 if not click_train_data.empty: # 移除所有日期相关字段 X = click_train_data.drop(columns=['did', 'vid', 'label'], errors='ignore') y = click_train_data['label'] else: X, y = pd.DataFrame(), pd.Series() print("⚠️ 点击训练数据为空") # 划分数据集 if not X.empty and not y.empty: X_train, X_val, y_train, y_val = train_test_split( X, y, test_size=0.2, random_state=42, stratify=y ) else: print("⚠️ 训练数据为空,无法进行模型训练") X_train, X_val, y_train, y_val = pd.DataFrame(), pd.DataFrame(), pd.Series(), pd.Series() # 训练模型(优化参数) params = { 'objective': 'binary', 'metric': 'binary_logloss', 'boosting_type': 'gbdt', 'num_leaves': 63, 'learning_rate': 0.05, 'feature_fraction': 0.8, 'bagging_fraction': 0.8, 'bagging_freq': 5, 'min_child_samples': 100, 'verbosity': -1, 'max_bin': 255 # 减少bin数量以降低内存 } model_click = None if not X_train.empty: train_data = lgb.Dataset(X_train, label=y_train, categorical_feature=categorical_features) val_data = lgb.Dataset(X_val, label=y_val, categorical_feature=categorical_features) print("开始训练点击预测模型...") model_click = lgb.train( params, train_data, num_boost_round=1500, valid_sets=[val_data], callbacks=[ early_stopping(stopping_rounds=100, verbose=True), log_evaluation(period=50) ] ) # 保存点击模型使用的特征 global click_features click_features = list(X_train.columns) joblib.dump(click_features, 'click_features.pkl') # 计算并输出AUC if not X_val.empty and not y_val.empty and model_click: y_val_pred = model_click.predict(X_val) auc_score = roc_auc_score(y_val, y_val_pred) print(f"📊 点击率模型在验证集上的AUC: {auc_score:.6f}") with open('model_metrics.txt', 'w') as f: f.write(f"点击率模型AUC: {auc_score:.6f}\n") # 清理内存 del X_train, X_val, y_train, y_val, train_data, val_data gc.collect() else: print("⚠️ 训练数据为空,跳过点击预测模型训练") # 构建完播率数据集 print("开始构建完播率数据集...") if not hist_play.empty: play_train_data = build_play_dataset(hist_play, 
vid_info, did_features, hist_click) else: print("⚠️ 无法构建完播率数据集,因为播放数据为空") play_train_data = pd.DataFrame() # 训练完播率模型 model_play = None if not play_train_data.empty: X_play = play_train_data.drop(columns=['did', 'vid', 'play_time', 'item_duration', 'completion_rate'], errors='ignore') y_play = play_train_data['completion_rate'] else: X_play, y_play = pd.DataFrame(), pd.Series() print("⚠️ 完播率训练数据为空") if not X_play.empty and not y_play.empty: X_train_play, X_val_play, y_train_play, y_val_play = train_test_split( X_play, y_play, test_size=0.2, random_state=42 ) else: print("⚠️ 完播率训练数据为空,无法进行模型训练") X_train_play, X_val_play, y_train_play, y_val_play = pd.DataFrame(), pd.DataFrame(), pd.Series(), pd.Series() # 获取完播率模型的分类特征 play_categorical_features = [] if not play_train_data.empty: play_categorical_features = get_categorical_features(play_train_data, base_categorical_features) print(f"完播率模型使用的分类特征: {play_categorical_features}") else: print("⚠️ 完播率训练数据为空,无法获取分类特征") # 训练参数 - 优化内存使用 params_reg = { 'objective': 'regression', 'metric': 'mae', 'boosting_type': 'gbdt', 'num_leaves': 63, # 减少树复杂度 'learning_rate': 0.03, 'feature_fraction': 0.7, 'bagging_fraction': 0.7, 'bagging_freq': 5, 'lambda_l1': 0.1, 'lambda_l2': 0.1, 'min_data_in_leaf': 100, 'verbosity': -1, 'max_bin': 255 # 减少bin数量以降低内存 } if not X_train_play.empty: train_data_play = lgb.Dataset(X_train_play, label=y_train_play, categorical_feature=play_categorical_features) val_data_play = lgb.Dataset(X_val_play, label=y_val_play, categorical_feature=play_categorical_features) print("开始训练完播率模型...") model_play = lgb.train( params_reg, train_data_play, num_boost_round=1000, # 减少迭代次数 valid_sets=[val_data_play], callbacks=[ early_stopping(stopping_rounds=100, verbose=True), log_evaluation(period=50) ] ) # 保存完播率模型使用的特征 global play_features play_features = list(X_train_play.columns) joblib.dump(play_features, 'play_features.pkl') # 评估模型 y_pred_val = model_play.predict(X_val_play) mae = mean_absolute_error(y_val_play, y_pred_val) print(f"📊 完播率模型在验证集上的MAE: {mae:.6f}") with open('model_metrics.txt', 'a') as f: f.write(f"完播率模型MAE: {mae:.6f}\n") # 清理内存 del X_train_play, X_val_play, y_train_play, y_val_play, train_data_play, val_data_play gc.collect() else: print("⚠️ 训练数据为空,跳过完播率模型训练") # 保存模型 if model_click: model_click.save_model('click_model.txt') if model_play: model_play.save_model('play_model.txt') joblib.dump(base_categorical_features, 'categorical_features.pkl') # 如果是从文件加载模型,需要加载特征列表 if not model_click: try: model_click = lgb.Booster(model_file='click_model.txt') click_features = joblib.load('click_features.pkl') print("✅ 从文件加载点击模型和特征") except: print("⚠️ 无法加载点击模型") if not model_play: try: model_play = lgb.Booster(model_file='play_model.txt') play_features = joblib.load('play_features.pkl') print("✅ 从文件加载完播率模型和特征") except: print("⚠️ 无法加载完播率模型") # 加载预测数据 print("开始加载预测数据...") to_predict_users = load_data_safely('testA_pred_did.csv', dtype={'did': 'category'}) to_predict_exposure = load_data_safely('testA_did_show.csv', dtype={'did': 'category', 'vid': 'category'}) # 执行预测 if not to_predict_users.empty: print("开始生成预测结果...") submission = predict_for_test_data(to_predict_users, to_predict_exposure, did_features, vid_info) # 验证行数一致性 if len(submission) != len(to_predict_users): print(f"⚠️ 行数不一致: 预测结果 {len(submission)} 行, 测试用户 {len(to_predict_users)} 行") # 处理缺失的DID missing_dids = set(to_predict_users['did']) - set(submission['did']) if missing_dids: print(f"添加缺失的 {len(missing_dids)} 个用户") default_vid = vid_info['vid'].iloc[0] if not vid_info.empty else 
'default_vid' missing_df = pd.DataFrame({ 'did': list(missing_dids), 'vid': default_vid, 'predicted_completion_rate': submission['predicted_completion_rate'].mean() }) submission = pd.concat([submission, missing_df], ignore_index=True) # 保存结果 if not submission.empty: print(f"预测结果行数: {len(submission)} (应与测试用户行数一致)") timestamp = datetime.now().strftime("%Y%m%d_%H%M%S") output_file = f'submission_{timestamp}.csv' # 保存为无表头CSV submission.to_csv(output_file, index=False, header=False) print(f"预测结果已保存至: {output_file}") print(f"结果格式: 共 {len(submission)} 行") print(f"列信息: [did, vid, predicted_completion_rate]") # 验证结果分布 print(f"完播率预测值分布: min={submission['predicted_completion_rate'].min():.4f}, " f"max={submission['predicted_completion_rate'].max():.4f}, " f"mean={submission['predicted_completion_rate'].mean():.4f}") else: print("⚠️ 预测结果为空,未保存文件") else: print("⚠️ 预测数据加载失败,无法生成结果")
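
预测阶段"先补缺失特征、再删多余特征"的两段循环可以合并为一次 reindex,同时保证列顺序与训练时一致(LightGBM 预测按列位置取特征,顺序不同可能悄悄导致特征错位)。下面是一个小的辅助函数草稿,align_features 这个名字是为说明而取的,click_features / play_features 即上文保存的训练特征列表:

def align_features(X, trained_features):
    """把预测特征对齐到训练特征:缺失列补 0,多余列丢弃,并保持训练时的列顺序。"""
    return X.reindex(columns=list(trained_features), fill_value=0)

# 用法示意,替换原来对 missing_features / extra_features 的逐个处理:
# X_chunk = align_features(X_chunk, click_features)
# X_play_chunk = align_features(X_play_chunk, play_features)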

请按照上面的方法,帮我补充并优化代码:import pandas as pd import numpy as np import xgboost as xgb import lightgbm as lgb from sklearn.model_selection import train_test_split from sklearn.metrics import roc_auc_score from sklearn.cluster import MiniBatchKMeans from sklearn.preprocessing import StandardScaler from sklearn.impute import SimpleImputer from sklearn.pipeline import make_pipeline import chardet import gc import joblib import dask.dataframe as dd from dask.diagnostics import ProgressBar from datetime import datetime import warnings warnings.filterwarnings('ignore') # 配置Dask进度条显示 ProgressBar().register() def detect_encoding(file_path): """自动检测文件编码""" with open(file_path, 'rb') as f: result = chardet.detect(f.read(10000)) return result['encoding'], result['confidence'] def load_data_with_dask(days=32): """使用Dask加载和处理大规模数据""" see_dfs, click_dfs, play_dfs = [], [], [] # 并行加载32天数据 for i in range(1, days + 1): day = f"{i:02d}" # 加载曝光数据 see = dd.read_csv( f'see_{day}.csv', dtype={'did': 'str', 'vid': 'str'}, blocksize='128MB' ) see = see.assign(day=day) see_dfs.append(see) # 加载点击数据 click = dd.read_csv( f'click_{day}.csv', dtype={'did': 'str', 'vid': 'str', 'item_cid': 'str'}, blocksize='128MB' ) click = click[['did', 'vid', 'click_time']] click_dfs.append(click) # 加载播放数据 play = dd.read_csv( f'playplus_{day}.csv', dtype={'did': 'str', 'vid': 'str', 'item_cid': 'str'}, blocksize='128MB' ) play = play[['did', 'vid', 'play_time']] play_dfs.append(play) # 合并所有数据 all_see = dd.concat(see_dfs).drop_duplicates(['did', 'vid']) all_click = dd.concat(click_dfs).drop_duplicates(['did', 'vid']) all_play = dd.concat(play_dfs) # 计算基本统计数据 total_users = all_see['did'].nunique().compute() total_videos = all_see['vid'].nunique().compute() print(f"Total unique users: {total_users}, Total unique videos: {total_videos}") return all_see, all_click, all_play def prepare_user_features(all_see, all_click, all_play, video_info): """为有记录的用户准备特征""" print("Preparing user behavior features for users with history...") # 计算用户曝光统计 user_exposure = all_see.groupby('did').size().rename('user_exposure_count').compute().astype('int32') # 计算用户点击统计 user_click = all_click.groupby('did').size().rename('user_click_count').compute().astype('int32') # 计算用户播放时长 user_play = all_play.groupby('did')['play_time'].sum().rename('total_play_time').compute().astype('float32') # 合并用户行为特征 user_features = pd.concat([user_exposure, user_click, user_play], axis=1).fillna(0) user_features['user_ctr'] = user_features['user_click_count'] / (user_features['user_exposure_count'] + 1e-6) user_features['avg_play_time'] = user_features['total_play_time'] / (user_features['user_click_count'] + 1e-6) # 添加用户活跃天数 active_days = all_see.groupby('did')['day'].nunique().compute().rename('active_days').astype('int8') user_features = user_features.merge(active_days, left_index=True, right_index=True, how='left').fillna(0) return user_features.reset_index() def prepare_video_features(all_see, all_click, all_play, video_info): """准备视频特征""" print("Preparing video popularity features...") # 计算视频曝光 video_exposure = all_see.groupby('vid').size().rename('video_exposure_count').compute().astype('int32') # 计算视频点击 video_click = all_click.groupby('vid').size().rename('video_click_count').compute().astype('int32') # 计算视频播放时长 video_play = all_play.groupby('vid')['play_time'].sum().rename('total_play_time').compute().astype('float32') # 合并视频特征 video_features = pd.concat([video_exposure, video_click, video_play], axis=1).fillna(0) video_features['video_ctr'] = video_features['video_click_count'] / 
(video_features['video_exposure_count'] + 1e-6) video_features['avg_play_time'] = video_features['total_play_time'] / (video_features['video_click_count'] + 1e-6) # 合并视频元数据 video_features = video_features.merge(video_info, left_index=True, right_on='vid', how='left') # 类别特征编码 for cat_col in ['item_type', 'item_assetSource', 'item_classify']: video_features[cat_col] = video_features[cat_col].astype('category') return video_features def prepare_cold_start_cluster(user_features_table, history_users): """为冷启动用户准备聚类模型""" print("Preparing clustering model for cold-start users...") # 只使用有记录的用户进行聚类训练 trained_users = history_users['did'].tolist() user_features_table['has_history'] = user_features_table['did'].isin(trained_users) # 提取有历史记录用户的特征 trained_user_features = user_features_table[user_features_table['has_history']] feature_cols = [f'f{i}' for i in range(0, 87)] X = trained_user_features[feature_cols].values # 使用MiniBatchKMeans处理大数据 pipe = make_pipeline( SimpleImputer(strategy='mean'), StandardScaler(), MiniBatchKMeans(n_clusters=100, batch_size=5000, n_init=3) ) # 训练聚类模型 cluster_model = pipe.fit(X) trained_user_features['cluster'] = cluster_model.labels_ # 保存模型 joblib.dump(cluster_model, 'cold_start_cluster_model.pkl') return cluster_model def prepare_samples(all_see, all_click, all_play, user_features_table): """准备训练样本,区分有记录和无记录用户""" print("Preparing training samples...") # 加载视频元数据 video_info = pd.read_csv('vid_info_table.csv', encoding='gbk', dtype={'vid': 'str'}) # 准备用户和视频特征 user_behavior_features = prepare_user_features(all_see, all_click, all_play, video_info) video_features = prepare_video_features(all_see, all_click, all_play, video_info) # 标记有历史记录的用户 history_users = all_see['did'].unique().compute().to_frame(name='did') user_features_table['has_history'] = user_features_table['did'].isin(history_users['did']) # 准备冷启动聚类模型 cluster_model = prepare_cold_start_cluster(user_features_table, history_users) # 为有记录用户准备训练样本 train_samples = dd.merge(all_see, all_click, on=['did', 'vid'], how='left', suffixes=('', '_click')) train_samples = dd.merge(train_samples, all_play, on=['did', 'vid'], how='left') train_samples = dd.merge(train_samples, user_behavior_features, on='did', how='left') train_samples = dd.merge(train_samples, video_features, on='vid', how='left') # 创建标签(点击为1,否则为0) train_samples['label'] = (~train_samples['click_time'].isnull()).astype('int8') # 优化内存使用 train_samples = train_samples.compute() for col in train_samples.select_dtypes(include='float64').columns: train_samples[col] = train_samples[col].astype('float32') print(f"Training samples shape: {train_samples.shape}") return train_samples, cluster_model, video_features def train_behavior_model(samples, feature_columns): """训练有记录用户的行为预测模型""" print("Training behavior prediction model...") # 准备特征和标签 X = samples[feature_columns] y = samples['label'] # 划分训练验证集(时间序列分割) days = samples['day'].unique() train_days = days[:-3] # 前29天用于训练 test_days = days[-3:] # 最后3天用于验证 X_train = samples[samples['day'].isin(train_days)][feature_columns] y_train = samples[samples['day'].isin(train_days)]['label'] X_val = samples[samples['day'].isin(test_days)][feature_columns] y_val = samples[samples['day'].isin(test_days)]['label'] # LightGBM参数设置 params = { 'boosting_type': 'gbdt', 'objective': 'binary', 'metric': 'auc', 'learning_rate': 0.05, 'num_leaves': 63, 'feature_fraction': 0.8, 'bagging_fraction': 0.8, 'bagging_freq': 5, 'verbose': -1, 'seed': 42, 'max_depth': 7, 'min_child_samples': 500, 'n_jobs': 8 } # 训练模型 behavior_model = lgb.train( params, 
lgb.Dataset(X_train, label=y_train), num_boost_round=1000, valid_sets=[lgb.Dataset(X_val, label=y_val)], callbacks=[ lgb.early_stopping(stopping_rounds=30, verbose=False), lgb.log_evaluation(period=50) ] ) # 保存模型 behavior_model.save_model('behavior_model.txt') return behavior_model def predict_behavior(model, test_data, feature_columns): """预测有记录用户的行为""" print("Predicting behavior for users with history...") # 准备特征矩阵 X = test_data[feature_columns] # 预测点击概率 preds = model.predict(X) return preds def predict_cold_start(cluster_model, video_features, user_features_table): """预测冷启动用户的偏好""" print("Predicting preferences for cold-start users...") # 获取冷启动用户 cold_start_users = user_features_table[~user_features_table['has_history']] feature_cols = [f'f{i}' for i in range(0, 87)] # 预测用户所属聚类 X = cold_start_users[feature_cols].values cold_start_users['cluster'] = cluster_model.predict(X) # 加载热门视频(每个聚类Top 50视频) cluster_top_videos = joblib.load('cluster_top_videos.pkl') # 为每个用户生成推荐 cold_start_users['recommended_vid'] = cold_start_users['cluster'].map( lambda c: cluster_top_videos.get(c, []).copy() ) # 对推荐列表进行截断(每个用户最多100个推荐) cold_start_users['recommended_vid'] = cold_start_users['recommended_vid'].apply( lambda lst: lst[:min(100, len(lst))] ) return cold_start_users[['did', 'recommended_vid']] def save_cluster_top_videos(video_features, cluster_model, behavior_data): """保存每个聚类的热门视频""" print("Saving top videos for each cluster...") # 获取每个聚类的热门视频(基于播放时长和点击率) video_cluster_score = video_features[['vid', 'video_ctr', 'avg_play_time']].copy() video_cluster_score['popularity_score'] = (video_cluster_score['video_ctr'] * video_cluster_score['avg_play_time'] * 1000) # 获取训练数据中的聚类分配 cluster_model = joblib.load('cold_start_cluster_model.pkl') behavior_data['cluster'] = cluster_model.predict(behavior_data.iloc[:, 5:92]) # 统计每个聚类的视频偏好 cluster_video_pref = behavior_data.groupby(['cluster', 'vid'])['play_time'].sum().reset_index() cluster_video_pref = cluster_video_pref.merge(video_cluster_score, on='vid') # 为每个聚类计算Top视频 cluster_top_videos = {} for cluster_id in behavior_data['cluster'].unique(): cluster_vids = cluster_video_pref[cluster_video_pref['cluster'] == cluster_id] top_vids = cluster_vids.sort_values('popularity_score', ascending=False)['vid'].head(100).tolist() cluster_top_videos[cluster_id] = top_vids # 保存聚类视频偏好 joblib.dump(cluster_top_videos, 'cluster_top_videos.pkl') return cluster_top_videos def main(): """主执行流程""" # 1. 自动检测编码 encoding, confidence = detect_encoding('see_01.csv') print(f"Detected encoding: {encoding} (confidence: {confidence:.2f})") # 2. 加载基础数据 print("Loading base data...") all_see, all_click, all_play = load_data_with_dask(days=32) # 3. 加载用户特征表 user_features_table = pd.read_csv('did_features_table.csv', encoding='gbk', dtype={'did': 'str'}) # 4. 准备样本和聚类模型 train_samples, cluster_model, video_features = prepare_samples(all_see, all_click, all_play, user_features_table) # 5. 保存聚类热门视频 save_cluster_top_videos(video_features, cluster_model, train_samples) # 6. 定义模型特征列 feature_columns = [ 'user_exposure_count', 'user_click_count', 'user_ctr', 'video_exposure_count', 'video_click_count', 'video_ctr', 'item_duration', 'item_serialno', 'item_classify', 'item_type', 'item_assetSource' ] # 7. 训练行为预测模型 behavior_model = train_behavior_model(train_samples, feature_columns) # 8. 加载测试数据 print("Loading test data...") test_data = dd.read_csv('testA_did_show.csv', dtype={'did': 'str', 'vid': 'str'}) test_data = test_data.compute() # 9. 
合并用户历史状态 history_users = train_samples['did'].unique() test_data['has_history'] = test_data['did'].isin(history_users) # 10. 对于有历史记录的用户 - 使用模型预测 if test_data['has_history'].any(): print("Processing users with history...") history_users_test = test_data[test_data['has_history']].copy() # 合并特征 history_users_test = history_users_test.merge( user_features_table, on='did', how='left', suffixes=('', '_feat') ) history_users_test = history_users_test.merge( video_features, on='vid', how='left' ) # 预测点击概率 history_preds = predict_behavior(behavior_model, history_users_test, feature_columns) history_users_test['click_prob'] = history_preds # 保存结果 history_users_test[['did', 'vid', 'click_prob']].to_csv('history_user_predictions.csv', index=False) # 11. 对于无历史记录的用户 - 使用聚类推荐 if not test_data['has_history'].all(): print("Processing cold-start users...") cold_start_preds = predict_cold_start(cluster_model, video_features, user_features_table) cold_start_preds.to_csv('cold_start_predictions.csv', index=False) print("Prediction completed!") if __name__ == '__main__': main()
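
冷启动聚类部分有一处需要注意:pipe.fit(X) 返回的是 Pipeline 对象,Pipeline 本身没有 labels_ 属性,取训练样本的簇标签应当从管道最后一步的 MiniBatchKMeans 上取,或直接用整条管道 predict。下面是一个可独立运行的最小草稿,演示这一用法;其中样本数、特征维度和 n_clusters 均为示例取值,与原数据无关:

import numpy as np
from sklearn.pipeline import make_pipeline
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import StandardScaler
from sklearn.cluster import MiniBatchKMeans

rng = np.random.default_rng(42)
X_hist = rng.normal(size=(10_000, 87)).astype('float32')   # 有历史记录用户的特征(示例数据)
X_cold = rng.normal(size=(2_000, 87)).astype('float32')    # 冷启动用户的特征(示例数据)

pipe = make_pipeline(
    SimpleImputer(strategy='mean'),
    StandardScaler(),
    MiniBatchKMeans(n_clusters=100, batch_size=5000, n_init=3, random_state=42),
)
pipe.fit(X_hist)

hist_clusters = pipe[-1].labels_        # 训练集簇标签:从管道最后一步的 KMeans 上取
cold_clusters = pipe.predict(X_cold)    # 冷启动用户:整条管道 predict,会先补缺失、再标准化
print(hist_clusters[:5], cold_clusters[:5])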

train() got an unexpected keyword argument 'early_stopping_rounds'报错:请修复下列代码并完整输出:import pandas as pd import numpy as np import lightgbm as lgb from lightgbm import early_stopping, log_evaluation import gc import os import chardet from sklearn.model_selection import train_test_split from sklearn.metrics import accuracy_score, mean_absolute_error from tqdm import tqdm # 添加进度条 # 步骤1:数据读取与预处理(优化内存管理) def load_data_safely(file_path, usecols=None, dtype=None): """安全加载大型CSV文件""" try: # 自动检测编码 with open(file_path, 'rb') as f: encoding = chardet.detect(f.read(10000))['encoding'] encoding = encoding if encoding else 'latin1' # 分批读取 chunks = [] for chunk in tqdm(pd.read_csv(file_path, encoding=encoding, usecols=usecols, dtype=dtype, chunksize=100000, low_memory=False), desc=f"Loading {os.path.basename(file_path)}"): chunks.append(chunk) return pd.concat(chunks, ignore_index=True) except Exception as e: print(f"⚠️ 加载 {file_path} 失败: {str(e)}") return pd.DataFrame() # 定义内存优化的数据类型 dtypes = { 'did': 'category', 'vid': 'category', 'item_cid': 'category', 'item_type': 'category', 'item_assetSource': 'category', 'item_classify': 'category', 'item_isIntact': 'category', 'sid': 'category', 'stype': 'category', 'play_time': 'float32' } # 加载核心数据 did_features = load_data_safely('did_features_table.csv', dtype={**dtypes, **{f'f{i}': 'float32' for i in range(88)}}) vid_info = load_data_safely('vid_info_table.csv', dtype=dtypes) # 加载历史数据(分批处理) def load_historical_data(days=32, sample_frac=0.3): """分批加载历史数据并采样""" see_list, click_list, play_list = [], [], [] for day in tqdm(range(1, days + 1), desc="加载历史数据"): day_str = f"{day:02d}" # 加载曝光数据(采样减少内存) see_path = f'see_{day_str}.csv' if os.path.exists(see_path): see = load_data_safely(see_path, usecols=['did', 'vid'], dtype=dtypes) if not see.empty: see = see.sample(frac=sample_frac) # 采样减少数据量 see['day'] = day_str see_list.append(see) del see # 加载点击数据 click_path = f'click_{day_str}.csv' if os.path.exists(click_path): click = load_data_safely(click_path, usecols=['did', 'vid', 'click_time'], dtype=dtypes) if not click.empty and 'click_time' in click.columns: click['date'] = pd.to_datetime(click['click_time'], errors='coerce').dt.date click_list.append(click[['did', 'vid', 'date']]) del click # 加载播放数据 play_path = f'playplus_{day_str}.csv' if os.path.exists(play_path): play = load_data_safely(play_path, usecols=['did', 'vid', 'play_time'], dtype=dtypes) if not play.empty and 'play_time' in play.columns: play_list.append(play) del play gc.collect() return ( pd.concat(see_list).drop_duplicates(['did', 'vid']) if see_list else pd.DataFrame(), pd.concat(click_list).drop_duplicates(['did', 'vid']) if click_list else pd.DataFrame(), pd.concat(play_list).drop_duplicates(['did', 'vid']) if play_list else pd.DataFrame() ) # 加载历史数据(采样30%减少内存) hist_exposure, hist_click, hist_play = load_historical_data(days=32, sample_frac=0.3) # 加载预测数据 to_predict_users = load_data_safely('testA_pred_did.csv', dtype=dtypes) to_predict_exposure = load_data_safely('testA_did_show.csv', dtype=dtypes) # 步骤2:构建点击预测训练集(优化内存使用) def build_click_dataset(hist_exposure, hist_click, sample_ratio=0.1): """构建点击数据集,优化内存使用""" # 标记正样本 hist_click['label'] = 1 # 高效标记负样本(避免创建大集合) merged = hist_exposure.merge( hist_click[['did', 'vid']].assign(is_clicked=True), on=['did', 'vid'], how='left' ) merged['is_clicked'] = merged['is_clicked'].fillna(False) # 负样本采样 negative_samples = merged[~merged['is_clicked']].sample(frac=sample_ratio) negative_samples['label'] = 0 # 合并数据集 click_data = pd.concat([ hist_click[['did', 'vid', 
'label']], negative_samples[['did', 'vid', 'label']] ], ignore_index=True) # 释放内存 del merged, negative_samples gc.collect() return click_data click_train_data = build_click_dataset(hist_exposure, hist_click, sample_ratio=0.1) # 步骤3:特征工程(点击预测模型) def add_click_features(df, did_features, vid_info, hist_click, hist_play): """添加关键特征,避免内存溢出""" # 基础特征 df = df.merge(did_features, on='did', how='left') df = df.merge(vid_info, on='vid', how='left') # 用户行为统计(使用聚合避免大表连接) user_stats = pd.concat([ hist_click.groupby('did').size().rename('user_click_count'), hist_play.groupby('did')['play_time'].sum().rename('user_total_play') ], axis=1).reset_index() df = df.merge(user_stats, on='did', how='left') # 视频热度统计 video_stats = pd.concat([ hist_click.groupby('vid').size().rename('video_click_count'), hist_play.groupby('vid')['play_time'].mean().rename('avg_play_time') ], axis=1).reset_index() df = df.merge(video_stats, on='vid', how='left') # 填充缺失值(冷启动处理) fill_values = { 'user_click_count': 0, 'user_total_play': 0, 'video_click_count': df['video_click_count'].median(), 'avg_play_time': df['avg_play_time'].median() } for col, value in fill_values.items(): df[col] = df[col].fillna(value) # 添加时间相关特征 if 'date' in df: df['day_of_week'] = pd.to_datetime(df['date']).dt.dayofweek.astype('category') df['hour'] = pd.to_datetime(df['date']).dt.hour.astype('category') return df # 添加特征 click_train_data = add_click_features( click_train_data, did_features, vid_info, hist_click, hist_play ) # 步骤4:训练点击预测模型(优化类别特征处理) categorical_features = [ 'item_cid', 'item_type', 'item_assetSource', 'item_classify', 'item_isIntact', 'sid', 'stype', 'day_of_week', 'hour' ] # 明确指定分类特征 for col in categorical_features: if col in click_train_data.columns: click_train_data[col] = click_train_data[col].astype('category').cat.as_ordered() # 准备训练数据 X = click_train_data.drop(columns=['did', 'vid', 'label', 'date'], errors='ignore') y = click_train_data['label'] # 内存优化:删除不需要的列后立即释放 del click_train_data gc.collect() # 划分数据集 X_train, X_val, y_train, y_val = train_test_split( X, y, test_size=0.2, random_state=42, stratify=y ) # 训练模型(优化参数) params = { 'objective': 'binary', 'metric': 'binary_logloss', 'boosting_type': 'gbdt', 'num_leaves': 63, # 增加复杂度 'learning_rate': 0.05, 'feature_fraction': 0.8, 'bagging_fraction': 0.8, 'bagging_freq': 5, 'min_child_samples': 100, # 防止过拟合 'verbosity': -1 } train_data = lgb.Dataset(X_train, label=y_train, categorical_feature=categorical_features) val_data = lgb.Dataset(X_val, label=y_val, categorical_feature=categorical_features) model_click = lgb.train( params, train_data, num_boost_round=1500, # 增加轮次 valid_sets=[train_data, val_data], early_stopping_rounds=100, # 更宽松的早停 verbose_eval=50 ) # 步骤5:构建完播率训练集(优化特征工程) def build_play_dataset(hist_play, vid_info, did_features, hist_click): """构建完播率数据集""" # 基础数据 play_data = hist_play.merge( vid_info[['vid', 'item_duration']], on='vid', how='left' ) # 计算完播率 play_data['completion_rate'] = play_data['play_time'] / play_data['item_duration'] play_data['completion_rate'] = play_data['completion_rate'].clip(upper=1.0) # 添加用户特征 play_data = play_data.merge(did_features, on='did', how='left') # 添加视频特征 play_data = play_data.merge( vid_info.drop(columns=['item_duration']), on='vid', how='left' ) # 添加统计特征 # 用户平均完播率 user_stats = play_data.groupby('did')['completion_rate'].agg(['mean', 'count']).reset_index() user_stats.columns = ['did', 'user_avg_completion', 'user_play_count'] play_data = play_data.merge(user_stats, on='did', how='left') # 视频平均完播率 video_stats = 
play_data.groupby('vid')['completion_rate'].agg(['mean', 'std']).reset_index() video_stats.columns = ['vid', 'video_avg_completion', 'video_completion_std'] play_data = play_data.merge(video_stats, on='vid', how='left') # 用户-视频互动特征 user_video_stats = hist_click.groupby(['did', 'vid']).size().reset_index(name='user_vid_clicks') play_data = play_data.merge(user_video_stats, on=['did', 'vid'], how='left') # 填充缺失值 play_data['user_avg_completion'].fillna(play_data['completion_rate'].mean(), inplace=True) play_data['user_play_count'].fillna(1, inplace=True) play_data['video_avg_completion'].fillna(play_data['completion_rate'].median(), inplace=True) play_data['video_completion_std'].fillna(0, inplace=True) play_data['user_vid_clicks'].fillna(0, inplace=True) return play_data play_train_data = build_play_dataset(hist_play, vid_info, did_features, hist_click) # 步骤6:训练完播率模型(添加正则化) X_play = play_train_data.drop(columns=['did', 'vid', 'play_time', 'item_duration', 'completion_rate']) y_play = play_train_data['completion_rate'] # 划分数据集 X_train_play, X_val_play, y_train_play, y_val_play = train_test_split( X_play, y_play, test_size=0.2, random_state=42 ) # 训练参数 params_reg = { 'objective': 'regression', 'metric': 'mae', 'boosting_type': 'gbdt', 'num_leaves': 63, 'learning_rate': 0.03, # 降低学习率 'feature_fraction': 0.8, 'bagging_fraction': 0.8, 'bagging_freq': 5, 'lambda_l1': 0.1, # 添加L1正则化 'lambda_l2': 0.1, # 添加L2正则化 'min_data_in_leaf': 50, 'verbosity': -1 } train_data_play = lgb.Dataset(X_train_play, label=y_train_play, categorical_feature=categorical_features) val_data_play = lgb.Dataset(X_val_play, label=y_val_play, categorical_feature=categorical_features) model_play = lgb.train( params_reg, train_data_play, num_boost_round=2000, valid_sets=[train_data_play, val_data_play], early_stopping_rounds=100, verbose_eval=50 ) # 保存模型 model_click.save_model('click_model.txt') model_play.save_model('play_model.txt')
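
报错 train() got an unexpected keyword argument 'early_stopping_rounds' 的原因是:从 LightGBM 4.x 开始,lgb.train 不再接受 early_stopping_rounds 和 verbose_eval 关键字参数,需要改用 callbacks。上面的代码其实已经 import 了 early_stopping 和 log_evaluation,只要把两处 lgb.train 调用改成下面的写法即可(以点击模型为例,完播率模型的调用同理替换):

model_click = lgb.train(
    params,
    train_data,
    num_boost_round=1500,
    valid_sets=[train_data, val_data],
    callbacks=[
        early_stopping(stopping_rounds=100),   # 替代 early_stopping_rounds=100
        log_evaluation(period=50),             # 替代 verbose_eval=50
    ],
)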

帮我修改下面代码,把训练模型的方法从抽样改成所有数据进行训练 # -*- coding: utf-8 -*- """ Created on Sat Aug 9 11:56:46 2025 @author: srx20 """ # -*- coding: utf-8 -*- """ Created on Sat Aug 9 10:33:06 2025 @author: srx20 """ import os import glob import pandas as pd import numpy as np import joblib import gc from datetime import datetime, timedelta from sklearn.preprocessing import StandardScaler from sklearn.cluster import MiniBatchKMeans from sklearn.ensemble import RandomForestClassifier from sklearn.metrics import classification_report, confusion_matrix import talib as ta import warnings import chardet import psutil import sys import pyarrow as pa import pyarrow.parquet as pq import pyarrow.csv as pc from tqdm import tqdm from sklearn.model_selection import train_test_split # 修复警告处理 warnings.filterwarnings("ignore", category=np.VisibleDeprecationWarning) warnings.filterwarnings("ignore", category=RuntimeWarning) warnings.filterwarnings("ignore", category=pd.errors.ParserWarning) warnings.filterwarnings("ignore", category=UserWarning) warnings.filterwarnings("ignore", category=FutureWarning) # 忽略FutureWarning class StockPredictionSystem: def __init__(self, config): """ 初始化预测系统 - 针对超大内存优化 :param config: 配置字典 """ self.config = config self.five_min_paths = { 'sz': config['five_min_sz_path'], 'sh': config['five_min_sh_path'] } self.daily_paths = { 'sz': config['daily_sz_path'], 'sh': config['daily_sh_path'] } self.output_path = config['output_path'] self.start_date = datetime.strptime(config['start_date'], '%Y-%m-%d') self.end_date = datetime.strptime(config['end_date'], '%Y-%m-%d') self.data = None self.features = None self.labels = None self.scaler = StandardScaler() self.cluster_model = None self.prediction_model = None self.feature_cols = [] self.temp_dir = os.path.join(self.output_path, "temp") os.makedirs(self.temp_dir, exist_ok=True) self.parquet_files = [] def print_memory_usage(self, step_name): """打印当前内存使用情况""" process = psutil.Process(os.getpid()) mem = process.memory_info().rss / 1024 ** 2 print(f"[{step_name}] 当前内存使用: {mem:.2f} MB") def safe_read_csv(self, file_path, required_columns): """ 安全读取CSV文件 - 使用PyArrow进行高效读取 :param file_path: 文件路径 :param required_columns: 需要的列名列表 :return: 读取的DataFrame或None """ try: # 检查文件大小 if not os.path.exists(file_path): print(f"文件不存在: {file_path}") return None file_size = os.path.getsize(file_path) if file_size == 0: print(f"文件 {file_path} 大小为0,跳过") return None # 使用PyArrow读取CSV read_options = pc.ReadOptions( use_threads=True, block_size=4096 * 1024 # 4MB块大小 ) parse_options = pc.ParseOptions(delimiter=',') convert_options = pc.ConvertOptions( include_columns=required_columns, column_types={ 'date': pa.string(), 'time': pa.string(), 'open': pa.float32(), 'high': pa.float32(), 'low': pa.float32(), 'close': pa.float32(), 'volume': pa.float32(), 'amount': pa.float32() } ) table = pc.read_csv( file_path, read_options=read_options, parse_options=parse_options, convert_options=convert_options ) # 转换为Pandas DataFrame df = table.to_pandas() # 检查是否读取到数据 if df.empty: print(f"文件 {file_path} 读取后为空") return None return df except Exception as e: print(f"读取文件 {file_path} 时出错: {str(e)}") return None def process_and_save_chunk(self, df, market, stock_code, chunk_index): """ 处理单个股票的数据块并保存为Parquet文件 - 内存优化版本 """ if df is None or df.empty: return None try: # 添加市场前缀 df['stock_code'] = f"{market}_{stock_code}" # 修复日期时间转换问题 df['date'] = df['date'].astype(str).str.zfill(8) # 填充为8位字符串 df['time'] = df['time'].astype(str) # 处理时间格式 df['time'] = df['time'].apply( lambda x: f"{x[:2]}:{x[2:4]}" if len(x) == 4 
else x ) # 合并日期和时间 df['datetime'] = pd.to_datetime( df['date'] + ' ' + df['time'], format='%Y%m%d %H:%M', errors='coerce' ) # 删除无效的日期时间 df = df.dropna(subset=['datetime']) # 筛选日期范围 df = df[(df['datetime'] >= self.start_date) & (df['datetime'] <= self.end_date)] if df.empty: return None # 优化内存使用 df = df[['stock_code', 'datetime', 'open', 'high', 'low', 'close', 'volume', 'amount']] # 保存为Parquet文件 output_file = os.path.join(self.temp_dir, f"{market}_{stock_code}_{chunk_index}.parquet") # 使用PyArrow直接写入Parquet,避免Pandas中间转换 table = pa.Table.from_pandas(df, preserve_index=False) pq.write_table(table, output_file, compression='SNAPPY') return output_file except Exception as e: print(f"处理股票 {stock_code} 时出错: {str(e)}") return None def incremental_merge_parquet_files(self, parquet_files, batch_size=100): """ 增量合并Parquet文件 - 避免一次性加载所有数据 :param parquet_files: Parquet文件列表 :param batch_size: 每次合并的文件数量 :return: 合并后的Parquet文件路径 """ merged_file = os.path.join(self.temp_dir, "merged_data.parquet") # 如果文件已存在,删除 if os.path.exists(merged_file): os.remove(merged_file) # 分批合并文件 for i in tqdm(range(0, len(parquet_files), batch_size), desc="合并Parquet文件"): batch_files = parquet_files[i:i+batch_size] # 读取当前批次文件 tables = [] for file in batch_files: try: table = pq.read_table(file) tables.append(table) except Exception as e: print(f"读取文件 {file} 出错: {str(e)}") if not tables: continue # 合并当前批次 merged_table = pa.concat_tables(tables) # 追加到输出文件 if os.path.exists(merged_file): # 追加模式 with pq.ParquetWriter(merged_file, merged_table.schema) as writer: writer.write_table(merged_table) else: # 首次写入 pq.write_table(merged_table, merged_file) # 释放内存 del tables del merged_table gc.collect() return merged_file def load_and_preprocess_data(self): """ 加载和预处理数据 - 使用增量合并避免内存溢出 """ print("开始加载和预处理数据...") self.print_memory_usage("开始加载数据") # 创建临时目录 os.makedirs(self.temp_dir, exist_ok=True) parquet_files = [] # 加载五分钟线数据 for market, path in self.five_min_paths.items(): print(f"开始处理市场: {market}, 路径: {path}") file_count = 0 processed_count = 0 # 获取文件列表 csv_files = list(glob.glob(os.path.join(path, '*.csv'))) print(f"找到 {len(csv_files)} 个文件") for file_path in tqdm(csv_files, desc=f"处理 {market} 市场文件"): file_count += 1 stock_code = os.path.basename(file_path).split('.')[0] try: # 安全读取CSV文件 df = self.safe_read_csv(file_path, ['date', 'time', 'open', 'high', 'low', 'close', 'volume', 'amount']) if df is None: continue # 处理并保存为Parquet output_file = self.process_and_save_chunk(df, market, stock_code, processed_count) if output_file: parquet_files.append(output_file) processed_count += 1 # 每处理100个文件释放内存 if processed_count % 100 == 0: self.print_memory_usage(f"已处理 {processed_count} 个文件") gc.collect() except Exception as e: print(f"处理文件 {file_path} 时出错: {str(e)}") continue print(f"市场 {market} 完成: 共 {file_count} 个文件, 成功处理 {processed_count} 个文件") # 如果没有找到有效文件 if not parquet_files: raise ValueError("没有找到有效的五分钟线数据") print(f"开始增量合并 {len(parquet_files)} 个Parquet文件...") self.print_memory_usage("合并前") # 增量合并Parquet文件 merged_file = self.incremental_merge_parquet_files(parquet_files, batch_size=50) # 加载合并后的数据 print(f"加载合并后的数据: {merged_file}") self.data = pq.read_table(merged_file).to_pandas() # 优化内存使用 self.data['stock_code'] = self.data['stock_code'].astype('category') print(f"数据合并完成,共 {len(self.data)} 条记录") self.print_memory_usage("合并后") # 清理临时文件 for file in parquet_files: try: os.remove(file) except: pass # 加载日线数据 daily_data = [] daily_required_columns = ['date', 'open', 'high', 'low', 'close', 'volume'] for market, path in self.daily_paths.items(): 
print(f"开始处理日线市场: {market}, 路径: {path}") file_count = 0 processed_count = 0 # 获取所有CSV文件 all_files = list(glob.glob(os.path.join(path, '*.csv'))) print(f"找到 {len(all_files)} 个日线文件") for file_path in tqdm(all_files, desc=f"处理 {market} 日线文件"): file_count += 1 stock_code = os.path.basename(file_path).split('.')[0] try: # 安全读取CSV文件 df = self.safe_read_csv(file_path, daily_required_columns) if df is None or df.empty: continue # 添加市场前缀 df['stock_code'] = f"{market}_{stock_code}" # 转换日期格式 df['date'] = pd.to_datetime(df['date'], errors='coerce') # 删除无效日期 df = df.dropna(subset=['date']) # 筛选日期范围 df = df[(df['date'] >= self.start_date) & (df['date'] <= self.end_date)] if df.empty: continue # 优化内存使用 df = df[['stock_code', 'date', 'open', 'high', 'low', 'close', 'volume']] # 优化数据类型 - 修复错误: 使用astype而不是ast df['open'] = df['open'].astype(np.float32) df['high'] = df['high'].astype(np.float32) df['low'] = df['low'].astype(np.float32) df['close'] = df['close'].astype(np.float32) df['volume'] = df['volume'].astype(np.float32) daily_data.append(df) processed_count += 1 if processed_count % 100 == 0: self.print_memory_usage(f"已处理 {processed_count} 个日线文件") gc.collect() except Exception as e: print(f"处理日线文件 {file_path} 时出错: {str(e)}") continue print(f"日线市场 {market} 完成: 共 {file_count} 个文件, 成功处理 {processed_count} 个文件") # 合并日线数据 if daily_data: daily_df = pd.concat(daily_data, ignore_index=True) daily_df['stock_code'] = daily_df['stock_code'].astype('category') # 添加日线特征 self._add_daily_features(daily_df) else: print("警告: 没有找到日线数据") print(f"数据加载完成,共 {len(self.data)} 条记录") self.print_memory_usage("数据加载完成") def _add_daily_features(self, daily_df): """ 添加日线特征到五分钟线数据 - 使用内存优化技术 """ print("添加日线特征...") # 预处理日线数据 daily_df = daily_df.sort_values(['stock_code', 'date']) # 计算日线技术指标 - 修复FutureWarning daily_df['daily_ma5'] = daily_df.groupby('stock_code', observed=True)['close'].transform( lambda x: x.rolling(5).mean()) daily_df['daily_ma10'] = daily_df.groupby('stock_code', observed=True)['close'].transform( lambda x: x.rolling(10).mean()) daily_df['daily_vol_ma5'] = daily_df.groupby('stock_code', observed=True)['volume'].transform( lambda x: x.rolling(5).mean()) # 计算MACD - 使用更高效的方法 def calculate_macd(group): group = group.sort_values('date') if len(group) < 26: return group.assign(daily_macd=np.nan, daily_signal=np.nan) close_vals = group['close'].values.astype(np.float64) macd, signal, _ = ta.MACD(close_vals, fastperiod=12, slowperiod=26, signalperiod=9) return group.assign(daily_macd=macd, daily_signal=signal) daily_df = daily_df.groupby('stock_code', group_keys=False, observed=True).apply(calculate_macd) # 提取日期部分用于合并 self.data['date'] = self.data['datetime'].dt.date.astype('datetime64[ns]') # 优化数据类型 daily_df = daily_df[['stock_code', 'date', 'daily_ma5', 'daily_ma10', 'daily_vol_ma5', 'daily_macd', 'daily_signal']] daily_df['daily_ma5'] = daily_df['daily_ma5'].astype(np.float32) daily_df['daily_ma10'] = daily_df['daily_ma10'].astype(np.float32) daily_df['daily_vol_ma5'] = daily_df['daily_vol_ma5'].astype(np.float32) daily_df['daily_macd'] = daily_df['daily_macd'].astype(np.float32) daily_df['daily_signal'] = daily_df['daily_signal'].astype(np.float32) # 合并日线特征 self.data = pd.merge( self.data, daily_df, on=['stock_code', 'date'], how='left' ) # 删除临时列 del self.data['date'] # 释放内存 del daily_df gc.collect() def create_features(self): """ 创建特征工程 - 使用内存优化技术 """ print("开始创建特征...") self.print_memory_usage("创建特征前") if self.data is None: raise ValueError("请先加载数据") # 按股票和时间排序 self.data = self.data.sort_values(['stock_code', 
'datetime']) # 特征列表 features = [] # 1. 基础特征 features.append('open') features.append('high') features.append('low') features.append('close') features.append('volume') features.append('amount') # 2. 技术指标 - 使用分组计算避免内存溢出 # 计算移动平均线 self.data['ma5'] = self.data.groupby('stock_code', observed=True)['close'].transform( lambda x: x.rolling(5, min_periods=1).mean()) self.data['ma10'] = self.data.groupby('stock_code', observed=True)['close'].transform( lambda x: x.rolling(10, min_periods=1).mean()) features.extend(['ma5', 'ma10']) # 计算RSI - 使用更高效的方法 print("计算RSI指标...") def calculate_rsi(group): group = group.sort_values('datetime') close = group['close'].values.astype(np.float64) rsi = ta.RSI(close, timeperiod=14) return group.assign(rsi=rsi) self.data = self.data.groupby('stock_code', group_keys=False, observed=True).apply(calculate_rsi) features.append('rsi') # 3. 波动率特征 print("计算波动率特征...") self.data['price_change'] = self.data.groupby('stock_code', observed=True)['close'].pct_change() self.data['volatility'] = self.data.groupby('stock_code', observed=True)['price_change'].transform( lambda x: x.rolling(10, min_periods=1).std()) features.append('volatility') # 4. 成交量特征 self.data['vol_change'] = self.data.groupby('stock_code', observed=True)['volume'].pct_change() self.data['vol_ma5'] = self.data.groupby('stock_code', observed=True)['volume'].transform( lambda x: x.rolling(5, min_periods=1).mean()) features.extend(['vol_change', 'vol_ma5']) # 5. 日线特征 features.extend(['daily_ma5', 'daily_ma10', 'daily_vol_ma5', 'daily_macd', 'daily_signal']) # 保存特征列 self.feature_cols = features # 处理缺失值 - 只删除特征列中的缺失值 self.data = self.data.dropna(subset=features) # 优化数据类型 - 使用astype而不是ast for col in features: if self.data[col].dtype == np.float64: self.data[col] = self.data[col].astype(np.float32) print(f"特征创建完成,共 {len(features)} 个特征") self.print_memory_usage("创建特征后") def clean_data(self): """ 清洗数据 - 处理无穷大和超出范围的值(修复索引问题) """ print("开始数据清洗...") self.print_memory_usage("清洗前") # 1. 检查无穷大值 inf_mask = np.isinf(self.data[self.feature_cols].values) inf_rows = np.any(inf_mask, axis=1) inf_count = np.sum(inf_rows) if inf_count > 0: print(f"发现 {inf_count} 行包含无穷大值,正在清理...") # 将无穷大替换为NaN self.data[self.feature_cols] = self.data[self.feature_cols].replace([np.inf, -np.inf], np.nan) # 2. 检查超出float32范围的值 float32_max = np.finfo(np.float32).max float32_min = np.finfo(np.float32).min # 统计超出范围的值 overflow_count = 0 for col in self.feature_cols: col_max = self.data[col].max() col_min = self.data[col].min() if col_max > float32_max or col_min < float32_min: overflow_count += 1 print(f"列 {col} 包含超出float32范围的值: min={col_min}, max={col_max}") if overflow_count > 0: print(f"共发现 {overflow_count} 列包含超出float32范围的值,正在处理...") # 缩放到安全范围 for col in self.feature_cols: col_min = self.data[col].min() col_max = self.data[col].max() # 如果范围过大,进行缩放 if col_max - col_min > 1e6: print(f"列 {col} 范围过大 ({col_min} 到 {col_max}),进行缩放...") self.data[col] = (self.data[col] - col_min) / (col_max - col_min) # 3. 
处理NaN值 - 修复索引问题 nan_count = self.data[self.feature_cols].isna().sum().sum() if nan_count > 0: print(f"发现 {nan_count} 个NaN值,使用前向填充处理...") # 方法1: 使用transform保持索引一致 for col in self.feature_cols: self.data[col] = self.data.groupby('stock_code', observed=True)[col].transform( lambda x: x.fillna(method='ffill').fillna(method='bfill').fillna(0) ) # 方法2: 使用循环逐组处理(备用方法) # for stock in self.data['stock_code'].unique(): # stock_mask = self.data['stock_code'] == stock # self.data.loc[stock_mask, self.feature_cols] = self.data.loc[stock_mask, self.feature_cols].fillna(method='ffill').fillna(method='bfill').fillna(0) # 4. 最终检查 cleaned = True for col in self.feature_cols: if np.isinf(self.data[col]).any() or self.data[col].isna().any(): print(f"警告: 列 {col} 仍包含无效值") cleaned = False if cleaned: print("数据清洗完成") else: print("数据清洗完成,但仍存在部分问题") self.print_memory_usage("清洗后") def create_labels(self): """ 创建标签 - 添加新条件: 1. 次日(T+1)收盘价(15:00)比次日(T+1)9:35收盘价大5% 2. 后日(T+2)9:35收盘价比次日(T+1)收盘价(15:00)大1% """ print("开始创建标签...") self.print_memory_usage("创建标签前") if self.data is None: raise ValueError("请先加载数据") # 按股票和时间排序 self.data = self.data.sort_values(['stock_code', 'datetime']) # 添加日期列用于合并 self.data['date'] = self.data['datetime'].dt.date # 创建每日关键时间点价格数据 daily_key_points = self.data.groupby(['stock_code', 'date']).apply( lambda x: pd.Series({ 'time9_35_close': x[x['datetime'].dt.time == pd.to_datetime('09:35:00').time()]['close'].iloc[0] if not x[x['datetime'].dt.time == pd.to_datetime('09:35:00').time()].empty else np.nan, 'time15_00_close': x[x['datetime'].dt.time == pd.to_datetime('15:00:00').time()]['close'].iloc[0] if not x[x['datetime'].dt.time == pd.to_datetime('15:00:00').time()].empty else np.nan }) ).reset_index() # 为每日关键点添加次日(T+1)和后日(T+2)数据 daily_key_points = daily_key_points.sort_values(['stock_code', 'date']) daily_key_points['next_date'] = daily_key_points.groupby('stock_code')['date'].shift(-1) daily_key_points['next_next_date'] = daily_key_points.groupby('stock_code')['date'].shift(-2) # 合并次日(T+1)数据 daily_key_points = pd.merge( daily_key_points, daily_key_points[['stock_code', 'date', 'time9_35_close', 'time15_00_close']].rename( columns={ 'date': 'next_date', 'time9_35_close': 'next_time9_35_close', 'time15_00_close': 'next_time15_00_close' } ), on=['stock_code', 'next_date'], how='left' ) # 合并后日(T+2)数据 daily_key_points = pd.merge( daily_key_points, daily_key_points[['stock_code', 'date', 'time9_35_close']].rename( columns={ 'date': 'next_next_date', 'time9_35_close': 'next_next_time9_35_close' } ), on=['stock_code', 'next_next_date'], how='left' ) # 将关键点数据合并回原始数据 self.data = pd.merge( self.data, daily_key_points[['stock_code', 'date', 'next_time9_35_close', 'next_time15_00_close', 'next_next_time9_35_close']], on=['stock_code', 'date'], how='left' ) # 计算新条件 cond1 = (self.data['next_time15_00_close'] > self.data['next_time9_35_close'] * 1.05) cond2 = (self.data['next_next_time9_35_close'] > self.data['next_time15_00_close'] * 1.01) # 创建标签(满足两个条件则为1) self.data['label'] = np.where(cond1 & cond2, 1, 0).astype(np.int8) # 删除中间列 self.data.drop([ 'date', 'next_time9_35_close', 'next_time15_00_close', 'next_next_time9_35_close' ], axis=1, inplace=True, errors='ignore') # 保存标签 self.labels = self.data['label'] # 分析标签分布 label_counts = self.data['label'].value_counts(normalize=True) print(f"标签分布:\n{label_counts}") print("标签创建完成") self.print_memory_usage("创建标签后") def perform_clustering(self, n_clusters=5, batch_size=100000): """ 执行聚类分析 - 使用MiniBatchKMeans处理大数据 :param n_clusters: 聚类数量 :param batch_size: 每次处理的样本数量 """ 
print(f"开始聚类分析,聚类数: {n_clusters}...") self.print_memory_usage("聚类前") if self.feature_cols is None: raise ValueError("请先创建特征") # 添加数据清洗步骤 self.clean_data() # 标准化特征 print("标准化特征...") self.scaler.fit(self.data[self.feature_cols]) # 使用MiniBatchKMeans进行聚类 self.cluster_model = MiniBatchKMeans( n_clusters=n_clusters, batch_size=batch_size, random_state=42, n_init=3 ) # 分批处理数据 print("分批聚类...") n_samples = len(self.data) for i in tqdm(range(0, n_samples, batch_size), desc="聚类进度"): batch_data = self.data.iloc[i:i+batch_size] scaled_batch = self.scaler.transform(batch_data[self.feature_cols]) self.cluster_model.partial_fit(scaled_batch) # 获取最终聚类结果 print("获取聚类结果...") clusters = [] for i in tqdm(range(0, n_samples, batch_size), desc="分配聚类"): batch_data = self.data.iloc[i:i+batch_size] scaled_batch = self.scaler.transform(batch_data[self.feature_cols]) batch_clusters = self.cluster_model.predict(scaled_batch) clusters.append(batch_clusters) # 添加聚类结果到数据 self.data['cluster'] = np.concatenate(clusters) self.feature_cols.append('cluster') # 分析聚类结果 cluster_summary = self.data.groupby('cluster')['label'].agg(['mean', 'count']) print("聚类结果分析:") print(cluster_summary) # 保存聚类模型 cluster_model_path = os.path.join( self.output_path, "分钟线预测训练聚类模型.pkl" ) joblib.dump(self.cluster_model, cluster_model_path) print(f"聚类模型已保存至: {cluster_model_path}") self.print_memory_usage("聚类后") def train_prediction_model(self, sample_fraction=0.1): """ 训练预测模型 - 使用数据抽样减少内存使用 :param sample_fraction: 抽样比例 """ print("开始训练预测模型...") self.print_memory_usage("训练模型前") if self.feature_cols is None or self.labels is None: raise ValueError("请先创建特征和标签") # 抽样数据 if sample_fraction < 1.0: print(f"抽样 {sample_fraction*100:.1f}% 数据用于训练") sample_data = self.data.sample(frac=sample_fraction, random_state=42) X = sample_data[self.feature_cols] y = sample_data['label'] else: X = self.data[self.feature_cols] y = self.labels # 检查类别分布 if y.nunique() < 2: print("警告: 只有一个类别的数据,无法训练模型") return # 划分训练集和测试集 X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.2, random_state=42, stratify=y ) # 训练随机森林分类器 self.prediction_model = RandomForestClassifier( n_estimators=100, # 减少树的数量 max_depth=8, # 减小最大深度 min_samples_split=10, class_weight='balanced', random_state=42, n_jobs=-1 ) self.prediction_model.fit(X_train, y_train) # 评估模型 y_pred = self.prediction_model.predict(X_test) print("模型评估报告:") print(classification_report(y_test, y_pred)) # 打印混淆矩阵 cm = confusion_matrix(y_test, y_pred) print("混淆矩阵:") print(cm) # 保存预测模型 model_path = os.path.join( self.output_path, "分钟线预测训练模型.pkl" ) joblib.dump(self.prediction_model, model_path) print(f"预测模型已保存至: {model_path}") self.print_memory_usage("训练模型后") def predict_and_save(self, output_results=True): """ 使用模型进行预测并保存结果 :param output_results: 是否输出预测结果 """ print("开始预测...") self.print_memory_usage("预测前") if self.prediction_model is None: raise ValueError("请先训练预测模型") # 准备预测数据 X = self.data[self.feature_cols] # 分批预测 predictions = [] batch_size = 10000 n_samples = len(X) for i in tqdm(range(0, n_samples, batch_size), desc="预测进度"): batch_data = X.iloc[i:i+batch_size] batch_pred = self.prediction_model.predict(batch_data) predictions.append(batch_pred) # 合并预测结果 self.data['prediction'] = np.concatenate(predictions) # 保存预测结果 if output_results: output_file = os.path.join(self.output_path, "预测结果.csv") self.data[['stock_code', 'datetime', 'close', 'label', 'prediction']].to_csv(output_file, index=False) print(f"预测结果已保存至: {output_file}") # 分析预测效果 accuracy = (self.data['label'] == self.data['prediction']).mean() print(f"整体预测准确率: 
{accuracy:.4f}") # 按股票分析预测效果 stock_accuracy = self.data.groupby('stock_code').apply( lambda x: (x['label'] == x['prediction']).mean() ) print("\n股票预测准确率统计:") print(stock_accuracy.describe()) self.print_memory_usage("预测后") def run(self, output_results=True, sample_fraction=0.1): """ 运行整个流程 - 使用内存优化技术 """ try: # 分步执行,每步完成后释放内存 self.load_and_preprocess_data() gc.collect() self.print_memory_usage("数据加载后") self.create_features() gc.collect() self.print_memory_usage("特征创建后") self.create_labels() # 使用新的标签创建方法 gc.collect() self.print_memory_usage("标签创建后") self.perform_clustering(n_clusters=self.config.get('n_clusters', 5)) gc.collect() self.print_memory_usage("聚类后") self.train_prediction_model(sample_fraction=sample_fraction) gc.collect() self.print_memory_usage("模型训练后") self.predict_and_save(output_results) gc.collect() self.print_memory_usage("预测后") print("训练和预测流程完成!") except KeyboardInterrupt: print("用户中断执行") except Exception as e: print(f"运行过程中出错: {str(e)}") import traceback traceback.print_exc() # 配置参数 config = { # 数据路径配置 'five_min_sz_path': r"D:\股票量化数据库\股票五分钟线csv数据\深证", 'five_min_sh_path': r"D:\股票量化数据库\股票五分钟线csv数据\上证", 'daily_sz_path': r"D:\股票量化数据库\股票csv数据\深证", 'daily_sh_path': r"D:\股票量化数据库\股票csv数据\上证", # 输出路径 'output_path': r"D:\股票量化数据库\预测结果", # 时间范围配置 'start_date': '2023-09-08', 'end_date': '2025-08-07', # 聚类配置 'n_clusters': 5 } # 创建并运行系统 if __name__ == "__main__": # 打印环境信息 print(f"Python版本: {sys.version}") print(f"Pandas版本: {pd.__version__}") # 是否输出预测结果 output_results = True # 抽样比例 (0.1 = 10%) sample_fraction = 0.1 # 设置Pandas内存选项 pd.set_option('mode.chained_assignment', None) pd.set_option('display.max_columns', None) # 设置内存优化选项 pd.set_option('compute.use_numexpr', True) pd.set_option('compute.use_bottleneck', True) # 创建并运行系统 system = StockPredictionSystem(config) system.run(output_results=output_results, sample_fraction=sample_fraction)

请帮我检查并完善代码:

# 步骤1:数据读取与预处理,如果内存不足,可以考虑分批处理或使用Dask等工具。
import pandas as pd
import numpy as np
import lightgbm as lgb
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, mean_absolute_error
# 补充缺失的导入(下方代码用到 os.path、gc.collect、chardet.detect)
import os
import gc
import chardet

# 读取数据
did_features = pd.read_csv('did_features_table.csv')   # 用户特征
vid_info = pd.read_csv('vid_info_table.csv')           # 视频信息
# 历史32天曝光数据see_list(用于构建负样本)
# 历史32天点击数据click_list(正样本)
# 历史32天播放数据play_list(用于训练完播率模型)

def detect_encoding(file_path):
    with open(file_path, 'rb') as f:
        result = chardet.detect(f.read(10000))
    return result['encoding'], result['confidence']

def load_all_data(days=32):
    see_list, click_list, play_list = [], [], []
    dtypes = {'did': 'category', 'vid': 'category'}
    for i in range(1, days + 1):
        day = f"{i:02d}"

        # 检查文件是否存在
        for file_type in [f'see_{day}.csv', f'click_{day}.csv', f'playplus_{day}.csv']:
            if not os.path.exists(file_type):
                print(f"⚠️ 警告: 文件 {file_type} 不存在,跳过该天数据")
                continue

        try:
            # 加载 see 数据
            see = pd.read_csv(f'see_{day}.csv', encoding='latin1', dtype=dtypes)
            if 'did' not in see.columns or 'vid' not in see.columns:
                print(f"⚠️ 警告: see_{day}.csv 缺少必要字段,跳过该天数据")
                continue
            see['day'] = day
            see_list.append(see)
            del see
            gc.collect()

            # 加载 click 数据
            click = pd.read_csv(
                f'click_{day}.csv',
                encoding='ISO-8859-1',
                on_bad_lines='skip',
                dtype=dtypes
            )
            if 'click_time' not in click.columns:
                print(f"⚠️ 警告: click_{day}.csv 缺少 click_time 字段,跳过该天数据")
                continue
            click['date'] = pd.to_datetime(click['click_time'], errors='coerce').dt.date
            click_list.append(click[['did', 'vid', 'date']])
            del click
            gc.collect()

            # 加载 play 数据
            play = pd.read_csv(
                f'playplus_{day}.csv',
                engine='python',
                encoding_errors='ignore',
                dtype=dtypes
            )
            if 'play_time' not in play.columns:
                print(f"⚠️ 警告: playplus_{day}.csv 缺少 play_time 字段,跳过该天数据")
                continue
            play_list.append(play[['did', 'vid', 'play_time']])
            del play
            gc.collect()
        except Exception as e:
            print(f"⚠️ 加载第 {day} 天数据时出错: {str(e)}")
            continue

    # 处理所有天都没有数据的情况
    if not see_list:
        raise ValueError("错误: 未找到任何有效数据,请检查输入文件")

    all_see = pd.concat(see_list).drop_duplicates(['did', 'vid'])
    all_click = pd.concat(click_list).drop_duplicates(['did', 'vid'])

to_predict_users = pd.read_csv('testA_pred_did.csv')       # 待预测用户
to_predict_exposure = pd.read_csv('testA_did_show.csv')    # 待预测用户的曝光视频
# 注意:弹幕数据暂不使用

# 步骤2:构建点击预测模型的训练集,如果内存不足,可以考虑分批处理或使用Dask等工具。
# 正样本:历史点击数据,标记为1
positive_samples = hist_click[['did', 'vid']].copy()
positive_samples['label'] = 1

# 负样本:从历史曝光数据中,排除出现在历史点击数据中的(did, vid)
# 注意:同一个用户可能曝光了多个视频,但只点击了部分,所以未点击的就是负样本
# 合并同一个用户的所有曝光和点击,然后取差集
# 方法:对每个用户,曝光视频中不在点击视频列表中的作为负样本
# 注意:数据量可能很大,需要分组操作

# 先获取每个用户点击了哪些视频(did, vid集合)
user_clicked_vids = hist_click.groupby('did')['vid'].apply(set).reset_index(name='clicked_set')

# 将历史曝光数据与user_clicked_vids合并
hist_exposure_with_click = hist_exposure.merge(user_clicked_vids, on='did', how='left')

# 对于每个曝光记录,如果vid不在clicked_set中,则作为负样本
# 注意:如果用户没有点击记录,则clicked_set为NaN,使用空集
hist_exposure_with_click['clicked_set'] = hist_exposure_with_click['clicked_set'].apply(
    lambda x: x if isinstance(x, set) else set())
hist_exposure_with_click['is_clicked'] = hist_exposure_with_click.apply(
    lambda row: row['vid'] in row['clicked_set'], axis=1)

# 负样本:未点击的曝光记录
negative_samples = hist_exposure_with_click[~hist_exposure_with_click['is_clicked']][['did', 'vid']]
negative_samples['label'] = 0

# 合并正负样本
click_train_data = pd.concat([positive_samples, negative_samples], axis=0, ignore_index=True)

# 合并用户特征和视频特征
click_train_data = click_train_data.merge(did_features, on='did', how='left')
click_train_data = click_train_data.merge(vid_info, on='vid', how='left')
# 注意:这里可能会有缺失值(比如视频信息表中没有某个视频的信息),需要填充
# 填充策略:对于数值特征,用中位数或均值;对于类别特征,用众数或特殊值(如-1)

# 步骤3:特征工程(点击预测模型)
video_click_count = hist_click.groupby('vid').size().reset_index(name='video_click_count')
click_train_data = click_train_data.merge(video_click_count, on='vid', how='left')
click_train_data['video_click_count'].fillna(0, inplace=True)   # 对于新视频,用0填充

# 步骤4:训练点击预测模型
# 划分训练集和验证集
X = click_train_data.drop(columns=['did', 'vid', 'label'])
y = click_train_data['label']

# 将类别特征转换为类别类型(LightGBM可以处理类别特征)
categorical_features = ['item_cid', 'item_type', 'item_assetSource', 'item_classify',
                        'item_isIntact', 'sid', 'stype']
for col in categorical_features:
    if col in X.columns:
        X[col] = X[col].astype('category')

X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.2, random_state=42)

# 训练LightGBM分类器
params = {
    'objective': 'binary',
    'metric': 'binary_logloss',
    'boosting_type': 'gbdt',
    'num_leaves': 31,
    'learning_rate': 0.05,
    'feature_fraction': 0.9,
    'bagging_fraction': 0.8,
    'bagging_freq': 5,
    'verbose': 0
}
train_data = lgb.Dataset(X_train, label=y_train, categorical_feature=categorical_features, free_raw_data=False)
val_data = lgb.Dataset(X_val, label=y_val, categorical_feature=categorical_features, free_raw_data=False)

# 新版LightGBM已移除early_stopping_rounds/verbose_eval参数,改用回调实现早停和日志
model_click = lgb.train(
    params,
    train_data,
    valid_sets=[train_data, val_data],
    num_boost_round=1000,
    callbacks=[lgb.early_stopping(stopping_rounds=50), lgb.log_evaluation(period=10)]
)

# 保存模型
model_click.save_model('click_model.txt')

# 步骤5:构建完播率预测模型的训练集
# 使用历史播放数据(有播放时长),需要合并视频信息表获取视频时长,然后计算完播率
# 注意:播放时长可能大于视频时长,所以完播率最大为1
play_data = hist_play.merge(vid_info[['vid', 'item_duration']], on='vid', how='left')
play_data['completion_rate'] = play_data['play_time'] / play_data['item_duration']
play_data['completion_rate'] = play_data['completion_rate'].clip(upper=1.0)   # 超过1的设为1

# 合并用户特征和视频特征
play_train_data = play_data.merge(did_features, on='did', how='left')
play_train_data = play_train_data.merge(vid_info.drop(columns=['item_duration']), on='vid', how='left')

# 同样,构造统计特征(如用户平均完播率、视频平均完播率等)
# 示例:用户平均完播率
user_avg_completion = play_train_data.groupby('did')['completion_rate'].mean().reset_index(name='user_avg_completion')
play_train_data = play_train_data.merge(user_avg_completion, on='did', how='left')
# 视频平均完播率
video_avg_completion = play_train_data.groupby('vid')['completion_rate'].mean().reset_index(name='video_avg_completion')
play_train_data = play_train_data.merge(video_avg_completion, on='vid', how='left')

# 填充缺失值
# ...

# 特征矩阵
X_play = play_train_data.drop(columns=['did', 'vid', 'play_time', 'item_duration', 'completion_rate'])
y_play = play_train_data['completion_rate']

# 回归模型同样需要把类别列转换为category类型,否则LightGBM无法处理object列
for col in categorical_features:
    if col in X_play.columns:
        X_play[col] = X_play[col].astype('category')

# 步骤6:训练完播率预测模型
# 划分训练集和验证集
X_train_play, X_val_play, y_train_play, y_val_play = train_test_split(X_play, y_play, test_size=0.2, random_state=42)

# 训练LightGBM回归模型
params_reg = {
    'objective': 'regression',
    'metric': 'mae',
    'boosting_type': 'gbdt',
    'num_leaves': 31,
    'learning_rate': 0.05,
    'feature_fraction': 0.9,
    'bagging_fraction': 0.8,
    'bagging_freq': 5,
    'verbose': 0
}
train_data_play = lgb.Dataset(X_train_play, label=y_train_play, categorical_feature=categorical_features, free_raw_data=False)
val_data_play = lgb.Dataset(X_val_play, label=y_val_play, categorical_feature=categorical_features, free_raw_data=False)

model_play = lgb.train(
    params_reg,
    train_data_play,
    valid_sets=[train_data_play, val_data_play],
    num_boost_round=1000,
    callbacks=[lgb.early_stopping(stopping_rounds=50), lgb.log_evaluation(period=10)]
)

# 保存模型
model_play.save_model('play_model.txt')
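
As written, Steps 2–6 above will fail at `positive_samples = hist_click[...]`: `load_all_data` is defined but never called, and `hist_click`, `hist_exposure`, `hist_play` are never assigned. Below is a minimal wiring sketch; it assumes `hist_exposure`/`hist_click` are simply the concatenated 32-day exposure and click tables built inside `load_all_data`, and that `hist_play` is the concatenation of `play_list`. The return statement shown is an assumed addition to the end of `load_all_data`, so adjust the names to the actual intent.

# 假设在 load_all_data 末尾补充返回语句(原函数只构建了列表,没有返回结果):
#     all_play = pd.concat(play_list, ignore_index=True)
#     return all_see, all_click, all_play

all_see, all_click, all_play = load_all_data(days=32)

# 步骤2/3/5 所引用的名称与加载结果对齐(示意性别名,按实际含义调整)
hist_exposure = all_see     # 历史曝光,用于构建负样本
hist_click = all_click      # 历史点击,正样本
hist_play = all_play        # 历史播放,用于完播率模型

With these assignments in place before Step 2, the script runs top to bottom. Memory permitting, the per-user set construction in Step 2 can also be replaced by a single indicator merge (`hist_exposure.merge(hist_click[['did', 'vid']], on=['did', 'vid'], how='left', indicator=True)`), which marks unclicked exposures without building Python sets row by row.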

编码: ascii, 置信度: 1.00
Training until validation scores don't improve for 20 rounds
[10]	training's auc: 0.999999	valid_1's auc: 0.999999
[20]	training's auc: 0.999999	valid_1's auc: 0.999999
Early stopping, best iteration is:
[1]	training's auc: 0.999999	valid_1's auc: 0.999999
Validation AUC: 1.0000
---------------------------------------------------------------------------
InvalidIndexError                         Traceback (most recent call last)
Cell In[16], line 188
    186 samples = prepare_samples(all_see, all_click, all_play)
    187 model, features, auc_score = train_model(samples)
--> 188 result = predict_new_data(model, features, 'testA_did_show.csv')

Cell In[16], line 164, in predict_new_data(model, feature_columns, test_file)
    161 user_click_rate = pd.read_csv('user_click_rate.csv', encoding='gbk').set_index('did')['user_click_rate']
    162 video_popularity = pd.read_csv('video_popularity.csv', encoding='gbk').set_index('vid')['video_popularity']
--> 164 test_data['user_click_rate'] = test_data['did'].map(user_click_rate).fillna(0).astype(np.float32)
    165 test_data['video_popularity'] = test_data['vid'].map(video_popularity).fillna(0).astype(np.int32)
    167 test_data[feature_columns] = test_data[feature_columns].fillna(0)

File ~\ANA\Lib\site-packages\pandas\core\series.py:4544, in Series.map(self, arg, na_action)
   4464 def map(
   4465     self,
   4466     arg: Callable | Mapping | Series,
   4467     na_action: Literal["ignore"] | None = None,
   4468 ) -> Series:
   4469     """
   4470     Map values of Series according to an input mapping or function.
   (...)
   4542     dtype: object
   4543     """
-> 4544 new_values = self._map_values(arg, na_action=na_action)
   4545 return self._constructor(new_values, index=self.index, copy=False).__finalize__(
   4546     self, method="map"
   4547 )

File ~\ANA\Lib\site-packages\pandas\core\base.py:919, in IndexOpsMixin._map_values(self, mapper, na_action, convert)
    916 arr = self._values
    918 if isinstance(arr, ExtensionArray):
--> 919     return arr.map(mapper, na_action=na_action)
    921 return algorithms.map_array(arr, mapper, na_action=na_action, convert=convert)

File ~\ANA\Lib\site-packages\pandas\core\arrays\categorical.py:1530, in Categorical.map(self, mapper, na_action)
   1526     na_action = "ignore"
   1528 assert callable(mapper) or is_dict_like(mapper)
-> 1530 new_categories = self.categories.map(mapper)
   1532 has_nans = np.any(self._codes == -1)
   1534 na_val = np.nan

File ~\ANA\Lib\site-packages\pandas\core\indexes\base.py:6419, in Index.map(self, mapper, na_action)
   6383 """
   6384 Map values using an input mapping or function.
   6385 (...)
   6415 Index(['A', 'B', 'C'], dtype='object')
   6416 """
   6417 from pandas.core.indexes.multi import MultiIndex
-> 6419 new_values = self._map_values(mapper, na_action=na_action)
   6421 # we can return a MultiIndex
   6422 if new_values.size and isinstance(new_values[0], tuple):

File ~\ANA\Lib\site-packages\pandas\core\base.py:921, in IndexOpsMixin._map_values(self, mapper, na_action, convert)
    918 if isinstance(arr, ExtensionArray):
    919     return arr.map(mapper, na_action=na_action)
--> 921 return algorithms.map_array(arr, mapper, na_action=na_action, convert=convert)

File ~\ANA\Lib\site-packages\pandas\core\algorithms.py:1803, in map_array(arr, mapper, na_action, convert)
   1799     mapper = mapper[mapper.index.notna()]
   1801 # Since values were input this means we came from either
   1802 # a dict or a series and mapper should be an index
-> 1803 indexer = mapper.index.get_indexer(arr)
   1804 new_values = take_nd(mapper._values, indexer)
   1806 return new_values

File ~\ANA\Lib\site-packages\pandas\core\indexes\base.py:3875, in Index.get_indexer(self, target, method, limit, tolerance)
   3872 self._check_indexing_method(method, limit, tolerance)
   3874 if not self._index_as_unique:
-> 3875     raise InvalidIndexError(self._requires_unique_msg)
   3877 if len(target) == 0:
   3878     return np.array([], dtype=np.intp)

InvalidIndexError: Reindexing only valid with uniquely valued Index objects

请帮我定位并解决问题
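
The traceback points at the mapping Series rather than at test_data itself: mapping a categorical column goes through Categorical.map → self.categories.map(mapper) → mapper.index.get_indexer(...), and get_indexer requires the mapper's index to be unique. The InvalidIndexError therefore means user_click_rate.csv (and possibly video_popularity.csv) contains duplicate did/vid rows, so set_index('did') produces a non-unique index. One way to fix it, assuming the duplicate keys can simply be aggregated (here by mean; drop_duplicates with keep='last' works too), is to collapse them before mapping. The sketch below replaces lines 161–165 inside predict_new_data and assumes pd/np are already imported there and that did/vid are stored as strings on both sides:

# 将映射表的键聚合去重,保证索引唯一后再map
user_click_rate = (
    pd.read_csv('user_click_rate.csv', encoding='gbk')
      .groupby('did')['user_click_rate'].mean()        # 假设重复did按均值聚合
)
video_popularity = (
    pd.read_csv('video_popularity.csv', encoding='gbk')
      .groupby('vid')['video_popularity'].mean()
)

# 统一成字符串后再映射,避免category列与映射表索引的dtype不一致
test_data['user_click_rate'] = (
    test_data['did'].astype(str).map(user_click_rate).fillna(0).astype(np.float32)
)
test_data['video_popularity'] = (
    test_data['vid'].astype(str).map(video_popularity).fillna(0).astype(np.float32)   # 聚合后为浮点值
)

If duplicate did/vid rows are not expected in those two CSVs, it is worth checking the code that writes them instead of aggregating here, since the aggregation only hides the duplication.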
