A complete PyTorch-based code example implementing time series forecasting models that combine KAN (Kolmogorov-Arnold Network) with Transformer, LSTM, BiGRU, GRU, and TCN.
KAN+Transformer time series forecasting
KAN+LSTM
KAN+BiGRU
KAN+GRU
KAN+TCN
Each model can serve as the main model to validate or as a comparison baseline.
Suitable for single-step time series forecasting of power output, electrical load, flow rate, concentration, mechanical quantities, and more.
Multivariate input, univariate output.
Multi-step time window input, single-step prediction.
Includes R², MAE (further metrics can be added, as sketched below), comparison plots, error plots, and CSV/XLSX data loading.
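For metrics beyond R² and MAE, additional ones can be computed directly from the true and predicted value arrays. A minimal sketch (the extra_metrics helper is an illustrative addition, not part of the pipeline below) adding RMSE and MAPE:

import numpy as np
from sklearn.metrics import mean_squared_error

def extra_metrics(y_true, y_pred):
    # RMSE and MAPE from 1-D arrays of true and predicted values
    rmse = np.sqrt(mean_squared_error(y_true, y_pred))
    # MAPE is undefined where y_true == 0, so mask those points
    mask = y_true != 0
    mape = np.mean(np.abs((y_true[mask] - y_pred[mask]) / y_true[mask])) * 100
    return rmse, mape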
The screenshot accompanying the original post shows a time series forecasting analysis in MATLAB: the main MATLAB window with its menu bar, toolbar, current-folder browser, and editor, plus a figure window displaying prediction results for a time series. Setups like this, pairing a method such as KAN with deep learning models like Transformer, LSTM, GRU, or TCN, are typical for forecasting tasks in power systems, traffic flow, chemical engineering, and related fields, covering power, load, flow, and concentration prediction. Since the goal here is Python code built on PyTorch rather than MATLAB, the rest of this article outlines how to carry out the same kind of forecasting task with PyTorch.
Python + PyTorch: a basic time series forecasting framework
Data preparation
import pandas as pd
import numpy as np
from sklearn.preprocessing import MinMaxScaler
import torch

# Read the CSV data (this basic example assumes a single-column series)
data = pd.read_csv('your_data.csv')

# Scale values to [0, 1]
scaler = MinMaxScaler()
scaled_data = scaler.fit_transform(data.values.reshape(-1, 1))

# Convert to a PyTorch tensor
tensor_data = torch.tensor(scaled_data).float()

# Time window size and forecast horizon
window_size = 10
forecast_steps = 1

# Build (input window, label) training pairs
def create_inout_sequences(input_data, tw):
    inout_seq = []
    L = len(input_data)
    for i in range(L - tw - forecast_steps + 1):
        train_seq = input_data[i:i + tw]
        train_label = input_data[i + tw:i + tw + forecast_steps]
        inout_seq.append((train_seq, train_label))
    return inout_seq

inout_window = create_inout_sequences(tensor_data, window_size)
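A quick sanity check of the generated pairs (shapes assume the univariate, single-step setup above):

# Each pair is (window, label): window (window_size, 1), label (forecast_steps, 1)
seq, label = inout_window[0]
print(seq.shape, label.shape)  # torch.Size([10, 1]) torch.Size([1, 1])
print(len(inout_window))       # len(tensor_data) - window_size - forecast_steps + 1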
Building the model
import torch.nn as nn

class TimeSeriesPredictor(nn.Module):
    def __init__(self, input_dim, hidden_dim, layer_dim, output_dim):
        super(TimeSeriesPredictor, self).__init__()
        self.hidden_dim = hidden_dim
        self.layer_dim = layer_dim
        # LSTM layer
        self.lstm = nn.LSTM(input_dim, hidden_dim, layer_dim, batch_first=True)
        # Fully connected output layer
        self.fc = nn.Linear(hidden_dim, output_dim)

    def forward(self, x):
        # Zero-initialized hidden and cell states, sized from the instance attributes
        h0 = torch.zeros(self.layer_dim, x.size(0), self.hidden_dim)
        c0 = torch.zeros(self.layer_dim, x.size(0), self.hidden_dim)
        out, (hn, cn) = self.lstm(x, (h0, c0))
        # Predict from the last time step's output
        out = self.fc(out[:, -1, :])
        return out

input_dim = 1
hidden_dim = 100
layer_dim = 1  # a single LSTM layer
output_dim = forecast_steps

model = TimeSeriesPredictor(input_dim, hidden_dim, layer_dim, output_dim)
Training the model
criterion = nn.MSELoss()  # mean squared error for regression
optimizer = torch.optim.Adam(model.parameters(), lr=0.01)

for epoch in range(100):  # number of epochs
    for seq, labels in inout_window:
        optimizer.zero_grad()
        # seq has shape (window_size, 1); add a batch dimension -> (1, window_size, 1)
        model_output = model(seq.unsqueeze(0))
        single_loss = criterion(model_output.squeeze(), labels.squeeze())
        single_loss.backward()
        optimizer.step()
Prediction and evaluation
# Recursive multi-step forecasting: each prediction is fed back into the input window
future_steps = 10
prediction_list = []
last_window = tensor_data[-window_size:].clone()  # most recent observed window, shape (window_size, 1)

model.eval()
with torch.no_grad():
    for _ in range(future_steps):
        next_value = model(last_window.unsqueeze(0))  # shape (1, 1)
        prediction_list.append(next_value.item())
        # Slide the window: drop the oldest value, append the new prediction
        last_window = torch.cat([last_window[1:], next_value.view(1, 1)], dim=0)

# Map predictions back to the original scale
predicted_values = scaler.inverse_transform(np.array(prediction_list).reshape(-1, 1))
The code above is only an example; real applications will likely need further adjustment and tuning. Note that the architecture here uses an LSTM; it can be replaced with other models such as Transformer, GRU, or TCN, as sketched below. Also remember to adapt the preprocessing, model parameters, and training strategy to the data at hand.
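For example, swapping the LSTM for a GRU only touches the recurrent layer. A minimal sketch (the class name is hypothetical):

class TimeSeriesGRUPredictor(nn.Module):
    # Same interface as TimeSeriesPredictor, but with a GRU core
    def __init__(self, input_dim, hidden_dim, layer_dim, output_dim):
        super().__init__()
        self.gru = nn.GRU(input_dim, hidden_dim, layer_dim, batch_first=True)
        self.fc = nn.Linear(hidden_dim, output_dim)

    def forward(self, x):
        out, _ = self.gru(x)  # hidden state defaults to zeros
        return self.fc(out[:, -1, :])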
What follows is a complete PyTorch-based code example implementing time series forecasting models that combine KAN (Kolmogorov-Arnold Network) with Transformer, LSTM, BiGRU, GRU, and TCN. The code covers the key steps of data loading, model construction, training, validation, and evaluation, and provides several evaluation metrics and visualizations.
1. Importing libraries
import pandas as pd
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import r2_score, mean_absolute_error, mean_squared_error
import matplotlib.pyplot as plt
2. Data loading and preprocessing
class TimeSeriesDataset(Dataset):
    def __init__(self, data, window_size, forecast_steps):
        self.data = data
        self.window_size = window_size
        self.forecast_steps = forecast_steps

    def __len__(self):
        return len(self.data) - self.window_size - self.forecast_steps + 1

    def __getitem__(self, idx):
        # Input: a window over all feature columns
        x = self.data[idx:idx + self.window_size]
        # Label: the next forecast_steps values of the target (last) column
        y = self.data[idx + self.window_size:idx + self.window_size + self.forecast_steps, -1]
        return torch.tensor(x, dtype=torch.float32), torch.tensor(y, dtype=torch.float32)

def load_data(file_path, window_size, forecast_steps, test_size=0.2):
    df = pd.read_excel(file_path)  # use pd.read_csv for CSV files
    data = df.values
    # Scale all columns to [0, 1]
    scaler = MinMaxScaler()
    scaled_data = scaler.fit_transform(data)
    # Separate scaler for the target column, so predictions can be inverse-transformed later
    target_scaler = MinMaxScaler()
    target_scaler.fit(data[:, -1:])
    # Chronological train/test split (no shuffling across the split for time series)
    train_size = int(len(scaled_data) * (1 - test_size))
    train_data = scaled_data[:train_size]
    test_data = scaled_data[train_size:]
    # Build the datasets
    train_dataset = TimeSeriesDataset(train_data, window_size, forecast_steps)
    test_dataset = TimeSeriesDataset(test_data, window_size, forecast_steps)
    return train_dataset, test_dataset, target_scaler
file_path = 'your_data.xlsx'
window_size = 10
forecast_steps = 1
train_dataset, test_dataset, scaler = load_data(file_path, window_size, forecast_steps)
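Before moving on, it is worth checking that the datasets yield the expected shapes (output assumes the window and horizon above; n_features is the number of columns in the file):

x, y = train_dataset[0]
print(len(train_dataset), len(test_dataset))
print(x.shape, y.shape)  # torch.Size([10, n_features]) torch.Size([1])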
3. Model construction
KAN + Transformer
class KANTransformer(nn.Module):
    def __init__(self, input_dim, hidden_dim, num_layers, output_dim, num_heads, dropout=0.1):
        super(KANTransformer, self).__init__()
        self.embedding = nn.Linear(input_dim, hidden_dim)
        self.transformer = nn.TransformerEncoder(
            nn.TransformerEncoderLayer(d_model=hidden_dim, nhead=num_heads, dropout=dropout),
            num_layers=num_layers
        )
        self.fc = nn.Linear(hidden_dim, output_dim)

    def forward(self, x):
        x = self.embedding(x)
        x = x.permute(1, 0, 2)  # (seq_len, batch_size, hidden_dim)
        x = self.transformer(x)
        x = x.permute(1, 0, 2)  # (batch_size, seq_len, hidden_dim)
        x = x[:, -1, :]  # take the last time step's output
        x = self.fc(x)
        return x
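Note that this encoder has no positional encoding, so on its own it cannot distinguish the order of time steps within the window. If that matters for the data, a standard sinusoidal encoding can be inserted between the first permute and the transformer call, since the module below expects (seq_len, batch_size, d_model) inputs. A minimal sketch (PositionalEncoding is an addition, not part of the original code):

import math

class PositionalEncoding(nn.Module):
    # Standard sinusoidal positional encoding for (seq_len, batch, d_model) inputs
    def __init__(self, d_model, max_len=500):
        super().__init__()
        pe = torch.zeros(max_len, d_model)
        position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
        div_term = torch.exp(torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model))
        pe[:, 0::2] = torch.sin(position * div_term)
        pe[:, 1::2] = torch.cos(position * div_term)
        self.register_buffer('pe', pe.unsqueeze(1))  # (max_len, 1, d_model)

    def forward(self, x):
        return x + self.pe[:x.size(0)]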
KAN + LSTM
class KANLSTM(nn.Module):
    def __init__(self, input_dim, hidden_dim, num_layers, output_dim):
        super(KANLSTM, self).__init__()
        self.lstm = nn.LSTM(input_dim, hidden_dim, num_layers, batch_first=True)
        self.fc = nn.Linear(hidden_dim, output_dim)

    def forward(self, x):
        # Hidden and cell states default to zeros when omitted
        out, _ = self.lstm(x)
        out = out[:, -1, :]  # last time step
        out = self.fc(out)
        return out
KAN + BiGRU
class KANBiGRU(nn.Module):
    def __init__(self, input_dim, hidden_dim, num_layers, output_dim):
        super(KANBiGRU, self).__init__()
        self.bigru = nn.GRU(input_dim, hidden_dim, num_layers, batch_first=True, bidirectional=True)
        # Bidirectional: forward and backward hidden states are concatenated
        self.fc = nn.Linear(hidden_dim * 2, output_dim)

    def forward(self, x):
        out, _ = self.bigru(x)  # initial hidden state defaults to zeros
        out = out[:, -1, :]
        out = self.fc(out)
        return out
KAN + GRU
class KANGRU(nn.Module):
    def __init__(self, input_dim, hidden_dim, num_layers, output_dim):
        super(KANGRU, self).__init__()
        self.gru = nn.GRU(input_dim, hidden_dim, num_layers, batch_first=True)
        self.fc = nn.Linear(hidden_dim, output_dim)

    def forward(self, x):
        out, _ = self.gru(x)  # initial hidden state defaults to zeros
        out = out[:, -1, :]
        out = self.fc(out)
        return out
KAN + TCN
class Chomp1d(nn.Module):
    # Trim the right-side padding so each convolution stays causal
    def __init__(self, chomp_size):
        super(Chomp1d, self).__init__()
        self.chomp_size = chomp_size

    def forward(self, x):
        return x[:, :, :-self.chomp_size] if self.chomp_size > 0 else x

class KANTCN(nn.Module):
    def __init__(self, input_dim, hidden_dim, num_levels, output_dim, kernel_size=2):
        super(KANTCN, self).__init__()
        layers = []
        in_channels = input_dim
        for i in range(num_levels):
            dilation = 2 ** i  # doubling dilations grow the receptive field exponentially
            padding = (kernel_size - 1) * dilation
            layers += [
                nn.Conv1d(in_channels, hidden_dim, kernel_size, padding=padding, dilation=dilation),
                Chomp1d(padding),
                nn.ReLU(),
            ]
            in_channels = hidden_dim
        self.tcn = nn.Sequential(*layers)
        self.fc = nn.Linear(hidden_dim, output_dim)

    def forward(self, x):
        x = x.permute(0, 2, 1)  # (batch_size, input_dim, seq_len)
        x = self.tcn(x)
        x = x.permute(0, 2, 1)  # (batch_size, seq_len, hidden_dim)
        x = x[:, -1, :]  # last time step
        x = self.fc(x)
        return x
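One practical check for this stack: with the doubling dilations above, the receptive field at the last time step is 1 + (kernel_size - 1) * (2**num_levels - 1), so it should be sized against window_size if the model is meant to see the whole input window:

def tcn_receptive_field(kernel_size, num_levels):
    # 1 + sum over levels of (kernel_size - 1) * 2**i
    return 1 + (kernel_size - 1) * (2 ** num_levels - 1)

print(tcn_receptive_field(2, 3))  # 8: covers only 8 of the 10 steps when window_size = 10
print(tcn_receptive_field(2, 4))  # 16: one extra level covers the full window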
4. Training and evaluation
def train_model(model, train_loader, test_loader, criterion, optimizer, num_epochs, device):
    model.to(device)
    train_losses = []
    test_losses = []
    r2_scores = []
    mae_scores = []
    rmse_scores = []
    for epoch in range(num_epochs):
        # Training phase
        model.train()
        running_loss = 0.0
        for inputs, targets in train_loader:
            inputs, targets = inputs.to(device), targets.to(device)
            optimizer.zero_grad()
            outputs = model(inputs)
            loss = criterion(outputs, targets)
            loss.backward()
            optimizer.step()
            running_loss += loss.item()
        train_losses.append(running_loss / len(train_loader))

        # Evaluation phase
        model.eval()
        test_loss = 0.0
        all_preds = []
        all_targets = []
        with torch.no_grad():
            for inputs, targets in test_loader:
                inputs, targets = inputs.to(device), targets.to(device)
                outputs = model(inputs)
                loss = criterion(outputs, targets)
                test_loss += loss.item()
                all_preds.extend(outputs.cpu().numpy())
                all_targets.extend(targets.cpu().numpy())
        test_losses.append(test_loss / len(test_loader))

        all_preds = np.array(all_preds).flatten()
        all_targets = np.array(all_targets).flatten()
        r2 = r2_score(all_targets, all_preds)
        mae = mean_absolute_error(all_targets, all_preds)
        rmse = np.sqrt(mean_squared_error(all_targets, all_preds))  # works across sklearn versions
        r2_scores.append(r2)
        mae_scores.append(mae)
        rmse_scores.append(rmse)
        print(f'Epoch [{epoch+1}/{num_epochs}], Train Loss: {train_losses[-1]:.4f}, '
              f'Test Loss: {test_losses[-1]:.4f}, R2: {r2:.4f}, MAE: {mae:.4f}, RMSE: {rmse:.4f}')
    return train_losses, test_losses, r2_scores, mae_scores, rmse_scores
def plot_results(train_losses, test_losses, r2_scores, mae_scores, rmse_scores):
    plt.figure(figsize=(15, 5))
    plt.subplot(1, 3, 1)
    plt.plot(train_losses, label='Train Loss')
    plt.plot(test_losses, label='Test Loss')
    plt.legend()
    plt.title('Losses')
    plt.subplot(1, 3, 2)
    plt.plot(r2_scores, label='R2 Score')
    plt.legend()
    plt.title('R2 Scores')
    plt.subplot(1, 3, 3)
    plt.plot(mae_scores, label='MAE')
    plt.plot(rmse_scores, label='RMSE')
    plt.legend()
    plt.title('Error Metrics')
    plt.show()
# Hyperparameters
input_dim = train_dataset.data.shape[1]  # number of input features, inferred from the data
hidden_dim = 100
num_layers = 2
output_dim = forecast_steps
num_heads = 4
dropout = 0.1
num_levels = 3
kernel_size = 2
num_epochs = 50
batch_size = 32
learning_rate = 0.001
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# Data loaders
train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False)
# Instantiate the models
models = {
    'KANTransformer': KANTransformer(input_dim, hidden_dim, num_layers, output_dim, num_heads, dropout),
    'KANLSTM': KANLSTM(input_dim, hidden_dim, num_layers, output_dim),
    'KANBiGRU': KANBiGRU(input_dim, hidden_dim, num_layers, output_dim),
    'KANGRU': KANGRU(input_dim, hidden_dim, num_layers, output_dim),
    'KANTCN': KANTCN(input_dim, hidden_dim, num_levels, output_dim, kernel_size)
}

# Train and evaluate each model
results = {}
for name, model in models.items():
    print(f'Training {name}...')
    criterion = nn.MSELoss()
    optimizer = optim.Adam(model.parameters(), lr=learning_rate)
    train_losses, test_losses, r2_scores, mae_scores, rmse_scores = train_model(
        model, train_loader, test_loader, criterion, optimizer, num_epochs, device)
    results[name] = {
        'train_losses': train_losses,
        'test_losses': test_losses,
        'r2_scores': r2_scores,
        'mae_scores': mae_scores,
        'rmse_scores': rmse_scores
    }
    plot_results(train_losses, test_losses, r2_scores, mae_scores, rmse_scores)
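To reuse the trained weights later without retraining, a minimal save/load sketch (the file names are arbitrary):

# Save each trained model's weights
for name, model in models.items():
    torch.save(model.state_dict(), f'{name}.pt')

# To restore later, construct the architecture first, then load the state dict, e.g.:
# model = KANLSTM(input_dim, hidden_dim, num_layers, output_dim)
# model.load_state_dict(torch.load('KANLSTM.pt', map_location=device))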
5. Comparison plots
def plot_comparison(results):
    plt.figure(figsize=(15, 10))
    # Losses
    plt.subplot(2, 2, 1)
    for name, result in results.items():
        plt.plot(result['train_losses'], label=f'{name} Train Loss')
        plt.plot(result['test_losses'], label=f'{name} Test Loss')
    plt.legend()
    plt.title('Losses Comparison')
    # R2 Scores
    plt.subplot(2, 2, 2)
    for name, result in results.items():
        plt.plot(result['r2_scores'], label=f'{name} R2 Score')
    plt.legend()
    plt.title('R2 Scores Comparison')
    # MAE
    plt.subplot(2, 2, 3)
    for name, result in results.items():
        plt.plot(result['mae_scores'], label=f'{name} MAE')
    plt.legend()
    plt.title('MAE Comparison')
    # RMSE
    plt.subplot(2, 2, 4)
    for name, result in results.items():
        plt.plot(result['rmse_scores'], label=f'{name} RMSE')
    plt.legend()
    plt.title('RMSE Comparison')
    plt.tight_layout()
    plt.show()
plot_comparison(results)
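For a compact side-by-side view, the final-epoch metrics can also be collected into a single table (a convenience built on the results dict above):

summary = pd.DataFrame({
    name: {
        'R2': result['r2_scores'][-1],
        'MAE': result['mae_scores'][-1],
        'RMSE': result['rmse_scores'][-1],
        'Test Loss': result['test_losses'][-1],
    }
    for name, result in results.items()
}).T
print(summary.round(4))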
6. Error plots
def plot_error(model, model_name, test_loader, device, scaler):
    model.eval()
    all_preds = []
    all_targets = []
    with torch.no_grad():
        for inputs, targets in test_loader:
            inputs, targets = inputs.to(device), targets.to(device)
            outputs = model(inputs)
            all_preds.extend(outputs.cpu().numpy())
            all_targets.extend(targets.cpu().numpy())
    all_preds = np.array(all_preds).flatten()
    all_targets = np.array(all_targets).flatten()
    # Undo the scaling so the plots are in the original units
    all_preds = scaler.inverse_transform(all_preds.reshape(-1, 1)).flatten()
    all_targets = scaler.inverse_transform(all_targets.reshape(-1, 1)).flatten()

    plt.figure(figsize=(10, 5))
    plt.plot(all_targets, label='True Values')
    plt.plot(all_preds, label='Predicted Values')
    plt.legend()
    plt.title(f'{model_name} True vs Predicted Values')
    plt.show()

    plt.figure(figsize=(10, 5))
    errors = all_targets - all_preds
    plt.hist(errors, bins=30, alpha=0.75, color='blue', edgecolor='black')
    plt.title(f'{model_name} Error Distribution')
    plt.xlabel('Error')
    plt.ylabel('Frequency')
    plt.show()

for name, model in models.items():
    plot_error(model, name, test_loader, device, scaler)
7. Summary
The code above provides a complete framework for time series forecasting models that combine KAN with Transformer, LSTM, BiGRU, GRU, and TCN, covering data loading, preprocessing, model construction, training, evaluation, and visualization of the results.