这似乎让事情进展顺利:
import keras
import numpy as np
from keras.layers import Dense, LSTM, Dropout, Activation
from keras.models import Sequential, Model
# --- model configuration ---
stateful = False   # keep LSTM state across batches? False = independent windows
look_back = 3      # number of past timesteps fed to the network per sample
lstm_cells = 1024  # units in each LSTM layer
dropout_rate = 0.5  # NOTE(review): defined but never applied below — confirm intent
# Each timestep is the flattened observation matrix, so the feature count is
# n_obs * n_attrs. The original hard-coded "* 3", which silently breaks when
# X.shape[2] != 3; derive it from X so it always matches the reshape done in
# the training-data loop (look_back, n_obs * n_attrs).
n_features = int(X.shape[1]) * int(X.shape[2])
input_shape = (look_back, n_features)  # (timesteps, features) for the first LSTM
output_shape = n_features              # predict one flattened frame
def loss(y_true, y_pred):
    """Mean-squared-error training loss; thin wrapper over the Keras builtin."""
    mse = keras.losses.mean_squared_error
    return mse(y_true, y_pred)
# Stacked LSTM: three recurrent layers followed by a ReLU-activated output.
# The first two layers return full sequences so the next LSTM sees every
# timestep; the last returns only the final state, which Dense maps to one
# flattened output frame.
model = Sequential([
    LSTM(lstm_cells, stateful=stateful, return_sequences=True, input_shape=input_shape),
    LSTM(lstm_cells, stateful=stateful, return_sequences=True),
    LSTM(lstm_cells, stateful=stateful),
    Dense(output_shape, activation='relu'),
])
model.compile(loss=loss, optimizer='sgd')
然后可以按如下方式对训练数据进行分区:
# --- build supervised training windows ---
# assumes X is a 3-D array (time, observations, attributes) — TODO confirm
n_time, n_obs, n_attrs = (int(d) for d in X.shape[:3])
flat_dim = n_obs * n_attrs
# Input: the `look_back` frames preceding t, each flattened to a vector.
# Target: the frame at t + 1 (note the deliberate one-step gap after the
# window, mirroring the original loop's X[i+1] target).
window_starts = range(look_back, n_time - 1)
train_x = np.array([X[t - look_back:t].reshape(look_back, flat_dim)
                    for t in window_starts])
train_y = np.array([X[t + 1].ravel() for t in window_starts])
然后可以训练玩具模型:
model.fit(train_x, train_y, epochs=10, batch_size=10)