import numpy as np
from keras.models import Sequential
from keras.layers import Dense, LSTM


class LSTMForecast(object):
    def __init__(self):
        self.batch_size = 24
        self.num_lstm_layers = 2
        self.lstm_units = 10
        self.reset_states = False  # reset LSTM state manually between epochs?
        self.time_steps = 1
        self.look_back = 1
        self.tmodel = None  # training model: stateful, fixed batch size
        self.pmodel = None  # prediction model: stateless, shares the trained weights
        self.num_epochs = 10
        self.lstm_model()
    def lstm_model(self):
        # Both models treat each input row as a sequence of no_of_medicines steps
        # with one feature per step, matching the reshape done in lstm_model_train.
        # no_of_medicines is assumed to be defined at module level, as in the
        # original code.
        self.tmodel = Sequential()
        self.pmodel = Sequential()
        if self.num_lstm_layers == 1:
            self.tmodel.add(LSTM(self.lstm_units,
                                 batch_input_shape=(self.batch_size, no_of_medicines, 1),
                                 stateful=True))
            self.pmodel.add(LSTM(self.lstm_units,
                                 input_shape=(no_of_medicines, 1),
                                 stateful=False))
        elif self.num_lstm_layers == 2:
            # Only the first layer needs an input shape; Keras infers the rest.
            self.tmodel.add(LSTM(self.lstm_units,
                                 batch_input_shape=(self.batch_size, no_of_medicines, 1),
                                 stateful=True, return_sequences=True))
            self.tmodel.add(LSTM(self.lstm_units, stateful=True))
            self.pmodel.add(LSTM(self.lstm_units,
                                 input_shape=(no_of_medicines, 1),
                                 stateful=False, return_sequences=True))
            self.pmodel.add(LSTM(self.lstm_units, stateful=False))
        self.tmodel.add(Dense(1))
        self.tmodel.compile(loss='mean_squared_error', optimizer='adam')
        self.pmodel.add(Dense(1))
        self.pmodel.compile(loss='mean_squared_error', optimizer='adam')
        self.losses = np.zeros(self.num_epochs)
        self.train_scores = np.zeros(self.num_epochs)
        self.test_scores = np.zeros(self.num_epochs)
        self.tmodel.summary()
    def lstm_model_train(self, xtr, ytr):
        # Reshape input to [samples, time steps, features].
        xtr = xtr.drop(["PRODUCT_ID", "week_id"], axis=1)
        xtr = xtr.values
        xtr = np.reshape(xtr, (xtr.shape[0], xtr.shape[1], 1))
        print('train shapes:', xtr.shape, ytr.shape)
        # Note: for the stateful training model, the number of samples must be a
        # multiple of batch_size.
        if self.reset_states:
            # Train one epoch at a time so the LSTM state can be cleared between epochs.
            for i in range(self.num_epochs):
                hist = self.tmodel.fit(xtr, ytr, epochs=1, batch_size=self.batch_size,
                                       verbose=2, shuffle=False)
                self.losses[i] = hist.history["loss"][0]
                self.tmodel.reset_states()
                print("\nEpoch=" + str(i))
        else:
            hist = self.tmodel.fit(xtr, ytr, epochs=self.num_epochs,
                                   batch_size=self.batch_size, verbose=2, shuffle=False)
            self.losses = np.array(hist.history["loss"])
        # We maintain two models, tmodel and pmodel. pmodel is used for prediction,
        # where the batch size can differ from the one used for training. See:
        # https://stackoverflow.com/questions/43702481/why-does-keras-lstm-batch-size-used-for-prediction-have-to-be-the-same-as-fittin/44228505#44228505
        self.tmodel.save_weights('DataSet/lstm_model.h5')
        self.pmodel.load_weights('DataSet/lstm_model.h5')
    def lstm_model_predict(self, xts):
        xts = xts.drop(["PRODUCT_ID", "week_id"], axis=1)
        xts = xts.values
        # Same [samples, time steps, features] reshape as in training.
        xts = np.reshape(xts, (xts.shape[0], xts.shape[1], 1))
        return self.pmodel.predict(xts)
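To exercise the class end to end, here is a minimal sketch with synthetic data. The column names ("PRODUCT_ID", "week_id", one demand column per medicine), the module-level no_of_medicines, and the DataSet/ directory are assumptions carried over from the code above; the sample count (48) is deliberately a multiple of batch_size (24), which the stateful training model requires.

import os
import numpy as np
import pandas as pd

no_of_medicines = 10  # assumed global used by LSTMForecast.lstm_model()
os.makedirs('DataSet', exist_ok=True)  # save_weights needs the directory to exist

# 48 synthetic rows: two id columns plus one demand column per medicine.
n = 48
df = pd.DataFrame(np.random.rand(n, no_of_medicines),
                  columns=['med_%d' % j for j in range(no_of_medicines)])
df.insert(0, "PRODUCT_ID", np.arange(n))
df.insert(1, "week_id", np.arange(n))
y = np.random.rand(n)  # synthetic target values

forecaster = LSTMForecast()
forecaster.lstm_model_train(df, y)
predictions = forecaster.lstm_model_predict(df)
print(predictions.shape)  # (48, 1)

Because pmodel is stateless, the prediction call above is not tied to the training batch size of 24; any number of rows works.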