'Predicting Future values of Temp using LSTM
Here's the code for forecasting temperature values. I trained and tested it's working fine..but coming to forecasting future values the prediction shows a flat line. kindly give your valuable suggestions. I don't know where I made a mistake. Here's the code for forecasting temperature values. I trained and tested it's working fine..but coming to forecasting future values the prediction shows a flat line. kindly give your valuable suggestions. I don't know where I made a mistake.
df = pd.read_csv(io.BytesIO(uploaded['weather_data_24hr.csv']),usecols=[6])
print(df)
print(len(df))
#Convert pandas dataframe to numpy array
dataset = df.values
dataset = dataset.astype('float32') #COnvert values to float
#LSTM uses sigmoid and tanh that are sensitive to magnitude so values need to be normalized
# normalize the dataset
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler(feature_range=(0, 1)) #Also try QuantileTransformer
dataset = scaler.fit_transform(dataset)
train_size = int(len(dataset) * 0.66)
test_size = len(dataset) - train_size
train, test = dataset[0:train_size,:], dataset[train_size-365:len(dataset),:]
print(dataset)
print(train)
print(test)
print(train.shape)
print(test.shape)
def to_sequences(dataset, seq_size):
x = []
y = []
for i in range(len(dataset)-seq_size):
#print(i)
window = dataset[i:(i+seq_size), 0]
x.append(window)
y.append(dataset[i+seq_size, 0])
return np.array(x),np.array(y)
seq_size = 365 # Number of time steps to look back
#Larger sequences (look further back) may improve forecasting.
trainX, trainY = to_sequences(train, seq_size)
testX, testY = to_sequences(test, seq_size)
trainX = np.reshape(trainX, (trainX.shape[0], trainX.shape[1],1))
testX = np.reshape(testX, (testX.shape[0], testX.shape[1],1))
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import *
from tensorflow.keras.callbacks import ModelCheckpoint
from tensorflow.keras.losses import MeanSquaredError
from tensorflow.keras.metrics import RootMeanSquaredError
from tensorflow.keras.optimizers import Adam
from sklearn.metrics import mean_squared_error
model1 = Sequential()
model1.add(LSTM(100, activation='relu', return_sequences=True, input_shape=(365,1)))
model1.add(Dropout(0.2))
model1.add(LSTM(100,activation='relu', return_sequences=False))
model1.add(Dropout(0.2))
model1.add(Dense(1))
print(model1.summary())
cp1 = ModelCheckpoint('model1/', save_best_only=True)
model1.compile(loss=MeanSquaredError(), optimizer=Adam(learning_rate=0.001), metrics=[RootMeanSquaredError()])
model1.fit(trainX, trainY, validation_data=(testX, testY), epochs=100, callbacks=[cp1])
trainPredict = model1.predict(trainX)
testPredict = model1.predict(testX)
print("Shape of training set: {}".format(testPredict.shape))
print("Shape of test set: {}".format(testY.shape))
trainPredict = scaler.inverse_transform(trainPredict)
trainY_inverse = scaler.inverse_transform([trainY])
testPredict = scaler.inverse_transform(testPredict)
testY_inverse = scaler.inverse_transform([testY])
plt.plot(testPredict)
plt.xlabel('Year')
plt.ylabel('Average Temperature')
print(testY)
x_input=testY[1698:].reshape(1,-1)
x_input.shape
temp_input=list(x_input)
temp_input=temp_input[0].tolist()
# demonstrate prediction for next 100 days
from numpy import array
lst_output=[]
n_steps=5
i=0
while(i<100):
if(len(temp_input)>5):
x_input=np.array(temp_input[1:])
print("{} day input {}".format(i,x_input))
x_input=x_input.reshape(1,-1)
x_input = x_input.reshape((1, n_steps, 1))
#print(x_input)
yhat = model1.predict(x_input, verbose=0)
print("{} day output {}".format(i,yhat))
temp_input.extend(yhat[0].tolist())
temp_input=temp_input[1:]
#print(temp_input)
lst_output.extend(yhat.tolist())
i=i+1
else:
x_input = x_input.reshape((1, n_steps,1))
yhat = model1.predict(x_input, verbose=0)
print(yhat[0])
temp_input.extend(yhat[0].tolist())
print(len(temp_input))
lst_output.extend(yhat.tolist())
i=i+1
#print(lst_output)
plt.plot(lst_output)
plt.xlabel('Year')
plt.ylabel('Average Temperature')
Future predictions
Sources
This article follows the attribution requirements of Stack Overflow and is licensed under CC BY-SA 3.0.
Source: Stack Overflow
| Solution | Source |
|---|

