Question
I have created an LSTM-based training model:
def load_data_old(df_, seq_len, valid_pct=None, test_pct=None):
    """Build overlapping windows of length ``seq_len`` from ``df_`` and
    split them into train/validation/test sets.

    Each window uses every row but the last and every column but the last
    as features; the target is the last column of the window's final row.

    Parameters
    ----------
    df_ : pandas.DataFrame
        Input data; ``df_.values`` is windowed row-wise.
    seq_len : int
        Window length (the final row of each window supplies the target).
    valid_pct, test_pct : float, optional
        Validation/test split sizes in percent. When omitted, the function
        falls back to the module-level globals ``valid_set_size_percentage``
        and ``test_set_size_percentage`` (the original behavior).

    Returns
    -------
    list
        ``[x_train, y_train, x_valid, y_valid, x_test, y_test]`` arrays.
    """
    # Preserve the original global-based configuration when the caller
    # does not pass explicit percentages.
    if valid_pct is None:
        valid_pct = valid_set_size_percentage
    if test_pct is None:
        test_pct = test_set_size_percentage

    data_raw = df_.values
    # Sliding windows of seq_len consecutive rows.
    data = np.array([data_raw[i:i + seq_len]
                     for i in range(len(data_raw) - seq_len)])

    valid_set_size = int(np.round(valid_pct / 100 * data.shape[0]))
    test_set_size = int(np.round(test_pct / 100 * data.shape[0]))
    train_set_size = data.shape[0] - (valid_set_size + test_set_size)

    # Features: all rows but the last, all columns but the last.
    # Target:   the last row's last column of each window.
    x_train = data[:train_set_size, :-1, :-1]
    y_train = data[:train_set_size, -1, -1:]
    x_valid = data[train_set_size:train_set_size + valid_set_size, :-1, :-1]
    y_valid = data[train_set_size:train_set_size + valid_set_size, -1, -1:]
    x_test = data[train_set_size + valid_set_size:, :-1, :-1]
    y_test = data[train_set_size + valid_set_size:, -1, -1:]
    return [x_train, y_train, x_valid, y_valid, x_test, y_test]
# --- Graph construction ---------------------------------------------------
# Fixed two bugs from the original snippet:
#   1. the loader defined above is `load_data_old`, but an undefined
#      `load_data` was called;
#   2. `n_inputs` / `n_outputs` were referenced by the placeholders before
#      they were assigned, which raises NameError.
x_train, y_train, x_valid, y_valid, x_test, y_test = load_data_old(df, seq_len)

# Layer sizes are derived from the loaded data, so compute them first.
n_inputs = x_train.shape[2]    # feature columns per time step
n_outputs = y_train.shape[1]   # target columns

X = tf.placeholder(tf.float32, [None, n_steps, n_inputs], name="input")
y = tf.placeholder(tf.float32, [None, n_outputs], name="output")

# Stacked LSTM: `n_layers` peephole LSTM cells with leaky-ReLU activation.
layers = [tf.contrib.rnn.LSTMCell(num_units=n_neurons,
                                  activation=tf.nn.leaky_relu,
                                  use_peepholes=True,
                                  name="layer" + str(layer))
          for layer in range(n_layers)]
multi_layer_cell = tf.contrib.rnn.MultiRNNCell(layers)
rnn_outputs, states = tf.nn.dynamic_rnn(multi_layer_cell, X, dtype=tf.float32)

# Project every time step's hidden state to `n_outputs`, then keep only
# the prediction at the final time step.
stacked_rnn_outputs = tf.reshape(rnn_outputs, [-1, n_neurons])
stacked_outputs = tf.layers.dense(stacked_rnn_outputs, n_outputs)
outputs = tf.reshape(stacked_outputs, [-1, n_steps, n_outputs])
outputs = tf.identity(outputs[:, n_steps - 1, :], name="prediction")

loss = tf.reduce_mean(tf.square(outputs - y))  # loss function = mean squared error
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
training_op = optimizer.minimize(loss)
Suppose the input data is:
input1,input2,input3,input4,input5,input6,input7,input8,output
1,2,3,4,5,6,7,8,1
2,3,4,5,6,7,8,9,0
3,4,5,6,7,8,9,10,-1
4,5,6,7,8,9,10,11,-1
5,6,7,8,9,10,11,12,1
6,7,8,9,10,11,12,13,0
7,8,9,10,11,12,13,14,1
I want to visualize the sequences that are fed into the LSTM, and the interpretation the LSTM computes from the sequences given as input.
Kindly let me know how I can do that, and what I need to do.
Source: https://stackoverflow.com/questions/54789287/how-to-visualize-lstm-cell-tensorflow-matplotlib