```python
import tensorflow as tf
import numpy as np

isTrain = tf.placeholder(tf.bool)
user_input = tf.placeholder(tf.float32)

with tf.device('/cpu:0'):
    beta = tf.Variable(tf.ones([1]))
    batch_mean = beta.assign(user_input)    # write the current input into beta

    ema = tf.train.ExponentialMovingAverage(decay=0.5)
    ema_apply_op = ema.apply([batch_mean])  # op that updates the moving average
    ema_mean = ema.average(batch_mean)      # shadow value holding the average

def mean_var_with_update():
    # Update the moving average, then return the current batch mean.
    with tf.control_dependencies([ema_apply_op]):
        return tf.identity(batch_mean)

# Training: update the moving average and return batch_mean.
# Testing: just return the stored moving average.
mean = tf.cond(isTrain, mean_var_with_update, lambda: ema_mean)
# ======= End Here ==========

saver = tf.train.Saver()
init = tf.initialize_all_variables()
sess = tf.Session()
sess.run(init)

u_input = [[2], [3], [4]]

for u in u_input:
    aa = sess.run([mean], feed_dict={user_input: u, isTrain: True})
    print("Train", aa)

for u in u_input:
    aa = sess.run([ema_mean], feed_dict={user_input: u, isTrain: False})
    print("Test correct", aa)

for u in u_input:
    aa = sess.run([mean], feed_dict={user_input: u, isTrain: False})
    print("Test", aa)
```
This code snippet is supposed to accumulate a moving average of user_input during the training stage and return that stored average during the testing stage.
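For reference, tf.train.ExponentialMovingAverage keeps a shadow value per tracked tensor and, on each apply, updates it as shadow = decay * shadow + (1 - decay) * value (per the TensorFlow docs). A plain-Python sketch of that rule (the function name is mine, not part of TF):

```python
def ema_update(shadow, value, decay=0.5):
    # Update rule used by tf.train.ExponentialMovingAverage:
    # shadow <- decay * shadow + (1 - decay) * value
    return decay * shadow + (1 - decay) * value
```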
This is the output:
```
('Train', [array([ 2.], dtype=float32)])
('Train', [array([ 3.], dtype=float32)])
('Train', [array([ 4.], dtype=float32)])
('Test correct', [array([ 3.], dtype=float32)])
('Test correct', [array([ 3.], dtype=float32)])
('Test correct', [array([ 3.], dtype=float32)])
('Test', [array([ 2.5], dtype=float32)])
('Test', [array([ 2.75], dtype=float32)])
('Test', [array([ 3.375], dtype=float32)])
```
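These numbers are consistent with the moving average continuing to update during the "Test" runs. Replaying the update rule above in plain Python (assuming the shadow value starts at 0.0, which is what the "Train" and "Test correct" figures imply) reproduces every printed value:

```python
decay, shadow = 0.5, 0.0           # assumed initial shadow value
for v in [2.0, 3.0, 4.0]:          # the three "Train" runs
    shadow = decay * shadow + (1 - decay) * v
print(shadow)                      # 3.0 -- matches "Test correct"
for v in [2.0, 3.0, 4.0]:          # the three "Test" runs, isTrain=False
    shadow = decay * shadow + (1 - decay) * v
    print(shadow)                  # 2.5, 2.75, 3.375 -- matches "Test"
```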
However, the update branch (mean_var_with_update, and with it ema_apply_op) always seems to get executed when calling sess.run([mean]), even if isTrain is False: the "Test" values keep changing (2.5, 2.75, 3.375) instead of staying at 3.0 as in "Test correct".
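To isolate this from the EMA machinery, here is a minimal sketch (variable names are mine) that checks whether an op created outside the tf.cond branch functions runs even when its branch is not taken:

```python
import tensorflow as tf

pred = tf.placeholder(tf.bool)
counter = tf.Variable(0)
inc = counter.assign_add(1)  # side-effecting op created OUTSIDE the branches

def true_branch():
    # Attach the external op via a control dependency, as in the code above.
    with tf.control_dependencies([inc]):
        return tf.identity(counter)

out = tf.cond(pred, true_branch, lambda: tf.identity(counter))

sess = tf.Session()
sess.run(tf.initialize_all_variables())
sess.run(out, feed_dict={pred: False})  # take only the False branch
print(sess.run(counter))  # if this prints 1, inc ran despite pred=False
```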
Is there any mistake in the code? The TensorFlow version is 0.7.1.