I just got a new GTX 1070 Founders Edition for my desktop, and I am trying to run TensorFlow on this new GPU. I am using tensorflow.device() to run TensorFlow on my GPU, but I can't tell whether it is actually being used.
Try comparing the training time (GPU vs CPU) with this simple example:
import tensorflow as tf

# Load and normalize MNIST.
mnist = tf.keras.datasets.mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0

def create_model():
    model = tf.keras.models.Sequential([
        tf.keras.layers.Flatten(input_shape=(28, 28)),
        tf.keras.layers.Dense(512, activation=tf.nn.relu),
        tf.keras.layers.Dropout(0.2),
        tf.keras.layers.Dense(10, activation=tf.nn.softmax)
    ])
    model.compile(optimizer='adam',
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])
    return model

epochs = 3

# Train the same model on each device; Keras prints the time per epoch,
# so a working GPU should show clearly faster epochs.
print('GPU:')
with tf.device('/gpu:0'):
    model = create_model()
    model.fit(x_train, y_train, epochs=epochs)

print('\nCPU:')
with tf.device('/cpu:0'):
    model = create_model()
    model.fit(x_train, y_train, epochs=epochs)
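If you'd rather get a single number per device than eyeball the per-epoch logs, you can time each run yourself. Here is a minimal sketch that reuses create_model, x_train, y_train, and epochs from the code above; timed_fit is just a helper name for illustration, and tf.config.list_physical_devices is the TF 2.x API (older 2.0 builds expose it under tf.config.experimental):

import time
import tensorflow as tf

# First confirm TensorFlow can see the GPU at all; an empty list here
# means the CUDA/cuDNN setup is the problem, not tf.device().
print(tf.config.list_physical_devices('GPU'))

def timed_fit(device):
    """Train a fresh model on the given device, return wall-clock seconds."""
    with tf.device(device):
        model = create_model()
        start = time.perf_counter()
        # verbose=0 keeps console logging out of the measurement.
        model.fit(x_train, y_train, epochs=epochs, verbose=0)
        return time.perf_counter() - start

print('GPU seconds:', timed_fit('/gpu:0'))
print('CPU seconds:', timed_fit('/cpu:0'))

If the two numbers come out nearly identical, try calling tf.debugging.set_log_device_placement(True) (TF 2.x) before building the model to see which device each op actually lands on.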