InvalidArgumentError: Input to reshape is a tensor with 27000 values, but the requested shape has 810000 [Op:Reshape]

Submitted by 半城伤御伤魂 on 2021-02-05 10:42:07

Question


Getting the following error message when setting up a 3D-GAN for ModelNet10:

InvalidArgumentError: Input to reshape is a tensor with 27000 values, but the requested shape has 810000 [Op:Reshape]

In my opinion the batch is not being created properly, so the shape of the tensor is invalid. I've tried different things but can't get the batch set up. I'd be more than thankful for any hints on how to clean up my code! Thanks in advance!

import time

import numpy as np
import tensorflow as tf
np.random.seed(1)

from tensorflow.keras import layers
from IPython import display

# Load the data
modelnet_path = '/modelnet10.npz'
data = np.load(modelnet_path)
X, Y = data['X_train'], data['y_train']
X_test, Y_test = data['X_test'], data['y_test']
X = X.reshape(X.shape[0], 30, 30, 30, 1).astype('float32')

#Hyperparameters
BUFFER_SIZE = 3991
BATCH_SIZE = 30
LEARNING_RATE = 4e-4
BETA_1 = 5e-1
EPOCHS = 100

#Random seed for image generation
n_examples = 16
noise_dim = 100
seed = tf.random.normal([n_examples, noise_dim])

train_dataset = tf.data.Dataset.from_tensor_slices(X).batch(BATCH_SIZE)

# Build the network
def make_discriminator_model():    
    model = tf.keras.Sequential()
    model.add(layers.Reshape((30, 30, 30, 1), input_shape=(30, 30, 30)))  
    model.add(layers.Conv3D(16, 6, strides=2, activation='relu'))
    model.add(layers.Conv3D(64, 5, strides=2, activation='relu'))
    model.add(layers.Conv3D(64, 5, strides=2, activation='relu'))
    model.add(layers.Flatten())
    model.add(layers.Dense(10))

    return model

discriminator = make_discriminator_model()

def make_generator_model():   
    model = tf.keras.Sequential()
    model.add(layers.Dense(15*15*15*128, use_bias=False,input_shape=(100,)))
    model.add(layers.BatchNormalization())
    model.add(layers.ReLU())
    model.add(layers.Reshape((15,15,15,128)))    
    model.add(layers.Conv3DTranspose(64, (5,5,5), strides=(1,1,1), padding='valid', use_bias=False))
    model.add(layers.BatchNormalization())
    model.add(layers.ReLU())  
    model.add(layers.Conv3DTranspose(32, (5,5,5), strides=(2,2,2), padding='valid', use_bias=False, activation='tanh'))

    return model

generator = make_generator_model()

#Optimizer & Loss function
cross_entropy = tf.keras.losses.BinaryCrossentropy(from_logits=True)

def discriminator_loss(real_output, fake_output):
    real_loss = cross_entropy(tf.ones_like(real_output), real_output)
    fake_loss = cross_entropy(tf.zeros_like(fake_output), fake_output)
    total_loss = real_loss + fake_loss

    return total_loss

def generator_loss(fake_output):
    return cross_entropy(tf.ones_like(fake_output), fake_output)

optimizer = tf.keras.optimizers.Adam(lr=LEARNING_RATE, beta_1=BETA_1)

#Training
def train_step(shapes):
    noise = tf.random.normal([BATCH_SIZE, noise_dim])

    with tf.GradientTape() as gen_tape, tf.GradientTape() as disc_tape:
        generated_shapes = generator(noise, training=True)

        real_output = discriminator(shapes, training=True)
        fake_output = discriminator(generated_shapes, training=True)
        gen_loss = generator_loss(fake_output)
        disc_loss = discriminator_loss(real_output, fake_output)

    gen_gradients = gen_tape.gradient(gen_loss, generator.trainable_variables)
    disc_gradients = disc_tape.gradient(disc_loss, discriminator.trainable_variables)

    optimizer.apply_gradients(zip(gen_gradients, generator.trainable_variables))
    optimizer.apply_gradients(zip(disc_gradients, discriminator.trainable_variables))

def train(dataset, epochs):
    for epoch in range(epochs):
        start = time.time()

        for shape_batch in dataset:
            train_step(shape_batch)

        display.clear_output(wait=True)
        print ('Time for epoch {} is {} sec'.format(epoch + 1, time.time()-start))

    display.clear_output(wait=True)      

train(X_test, EPOCHS)

Answer 1:


X_test is just a raw array, so in your training loop only one sample (30*30*30 = 27000 values) is fed into the model per step, while the model expects a full batch of 30 (batch size) * 30 * 30 * 30 = 810000 values.

modelnet_path = '/modelnet10.npz'
data = np.load(modelnet_path)
X, Y = data['X_train'], data['y_train']
X_test, Y_test = data['X_test'], data['y_test']
X = X.reshape(X.shape[0], 30, 30, 30, 1).astype('float32')

...
train_dataset = tf.data.Dataset.from_tensor_slices(X).batch(BATCH_SIZE)
...
def train(dataset, epochs):
    for epoch in range(epochs):
        start = time.time()

        for shape_batch in dataset:
            train_step(shape_batch)

        display.clear_output(wait=True)
        print ('Time for epoch {} is {} sec'.format(epoch + 1, time.time()-start))

    display.clear_output(wait=True)      

train(X_test, EPOCHS)
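To see where the numbers in the error come from, compare what each loop actually yields (a quick sketch, assuming X_test keeps the raw (num_samples, 30, 30, 30) layout from the .npz file):

# Iterating the raw NumPy array yields one unbatched 30x30x30 sample per step:
for sample in X_test[:1]:
    print(np.prod(sample.shape))   # 30*30*30 = 27000 values

# Iterating the batched tf.data.Dataset yields a full batch per step:
for batch in train_dataset.take(1):
    print(tf.size(batch).numpy())  # 30*30*30*30*1 = 810000 values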

Consider training with the train_dataset you created, or turn X_test into a tf.data.Dataset as well.

train(train_dataset , EPOCHS)
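If you also want to run the loop on the test split, one option (a sketch, assuming X_test uses the same 30x30x30 voxel layout as X_train) is to batch it the same way:

# Reshape and batch X_test just like X_train; drop_remainder=True keeps every
# batch at BATCH_SIZE, matching the fixed noise batch created in train_step.
X_test = X_test.reshape(X_test.shape[0], 30, 30, 30, 1).astype('float32')
test_dataset = tf.data.Dataset.from_tensor_slices(X_test).batch(BATCH_SIZE, drop_remainder=True)

train(test_dataset, EPOCHS)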


Source: https://stackoverflow.com/questions/58450109/invalidargumenterror-input-to-reshape-is-a-tensor-with-27000-values-but-the-re
