Question
I initially have an X_train of shape (384, 640, 3) and a y_train of shape (384,). After I reshape X_train to (1, 384, 640, 3), its length becomes 1 and I get the ValueError shown below. Is there a way I can still have the length of X_train be 384 without reshaping it back to a 3D array?
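For context, Keras treats the first axis of the input array as the sample axis, so a leading dimension of 1 makes fit() see exactly one sample. A minimal sketch of the shape bookkeeping (the array contents here are placeholders, not the real data):

import numpy as np

X_train = np.zeros((384, 640, 3))   # 384 samples, each of shape (640, 3)
y_train = np.zeros((384,))          # one label per sample

xDataR = X_train.reshape(1, 384, 640, 3)
print(len(X_train))   # 384 -> matches len(y_train)
print(len(xDataR))    # 1   -> triggers the sample-count mismatch in fit()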
---------------------------------------------------------------------------
ValueError                                Traceback (most recent call last)
<ipython-input-...> in <module>
      1 # Training the model
----> 2 model.fit(np.array(xDataR), np.array(y_train), batch_size= 32, epochs=10)
      3 # Problem: Input arrays should have the same number of samples as target arrays. Found 1 input samples and 384 target samples.
      4 # Solution: train_data.size % batch_size = 0, or in other words the size of your data arrays needs to be a multiple of the batch size.

~\.conda\envs\...\lib\site-packages\tensorflow_core\python\keras\engine\training.py in fit(self, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, steps_per_epoch, validation_steps, validation_freq, max_queue_size, workers, use_multiprocessing, **kwargs)
    726           max_queue_size=max_queue_size,
    727           workers=workers,
--> 728           use_multiprocessing=use_multiprocessing)
    729
    730   def evaluate(self,

~\.conda\envs\...\lib\site-packages\tensorflow_core\python\keras\engine\training_v2.py in fit(self, model, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, steps_per_epoch, validation_steps, validation_freq, **kwargs)
    222           validation_data=validation_data,
    223           validation_steps=validation_steps,
--> 224           distribution_strategy=strategy)
    225
    226         total_samples = _get_total_number_of_samples(training_data_adapter)

~\.conda\envs\...\lib\site-packages\tensorflow_core\python\keras\engine\training_v2.py in _process_training_inputs(model, x, y, batch_size, epochs, sample_weights, class_weights, steps_per_epoch, validation_split, validation_data, validation_steps, shuffle, distribution_strategy, max_queue_size, workers, use_multiprocessing)
    545         max_queue_size=max_queue_size,
    546         workers=workers,
--> 547         use_multiprocessing=use_multiprocessing)
    548     val_adapter = None
    549     if validation_data:

~\.conda\envs\...\lib\site-packages\tensorflow_core\python\keras\engine\training_v2.py in _process_inputs(model, x, y, batch_size, epochs, sample_weights, class_weights, shuffle, steps, distribution_strategy, max_queue_size, workers, use_multiprocessing)
    592         batch_size=batch_size,
    593         check_steps=False,
--> 594         steps=steps)
    595   adapter = adapter_cls(
    596       x,

~\.conda\envs\...\lib\site-packages\tensorflow_core\python\keras\engine\training.py in _standardize_user_data(self, x, y, sample_weight, class_weight, batch_size, check_steps, steps_name, steps, validation_split, shuffle, extract_tensors_from_dataset)
   2532     # Check that all arrays have the same length.
   2533     if not self._distribution_strategy:
-> 2534       training_utils.check_array_lengths(x, y, sample_weights)
   2535     if self._is_graph_network and not self.run_eagerly:
   2536       # Additional checks to avoid users mistakenly using improper loss fns.

~\.conda\envs\...\lib\site-packages\tensorflow_core\python\keras\engine\training_utils.py in check_array_lengths(inputs, targets, weights)
    675                      'the same number of samples as target arrays. '
    676                      'Found ' + str(list(set_x)[0]) + ' input samples '
--> 677                      'and ' + str(list(set_y)[0]) + ' target samples.')
    678   if len(set_w) > 1:
    679     raise ValueError('All sample_weight arrays should have '

ValueError: Input arrays should have the same number of samples as target arrays. Found 1 input samples and 384 target samples.
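If a 4D array really is required (for example, to feed a Conv2D layer), one hedged sketch that keeps 384 on the sample axis is to insert the singleton dimension after the samples rather than in front of them; whether axis=-1 is the right spot depends on the input shape the model actually expects:

import numpy as np

X_train = np.zeros((384, 640, 3))

# Add a channel-like singleton axis at the end instead of a batch axis at the front,
# so each of the 384 samples becomes a (640, 3, 1) block.
xDataR = np.expand_dims(X_train, axis=-1)
print(xDataR.shape, len(xDataR))   # (384, 640, 3, 1) 384 -> matches y_train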
Source: https://stackoverflow.com/questions/58927184/maintain-array-length-after-ndarray-reshape