Can not squeeze dim[1], expected a dimension of 1, got 5

Submitted by 。_饼干妹妹 on 2021-02-11 09:41:29

Question


I have tried different solutions but am still facing this issue. I am new to ML/DL (Python). In which cases does the error "Can not squeeze dim[1], expected a dimension of 1, got 5" occur? Please help me understand what I am doing wrong here and what the correct approach is.

Here is the traceback:

---------------------------------------------------------------------------
InvalidArgumentError                          Traceback (most recent call last)

<ipython-input-9-0826122252c2> in <module>()
     98 model.summary()
     99 model.compile(loss='sparse_categorical_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
--> 100 history=model.fit(trainX, trainY, batch_size=32, epochs=10, verbose=1)

4 frames
/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/client/session.py in __call__(self, *args, **kwargs)
   1470         ret = tf_session.TF_SessionRunCallable(self._session._session,
   1471                                                self._handle, args,
-> 1472                                                run_metadata_ptr)
   1473         if run_metadata:
   1474           proto_data = tf_session.TF_GetBuffer(run_metadata_ptr)

InvalidArgumentError: Can not squeeze dim[1], expected a dimension of 1, got 5
     [[{{node metrics_4/acc/Squeeze}}]]

Here is my code

import os
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from tensorflow.keras.preprocessing.image import load_img, img_to_array
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense

# create a mapping of tags to integers given the loaded mapping file
def create_tag_mapping(mapping_csv):
    # create a set of all known tags
    labels = set()
    for i in range(len(mapping_csv)):
        # convert space separated tags into an array of tags
        tags = mapping_csv['Labels'][i].split(' ')
        # add tags to the set of known labels
        labels.update(tags)
    # convert the set of labels to a list
    labels = list(labels)
    # sort the list alphabetically
    labels.sort()
    # dict that maps labels to integers, and the reverse
    labels_map = {labels[i]:i for i in range(len(labels))}
    inv_labels_map = {i:labels[i] for i in range(len(labels))}
    return labels_map, inv_labels_map

# create a mapping of filename to tags
def create_file_mapping(mapping_csv):
    mapping = dict()
    for i in range(len(mapping_csv)):
        name, tags = mapping_csv['Id'][i], mapping_csv['Labels'][i]
        mapping[name] = tags.split(' ')
    return mapping

# create encoding for one list of tags
def custom_encode(tags, mapping):
    # create an empty vector
    encoding=[]
    for tag in tags:
        if tag == 'L':
            encoding.append(1)
        elif tag == 'M':
            encoding.append(2)
        else:
            encoding.append(3)
    return encoding

def load_dataset(path, file_mapping, tag_mapping):
    photos, targets = list(), list()
    # enumerate files in the directory
    for filename in os.listdir(path):
        # load image
        photo = load_img(path + filename, target_size=(760,415))
        # convert to numpy array
        photo = img_to_array(photo, dtype='uint8')
        # get tags
        tags = file_mapping[filename[:-4]]
        # encode the tags as integers
        target = custom_encode(tags, tag_mapping)
        #print(target)
        # store
        photos.append(photo)
        targets.append(target)
    X = np.asarray(photos, dtype='uint8')
    y = np.asarray(targets, dtype='uint8')
    return X, y

# define location of the training labels CSV
trainingLabels = csvPath
# load the mapping file
mapping_csv = pd.read_csv(trainingLabels)
# create a mapping of tags to integers
tag_mapping, _ = create_tag_mapping(mapping_csv)
# create a mapping of filenames to tag lists
file_mapping = create_file_mapping(mapping_csv)

# load the png images
folder = '/dataset/'

X, y = load_dataset(folder, file_mapping, tag_mapping)
print(X.shape, y.shape)

trainX, testX, trainY, testY = train_test_split(X, y, test_size=0.3, random_state=1)
print(trainX.shape, trainY.shape, testX.shape, testY.shape)

img_x,img_y=760,415

model = Sequential()
model.add(Conv2D(32, (5, 5), strides=(1,1), activation='relu', input_shape=(img_x, img_y,3)))
model.add(MaxPooling2D((2, 2), strides=(2,2)))
model.add(Flatten())
model.add(Dense(128, activation='relu', kernel_initializer='he_uniform'))
model.add(Dense(15))
model.summary()
model.compile(loss='sparse_categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
history=model.fit(trainX, trainY, batch_size=32, epochs=10, verbose=1)
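For reference, a minimal sketch of the label shapes involved (the sample count of 100 is hypothetical): custom_encode returns 5 integers per image, so trainY has a second dimension of 5, while sparse_categorical_crossentropy and its accuracy metric expect a single integer label per sample.

import numpy as np

# hypothetical stand-in for trainY: 100 samples, 5 integer tags each
trainY_example = np.random.randint(1, 4, size=(100, 5))
print(trainY_example.shape)   # (100, 5) -- sparse_categorical_crossentropy expects shape (100,) or (100, 1)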

Answer 1:


I was facing this error because I have 5 attributes in total and each one has 3 possible tags. In the scenario above, we will not face this error if we encode the labels like this:

def custom_encode(tags, mapping):
    # create empty vector
    encoding=[]
    for tag in tags:
        if tag == 'L':
            encoding.append([1,0,0])
        elif tag == 'M':
            encoding.append([0,1,0])
        else:
            encoding.append([0,0,1])
    return encoding
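With this encoding, each image's target becomes a 5x3 array, so y from load_dataset now has shape (num_samples, 5, 3). A quick check (a minimal sketch; the example tags below are hypothetical, with 'H' standing in for any third tag value):

import numpy as np

example_tags = ['L', 'M', 'H', 'L', 'M']                  # hypothetical tags for one image
encoded = np.asarray(custom_encode(example_tags, None))   # the mapping argument is unused here
print(encoded.shape)                                      # (5, 3): one one-hot row per attribute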

and then build the final layers so the model's output shape matches the (5, 3) targets:

model.add(Dense(15))              # 5 labels with 3 tags each, so 15 neurons in the final Dense layer
model.add(Reshape((5,3)))         # reshape to (5, 3): one row of logits per label
model.add(Activation('softmax'))  # softmax over the last axis, i.e. over the 3 tags of each label

model.compile(optimizer=opt, loss='categorical_crossentropy',
              metrics=['accuracy'])
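Putting it together, here is a minimal end-to-end sketch of the corrected setup. It uses random stand-in data, smaller images for speed, and 'adam' in place of the undefined opt, but the shapes follow the same pattern: the model outputs (batch, 5, 3), matching one-hot targets of shape (batch, 5, 3).

import numpy as np
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Reshape, Activation

# random stand-ins for the real data: 16 small images and one-hot targets of shape (16, 5, 3)
X = np.random.rand(16, 64, 64, 3).astype('float32')
y = np.zeros((16, 5, 3), dtype='float32')
picks = np.random.randint(0, 3, size=(16, 5))               # one tag index per label per sample
y[np.arange(16)[:, None], np.arange(5), picks] = 1.0

model = Sequential()
model.add(Conv2D(8, (3, 3), activation='relu', input_shape=(64, 64, 3)))
model.add(MaxPooling2D((2, 2)))
model.add(Flatten())
model.add(Dense(32, activation='relu'))
model.add(Dense(15))               # 5 labels x 3 tags
model.add(Reshape((5, 3)))         # one row of logits per label
model.add(Activation('softmax'))   # softmax over the 3 tags of each label
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
model.fit(X, y, batch_size=8, epochs=1, verbose=1)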


Source: https://stackoverflow.com/questions/58843073/can-not-squeeze-dim1-expected-a-dimension-of-1-got-5
