Question
I have a custom image set in which I am trying to localize 4 features per image. The targets are x,y coordinates, so 8 values in total. I've run some basic CNNs and those train fine. My goal now is to convert to MobileNet.
I had trouble using Keras's built-in MobileNet and its code, so I mimicked the architecture with the corresponding layers. The stock model seems geared towards classification, whereas mine really just needs to regress the 8 x,y coordinates. I've done my best to adapt the output layers, but my loss so far is pretty bad, with a high learning rate.
Where am I going wrong? I feel as though I am missing something by not being able to use the built-in MobileNet model, since MobileNet is advertised to handle object detection as well as classification.
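For context, this is roughly the kind of wiring I expected to be able to do with the built-in model (a minimal sketch only; the include_top=False / pooling / Dense(8) combination is my assumption of the intended usage, not code I have working):

from keras.applications.mobilenet import MobileNet
from keras.layers import Dense
from keras.models import Model

# Sketch: built-in MobileNet as a feature extractor with a small regression head.
# include_top=False drops the 1000-way classifier, pooling='avg' yields a flat
# 1024-d feature vector, and Dense(8) would regress the 4 (x, y) pairs directly.
base = MobileNet(input_shape=(224, 224, 3), include_top=False,
                 weights='imagenet', pooling='avg')
coords = Dense(8, name='keypoints')(base.output)
reg_model = Model(inputs=base.input, outputs=coords)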
My implementation (the tricky bit seems to be the end):
from keras.models import Sequential
from keras.layers import (Activation, BatchNormalization, Conv2D, Dense,
                          DepthwiseConv2D, Dropout, Flatten,
                          GlobalAveragePooling2D, Reshape, ZeroPadding2D)

model = Sequential()
model.add(Conv2D(32, (3, 3), strides=(2, 2), padding='same',
use_bias=False, input_shape=(224, 224, 3)))
model.add(BatchNormalization())
model.add(Activation('relu'))
# block 1 ###########
model.add(ZeroPadding2D(padding=(1, 1)))
model.add(DepthwiseConv2D((3, 3), padding='valid',
depth_multiplier=1, strides=(1, 1), use_bias=False))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Conv2D(64, (1, 1), strides=(1, 1), padding='same', use_bias=False))
model.add(BatchNormalization())
model.add(Activation('relu'))
###########
# block 2 ###########
model.add(ZeroPadding2D(padding=(1, 1)))
model.add(DepthwiseConv2D((3, 3), padding='valid',
depth_multiplier=1, strides=(2, 2), use_bias=False))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Conv2D(128, (1, 1), strides=(1, 1), padding='same', use_bias=False))
model.add(BatchNormalization())
model.add(Activation('relu'))
###########
# block 3 ###########
model.add(ZeroPadding2D(padding=(1, 1)))
model.add(DepthwiseConv2D((3, 3), padding='valid',
depth_multiplier=1, strides=(1, 1), use_bias=False))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Conv2D(128, (1, 1), strides=(1, 1), padding='same', use_bias=False))
model.add(BatchNormalization())
model.add(Activation('relu'))
###########
# block 4 ###########
model.add(ZeroPadding2D(padding=(1, 1)))
model.add(DepthwiseConv2D((3, 3), padding='valid',
depth_multiplier=1, strides=(2, 2), use_bias=False))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Conv2D(256, (1, 1), strides=(1, 1), padding='same', use_bias=False))
model.add(BatchNormalization())
model.add(Activation('relu'))
###########
# block 5 ###########
model.add(ZeroPadding2D(padding=(1, 1)))
model.add(DepthwiseConv2D((3, 3), padding='valid',
depth_multiplier=1, strides=(1, 1), use_bias=False))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Conv2D(256, (1, 1), strides=(1, 1), padding='same', use_bias=False))
model.add(BatchNormalization())
model.add(Activation('relu'))
###########
# block 6 ###########
model.add(ZeroPadding2D(padding=(1, 1)))
model.add(DepthwiseConv2D((3, 3), padding='valid',
depth_multiplier=1, strides=(2, 2), use_bias=False))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Conv2D(512, (1, 1), strides=(1, 1), padding='same', use_bias=False))
model.add(BatchNormalization())
model.add(Activation('relu'))
###########
# block 7 ###########
model.add(ZeroPadding2D(padding=(1, 1)))
model.add(DepthwiseConv2D((3, 3), padding='valid',
depth_multiplier=1, strides=(1, 1), use_bias=False))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Conv2D(512, (1, 1), strides=(1, 1), padding='same', use_bias=False))
model.add(BatchNormalization())
model.add(Activation('relu'))
###########
# block 8 ###########
model.add(ZeroPadding2D(padding=(1, 1)))
model.add(DepthwiseConv2D((3, 3), padding='valid',
depth_multiplier=1, strides=(1, 1), use_bias=False))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Conv2D(512, (1, 1), strides=(1, 1), padding='same', use_bias=False))
model.add(BatchNormalization())
model.add(Activation('relu'))
###########
# block 9 ###########
model.add(ZeroPadding2D(padding=(1, 1)))
model.add(DepthwiseConv2D((3, 3), padding='valid',
depth_multiplier=1, strides=(1, 1), use_bias=False))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Conv2D(512, (1, 1), strides=(1, 1), padding='same', use_bias=False))
model.add(BatchNormalization())
model.add(Activation('relu'))
###########
# block 10 ###########
model.add(ZeroPadding2D(padding=(1, 1)))
model.add(DepthwiseConv2D((3, 3), padding='valid',
depth_multiplier=1, strides=(1, 1), use_bias=False))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Conv2D(512, (1, 1), strides=(1, 1), padding='same', use_bias=False))
model.add(BatchNormalization())
model.add(Activation('relu'))
###########
# block 11 ###########
model.add(ZeroPadding2D(padding=(1, 1)))
model.add(DepthwiseConv2D((3, 3), padding='valid',
depth_multiplier=1, strides=(1, 1), use_bias=False))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Conv2D(512, (1, 1), strides=(1, 1), padding='same', use_bias=False))
model.add(BatchNormalization())
model.add(Activation('relu'))
###########
# block 12 ###########
model.add(ZeroPadding2D(padding=(1, 1)))
model.add(DepthwiseConv2D((3, 3), padding='valid',
depth_multiplier=1, strides=(2, 2), use_bias=False))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Conv2D(1024, (1, 1), strides=(1, 1), padding='same', use_bias=False))
model.add(BatchNormalization())
model.add(Activation('relu'))
###########
# block 13 ###########
model.add(ZeroPadding2D(padding=(1, 1)))
model.add(DepthwiseConv2D((3, 3), padding='valid',
depth_multiplier=1, strides=(1, 1), use_bias=False))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Conv2D(1024, (1, 1), strides=(1, 1), padding='same', use_bias=False))
model.add(BatchNormalization())
model.add(Activation('relu'))
###########
# final -- tried shaping for my output
model.add(GlobalAveragePooling2D())
model.add(Reshape((1, 1, 1024)))
model.add(Dropout(1e-3))
model.add(Conv2D(8, (1, 1), padding='same'))
model.add(Activation('softmax'))
model.add(Reshape((8,)))
# output layers that give me a better result for 8 keypoints
# model.add(Flatten())
# model.add(Dense(2000))
# model.add(Activation('relu'))
# model.add(Dropout(0.5)) # !
# model.add(Dense(2000))
# model.add(Activation('relu'))
# model.add(Dense(8))
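For completeness, the training setup is along these lines (a sketch only: the loss, optimizer settings, and the X_train / y_train names are placeholders, not my exact values):

from keras.optimizers import Adam

# Sketch of the training call for coordinate regression: X_train is a stack of
# (224, 224, 3) images, y_train the matching (N, 8) array of x,y targets.
# The learning rate shown is a placeholder; as noted above, I have been
# running with a higher one.
model.compile(optimizer=Adam(lr=1e-3), loss='mean_squared_error')
model.fit(X_train, y_train, batch_size=32, epochs=50, validation_split=0.2)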
Source: https://stackoverflow.com/questions/49473281/keras-mobilenet-to-localize-image-features