kaggle-Unet-steel-defect-detection

Posted by 旧巷老猫 on 2019-11-26 10:34:17

brief

 Today was my first proper Kaggle competition, so I'm writing down what I've learned. The task is to detect defects in steel: the training set contains four classes of defects, plus images with no defects at all. As a newcomer mostly along for the ride, I first read through an open-source kernel, and today I studied a teammate's U-Net in depth, so here is the overall approach. Training is done in two passes: the first pass trains once on the full dataset, and the second pass trains again on only the defective samples. One implementation detail worth mentioning: the final layer outputs 4 channels, each channel is treated as an independent binary classification, and cross-entropy is used as the loss function.
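To make that last point concrete, here is a tiny sketch of the per-channel binary formulation (toy shapes and values, not the competition data; it assumes a TF1.x Keras setup matching the imports below): with the 4 defect classes stacked on the channel axis, plain binary cross-entropy already scores each channel as its own two-class problem.

import numpy as np
import keras.backend as K

y_true = np.zeros((1, 4, 4, 4), np.float32)   # (batch, H, W, 4 classes), toy size
y_true[0, 1:3, 1:3, 2] = 1.                   # a made-up class-3 defect region
y_pred = np.random.uniform(0.01, 0.99, y_true.shape).astype(np.float32)
loss = K.eval(K.mean(K.binary_crossentropy(K.constant(y_true), K.constant(y_pred))))
print(loss)  # a single scalar; every channel contributed an independent binary term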

Code

model.py

from keras.layers import Conv2D, MaxPooling2D, Input, BatchNormalization, Deconv2D, Lambda
import keras.backend as K
from keras.models import Model
from keras.optimizers import Adam
import tensorflow as tf

def downsampling_conv_block(net, filters, kernel=3):  # U-Net downsampling (encoder) block
    net = Conv2D(filters, kernel, padding='same', activation='relu')(net)
    net = BatchNormalization()(net)
    net = Conv2D(filters, kernel, padding='same', activation='relu')(net)
    _net = BatchNormalization()(net)
    net = MaxPooling2D()(_net)
    return net, _net  # _net is kept for the later skip connection

# When building a network in Keras, layers can only consume Keras tensors; to apply raw tf ops, wrap them in a Lambda layer as below. K is the Keras backend (commonly TensorFlow, Theano, etc.).
def upsampling_conv_block(net1, net2, filters, kernel=3):
    net1 = Deconv2D(filters, kernel, strides=2, padding='same', activation='relu')(net1)
    net = Lambda(lambda x: K.concatenate(x, -1))([net1, net2])
    net = Conv2D(filters, kernel, padding='same', activation='relu')(net)
    net = BatchNormalization()(net)
    net = Conv2D(filters, kernel, padding='same', activation='relu')(net)
    net = BatchNormalization()(net)
    return net
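# (An equivalent without the Lambda wrapper would be the built-in layer:
#  net = keras.layers.Concatenate(axis=-1)([net1, net2]))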

# In real projects networks are usually built as classes; this script is just for fun, so plain functions save time.
def unet(istraining=True):
    input = Input(shape=(256, 512, 1))
    net1, _net1 = downsampling_conv_block(input, 32)
    net2, _net2 = downsampling_conv_block(net1, 64)
    net3, _net3 = downsampling_conv_block(net2, 128)
    net4, _net4 = downsampling_conv_block(net3, 256)
    net5 = Conv2D(512, 3, padding='same', activation='relu')(net4)
    net5 = BatchNormalization()(net5)
    net5 = Conv2D(256, 1, activation='relu')(net5)
    net5 = BatchNormalization()(net5)
    net5 = Conv2D(256, 3, padding='same', activation='relu')(net5)
    net5 = BatchNormalization()(net5)
    net5 = Conv2D(512, 1, activation='relu')(net5)
    net5 = BatchNormalization()(net5)
    net6 = upsampling_conv_block(net5, _net4, 256)
    net7 = upsampling_conv_block(net6, _net3, 128)
    net8 = upsampling_conv_block(net7, _net2, 64)
    net9 = upsampling_conv_block(net8, _net1, 32)
    output = Conv2D(4, 1, padding='same', activation='sigmoid')(net9)
    model = Model(inputs=input, outputs=output)
    if istraining:
        compile_model(model)
    return model

def focal_loss(gamma=2.):
    def focal_loss_fixed(y_true, y_pred):
        eps = 0.0001
        pt_1 = tf.where(tf.equal(y_true, 1), y_pred, tf.ones_like(y_pred))  # y_pred where y_true == 1, else 1 (zero loss there)
        pt_0 = tf.where(tf.equal(y_true, 0), y_pred, tf.zeros_like(y_pred))  # y_pred where y_true == 0, else 0 (zero loss there)
        return -K.mean(K.pow(1 - pt_1, gamma) * K.log(pt_1 + eps)) - K.mean(K.pow(pt_0, gamma) * K.log(1 - pt_0 + eps))
    return focal_loss_fixed
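# A quick sanity sketch for focal_loss (toy values, left commented out so the
# module stays import-clean; it assumes a TF1.x backend where K.eval runs via a
# session): with gamma = 0 the (1 - pt)^gamma factor is 1, so the loss behaves
# like plain eps-smoothed cross-entropy, and gamma = 2 should come out smaller
# because easy, well-classified pixels are down-weighted.
# import numpy as np
# yt = K.constant(np.array([[1., 0., 1., 0.]], np.float32))
# yp = K.constant(np.array([[0.9, 0.1, 0.6, 0.4]], np.float32))
# print(K.eval(focal_loss(gamma=0.)(yt, yp)))
# print(K.eval(focal_loss(gamma=2.)(yt, yp)))  # smaller than the line above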


def dice_coefficient(y_true, y_pred):  # Keras calls metrics as (y_true, y_pred), so keep that argument order
    eps = 0.0001
    y_pred = tf.where(y_pred > 0.5, K.ones_like(y_pred), K.zeros_like(y_pred))  # threshold the predictions
    # int_sec = K.sum(y_pred * y_true, [1, 2, 3])
    # xy_sum = K.sum(y_true, [1, 2, 3]) + K.sum(y_pred, [1, 2, 3])
    int_sec = K.sum(y_pred * y_true)
    xy_sum = K.sum(y_true) + K.sum(y_pred)
    return (2 * int_sec + eps) / (xy_sum + eps)
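# A matching toy check for dice_coefficient (values made up): predictions are
# thresholded at 0.5 first, so 0.9/0.8 count as 1 and 0.1/0.2 count as 0, and
# the thresholded prediction equals the ground truth here, giving dice ~= 1.0.
# yt = K.constant(np.array([[1., 0.], [0., 1.]], np.float32))
# yp = K.constant(np.array([[0.9, 0.1], [0.2, 0.8]], np.float32))
# print(K.eval(dice_coefficient(yt, yp)))  # ~1.0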

# The loss can be one of the built-ins in keras.losses or custom-defined as above, but a loss function must take exactly the two arguments (y_true, y_pred);
# to pass extra parameters, use a closure as in focal_loss above. The same applies to metrics.
def compile_model(model):
    opt = Adam(lr=0.0005)
    model.compile(opt, focal_loss(), metrics=[dice_coefficient])
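# Quick architecture sanity check (a usage sketch, not part of training):
if __name__ == '__main__':
    unet(istraining=False).summary()  # final output shape should be (None, 256, 512, 4)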



data.py

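The helpers that train.py imports from here (read_data, split_all, split_nan, generator) are not reproduced in this post, so below is a minimal sketch reconstructed purely from their call sites. The image directory, the RLE decoding, and the resize from the original 256 x 1600 images down to the model's 256 x 512 input follow the competition's data format, but every name and body here is an assumption, not the original implementation.

import numpy as np
import pandas as pd
import cv2

IMG_DIR = '../severstal-steel-defect-detection/train_images/'  # assumed location
H, W = 256, 512  # the model's input size; original images are 256 x 1600

def rle_to_mask(rle, shape=(256, 1600)):
    # Decode the competition's run-length encoding (column-major pixel order).
    mask = np.zeros(shape[0] * shape[1], dtype=np.float32)
    if isinstance(rle, str):  # NaN means "no defect of this class"
        nums = list(map(int, rle.split()))
        for start, length in zip(nums[0::2], nums[1::2]):
            mask[start - 1:start - 1 + length] = 1.
    return mask.reshape(shape, order='F')

def read_data(path):
    # train.csv has one row per (image, class); collect the four RLE strings per image
    # (assumes the csv lists classes 1..4 in order, as the original file does).
    df = pd.read_csv(path)
    df['ImageId'] = df['ImageId_ClassId'].str.split('_').str[0]
    grouped = df.groupby('ImageId')['EncodedPixels'].apply(list)
    return np.array(grouped.index), list(grouped.values)

def split_all(data, label):
    # First pass: keep everything.
    return data, label

def split_nan(data, label):
    # Second pass: separate images that have at least one defect from defect-free ones.
    has_defect = np.array([any(isinstance(r, str) for r in rles) for rles in label])
    pos = [l for l, h in zip(label, has_defect) if h]
    neg = [l for l, h in zip(label, has_defect) if not h]
    return data[has_defect], data[~has_defect], pos, neg

def generator(data, label, batchsize):
    # Endless generator for fit_generator: grayscale images resized to (H, W),
    # masks stacked as 4 binary channels, resized and re-thresholded the same way.
    while True:
        order = np.random.permutation(len(data))
        for i in range(0, len(data) - batchsize + 1, batchsize):
            xs, ys = [], []
            for j in order[i:i + batchsize]:
                img = cv2.imread(IMG_DIR + data[j], cv2.IMREAD_GRAYSCALE)
                masks = np.stack([rle_to_mask(r) for r in label[j]], axis=-1)
                xs.append((cv2.resize(img, (W, H))[..., None] / 255.).astype(np.float32))
                ys.append((cv2.resize(masks, (W, H)) > 0.5).astype(np.float32))
            yield np.array(xs), np.array(ys)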



train.py

from model import *
from data import *
from sklearn.model_selection import train_test_split
from keras.callbacks import ModelCheckpoint

epochs = 30
batchsize = 16
is_mining_train = True

path = '../severstal-steel-defect-detection/train.csv'
data, label = read_data(path)
if is_mining_train:
    #data, _, label, _ = split_nan(data, label)  # second pass: train on only the defective samples
    data, label = split_all(data, label)  # first pass: train on all of the data
# train_test_split randomly splits the data into training and validation subsets and returns both, with matching labels.
td, vd, tl, vl = train_test_split(data, label, test_size=0.2)  # td, tl are for training; vd, vl (the 20%) for validation
tg = generator(td, tl, batchsize)
vg = generator(vd, vl, batchsize)
net = unet()

#net.load_weights('./base_models/ver1.hdf5')

save_path = './models/{epoch:02d}-{val_loss:.2f}.hdf5'  # what do these placeholders mean? and how would one save under the name ver2?
ckpt = ModelCheckpoint(save_path)
# Training usually uses fit or fit_generator; fit requires the whole dataset in memory, which is rarely feasible in practice, so fit_generator (which takes a generator) is more common.
# The generator follows Python's yield pattern; if fixed epochs are awkward, a for loop with train_on_batch also works (see the sketch after this listing).
# For callbacks I usually use ModelCheckpoint and EarlyStopping, or subclass Callback for custom behaviour.
net.fit_generator(tg, len(td) // batchsize, epochs, callbacks=[ckpt], validation_data=vg,
                  validation_steps=len(vd) // batchsize)
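# The train_on_batch alternative mentioned in the comments above, as a sketch
# (left commented out so it does not run on top of fit_generator; the step
# count is made up):
# for step in range(10000):
#     x_batch, y_batch = next(tg)
#     loss = net.train_on_batch(x_batch, y_batch)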

The train.py here only covers the first training pass.
As a beginner, I still have a few questions:

  • save_path = './models/{epoch:02d}-{val_loss:.2f}.hdf5': what do these placeholders mean, and how would one save under the name ver2? (see the note below)
  • Is a metrics function just an evaluation metric?
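As far as I can tell, the answers are: the braces in save_path are ordinary Python str.format placeholders that ModelCheckpoint fills in from the training logs at the end of every epoch (epoch 3 with val_loss 0.45 is saved as ./models/03-0.45.hdf5), and a fixed name like ver2 is simply a literal path; and yes, metrics are evaluation functions that are computed and logged each epoch but never used for gradient updates. A small sketch:

from keras.callbacks import ModelCheckpoint

# The placeholders are filled per epoch, e.g. './models/03-0.45.hdf5'; a
# constant path keeps rewriting one file (here only when val_loss improves):
ckpt = ModelCheckpoint('./models/ver2.hdf5', save_best_only=True)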

Finally, today is Qixi Festival (Chinese Valentine's Day), have fun everyone!
