YOLOv3 weight conversion

Submitted by Anonymous (unverified) on 2019-12-02 23:43:01
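The listing below is the models.py module from the ultralytics/yolov3 repository (the issue links in its comments point there), reproduced for reference. It builds the Darknet network from a *.cfg file, reads and writes weights in the binary Darknet format, and exposes a convert() helper that translates between Darknet *.weights files and PyTorch *.pt checkpoints in either direction. A short usage sketch follows the listing.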

import os

# torch, nn and np are also pulled in by the star imports below (utils.utils
# imports them at module level); they are made explicit here for clarity.
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np

from utils.parse_config import *
from utils.utils import *

ONNX_EXPORT = False


def create_modules(module_defs):
    """
    Constructs module list of layer blocks from module configuration in module_defs
    """
    hyperparams = module_defs.pop(0)
    output_filters = [int(hyperparams['channels'])]
    module_list = nn.ModuleList()
    yolo_layer_count = 0
    for i, module_def in enumerate(module_defs):
        modules = nn.Sequential()

        if module_def['type'] == 'convolutional':
            bn = int(module_def['batch_normalize'])
            filters = int(module_def['filters'])
            kernel_size = int(module_def['size'])
            pad = (kernel_size - 1) // 2 if int(module_def['pad']) else 0
            modules.add_module('conv_%d' % i, nn.Conv2d(in_channels=output_filters[-1],
                                                        out_channels=filters,
                                                        kernel_size=kernel_size,
                                                        stride=int(module_def['stride']),
                                                        padding=pad,
                                                        bias=not bn))
            if bn:
                modules.add_module('batch_norm_%d' % i, nn.BatchNorm2d(filters))
            if module_def['activation'] == 'leaky':
                modules.add_module('leaky_%d' % i, nn.LeakyReLU(0.1, inplace=True))

        elif module_def['type'] == 'maxpool':
            kernel_size = int(module_def['size'])
            stride = int(module_def['stride'])
            if kernel_size == 2 and stride == 1:
                modules.add_module('_debug_padding_%d' % i, nn.ZeroPad2d((0, 1, 0, 1)))
            maxpool = nn.MaxPool2d(kernel_size=kernel_size, stride=stride, padding=int((kernel_size - 1) // 2))
            modules.add_module('maxpool_%d' % i, maxpool)

        elif module_def['type'] == 'upsample':
            # upsample = nn.Upsample(scale_factor=int(module_def['stride']), mode='nearest')  # WARNING: deprecated
            upsample = Upsample(scale_factor=int(module_def['stride']))
            modules.add_module('upsample_%d' % i, upsample)

        elif module_def['type'] == 'route':
            layers = [int(x) for x in module_def['layers'].split(',')]
            filters = sum([output_filters[i + 1 if i > 0 else i] for i in layers])
            modules.add_module('route_%d' % i, EmptyLayer())

        elif module_def['type'] == 'shortcut':
            filters = output_filters[int(module_def['from'])]
            modules.add_module('shortcut_%d' % i, EmptyLayer())

        elif module_def['type'] == 'yolo':
            anchor_idxs = [int(x) for x in module_def['mask'].split(',')]
            # Extract anchors
            anchors = [float(x) for x in module_def['anchors'].split(',')]
            anchors = [(anchors[i], anchors[i + 1]) for i in range(0, len(anchors), 2)]
            anchors = [anchors[i] for i in anchor_idxs]
            nc = int(module_def['classes'])  # number of classes
            img_size = hyperparams['height']
            # Define detection layer
            yolo_layer = YOLOLayer(anchors, nc, img_size, yolo_layer_count, cfg=hyperparams['cfg'])
            modules.add_module('yolo_%d' % i, yolo_layer)
            yolo_layer_count += 1

        # Register module list and number of output filters
        module_list.append(modules)
        output_filters.append(filters)

    return hyperparams, module_list


class EmptyLayer(nn.Module):
    """Placeholder for 'route' and 'shortcut' layers"""

    def __init__(self):
        super(EmptyLayer, self).__init__()

    def forward(self, x):
        return x


class Upsample(nn.Module):
    # Custom Upsample layer (nn.Upsample gives deprecated warning message)

    def __init__(self, scale_factor=1, mode='nearest'):
        super(Upsample, self).__init__()
        self.scale_factor = scale_factor
        self.mode = mode

    def forward(self, x):
        return F.interpolate(x, scale_factor=self.scale_factor, mode=self.mode)


class YOLOLayer(nn.Module):
    def __init__(self, anchors, nc, img_size, yolo_layer, cfg):
        super(YOLOLayer, self).__init__()

        self.anchors = torch.Tensor(anchors)
        self.na = len(anchors)  # number of anchors (3)
        self.nc = nc  # number of classes (80)
        self.nx = 0  # initialize number of x gridpoints
        self.ny = 0  # initialize number of y gridpoints

        if ONNX_EXPORT:  # grids must be computed in __init__
            stride = [32, 16, 8][yolo_layer]  # stride of this layer
            nx = int(img_size[1] / stride)  # number x grid points
            ny = int(img_size[0] / stride)  # number y grid points
            create_grids(self, max(img_size), (nx, ny))

    def forward(self, p, img_size, var=None):
        if ONNX_EXPORT:
            bs = 1  # batch size
        else:
            bs, ny, nx = p.shape[0], p.shape[-2], p.shape[-1]
            if (self.nx, self.ny) != (nx, ny):
                create_grids(self, img_size, (nx, ny), p.device)

        # p.view(bs, 255, 13, 13) --> (bs, 3, 13, 13, 85)  # (bs, anchors, grid, grid, classes + xywh)
        p = p.view(bs, self.na, self.nc + 5, self.ny, self.nx).permute(0, 1, 3, 4, 2).contiguous()  # prediction

        if self.training:
            return p

        elif ONNX_EXPORT:
            # Constants CAN NOT BE BROADCAST, ensure correct shape!
            ngu = self.ng.repeat((1, self.na * self.nx * self.ny, 1))
            grid_xy = self.grid_xy.repeat((1, self.na, 1, 1, 1)).view((1, -1, 2))
            anchor_wh = self.anchor_wh.repeat((1, 1, self.nx, self.ny, 1)).view((1, -1, 2)) / ngu

            # p = p.view(-1, 5 + self.nc)
            # xy = torch.sigmoid(p[..., 0:2]) + grid_xy[0]  # x, y
            # wh = torch.exp(p[..., 2:4]) * anchor_wh[0]  # width, height
            # p_conf = torch.sigmoid(p[:, 4:5])  # Conf
            # p_cls = F.softmax(p[:, 5:85], 1) * p_conf  # SSD-like conf
            # return torch.cat((xy / ngu[0], wh, p_conf, p_cls), 1).t()

            p = p.view(1, -1, 5 + self.nc)
            xy = torch.sigmoid(p[..., 0:2]) + grid_xy  # x, y
            wh = torch.exp(p[..., 2:4]) * anchor_wh  # width, height
            p_conf = torch.sigmoid(p[..., 4:5])  # Conf
            p_cls = p[..., 5:5 + self.nc]
            # Broadcasting only supported on first dimension in CoreML. See onnx-coreml/_operators.py
            # p_cls = F.softmax(p_cls, 2) * p_conf  # SSD-like conf
            p_cls = torch.exp(p_cls).permute((2, 1, 0))
            p_cls = p_cls / p_cls.sum(0).unsqueeze(0) * p_conf.permute((2, 1, 0))  # F.softmax() equivalent
            p_cls = p_cls.permute(2, 1, 0)
            return torch.cat((xy / ngu, wh, p_conf, p_cls), 2).squeeze().t()

        else:  # inference
            io = p.clone()  # inference output
            io[..., 0:2] = torch.sigmoid(io[..., 0:2]) + self.grid_xy  # xy
            io[..., 2:4] = torch.exp(io[..., 2:4]) * self.anchor_wh  # wh yolo method
            # io[..., 2:4] = ((torch.sigmoid(io[..., 2:4]) * 2) ** 3) * self.anchor_wh  # wh power method
            io[..., 4:] = torch.sigmoid(io[..., 4:])  # p_conf, p_cls
            # io[..., 5:] = F.softmax(io[..., 5:], dim=4)  # p_cls
            io[..., :4] *= self.stride
            if self.nc == 1:
                io[..., 5] = 1  # single-class model https://github.com/ultralytics/yolov3/issues/235

            # reshape from [1, 3, 13, 13, 85] to [1, 507, 85]
            return io.view(bs, -1, 5 + self.nc), p


class Darknet(nn.Module):
    """YOLOv3 object detection model"""

    def __init__(self, cfg, img_size=(416, 416)):
        super(Darknet, self).__init__()

        self.module_defs = parse_model_cfg(cfg)
        self.module_defs[0]['cfg'] = cfg
        self.module_defs[0]['height'] = img_size
        self.hyperparams, self.module_list = create_modules(self.module_defs)
        self.yolo_layers = get_yolo_layers(self)

        # Darknet Header https://github.com/AlexeyAB/darknet/issues/2914#issuecomment-496675346
        self.version = np.array([0, 2, 5], dtype=np.int32)  # (int32) version info: major, minor, revision
        self.seen = np.array([0], dtype=np.int64)  # (int64) number of images seen during training

    def forward(self, x, var=None):
        img_size = max(x.shape[-2:])
        layer_outputs = []
        output = []

        for i, (module_def, module) in enumerate(zip(self.module_defs, self.module_list)):
            mtype = module_def['type']
            if mtype in ['convolutional', 'upsample', 'maxpool']:
                x = module(x)
            elif mtype == 'route':
                layer_i = [int(x) for x in module_def['layers'].split(',')]
                if len(layer_i) == 1:
                    x = layer_outputs[layer_i[0]]
                else:
                    x = torch.cat([layer_outputs[i] for i in layer_i], 1)
            elif mtype == 'shortcut':
                layer_i = int(module_def['from'])
                x = layer_outputs[-1] + layer_outputs[layer_i]
            elif mtype == 'yolo':
                x = module[0](x, img_size)
                output.append(x)
            layer_outputs.append(x)

        if self.training:
            return output
        elif ONNX_EXPORT:
            output = torch.cat(output, 1)  # cat 3 layers 85 x (507, 2028, 8112) to 85 x 10647
            nc = self.module_list[self.yolo_layers[0]][0].nc  # number of classes
            return output[5:5 + nc].t(), output[:4].t()  # ONNX scores, boxes
        else:
            io, p = list(zip(*output))  # inference output, training output
            return torch.cat(io, 1), p

    def fuse(self):
        # Fuse Conv2d + BatchNorm2d layers throughout model
        fused_list = nn.ModuleList()
        for a in list(self.children())[0]:
            for i, b in enumerate(a):
                if isinstance(b, nn.modules.batchnorm.BatchNorm2d):
                    # fuse this bn layer with the previous conv2d layer
                    conv = a[i - 1]
                    fused = torch_utils.fuse_conv_and_bn(conv, b)
                    a = nn.Sequential(fused, *list(a.children())[i + 1:])
                    break
            fused_list.append(a)
        self.module_list = fused_list
        # model_info(self)  # yolov3-spp reduced from 225 to 152 layers


def get_yolo_layers(model):
    a = [module_def['type'] == 'yolo' for module_def in model.module_defs]
    return [i for i, x in enumerate(a) if x]  # [82, 94, 106] for yolov3


def create_grids(self, img_size=416, ng=(13, 13), device='cpu'):
    nx, ny = ng  # x and y grid size
    self.img_size = img_size
    self.stride = img_size / max(ng)

    # build xy offsets
    yv, xv = torch.meshgrid([torch.arange(ny), torch.arange(nx)])
    self.grid_xy = torch.stack((xv, yv), 2).to(device).float().view((1, 1, ny, nx, 2))

    # build wh gains
    self.anchor_vec = self.anchors.to(device) / self.stride
    self.anchor_wh = self.anchor_vec.view(1, self.na, 1, 1, 2).to(device)
    self.ng = torch.Tensor(ng).to(device)
    self.nx = nx
    self.ny = ny


def load_darknet_weights(self, weights, cutoff=-1):
    # Parses and loads the weights stored in 'weights'
    # cutoff: save layers between 0 and cutoff (if cutoff = -1 all are saved)
    weights_file = weights.split(os.sep)[-1]

    # Try to download weights if not available locally
    if not os.path.isfile(weights):
        try:
            os.system('wget https://pjreddie.com/media/files/' + weights_file + ' -O ' + weights)
        except IOError:
            print(weights + ' not found.\nTry https://drive.google.com/drive/folders/1uxgUBemJVw9wZsdpboYbzUN4bcRhsuAI')

    # Establish cutoffs
    if weights_file == 'darknet53.conv.74':
        cutoff = 75
    elif weights_file == 'yolov3-tiny.conv.15':
        cutoff = 15

    # Read weights file
    with open(weights, 'rb') as f:
        # Read Header https://github.com/AlexeyAB/darknet/issues/2914#issuecomment-496675346
        self.version = np.fromfile(f, dtype=np.int32, count=3)  # (int32) version info: major, minor, revision
        self.seen = np.fromfile(f, dtype=np.int64, count=1)  # (int64) number of images seen during training

        weights = np.fromfile(f, dtype=np.float32)  # The rest are weights

    ptr = 0
    for i, (module_def, module) in enumerate(zip(self.module_defs[:cutoff], self.module_list[:cutoff])):
        if module_def['type'] == 'convolutional':
            conv_layer = module[0]
            if module_def['batch_normalize']:
                # Load BN bias, weights, running mean and running variance
                bn_layer = module[1]
                num_b = bn_layer.bias.numel()  # Number of biases
                # Bias
                bn_b = torch.from_numpy(weights[ptr:ptr + num_b]).view_as(bn_layer.bias)
                bn_layer.bias.data.copy_(bn_b)
                ptr += num_b
                # Weight
                bn_w = torch.from_numpy(weights[ptr:ptr + num_b]).view_as(bn_layer.weight)
                bn_layer.weight.data.copy_(bn_w)
                ptr += num_b
                # Running Mean
                bn_rm = torch.from_numpy(weights[ptr:ptr + num_b]).view_as(bn_layer.running_mean)
                bn_layer.running_mean.data.copy_(bn_rm)
                ptr += num_b
                # Running Var
                bn_rv = torch.from_numpy(weights[ptr:ptr + num_b]).view_as(bn_layer.running_var)
                bn_layer.running_var.data.copy_(bn_rv)
                ptr += num_b
            else:
                # Load conv. bias
                num_b = conv_layer.bias.numel()
                conv_b = torch.from_numpy(weights[ptr:ptr + num_b]).view_as(conv_layer.bias)
                conv_layer.bias.data.copy_(conv_b)
                ptr += num_b
            # Load conv. weights
            num_w = conv_layer.weight.numel()
            conv_w = torch.from_numpy(weights[ptr:ptr + num_w]).view_as(conv_layer.weight)
            conv_layer.weight.data.copy_(conv_w)
            ptr += num_w

    return cutoff


def save_weights(self, path='model.weights', cutoff=-1):
    # Converts a PyTorch model to Darknet format (*.pt to *.weights)
    # Note: Does not work if model.fuse() is applied
    with open(path, 'wb') as f:
        # Write Header https://github.com/AlexeyAB/darknet/issues/2914#issuecomment-496675346
        self.version.tofile(f)  # (int32) version info: major, minor, revision
        self.seen.tofile(f)  # (int64) number of images seen during training

        # Iterate through layers
        for i, (module_def, module) in enumerate(zip(self.module_defs[:cutoff], self.module_list[:cutoff])):
            if module_def['type'] == 'convolutional':
                conv_layer = module[0]
                # If batch norm, write bn first
                if module_def['batch_normalize']:
                    bn_layer = module[1]
                    bn_layer.bias.data.cpu().numpy().tofile(f)
                    bn_layer.weight.data.cpu().numpy().tofile(f)
                    bn_layer.running_mean.data.cpu().numpy().tofile(f)
                    bn_layer.running_var.data.cpu().numpy().tofile(f)
                # Write conv bias
                else:
                    conv_layer.bias.data.cpu().numpy().tofile(f)
                # Write conv weights
                conv_layer.weight.data.cpu().numpy().tofile(f)


def convert(cfg='cfg/yolov3-spp.cfg', weights='weights/yolov3-spp.weights'):
    # Converts between PyTorch and Darknet format per extension (i.e. *.weights converts to *.pt and vice versa)
    # from models import *; convert('cfg/yolov3-spp.cfg', 'weights/yolov3-spp.weights')

    # Initialize model
    model = Darknet(cfg)

    # Load weights and save
    if weights.endswith('.pt'):  # if PyTorch format
        model.load_state_dict(torch.load(weights, map_location='cpu')['model'])
        save_weights(model, path='converted.weights', cutoff=-1)
        print("Success: converted '%s' to 'converted.weights'" % weights)

    elif weights.endswith('.weights'):  # darknet format
        _ = load_darknet_weights(model, weights)
        chkpt = {'epoch': -1, 'best_loss': None, 'model': model.state_dict(), 'optimizer': None}
        torch.save(chkpt, 'converted.pt')
        print("Success: converted '%s' to 'converted.pt'" % weights)

    else:
        print('Error: extension not supported.')
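A minimal usage sketch, assuming the script is run from the repository root and that the cfg and weights files below exist at those paths (they are the defaults from convert()'s own docstring):

# Darknet -> PyTorch: writes converted.pt to the working directory
from models import *
convert('cfg/yolov3-spp.cfg', 'weights/yolov3-spp.weights')

# PyTorch -> Darknet: a *.pt checkpoint writes converted.weights instead
convert('cfg/yolov3-spp.cfg', 'weights/yolov3-spp.pt')

As a quick sanity check on an emitted file, the 20-byte Darknet header (three int32 version fields plus one int64 "images seen" counter) can be read back the same way load_darknet_weights() reads it:

import numpy as np

with open('converted.weights', 'rb') as f:
    version = np.fromfile(f, dtype=np.int32, count=3)  # major, minor, revision
    seen = np.fromfile(f, dtype=np.int64, count=1)     # images seen during training
    params = np.fromfile(f, dtype=np.float32)          # the rest are float32 parameters
print(version, seen, params.shape)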

Source: https://blog.csdn.net/jacke121/article/details/92011159