γ = αθ
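Building on this angle relationship, the SCNSceneRendererDelegate callback below places each group member's 3D node around the camera by combining the bearing between the two users with the device's compass heading. The geometry helpers it calls (distanceFrom, angleFrom, roateInHorizontalPlaneBy) are only referenced by name in the article; the following is a minimal sketch of what they might look like, assuming a simple UserLocation model with non-optional Double latitude/longitude, angles handled in degrees, and Float SCNVector3 components (iOS).

import Foundation
import SceneKit
import CoreLocation

// Hypothetical stand-in for the article's UserLocation model; the real class is not shown in the excerpt.
class UserLocation {
    var userId: String?
    var userTitle: String = ""
    var latitude: Double = 0
    var longitude: Double = 0

    // Approximate ground distance in meters, using CLLocation's great-circle distance.
    func distanceFrom(_ other: UserLocation) -> Double {
        let a = CLLocation(latitude: latitude, longitude: longitude)
        let b = CLLocation(latitude: other.latitude, longitude: other.longitude)
        return a.distance(from: b)
    }

    // Bearing in degrees from this user to the other, measured clockwise from north,
    // so it can be compared directly with the device's compass heading.
    func angleFrom(_ other: UserLocation) -> Double {
        let lat1 = latitude * .pi / 180
        let lat2 = other.latitude * .pi / 180
        let dLon = (other.longitude - longitude) * .pi / 180
        let y = sin(dLon) * cos(lat2)
        let x = cos(lat1) * sin(lat2) - sin(lat1) * cos(lat2) * cos(dLon)
        return atan2(y, x) * 180 / .pi
    }
}

extension SCNVector3 {
    // Rotate the vector about the vertical (Y) axis by `angle` degrees, staying in the horizontal plane.
    // The body is an assumption; only the method name appears in the article.
    func roateInHorizontalPlaneBy(angle: Double) -> SCNVector3 {
        let radians = angle * .pi / 180
        let c = Float(cos(radians))
        let s = Float(sin(radians))
        return SCNVector3(x: x * c - z * s, y: y, z: x * s + z * c)
    }
}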
func renderer(_ renderer: SCNSceneRenderer, updateAtTime time: TimeInterval) {
    // Only render the other members of the group, not the current user
    guard let renderLocations = self.netGroupInfo?.locations?.filter({ (userLocation) -> Bool in
        return userLocation.userId != GroupMenberManager.sharedInstance.getCurrentUserID()
    }) else { return }
    DispatchQueue.main.async {
        guard let camera = self.sceneView.pointOfView else { return }
        // Location of the current user
        let currentLocation = UserLocation()
        currentLocation.latitude = GroupMenberManager.sharedInstance.userLatitude
        currentLocation.longitude = GroupMenberManager.sharedInstance.userLongitute
        // Loop over the other members of the current group
        for renderLocation in renderLocations {
            // Distance between the two points, used to scale the 3D model's text so the
            // rendered size directly reflects how far away the other member is
            let distance = currentLocation.distanceFrom(renderLocation)
            // Angle describing the coordinate relationship between the two users
            let angle = currentLocation.angleFrom(renderLocation)
            // Final render position of the 3D model, derived from the formula above;
            // compassAngle is the compass heading read from the device in real time
            var position = SCNVector3(x: 0, y: 0, z: -3).roateInHorizontalPlaneBy(angle: self.compassAngle - angle)
            position = camera.convertPosition(position, to: nil)
            // Pin the node to the horizontal plane
            position.y = 0
            // Update the node's position
            self.virtualObjectManager.findVirtualObject(renderLocation.userId ?? "")?.scnNode.position = position
            // Update the model's text and size according to the distance
            self.virtualObjectManager.findVirtualObject(renderLocation.userId ?? "")?.changeNodeTextAnSize(text: renderLocation.userTitle, distance: distance)
        }
    }
}

Converting the Caffe model to Core ML with coremltools:

import coremltools

# Run the conversion with the Caffe converter's convert method
coreml_model = coremltools.converters.caffe.convert(
    ('web_car.caffemodel', 'deploy.prototxt'),
    image_input_names='data',
    class_labels='class_labels.txt')

# Save the generated classifier model file
coreml_model.save('CarRecognition.mlmodel')

The same conversion with the fine-tuned GoogLeNet weights, this time also filling in the model metadata:

import coremltools

# Run the conversion with the Caffe converter's convert method
coreml_model = coremltools.converters.caffe.convert(
    ('googlenet_finetune_web_car.caffemodel', 'deploy.prototxt'),
    image_input_names='data',
    class_labels='cars.txt')

# Set the model metadata
coreml_model.author = 'Audebert, Nicolas and Le Saux, Bertrand and Lefevre Sebastien'
coreml_model.license = 'MIT'
coreml_model.short_description = 'Predict the brand & model of a car.'
coreml_model.input_description['data'] = 'An image of a car.'
coreml_model.output_description['prob'] = 'The probabilities that the input image is a car.'
coreml_model.output_description['classLabel'] = 'The most likely type of car, for the given input.'

# Save the generated classifier model file
coreml_model.save('CarRecognition.mlmodel')

Calling the generated CarRecognition model from Swift:

let carModel = CarRecognition()
// ref is a CVPixelBuffer holding the input image
let output = try carModel.prediction(image: ref)

Source: Meituan - Deep Learning and AR Applied to Mobile Ride-Hailing Scenarios