Multiprocessing taking more time then no multiprocessing - Python

两盒软妹~` 提交于 2019-12-12 20:54:27

问题


I'm working on a scientific project where I have a method that takes much time to terminates and which is call more than 20 times. That method could be easily parallelized too. The problem is that the parallelized code is taking much more time than the not parallelized one (commented in the code).

Here is a piece of my code just to show how I am doing such thing:

import copy_reg
import types
from itertools import product
import multiprocessing as mp

def _pickle_method(method):
    """
    Author: Steven Bethard (author of argparse)
    http://bytes.com/topic/python/answers/552476-why-cant-you-pickle-instancemethods
    """
    func_name = method.im_func.__name__
    obj = method.im_self
    cls = method.im_class
    cls_name = ''
    if func_name.startswith('__') and not func_name.endswith('__'):
        cls_name = cls.__name__.lstrip('_')
    if cls_name:
        func_name = '_' + cls_name + func_name
    return _unpickle_method, (func_name, obj, cls)


def _unpickle_method(func_name, obj, cls):
    """
    Author: Steven Bethard
    http://bytes.com/topic/python/answers/552476-why-cant-you-pickle-instancemethods
    """
    for cls in cls.mro():
        try:
            func = cls.__dict__[func_name]
        except KeyError:
            pass
        else:
            break
    return func.__get__(obj, cls)

copy_reg.pickle(types.MethodType, _pickle_method, _unpickle_method)

class ImageData(object):

    def __init__(self, width=60, height=60):
        self.width = width
        self.height = height
        self.data = []
        for i in range(width):
            self.data.append([0] * height)      

def parallel_orientation_uncertainty_calculus(x, y, mean_gradient, mean_gradient_direction, gradient_covariance,
                                               gradient_correlation, bins):
    v = mean_gradient_direction.data[x][y]
    theta_sigma = Uts.Utils.translate_to_polar_coordinates(v[0].item(0), v[1].item(0))
    sigma_theta = 0.0
    for i in range(bins):
        n1 = mt.pow(-mt.pi / 2 + mt.pi * i / bins, 2)
        n2 = VariabilityOfGradients.calculate_gradient_orientation_probability_density_function(
            mean_gradient, gradient_covariance, gradient_correlation, x, y,
            (theta_sigma - mt.pi / 2 + mt.pi * i / bins))
        sigma_theta += n1 * n2
    return [x, y, sigma_theta]

class VariabilityOfGradients(object):
    parallel_orientation_uncertainty_calculus = staticmethod(parallel_orientation_uncertainty_calculus)

    @staticmethod
    def calculate_orientation_uncertainty(mean_gradient, mean_gradient_direction, gradient_covariance, gradient_correlation, bins):
        output = ImD.ImageData(range_min=0, range_max=1)

        results = []
        pool = Pool()
        for x, y in product(range(1, output.width - 1), range(1, output.height - 1)):
            print "Iteration ", x, y
            result = pool.apply_async(VariabilityOfGradients.parallel_orientation_uncertainty_calculus,
                                  args=[x, y, mean_gradient, mean_gradient_direction, gradient_covariance,
                                        gradient_correlation, bins])
            results.append(result.get())
        pool.close()
        pool.join()        
        for i, result in enumerate(results):
            result = results[i]
            print result
            output.data[result[0], result[1]] = result[2]

        # for x, y in product(range(1, output.width - 1), range(1, output.height - 1)):
        #     print "Iteration ", x, y
        #     v = mean_gradient_direction.data[x][y]
        #     theta_sigma = Uts.Utils.translate_to_polar_coordinates(v[0].item(0), v[1].item(0))
        #     sigma_theta = 0.0
        #     for i in range(bins):
        #         n1 = mt.pow(-mt.pi / 2 + mt.pi * i / bins, 2)
        #         n2 = VariabilityOfGradients.calculate_gradient_orientation_probability_density_function(
                mean_gradient, gradient_covariance, gradient_correlation, x, y,
                (theta_sigma - mt.pi / 2 + mt.pi * i / bins))
        #         sigma_theta += n1 * n2
        #     output.data[x][y] = sigma_theta

        return output    

if __name__ == '__main__':  
    VariabilityOfGradients.calculate_orientation_uncertainty()  

I'm wondering what I'm doing wrong. Am I using multiprocessing wrong?

Thank you in advance.

来源:https://stackoverflow.com/questions/21136404/multiprocessing-taking-more-time-then-no-multiprocessing-python

易学教程内所有资源均来自网络或用户发布的内容,如有违反法律规定的内容欢迎反馈
该文章没有解决你所遇到的问题?点击提问,说说你的问题,让更多的人一起探讨吧!