Resuming an optimization in scipy.optimize?

The general answer is no, there's no general solution apart from, just as you say, starting from the last estimate from the previous run.

For differential evolution specifically, though, you can instantiate the DifferentialEvolutionSolver directly, pickle it at a checkpoint, and unpickle it later to resume. (The suggestion comes from https://github.com/scipy/scipy/issues/6517 .)
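A minimal sketch of that approach, assuming a recent scipy; note that DifferentialEvolutionSolver lives in a private module, so the import path and pickling details may vary between versions:

import pickle
from scipy.optimize import rosen
from scipy.optimize._differentialevolution import DifferentialEvolutionSolver

solver = DifferentialEvolutionSolver( rosen, bounds=[(-5, 5)] * 3, seed=1 )

for _ in range(20):                # evolve 20 generations
    best_x, best_f = next(solver)  # one generation per next()

with open( "checkpoint.pkl", "wb" ) as f:  # checkpoint the whole solver state
    pickle.dump( solver, f )

# ... later, possibly in a fresh process: unpickle and keep evolving --
with open( "checkpoint.pkl", "rb" ) as f:
    solver = pickle.load( f )
for _ in range(20):
    best_x, best_f = next(solver)
print( best_x, best_f )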

The Funcgradmon class at the bottom of this answer can save and restart from a previous x, but I gather you want to save and restore more state, e.g. gradients, too; can you clarify that?

See also basinhopping, for which the pele-python package provides a nice-looking GUI; a rough way to checkpoint basinhopping runs is sketched below.
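A sketch of that, using basinhopping's standard callback(x, f, accept) hook. This only records the local minima found so a later run can start from the best one; it does not preserve basinhopping's internal step-size adaptation or random state:

import numpy as np
from scipy.optimize import basinhopping, rosen

minima = []                         # every local minimum found so far

def record( x, f, accept ):         # basinhopping callback signature
    minima.append( (f, np.copy(x)) )

res = basinhopping( rosen, np.zeros(3), niter=10, callback=record )

# "resume": start a new run from the best point seen so far --
f_best, x_best = min( minima, key=lambda t: t[0] )
res2 = basinhopping( rosen, x_best, niter=10 )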

#!/usr/bin/env python
""" Funcgradmn: wrap f() and grad(), save all x[] f[] grad[] to plot or restart """

import numpy as np

__version__ = "2016-10-18 oct denis"


class Funcgradmon(object):
    """ Funcgradmn: wrap f() and grad(), save all x[] f[] grad[] to plot or restart

    Example: minimize, save, restart --

    fg = Funcgradmon( func, gradfunc, verbose=1 )
        # fg(x): f(x), g(x)  for minimize( jac=True )

        # run 100 iter (if linesearch, 200-300 calls of fg()) --
    options = dict( maxiter=100 )  # ... 
    min0 = minimize( fg, x0, jac=True, options=options )
    fg.savez( "0.npz", paramstr="..." )  # to plot or restart

        # restart from x[50] --
        # (won't repeat the previous path from 50
        # unless you save and restore the whole state of the optimizer)
    x0 = fg.restart( 50 )
    # change params ...
    min50 = minimize( fg, x0, jac=True, options=options )
    """

    def __init__( self, func, gradfunc, verbose=1 ):
        self.func = func
        self.gradfunc = gradfunc
        self.verbose = verbose
        self.x, self.f, self.g = [], [], []  # growing lists
        self.t = 0

    def __call__( self, x ):
        """ f, g = func(x), gradfunc(x); save them; return f, g """
        x = np.asarray_chkfinite( x )  # always
        f = self.func(x)
        g = self.gradfunc(x)
        g = np.asarray_chkfinite( g )
        self.x.append( np.copy(x) )
        self.f.append( _copy( f ))
        self.g.append( np.copy(g) )
        if self.verbose:
            print "%3d:" % self.t ,
            fmt = "%-12g" if np.isscalar(f)  else "%s\t"
            print fmt % f ,
            print "x: %s" % x ,  # with user's np.set_printoptions
            print "\tgrad: %s" % g
                # better df dx dg
        # callback: plot
        self.t += 1
        return f, g

    def restart( self, n ):
        """ x0 = fg.restart( n )  returns x[n] to minimize( fg, x0 )
        """
        x0 = self.x[n]  # minimize from here
        del self.x[:n]
        del self.f[:n]
        del self.g[:n]
        self.t = n
        if self.verbose:
            print "Funcgradmon: restart from x[%d] %s" % (n, x0)
        return x0

    def savez( self, npzfile, **kw ):
        """ np.savez( npzfile, x= f= g= ) """
        x, f, g = map( np.array, [self.x, self.f, self.g] )
        if self.verbose:
            asum = "f: %s \nx: %s \ng: %s" % (
                _asum(f), _asum(x), _asum(g) )
            print "Funcgradmon: saving to %s: \n%s \n" % (npzfile, asum)
        np.savez( npzfile, x=x, f=f, g=g, **kw )

    def load( self, npzfile ):
        load = np.load( npzfile )
        x, f, g = load["x"], load["f"], load["g"]
        if self.verbose:
            asum = "f: %s \nx: %s \ng: %s" % (
                _asum(f), _asum(x), _asum(g) )
            print "Funcgradmon: load %s: \n%s \n" % (npzfile, asum)
        self.x = list( x )
        self.f = list( f )
        self.g = list( g )
        self.loaddict = load
        return self.restart( len(x) - 1 )


def _asum( X ):
    """ one-line array summary: "shape type min av max" """
    if not hasattr( X, "dtype" ):
        return str(X)
    return "%s %s  min av max %.3g %.3g %.3g" % (
            X.shape, X.dtype, X.min(), X.mean(), X.max() )

def _copy( x ):
    return x if x is None or np.isscalar(x)  else np.copy( x )

#...............................................................................
if __name__ == "__main__":
    import sys
    from scipy.optimize import minimize, rosen, rosen_der

    np.set_printoptions( threshold=20, edgeitems=10, linewidth=140,
            formatter = dict( float = lambda x: "%.3g" % x ))  # float arrays %.3g

    dim = 3
    method = "cg"
    maxiter = 10  # 1 linesearch -> 2-3 calls of fg

    # to change these params, run this.py a=1 b=None 'c = ...'  in sh or ipython
    for arg in sys.argv[1:]:
        exec( arg )

    print "\n", 80 * "-"
    print "Funcgradmon: dim %d  method %s  maxiter %d \n" % (
            dim, method, maxiter )
    x0 = np.zeros( dim )

    #...........................................................................
    fg = Funcgradmon( rosen, rosen_der, verbose=1 )
    options = dict( maxiter=maxiter )  # ... 

    min0 = minimize( fg, x0, jac=True, method=method, options=options )
    fg.savez( "0.npz", paramstr="..." )  # to plot or restart

    x0 = fg.restart( 5 )  # = fg.x[5]
    # change params, print them all
    min5 = minimize( fg, x0, jac=True, method=method, options=options )

    fg.savez( "5.npz", paramstr="..." )