tnonlinear.py - pism - [fork] customized build of PISM, the parallel ice sheet model (tillflux branch)
 (HTM) git clone git://src.adamsgaard.dk/pism
 (DIR) Log
 (DIR) Files
 (DIR) Refs
 (DIR) LICENSE
       ---
       tnonlinear.py (22463B)
       ---
            1 ############################################################################
            2 #
            3 #  This file is a part of siple.
            4 #
            5 #  Copyright 2010, 2014 David Maxwell
            6 #
            7 #  siple is free software: you can redistribute it and/or modify
            8 #  it under the terms of the GNU General Public License as published by
            9 #  the Free Software Foundation, either version 2 of the License, or
           10 #  (at your option) any later version.
           11 # 
           12 ############################################################################
           13 
           14 import numpy as np
           15 from siple.reporting import msg
           16 from siple.exceptions import IterationCountFailure, NumericalFailure
           17 from siple import Parameters
           18 from siple.opt import linesearchHZ, linesearchCR
           19 from .forward import ForwardProblemLineSearchAdaptor
           20 from .linear import KrylovSolver, BasicKrylovCGNE
           21 from math import sqrt
           22 
           23 class InvertIGN:
           24   """
           25   Implements solving an ill-posed minimization problem of the form
           26   
           27   .. math::       J(x) = B(y-\calF(x),y-\calF(x))
           28 
           29   where :math:`B` is a symmetric positive semidefinite bilinear form and :math:`\calF` is a (possibly nonlinear) function mapping between
           30   two Hilbert spaces.
           31 
           32   Minimization is done applying an incomplete Gauss-Newton algorithm until a stopping criterion is met.
           33   """
           34 
           35   @staticmethod
           36   def defaultParameters():
           37     """Default parameters that can subsequently be modified and passed to the constructor.
           38 
           39     :param ITER_MAX: maximum number of minimization iterations before failure
           40     :param mu: Morozov discrepancy principle scaling paramter
           41     :param cg_reset: number of iterations before a conjugate gradient reset (0=default)
           42     :param thetaMin: smallest acceptable goal improvment for linearized step
           43     :param thetaMax: maximum attempted goal improvment for linearized step
           44     :param kappaTrust: factor to shrink :data:`theta` by when linearized step fails
           45     :param rhoLow: linearized step fails when goal improves by less than :data:`rhoLow`*:data:`theta`
           46     :param rhoHigh: linearized step is very successful when goal improves by more than :data:`rhoHigh`*:data:`theta`
           47     :param verbose: True if extra logging output is desired
           48     """
           49     params = Parameters('InvIGN', ITER_MAX=200, mu=1.1, cg_reset=0, thetaMin=2**(-20), thetaMax=0.5, 
           50                                         kappaTrust=0.5, rhoLow=0.1, rhoHigh=0.5, verbose=False)
           51 
           52     lsparams = linesearchHZ.LinesearchHZ.defaultParameters()
           53     lsparams.sigma=0.1
           54     lsparams.delta=0.05
           55     lsparams.rename('linesearch')
           56     params.add(lsparams)
           57 
           58     linearparams = KrylovSolver.defaultParameters()
           59     linearparams.mu=1.
           60     linearparams.rename('linearsolver')
           61     params.add(linearparams)
           62     return params
           63 
           64   def __init__(self, params=None):
           65     self.params = self.defaultParameters()
           66     if not params is None: self.params.update(params)
           67     self.iteration_listeners = []
           68     self.linear_iteration_listeners = []
           69     self.x_update_listeners = []
           70 
           71   def addIterationListener(self,listener):
           72     """
           73     Add an object to be called after each iteration.
           74     """
           75     self.iteration_listeners.append(listener)
           76 
           77   def addLinearIterationListener(self,listener):
           78     """
           79     Add an object to be called after each iteration during solution
           80     of the linearized ill-posed problem.
           81     """
           82     self.linear_iteration_listeners.append(listener)
           83 
           84   def addXUpdateListener(self,listener):
           85     self.x_update_listeners.append(listener)
           86 
           87   ###################################################################################
           88   ##
           89   ## The methods up to the comment below are the ones that can or must be overridden by a subclass.
           90   ##
           91   ## Those implemented with a NotImpelmentedError must be overridden.
           92 
           93   def initialize(self,x,y,*args):
           94     """
           95     Hook called at the start of solve.  This gives the class a chance to massage the input.
           96     For example, x and y might be dolfin (finite element) Functions; this method should return 
           97     the associated dolfin GenericVectors.
           98 
           99     The remaining arguments are passed directly from :func:`solve`, and might be used for determining the
          100     final stopping criterion.
          101 
          102     Returns vectors corresponding to the initial value of *x* and the desired value of *y=F(x)* along
          103     with the desired terminal discrepancy.
          104     """
          105     raise NotImplementedError()
          106 
          107   def discrepancy(self,x,y,r):
          108     """
          109     During the computation, a class-defined scalar called a discrepancy is maintained that 
          110     signifies in a class-defined way the difference between the currently computed value of F(x)
          111     and the desired value y.  Typically it will be
          112     
          113     .. math::         {\\rm discrepancy} = ||y-\calF(x)||_Y
          114         
          115     but there are scenarios where some other scalar is maintained.  During each linear step of
          116     the computations, a fraction of the discrepancy is eliminated.
          117 
          118     :param x: The current point in the domain.
          119     :param y: The desired value of F(x)
          120     :param r: The residual y-F(x)
          121     """
          122     return sqrt(abs(self.forwardProblem().rangeIP(r,r)))
          123     
          124   def forwardProblem(self):
          125     """
          126     Returns the :class:`NonlinearForwardProblem` that defines the maps F, T, T*, and the inner products on the 
          127     domain and range.
          128     """
          129     raise NotImplementedError()
          130 
          131   def temper_d(self,x,d,y,residual):
          132     pass
          133 
          134   def linearInverseSolve(self,x,y,r,discrepancyLin):
          135     """Internal method for solving the linearized inverse problems."""
          136     x0 = x.zero_like()
          137     
          138     forward_problem = self.forwardProblem()
          139     
          140     linSolver = BasicKrylovCGNE(forward_problem, params=self.params.linearsolver)
          141     
          142     for l in self.linear_iteration_listeners:
          143       linSolver.addIterationListener(l)
          144     
          145     (h,Th) = linSolver.solve(x0,r,discrepancyLin)
          146     return h
          147     
          148   def finalize(self,x,y):
          149     """
          150     Hook called at the end of :func:`solve`.  Gives the chance to massage the return values.
          151     """
          152     return (x,y)
          153 
          154   def iterationHook(self,count,x,Fx,y,d,r,Td):
          155     """
          156     Called during each iteration with the pertinent computations.  Handy for debugging and visualization.
          157     """
          158     for listener in self.iteration_listeners:
          159       listener(self,count,x,Fx,y,d,r,Td)
          160 
          161   def xUpdateHook(self,count,x,Fx,y,r):
          162     for listener in self.x_update_listeners:
          163       listener(self,count,x,Fx,y,r)
          164 
          165   ##
          166   ## End of methods to be overridden by a subclass.
          167   ##
          168   ########################################################################################################
          169 
          170   def solve(self, x0, y, *args):
          171     """Main routine to solve the inverse problem F(x)=y.  Initial guess is x=x0.
          172     Extra arguments are passed to :func:`initialize`."""
          173     (x,y,targetDiscrepancy) = self.initialize(x0,y,*args)
          174 
          175     self.discrepancy_history=[]
          176 
          177     forward_problem = self.forwardProblem()
          178     params = self.params
          179     
          180     cg_reset = x.size()
          181     if (self.params.cg_reset != 0): cg_reset = self.params.cg_reset
          182 
          183     # Initial functional evalutation
          184     Fx = forward_problem.evalFandLinearize(x)
          185 
          186     # Prepare some storage
          187     Td = None
          188 
          189     residual = y.copy()
          190     residual.axpy(-1, Fx)
          191 
          192     discrepancy = self.discrepancy(x,y,residual);
          193     
          194     # The class that performs our linesearches.
          195     line_search = linesearchHZ.LinesearchHZ(params=self.params.linesearch)
          196     line_searchee = ForwardProblemLineSearchAdaptor(forward_problem)
          197 
          198     # Main loop
          199     count = 0
          200     theta = params.thetaMax;
          201     # try:
          202     for kkkkk in range(1):
          203       while True:
          204         self.discrepancy_history.append(discrepancy)
          205 
          206         if count > self.params.ITER_MAX:
          207           raise IterationCountFailure(self.params.ITER_MAX)
          208         count += 1
          209 
          210         if theta < self.params.thetaMin:
          211           raise NumericalFailure('Reached smallest trust region size.')
          212 
          213         # Error to correct:
          214         discrepancyLin = (1-theta)*discrepancy + theta*targetDiscrepancy
          215         msg('(%d) discrepancy: current %g linear goal:%g goal: %g\n---', count, discrepancy, discrepancyLin, targetDiscrepancy)
          216 
          217         if discrepancy <= self.params.mu*targetDiscrepancy:
          218           msg('done at iteration %d', count)
          219           break
          220 
          221         # try:
          222         d = self.linearInverseSolve(x,y,residual,discrepancyLin)
          223         # except Exception as e:
          224         #   theta *= self.params.kappaTrust
          225         #   msg('Exception during linear inverse solve:\n%s\nShriking theta to %g.',str(e),theta)
          226         #   continue
          227 
          228         # forward_problem.evalFandLinearize(x,out=Fx)
          229         # residual[:] = y
          230         # residual -= Fx
          231         Td = forward_problem.T(d,out=Td)
          232         Jp = -forward_problem.rangeIP(Td,residual)
          233 
          234         if Jp >= 0:
          235           theta *= self.params.kappaTrust
          236           msg('Model problem found an uphill direction.  Shrinking theta to %g.',theta)
          237           continue
          238 
          239           # % Sometimes in the initial stages, the linearization asks for an adjustment many orders of magnitude
          240           # % larger than the size of the coefficient gamma.  This is due to the very shallow derivatives
          241           # % in the coefficent function (and hence large derivatives in its inverse).  We've been 
          242           # % guaranteed that dh is pointing downhill, so scale it so that its size is on the order 
          243           # % of the size of the current gamma and try it out.  If this doesn't do a good job, we'll end
          244           # % up reducing theta later.
          245           # if (params.forceGammaPositive)
          246 
          247         self.temper_d(x,d,y,residual)
          248 
          249         self.iterationHook(count,x,Fx,y,d,residual,Td)
          250 
          251         # Do a linesearch in the determined direction.
          252         Phi = lambda t: line_searchee.eval(x,d,y,t)
          253         Jx = 0.5*forward_problem.rangeIP(residual,residual)
          254         line_search.search(Phi,Jx,Jp,1)
          255         if line_search.error():
          256           msg('Linesearch failed: %s. Shrinking theta.', line_search.errMsg);        
          257           theta *= self.params.kappaTrust
          258           continue
          259 
          260         discrepancyPrev = discrepancy
          261         t = line_search.value.t
          262         x.axpy(t,d)
          263 
          264         forward_problem.evalFandLinearize(x,out=Fx,guess=Fx)
          265         residual.set(y)
          266         residual -= Fx
          267         discrepancy = self.discrepancy(x,y,residual);
          268 
          269         self.xUpdateHook(count,x,Fx,y,residual)
          270         
          271         # Check to see if we did a good job reducing the misfit (compared to the amount that we asked
          272         # the linearized problem to correct).
          273         rho = (discrepancy-discrepancyPrev)/(discrepancyLin-discrepancyPrev)
          274       
          275         # assert(rho>0)
          276 
          277         # Determine if the trust region requires any adjustment.
          278         if rho > self.params.rhoLow:
          279           # We have a good fit.
          280           if rho > self.params.rhoHigh:
          281             if theta < self.params.thetaMax:
          282               theta = min(theta/self.params.kappaTrust, self.params.thetaMax);
          283               msg('Very good decrease (rho=%g).  New theta %g', rho, theta);
          284             else:
          285               msg('Very good decrease (rho=%g).  Keeping theta %g', rho, theta);
          286           else:
          287             msg('Reasonable decrease (rho=%g). Keeping theta %g',rho, theta);
          288         else:
          289           # We have a lousy fit.  Try asking for less correction.
          290           theta = theta * params.kappaTrust;
          291           msg('Poor decrease (rho=%g) from %g to %g;', rho, discrepancyPrev, discrepancy)
          292           msg('wanted %g.  New theta %g', discrepancyLin, theta);
          293     # except Exception as e:
          294     #   # Store the current x and y values in case they are interesting to the caller, then
          295     #   # re-raise the exception.
          296     #   self.finalState = self.finalize(x,Fx)
          297     #   raise e
          298       
          299     return self.finalize(x, Fx)
          300 
          301 class BasicInvertIGN(InvertIGN):
          302   """Inversion of a forward problem using nonlinear conjugate gradient minimization
          303   and the Morozov discrepancy principle."""
          304 
          305   def __init__(self,forward_problem,params=None):
          306     InvertIGN.__init__(self,params=params)
          307     self.forward_problem = forward_problem
          308 
          309   def forwardProblem(self):
          310     """
          311     Returns the NonlinearForwardProblem that defines the inverse problem. 
          312     """
          313     return self.forward_problem
          314 
          315   def solve(self,x0,y,targetDiscrepancy):
          316     """
          317     Run the iterative method starting from the initial point *x0*.
          318 
          319     The third argument is the desired value of :math:`||y-T(x)||_Y`
          320     """
          321     return InvertIGN.solve(self,x0,y,targetDiscrepancy)
          322 
          323   def initialize(self,x0,y,targetDiscrepancy):
          324     """
          325     This method is a hook called at the beginning of a run.  It gives an opportunity for the class to 
          326     set up information needed to decide conditions for the final stopping criterion.
          327 
          328     It may be that the initial data 'x0' expresses the the initial data for the problem T(x)=y
          329     indirectly. Or it could be that x0 and y are expressed as dolfin.Function's rather than dolfind.GenericVectors.
          330     So initialize returns a triple of vectors (x0,y) which are possibly modified versions of the input data.
          331 
          332     The arguments \*args are passed directly from 'run'.
          333     """
          334     return (x0,y,targetDiscrepancy)
          335 
          336 
          337 class InvertNLCG:
          338   """Implements solving an ill-posed minimization problem of the form
          339   
          340 .. math::     J(x) = B(y-F(x),y-F(x))
          341 
          342 where B is a symmetric positive semidefinite bilinear form and F is a (possibly nonlinear) function mapping between
          343 two Hilbert spaces.
          344 
          345 Minimization is done applying a nonlinear conjugate gradient algorithm until a stopping criterion is met.
          346 """
          347 
          348 
          349   ###################################################################################
          350   ##
          351   ## The methods up to the comment below are the ones that can or must be overridden by a subclass.
          352   ##
          353   ## Those implemented with a NotImpelmentedError must be overridden.
          354 
          355   def iterationHook(self,count,x,Fx,y,d,r,TStarR):
          356     """
          357     Called during each iteration with the pertinent computations.  Handy for debugging and visualization.
          358     """
          359     for listener in self.iteration_listeners:
          360       listener(self,count,x,Fx,y,d,r,TStarR)
          361 
          362   def xUpdateHook(self,count,x,Fx,y,r):
          363     for listener in self.x_update_listeners:
          364       listener(self,count,x,Fx,y,r)
          365 
          366   def stopConditionMet(self,count,x,Fx,y,r):
          367     """
          368     Determines if minimization should be halted (based, e.g. on a Morozov discrepancy principle)
          369     
          370     In: 
          371         * count: current iteration count
          372         * x:     point in domain of potential minimizer.
          373         * Fx:    value of nonlinear function at x
          374         * y:     desired value of F(x)
          375         * r:     current residual    
          376     """
          377     raise NotImplementedError()
          378 
          379   def initialize(self,x,y,*args):
          380     """
          381     Hook called at the start of solve.  This gives the class a chance to massage the input.
          382     For example, x and y might be dolfin (finite element) Functions; this method should return 
          383     the associated dolfin GenericVectors.
          384     
          385     The remaining arguments are passed directly from 'solve', and might be used for determining the
          386     final stopping criterion.
          387     
          388     Returns dolfin vectors corresponding to the initial value of x and the desired value of y=F(x).    
          389     """
          390     raise NotImplementedError()
          391 
          392   def forwardProblem(self):
          393     """
          394     Returns the NonlinearForwardProblem that defines the maps F, T, T*, and the inner products on the 
          395     domain and range.
          396     """
          397     raise NotImplementedError()
          398     
          399   def finalize(self,x,y):
          400     """
          401     Hook called at the end of 'solve'.  Gives the chance to massage the return values.
          402     By default returns (x,y).
          403     """
          404     return (x,y)
          405 
          406   ## End of methods to be overridden by a subclass.
          407   ##
          408   ########################################################################################################
          409 
          410   @staticmethod
          411   def defaultParameters():
          412     """Parameters:
          413       
          414       * ITER_MAX: maximum iteration count
          415       * mu: scaling parameter for Morozov discrepency principle.
          416       * cg_reset: reset conjugate gradient method after this many iterations.  Zero for a default behaviour.
          417       * steepest_descent: Use steepest descent rather than full NLCG
          418       * linesearch: parameters to be passed to the linesearch algorithm
          419       * verbose: print out extra output
          420     """
          421     params = Parameters('InvNLCG', ITER_MAX=200, mu=1.1, cg_reset=0, deriv_eps=1, steepest_descent=False, verbose=False)
          422     lsparams = linesearchHZ.LinesearchHZ.defaultParameters()
          423     lsparams.sigma=0.1
          424     lsparams.delta=0.05
          425     lsparams.rename('linesearch')
          426     params.add(lsparams)
          427     return params
          428     
          429   def __init__(self, params=None):
          430     self.params = self.defaultParameters()
          431     if not params is None: self.params.update(params)
          432     self.iteration_listeners = []
          433     self.x_update_listeners = []
          434 
          435   def addIterationListener(self,listener):
          436     """
          437     Add an object to be called after each iteration.
          438     """
          439     self.iteration_listeners.append(listener)
          440 
          441   def addXUpdateListener(self,listener):
          442     """
          443     Add an object to be called after each new value of x is computed.
          444     """
          445     self.x_update_listeners.append(listener)
          446 
          447   def solve(self, x0, y, *args):
          448     """Solve the inverse problem F(x)=y.  The initial estimate is x=x0.
          449     Any extra arguments are passed to :func:`initialize`.
          450     """
          451     (x,y) = self.initialize(x0,y,*args)
          452 
          453     # self.discrepancy_history=[]
          454 
          455     forward_problem = self.forwardProblem()
          456     cg_reset = x.size()
          457     if (self.params.cg_reset != 0): cg_reset = self.params.cg_reset
          458 
          459     # Initial functional evalutation
          460     if self.params.verbose: msg('initial evaluation')
          461     Fx = forward_problem.evalFandLinearize(x)
          462 
          463     residual = y.copy()
          464     residual.axpy(-1, Fx)
          465 
          466     Jx = 0.5*forward_problem.rangeIP(residual,residual)
          467 
          468     TStarR = forward_problem.TStar(residual)
          469     TStarRLast = TStarR.copy()
          470 
          471     # Compute our first guess for the descent direction.  
          472     d = TStarR.copy();
          473     Jp = -forward_problem.domainIP(TStarR,d)
          474 
          475     if self.params.verbose: msg('initial J %g Jp %g', Jx, Jp)
          476 
          477     # We need to give an initial guess for the stepsize in the linesearch routine
          478     # t0 = Jx/(1-Jp);
          479     t0 = Jx/(self.params.deriv_eps-Jp)
          480 
          481     # An analog of matlab's realmin
          482     realmin = np.finfo(np.double).tiny
          483     
          484     # The class that performs our linesearches.
          485     line_search = linesearchHZ.LinesearchHZ(params=self.params.linesearch)
          486     line_searchee = ForwardProblemLineSearchAdaptor(forward_problem)
          487 
          488     # Keep track of multiple line search failures.
          489     prevLineSearchFailed = False
          490 
          491     try:
          492       # Main loop
          493       count = 0
          494       while True:
          495         # self.discrepancy_history.append(sqrt(2*Jx))
          496 
          497         if count > self.params.ITER_MAX:
          498           raise IterationCountFailure(self.params.ITER_MAX)
          499         count += 1
          500 
          501         self.iterationHook(count,x,Fx,y,d,residual,TStarR)
          502 
          503         if self.stopConditionMet(count,x,Fx,y,residual):
          504           msg('done at iteration %d', count)
          505           break
          506 
          507         # Phi = lambda t: self.evalPhiAndPhiPrime(forward_problem,x,d,y,t)
          508         Phi = lambda t: line_searchee.eval(x,d,y,t)
          509         line_search.search(Phi,Jx,Jp,t0)
          510         if line_search.error():
          511           if prevLineSearchFailed:
          512             raise NumericalFailure('linesearch failed twice in a row: %s' % line_search.errMsg);
          513           else:
          514             msg('linesearch failed: %s, switching to steepest descent', line_search.errMsg);
          515             d = TStarR;
          516             t = 1/(self.params.deriv_eps-Jp);
          517             prevLineSearchFailed = True;
          518             continue      
          519 
          520         prevLineSearchFailed = False;
          521 
          522         t = line_search.value.t;
          523         x.axpy(t,d)
          524         TStarRLast.set(TStarR)
          525 
          526         Fx = forward_problem.evalFandLinearize(x,out=Fx,guess=Fx)
          527         residual.set(y)
          528         residual -= Fx
          529 
          530         self.xUpdateHook(count,x,Fx,y,residual)
          531 
          532         TStarR = forward_problem.TStar(residual,out=TStarR)
          533 
          534         Jx = 0.5*forward_problem.rangeIP(residual,residual)
          535 
          536         if self.params.steepest_descent:
          537           beta = 0
          538         else:
          539           # Polak-Ribiere
          540           beta = forward_problem.domainIP(TStarR,TStarR-TStarRLast)/forward_problem.domainIP(TStarRLast,TStarRLast)
          541 
          542           # If we have done more iterations than we have points, reset the conjugate gradient method.
          543           if count > cg_reset:
          544             beta = 0
          545 
          546         d *= beta
          547         d += TStarR
          548 
          549         JpLast = Jp
          550         Jp =  -forward_problem.domainIP(TStarR,d)
          551         if Jp >=0:
          552           if self.params.verbose:
          553             msg('found an uphill direction; resetting!');
          554           d.set(TStarR)
          555           Jp = -forward_problem.domainIP(TStarR,d);
          556           t0 = Jx/(self.params.deriv_eps-Jp);
          557         else:
          558           t0 =  t* min(10, JpLast/(Jp - realmin));
          559     except Exception as e:
          560       # Store the current x and y values in case they are interesting to the caller, then
          561       # re-raise the exception.
          562       import traceback
          563       traceback.print_exc()
          564       self.finalState = self.finalize(x,Fx)
          565       raise e
          566 
          567     return self.finalize(x, Fx)
          568 
          569 class BasicInvertNLCG(InvertNLCG):
          570   """Inversion of a forward problem using nonlinear conjugate gradient minimization
          571   and the Morozov discrepancy principle."""
          572   
          573   def __init__(self,forward_problem,params=None):
          574     InvertNLCG.__init__(self,params=params)
          575     self.forward_problem = forward_problem
          576     
          577   def forwardProblem(self):
          578     """
          579     Returns the NonlinearForwardProblem that defines the inverse problem. 
          580     """
          581     return self.forward_problem
          582 
          583   def solve(self,x0,y,targetDiscrepancy):
          584     """
          585     Run the iterative method starting from the initial point *x0*.
          586 
          587     The third argument is the desired value of :math:`||y-T(x)||_Y`
          588     """
          589     return InvertNLCG.solve(self,x0,y,targetDiscrepancy)
          590 
          591   def initialize(self,x0,y,targetDiscrepancy):
          592     """
          593     This method is a hook called at the beginning of a run.  It gives an opportunity for the class to 
          594     set up information needed to decide conditions for the final stopping criterion.
          595 
          596     It may be that the initial data 'x0' expresses the the initial data for the problem T(x)=y
          597     indirectly. Or it could be that x0 and y are expressed as dolfin.Function's rather than dolfind.GenericVectors.
          598     So initialize returns a triple of vectors (x0,y) which are possibly modified versions of the input data.
          599 
          600     The arguments \*args are passed directly from 'run'.
          601     """
          602     self.targetDiscrepancy = targetDiscrepancy
          603     return (x0,y)
          604 
          605   def stopConditionMet(self,count,x,Fx,y,r):
          606     """
          607     Determines if minimization should be halted (based, e.g. on a Morozov discrepancy principle)
          608 
          609     In: 
          610         * count: current iteration count
          611         * x:     point in domain of potential minimizer.
          612         * Fx:    value of nonlinear function at x
          613         * y:     desired value of F(x)
          614         * r:     current residual    
          615     """
          616     J = sqrt(abs(self.forward_problem.rangeIP(r,r)));
          617     Jgoal = self.params.mu*self.targetDiscrepancy
          618     
          619     msg('(%d) J=%g goal=%g',count,J,Jgoal)
          620     
          621     return J <= Jgoal
          622 
          623