diff --git a/src/easyscience/fitting/fitter.py b/src/easyscience/fitting/fitter.py
index 3d96fa75..daea7782 100644
--- a/src/easyscience/fitting/fitter.py
+++ b/src/easyscience/fitting/fitter.py
@@ -26,11 +26,13 @@ class Fitter:
     def __init__(self, fit_object, fit_function: Callable):
         self._fit_object = fit_object
         self._fit_function = fit_function
-        self._dependent_dims = None
+        self._dependent_dims: int = None
+        self._tolerance: float = None
+        self._max_evaluations: int = None
 
-        self._enum_current_minimizer = DEFAULT_MINIMIZER
-        self._minimizer: MinimizerBase  # _minimizer is set in the create method
-        self._update_minimizer(self._enum_current_minimizer)
+        self._minimizer: MinimizerBase = None  # set in _update_minimizer
+        self._enum_current_minimizer: AvailableMinimizers = None  # set in _update_minimizer
+        self._update_minimizer(DEFAULT_MINIMIZER)
 
     def fit_constraints(self) -> list:
         return self._minimizer.fit_constraints()
@@ -110,6 +112,42 @@ def minimizer(self) -> MinimizerBase:
         """
         return self._minimizer
 
+    @property
+    def tolerance(self) -> float:
+        """
+        Get the tolerance for the minimizer.
+
+        :return: Tolerance for the minimizer
+        """
+        return self._tolerance
+
+    @tolerance.setter
+    def tolerance(self, tolerance: float) -> None:
+        """
+        Set the tolerance for the minimizer.
+
+        :param tolerance: Tolerance for the minimizer
+        """
+        self._tolerance = tolerance
+
+    @property
+    def max_evaluations(self) -> int:
+        """
+        Get the maximum number of evaluations for the minimizer.
+
+        :return: Maximum number of evaluations for the minimizer
+        """
+        return self._max_evaluations
+
+    @max_evaluations.setter
+    def max_evaluations(self, max_evaluations: int) -> None:
+        """
+        Set the maximum number of evaluations for the minimizer.
+
+        :param max_evaluations: Maximum number of evaluations for the minimizer
+        """
+        self._max_evaluations = max_evaluations
+
     @property
     def fit_function(self) -> Callable:
         """
@@ -175,7 +213,7 @@ def fit(self) -> Callable:
         re-constitute the independent variables and once the fit is completed, reshape the inputs to those expected.
         """
 
-        @functools.wraps(self.minimizer.fit)
+        @functools.wraps(self._minimizer.fit)
         def inner_fit_callable(
             x: np.ndarray,
             y: np.ndarray,
@@ -202,7 +240,14 @@ def inner_fit_callable(
             constraints = self._minimizer.fit_constraints()
             self.fit_function = fit_fun_wrap
             self._minimizer.set_fit_constraint(constraints)
-            f_res = self.minimizer.fit(x_fit, y_new, weights=weights, **kwargs)
+            f_res = self._minimizer.fit(
+                x_fit,
+                y_new,
+                weights=weights,
+                tolerance=self._tolerance,
+                max_evaluations=self._max_evaluations,
+                **kwargs,
+            )
 
             # Postcompute
             fit_result = self._post_compute_reshaping(f_res, x, y)
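For orientation, a minimal usage sketch of the two new knobs. AbsSin is the toy model from the integration tests further down; the Fitter import path is assumed.

    import numpy as np
    from easyscience.fitting import Fitter  # import path assumed

    ref_sin = AbsSin(0.2, np.pi)   # reference model used to generate data
    sp_sin = AbsSin(0.354, 3.05)   # starting point to be refined
    sp_sin.offset.fixed = False
    sp_sin.phase.fixed = False

    x = np.linspace(0, 5, 200)
    f = Fitter(sp_sin, sp_sin)
    f.tolerance = 0.1         # forwarded as tolerance=... on every fit() call
    f.max_evaluations = 100   # forwarded as max_evaluations=...
    result = f.fit(x, ref_sin(x))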
""" - @functools.wraps(self.minimizer.fit) + @functools.wraps(self._minimizer.fit) def inner_fit_callable( x: np.ndarray, y: np.ndarray, @@ -202,7 +240,14 @@ def inner_fit_callable( constraints = self._minimizer.fit_constraints() self.fit_function = fit_fun_wrap self._minimizer.set_fit_constraint(constraints) - f_res = self.minimizer.fit(x_fit, y_new, weights=weights, **kwargs) + f_res = self._minimizer.fit( + x_fit, + y_new, + weights=weights, + tolerance=self._tolerance, + max_evaluations=self._max_evaluations, + **kwargs, + ) # Postcompute fit_result = self._post_compute_reshaping(f_res, x, y) diff --git a/src/easyscience/fitting/minimizers/minimizer_base.py b/src/easyscience/fitting/minimizers/minimizer_base.py index 5435b39c..e0bd6771 100644 --- a/src/easyscience/fitting/minimizers/minimizer_base.py +++ b/src/easyscience/fitting/minimizers/minimizer_base.py @@ -40,7 +40,7 @@ def __init__( self, obj, #: BaseObj, fit_function: Callable, - minimizer_enum: Optional[AvailableMinimizers] = None, + minimizer_enum: AvailableMinimizers, ): # todo after constraint changes, add type hint: obj: BaseObj # noqa: E501 if minimizer_enum.method not in self.supported_methods(): raise FitError(f'Method {minimizer_enum.method} not available in {self.__class__}') @@ -58,6 +58,10 @@ def __init__( def all_constraints(self) -> List[ObjConstraint]: return [*self._constraints, *self._object._constraints] + @property + def enum(self) -> AvailableMinimizers: + return self._minimizer_enum + @property def name(self) -> str: return self._minimizer_enum.name @@ -83,6 +87,8 @@ def fit( model: Optional[Callable] = None, parameters: Optional[Parameter] = None, method: Optional[str] = None, + tolerance: Optional[float] = None, + max_evaluations: Optional[int] = None, **kwargs, ) -> FitResults: """ @@ -129,7 +135,7 @@ def evaluate(self, x: np.ndarray, minimizer_parameters: Optional[dict[str, float return self._fit_function(x, **minimizer_parameters, **kwargs) - def _get_method_dict(self, passed_method: Optional[str] = None) -> dict[str, str]: + def _get_method_kwargs(self, passed_method: Optional[str] = None) -> dict[str, str]: if passed_method is not None: if passed_method not in self.supported_methods(): raise FitError(f'Method {passed_method} not available in {self.__class__}') diff --git a/src/easyscience/fitting/minimizers/minimizer_bumps.py b/src/easyscience/fitting/minimizers/minimizer_bumps.py index a2142584..ee9f75f5 100644 --- a/src/easyscience/fitting/minimizers/minimizer_bumps.py +++ b/src/easyscience/fitting/minimizers/minimizer_bumps.py @@ -74,6 +74,8 @@ def fit( model: Optional[Callable] = None, parameters: Optional[Parameter] = None, method: Optional[str] = None, + tolerance: Optional[float] = None, + max_evaluations: Optional[int] = None, minimizer_kwargs: Optional[dict] = None, engine_kwargs: Optional[dict] = None, **kwargs, @@ -97,7 +99,7 @@ def fit( :return: Fit results :rtype: ModelResult """ - method_dict = self._get_method_dict(method) + method_dict = self._get_method_kwargs(method) if weights is None: weights = np.sqrt(np.abs(y)) @@ -107,10 +109,14 @@ def fit( if minimizer_kwargs is None: minimizer_kwargs = {} - # else: - # minimizer_kwargs = {"fit_kws": minimizer_kwargs} minimizer_kwargs.update(engine_kwargs) + if tolerance is not None: + minimizer_kwargs['ftol'] = tolerance # tolerance for change in function value + minimizer_kwargs['xtol'] = tolerance # tolerance for change in parameter value, could be an independent value + if max_evaluations is not None: + minimizer_kwargs['steps'] = 
diff --git a/src/easyscience/fitting/minimizers/minimizer_bumps.py b/src/easyscience/fitting/minimizers/minimizer_bumps.py
index a2142584..ee9f75f5 100644
--- a/src/easyscience/fitting/minimizers/minimizer_bumps.py
+++ b/src/easyscience/fitting/minimizers/minimizer_bumps.py
@@ -74,6 +74,8 @@ def fit(
         model: Optional[Callable] = None,
         parameters: Optional[Parameter] = None,
         method: Optional[str] = None,
+        tolerance: Optional[float] = None,
+        max_evaluations: Optional[int] = None,
         minimizer_kwargs: Optional[dict] = None,
         engine_kwargs: Optional[dict] = None,
         **kwargs,
@@ -97,7 +99,7 @@ def fit(
         :return: Fit results
         :rtype: ModelResult
         """
-        method_dict = self._get_method_dict(method)
+        method_dict = self._get_method_kwargs(method)
 
         if weights is None:
             weights = np.sqrt(np.abs(y))
@@ -107,10 +109,14 @@ def fit(
 
         if minimizer_kwargs is None:
             minimizer_kwargs = {}
-        # else:
-        #     minimizer_kwargs = {"fit_kws": minimizer_kwargs}
         minimizer_kwargs.update(engine_kwargs)
 
+        if tolerance is not None:
+            minimizer_kwargs['ftol'] = tolerance  # tolerance for change in function value
+            minimizer_kwargs['xtol'] = tolerance  # tolerance for change in parameter value, could be an independent value
+        if max_evaluations is not None:
+            minimizer_kwargs['steps'] = max_evaluations
+
         if model is None:
             model_function = self._make_model(parameters=parameters)
             model = model_function(x, y, weights)
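A sketch of how the shared settings land in bumps-specific keywords, following the mapping in the hunk above (values illustrative):

    f.tolerance = 1e-6        # -> minimizer_kwargs['ftol'] = 1e-6 and minimizer_kwargs['xtol'] = 1e-6
    f.max_evaluations = 500   # -> minimizer_kwargs['steps'] = 500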
diff --git a/src/easyscience/fitting/minimizers/minimizer_dfo.py b/src/easyscience/fitting/minimizers/minimizer_dfo.py
index 7534e2fc..8bf09ef5 100644
--- a/src/easyscience/fitting/minimizers/minimizer_dfo.py
+++ b/src/easyscience/fitting/minimizers/minimizer_dfo.py
@@ -53,9 +53,7 @@ def supported_methods() -> List[str]:
 
     @staticmethod
     def all_methods() -> List[str]:
-        return [
-            'leastsq',
-        ]
+        return ['leastsq']
 
     def fit(
         self,
@@ -65,8 +63,8 @@ def fit(
         model: Optional[Callable] = None,
         parameters: Optional[List[Parameter]] = None,
         method: str = None,
-        xtol: float = 1e-6,
-        ftol: float = 1e-8,
+        tolerance: Optional[float] = None,
+        max_evaluations: Optional[int] = None,
         **kwargs,
     ) -> FitResults:
         """
@@ -110,6 +108,8 @@ def fit(
         stack_status = global_object.stack.enabled
         global_object.stack.enabled = False
 
+        kwargs = self._prepare_kwargs(tolerance, max_evaluations, **kwargs)
+
         try:
             model_results = self._dfo_fit(self._cached_pars, model, **kwargs)
             self._set_parameter_fit_result(model_results, stack_status)
@@ -239,7 +239,11 @@ def _gen_fit_results(self, fit_results, weights, **kwargs) -> FitResults:
         return results
 
     @staticmethod
-    def _dfo_fit(pars: Dict[str, Parameter], model: Callable, **kwargs):
+    def _dfo_fit(
+        pars: Dict[str, Parameter],
+        model: Callable,
+        **kwargs,
+    ):
         """
         Method to convert EasyScience styling to DFO-LS styling (yes, again)
 
@@ -261,13 +265,23 @@ def _dfo_fit(pars: Dict[str, Parameter], model: Callable, **kwargs):
             np.array([par.max for par in pars.values()]),
         )
         # https://numericalalgorithmsgroup.github.io/dfols/build/html/userguide.html
-        if np.isinf(bounds).any():
-            results = dfols.solve(model, pars_values, bounds=bounds, **kwargs)
-        else:
+        if not np.isinf(bounds).any():
             # It is only possible to scale (normalize) variables if they are bound (different from inf)
-            results = dfols.solve(model, pars_values, bounds=bounds, scaling_within_bounds=True, **kwargs)
+            kwargs['scaling_within_bounds'] = True
+
+        results = dfols.solve(model, pars_values, bounds=bounds, **kwargs)
 
         if 'Success' not in results.msg:
             raise FitError(f'Fit failed with message: {results.msg}')
 
         return results
+
+    @staticmethod
+    def _prepare_kwargs(tolerance: Optional[float] = None, max_evaluations: Optional[int] = None, **kwargs) -> dict:
+        if max_evaluations is not None:
+            kwargs['maxfun'] = max_evaluations  # maximum number of function evaluations
+        if tolerance is not None:
+            if tolerance > 0.1:  # the DFO module raises an error for larger values
+                raise ValueError('Tolerance must be less than or equal to 0.1')
+            kwargs['rhoend'] = tolerance  # minimum trust region radius; controls termination
+        return kwargs
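The DFO-LS mapping, including the guard on overly large tolerances. A sketch; DFO here stands for the minimizer class defined in minimizer_dfo.py, and the values are illustrative:

    DFO._prepare_kwargs(tolerance=0.05, max_evaluations=200)
    # -> {'maxfun': 200, 'rhoend': 0.05}

    DFO._prepare_kwargs(tolerance=0.5)
    # -> raises ValueError: tolerance above 0.1 is rejected before reaching dfols.solve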
diff --git a/src/easyscience/fitting/minimizers/minimizer_lmfit.py b/src/easyscience/fitting/minimizers/minimizer_lmfit.py
index 8550ffc0..a4094ea8 100644
--- a/src/easyscience/fitting/minimizers/minimizer_lmfit.py
+++ b/src/easyscience/fitting/minimizers/minimizer_lmfit.py
@@ -85,6 +85,8 @@ def fit(
         model: Optional[LMModel] = None,
         parameters: Optional[LMParameters] = None,
         method: Optional[str] = None,
+        tolerance: Optional[float] = None,
+        max_evaluations: Optional[int] = None,
         minimizer_kwargs: Optional[dict] = None,
         engine_kwargs: Optional[dict] = None,
         **kwargs,
@@ -110,19 +112,14 @@ def fit(
         :return: Fit results
         :rtype: ModelResult
         """
-        method_dict = self._get_method_dict(method)
-
         if weights is None:
             weights = 1 / np.sqrt(np.abs(y))
 
         if engine_kwargs is None:
             engine_kwargs = {}
 
-        if minimizer_kwargs is None:
-            minimizer_kwargs = {}
-        else:
-            minimizer_kwargs = {'fit_kws': minimizer_kwargs}
-        minimizer_kwargs.update(engine_kwargs)
+        method_kwargs = self._get_method_kwargs(method)
+        fit_kws_dict = self._get_fit_kws(method, tolerance, minimizer_kwargs)
 
         # Why do we do this? Because a fitting template has to have global_object instantiated outside pre-runtime
         from easyscience import global_object
@@ -134,7 +131,16 @@ def fit(
             if model is None:
                 model = self._make_model()
 
-            model_results = model.fit(y, x=x, weights=weights, **method_dict, **minimizer_kwargs, **kwargs)
+            model_results = model.fit(
+                y,
+                x=x,
+                weights=weights,
+                max_nfev=max_evaluations,
+                fit_kws=fit_kws_dict,
+                **method_kwargs,
+                **engine_kwargs,
+                **kwargs,
+            )
             self._set_parameter_fit_result(model_results, stack_status)
             results = self._gen_fit_results(model_results)
         except Exception as e:
@@ -143,6 +149,16 @@ def fit(
             raise FitError(e)
         return results
 
+    def _get_fit_kws(self, method: Optional[str], tolerance: Optional[float], minimizer_kwargs: Optional[dict]) -> dict:
+        if minimizer_kwargs is None:
+            minimizer_kwargs = {}
+        if tolerance is not None:
+            if method in [None, 'least_squares', 'leastsq']:
+                minimizer_kwargs['ftol'] = tolerance
+            if method in ['differential_evolution', 'powell', 'cobyla']:
+                minimizer_kwargs['tol'] = tolerance
+        return minimizer_kwargs
+
     def convert_to_pars_obj(self, parameters: Optional[List[Parameter]] = None) -> LMParameters:
         """
         Create an lmfit compatible container with the `Parameters` converted from the base object.
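Tolerance routing for lmfit differs by method: the least-squares backends take ftol inside fit_kws, while the scalar minimizers take a plain tol. A sketch of _get_fit_kws with illustrative values:

    minimizer._get_fit_kws('leastsq', 1e-8, None)             # {'ftol': 1e-8}
    minimizer._get_fit_kws('powell', 1e-8, None)              # {'tol': 1e-8}
    minimizer._get_fit_kws('leastsq', None, {'maxiter': 9})   # {'maxiter': 9} (passed through untouched; key hypothetical)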
diff --git a/tests/integration_tests/Fitting/test_fitter.py b/tests/integration_tests/Fitting/test_fitter.py
index a3e12519..cf17a79d 100644
--- a/tests/integration_tests/Fitting/test_fitter.py
+++ b/tests/integration_tests/Fitting/test_fitter.py
@@ -140,6 +140,62 @@ def test_fit_result(fit_engine):
     check_fit_results(result, sp_sin, ref_sin, x, sp_ref1=sp_ref1, sp_ref2=sp_ref2)
 
 
+@pytest.mark.parametrize("fit_engine", [None, AvailableMinimizers.LMFit, AvailableMinimizers.Bumps, AvailableMinimizers.DFO])
+def test_basic_max_evaluations(fit_engine):
+    ref_sin = AbsSin(0.2, np.pi)
+    sp_sin = AbsSin(0.354, 3.05)
+
+    x = np.linspace(0, 5, 200)
+    y = ref_sin(x)
+
+    sp_sin.offset.fixed = False
+    sp_sin.phase.fixed = False
+
+    f = Fitter(sp_sin, sp_sin)
+    if fit_engine is not None:
+        try:
+            f.switch_minimizer(fit_engine)
+        except AttributeError:
+            pytest.skip(f"{fit_engine} is not installed")
+    args = [x, y]
+    kwargs = {}
+    f.max_evaluations = 3
+    try:
+        result = f.fit(*args, **kwargs)
+        # Result should not be the same as the reference
+        assert sp_sin.phase.value != pytest.approx(ref_sin.phase.value, rel=1e-3)
+        assert sp_sin.offset.value != pytest.approx(ref_sin.offset.value, rel=1e-3)
+    except FitError as e:
+        # DFO raises rather than returning early when the evaluation budget is exhausted
+        assert "Objective has been called MAXFUN times" in str(e)
+
+
+@pytest.mark.parametrize("fit_engine,tolerance", [(None, 10), (AvailableMinimizers.LMFit, 10), (AvailableMinimizers.Bumps, 10), (AvailableMinimizers.DFO, 0.1)])
+def test_basic_tolerance(fit_engine, tolerance):
+    ref_sin = AbsSin(0.2, np.pi)
+    sp_sin = AbsSin(0.354, 3.05)
+
+    x = np.linspace(0, 5, 200)
+    y = ref_sin(x)
+
+    sp_sin.offset.fixed = False
+    sp_sin.phase.fixed = False
+
+    f = Fitter(sp_sin, sp_sin)
+    if fit_engine is not None:
+        try:
+            f.switch_minimizer(fit_engine)
+        except AttributeError:
+            pytest.skip(f"{fit_engine} is not installed")
+    args = [x, y]
+    kwargs = {}
+    f.tolerance = tolerance
+    result = f.fit(*args, **kwargs)
+    # Result should not be the same as the reference
+    assert sp_sin.phase.value != pytest.approx(ref_sin.phase.value, rel=1e-3)
+    assert sp_sin.offset.value != pytest.approx(ref_sin.offset.value, rel=1e-3)
+
+
 @pytest.mark.parametrize("fit_method", ["leastsq", "powell", "cobyla"])
 def test_lmfit_methods(fit_method):
     ref_sin = AbsSin(0.2, np.pi)
diff --git a/tests/integration_tests/Fitting/test_fitter_legacy_parameter.py b/tests/integration_tests/Fitting/test_fitter_legacy_parameter.py
index f9554062..ca10fae9 100644
--- a/tests/integration_tests/Fitting/test_fitter_legacy_parameter.py
+++ b/tests/integration_tests/Fitting/test_fitter_legacy_parameter.py
@@ -108,6 +108,62 @@ def test_basic_fit(fit_engine, with_errors):
     assert sp_sin.offset.raw_value == pytest.approx(ref_sin.offset.raw_value, rel=1e-3)
 
 
+@pytest.mark.parametrize("fit_engine", [None, AvailableMinimizers.LMFit, AvailableMinimizers.Bumps, AvailableMinimizers.DFO])
+def test_basic_max_evaluations(fit_engine):
+    ref_sin = AbsSin(0.2, np.pi)
+    sp_sin = AbsSin(0.354, 3.05)
+
+    x = np.linspace(0, 5, 200)
+    y = ref_sin(x)
+
+    sp_sin.offset.fixed = False
+    sp_sin.phase.fixed = False
+
+    f = Fitter(sp_sin, sp_sin)
+    if fit_engine is not None:
+        try:
+            f.switch_minimizer(fit_engine)
+        except AttributeError:
+            pytest.skip(f"{fit_engine} is not installed")
+    args = [x, y]
+    kwargs = {}
+    f.max_evaluations = 3
+    try:
+        result = f.fit(*args, **kwargs)
+        # Result should not be the same as the reference
+        assert sp_sin.phase.raw_value != pytest.approx(ref_sin.phase.raw_value, rel=1e-3)
+        assert sp_sin.offset.raw_value != pytest.approx(ref_sin.offset.raw_value, rel=1e-3)
+    except FitError as e:
+        # DFO raises rather than returning early when the evaluation budget is exhausted
+        assert "Objective has been called MAXFUN times" in str(e)
+
+
+@pytest.mark.parametrize("fit_engine,tolerance", [(None, 10), (AvailableMinimizers.LMFit, 10), (AvailableMinimizers.Bumps, 0.1), (AvailableMinimizers.DFO, 0.1)])
+def test_basic_tolerance(fit_engine, tolerance):
+    ref_sin = AbsSin(0.2, np.pi)
+    sp_sin = AbsSin(0.354, 3.05)
+
+    x = np.linspace(0, 5, 200)
+    y = ref_sin(x)
+
+    sp_sin.offset.fixed = False
+    sp_sin.phase.fixed = False
+
+    f = Fitter(sp_sin, sp_sin)
+    if fit_engine is not None:
+        try:
+            f.switch_minimizer(fit_engine)
+        except AttributeError:
+            pytest.skip(f"{fit_engine} is not installed")
+    args = [x, y]
+    kwargs = {}
+    f.tolerance = tolerance
+    result = f.fit(*args, **kwargs)
+    # Result should not be the same as the reference
+    assert sp_sin.phase.raw_value != pytest.approx(ref_sin.phase.raw_value, rel=1e-3)
+    assert sp_sin.offset.raw_value != pytest.approx(ref_sin.offset.raw_value, rel=1e-3)
+
+
 @pytest.mark.parametrize("fit_engine", [None, AvailableMinimizers.LMFit, AvailableMinimizers.Bumps, AvailableMinimizers.DFO])
 def test_fit_result(fit_engine):
     ref_sin = AbsSin(0.2, np.pi)
diff --git a/tests/unit_tests/Fitting/minimizers/test_minimizer_base.py b/tests/unit_tests/Fitting/minimizers/test_minimizer_base.py
index 7109468b..39993455 100644
--- a/tests/unit_tests/Fitting/minimizers/test_minimizer_base.py
+++ b/tests/unit_tests/Fitting/minimizers/test_minimizer_base.py
@@ -17,10 +17,11 @@ def minimizer(self):
 
         MinimizerBase.__abstractmethods__ = set()
         MinimizerBase.supported_methods = MagicMock(return_value=['method'])
+        self._mock_minimizer_enum = MagicMock(package='package', method='method')
         minimizer = MinimizerBase(
             obj='obj',
             fit_function='fit_function',
-            minimizer_enum=MagicMock(package='package', method='method')
+            minimizer_enum=self._mock_minimizer_enum
         )
         return minimizer
 
@@ -47,6 +48,9 @@ def test_init(self, minimizer: MinimizerBase):
         assert minimizer._fit_function == None
         assert minimizer._constraints == []
 
+    def test_enum(self, minimizer: MinimizerBase):
+        assert minimizer.enum == self._mock_minimizer_enum
+
     def test_evaluate(self, minimizer: MinimizerBase):
         # When
         minimizer._fit_function = MagicMock(return_value='fit_function_return')
@@ -172,7 +176,7 @@ def test_create_signature(self, minimizer: MinimizerBase) -> None:
 
     def test_get_method_dict(self, minimizer: MinimizerBase) -> None:
         # When Then
-        result = minimizer._get_method_dict()
+        result = minimizer._get_method_kwargs()
 
         # Expect
         assert result == {'method': 'method'}
@@ -182,7 +186,7 @@ def test_get_method_dict_no_self(self, minimizer: MinimizerBase) -> None:
         minimizer._method = None
 
         # Then
-        result = minimizer._get_method_dict()
+        result = minimizer._get_method_kwargs()
 
         # Expect
         assert result == {}
@@ -192,7 +196,7 @@ def test_get_method_dict_supported_method(self, minimizer: MinimizerBase) -> Non
         minimizer.supported_methods = MagicMock(return_value=['supported_method'])
 
         # Then
-        result = minimizer._get_method_dict('supported_method')
+        result = minimizer._get_method_kwargs('supported_method')
 
         # Expect
         assert result == {'method': 'supported_method'}
@@ -203,5 +207,5 @@ def test_get_method_dict_not_supported_method(self, minimizer: MinimizerBase) ->
 
         # Then Expect
         with pytest.raises(FitError):
-            result = minimizer._get_method_dict('not_supported_method')
+            result = minimizer._get_method_kwargs('not_supported_method')
 
diff --git a/tests/unit_tests/Fitting/minimizers/test_minimizer_lmfit.py b/tests/unit_tests/Fitting/minimizers/test_minimizer_lmfit.py
index b2f7b08e..e17b7401 100644
--- a/tests/unit_tests/Fitting/minimizers/test_minimizer_lmfit.py
+++ b/tests/unit_tests/Fitting/minimizers/test_minimizer_lmfit.py
@@ -98,7 +98,7 @@ def test_fit(self, minimizer: LMFit) -> None:
 
         # Expect
         assert result == 'gen_fit_results'
-        mock_model.fit.assert_called_once_with(2.0, x=1.0, weights=0.7071067811865475, method='leastsq')
+        mock_model.fit.assert_called_once_with(2.0, x=1.0, weights=0.7071067811865475, max_nfev=None, fit_kws={}, method='leastsq')
         minimizer._make_model.assert_called_once_with()
         minimizer._set_parameter_fit_result.assert_called_once_with('fit', False)
         minimizer._gen_fit_results.assert_called_once_with('fit')
@@ -115,7 +115,7 @@ def test_fit_model(self, minimizer: LMFit) -> None:
         minimizer.fit(x=1.0, y=2.0, model=mock_model)
 
         # Expect
-        mock_model.fit.assert_called_once_with(2.0, x=1.0, weights=0.7071067811865475, method='leastsq')
+        mock_model.fit.assert_called_once_with(2.0, x=1.0, weights=0.7071067811865475, max_nfev=None, fit_kws={}, method='leastsq')
         minimizer._make_model.assert_not_called()
 
     def test_fit_method(self, minimizer: LMFit) -> None:
@@ -132,7 +132,7 @@ def test_fit_method(self, minimizer: LMFit) -> None:
         minimizer.fit(x=1.0, y=2.0, method='method_passed')
 
         # Expect
-        mock_model.fit.assert_called_once_with(2.0, x=1.0, weights=0.7071067811865475, method='method_passed')
+        mock_model.fit.assert_called_once_with(2.0, x=1.0, weights=0.7071067811865475, max_nfev=None, fit_kws={}, method='method_passed')
         minimizer.supported_methods.assert_called_once_with()
 
     def test_fit_kwargs(self, minimizer: LMFit) -> None:
@@ -147,7 +147,7 @@ def test_fit_kwargs(self, minimizer: LMFit) -> None:
         minimizer.fit(x=1.0, y=2.0, minimizer_kwargs={'minimizer_key': 'minimizer_val'}, engine_kwargs={'engine_key': 'engine_val'})
 
         # Expect
-        mock_model.fit.assert_called_once_with(2.0, x=1.0, weights=0.7071067811865475, method='leastsq', fit_kws={'minimizer_key': 'minimizer_val'}, engine_key='engine_val')
+        mock_model.fit.assert_called_once_with(2.0, x=1.0, weights=0.7071067811865475, max_nfev=None, fit_kws={'minimizer_key': 'minimizer_val'}, method='leastsq', engine_key='engine_val')
 
     def test_fit_exception(self, minimizer: LMFit) -> None:
         # When
diff --git a/tests/unit_tests/Fitting/test_fitter.py b/tests/unit_tests/Fitting/test_fitter.py
index 471cbb7a..63783c17 100644
--- a/tests/unit_tests/Fitting/test_fitter.py
+++ b/tests/unit_tests/Fitting/test_fitter.py
@@ -20,7 +20,8 @@ def test_constructor(self, fitter: Fitter):
         assert fitter._fit_object == self.mock_fit_object
         assert fitter._fit_function == self.mock_fit_function
         assert fitter._dependent_dims is None
-        assert fitter._enum_current_minimizer == AvailableMinimizers.LMFit_leastsq
+        assert fitter._enum_current_minimizer is None
+        assert fitter._minimizer is None
         fitter._update_minimizer.assert_called_once_with(AvailableMinimizers.LMFit_leastsq)
 
     def test_fit_constraints(self, fitter: Fitter):