-
Notifications
You must be signed in to change notification settings - Fork 85
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
fix: Guard Minuit optimizer against provided strategy of None #2278
Changes from 8 commits
bc10ba4
f634978
dcabf32
33d203a
51a538a
0cacda7
73307a5
6871bf1
6cd5366
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -28,7 +28,10 @@ def __init__(self, *args, **kwargs):
         Args:
             errordef (:obj:`float`): See minuit docs. Default is ``1.0``.
             steps (:obj:`int`): Number of steps for the bounds. Default is ``1000``.
-            strategy (:obj:`int`): See :attr:`iminuit.Minuit.strategy`. Default is ``None``.
+            strategy (:obj:`int`): See :attr:`iminuit.Minuit.strategy`.
+                Default is ``None``, which results in either
+                :attr:`iminuit.Minuit.strategy` ``0`` or ``1`` from the evaluation of
+                ``int(not pyhf.tensorlib.default_do_grad)``.
             tolerance (:obj:`float`): Tolerance for termination.
                 See specific optimizer for detailed meaning.
                 Default is ``0.1``.
|
@@ -99,11 +102,14 @@ def _minimize(
             fitresult (scipy.optimize.OptimizeResult): the fit result
         """
         maxiter = options.pop('maxiter', self.maxiter)
-        # 0: Fast, user-provided gradient
-        # 1: Default, no user-provided gradient
-        strategy = options.pop(
-            'strategy', self.strategy if self.strategy is not None else not do_grad
-        )
+        # int(not do_grad) results in iminuit.Minuit.strategy of either:
+        # 0: Fast. Does not check a user-provided gradient.
+        # 1: Default. Checks user-provided gradient against numerical gradient.
+        strategy = options.pop("strategy", self.strategy)
+        # Guard against None from either self.strategy defaulting to None or
+        # passing strategy=None as options kwarg
+        if strategy is None:
+            strategy = int(not do_grad)
Review comment 1: "As the …" [remainder of this comment was truncated in extraction — TODO confirm against the PR page]. Review comment 2: "If you're aiming for maximum readability, I like …" [remainder truncated in extraction].
||
         tolerance = options.pop('tolerance', self.tolerance)
         if options:
             raise exceptions.Unsupported(
|
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -173,27 +173,35 @@ def test_minuit_strategy_do_grad(mocker, backend):
     assert spy.spy_return.minuit.strategy == 1


-@pytest.mark.parametrize('strategy', [0, 1])
+@pytest.mark.parametrize('strategy', [0, 1, 2])
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Adding |
 def test_minuit_strategy_global(mocker, backend, strategy):
     pyhf.set_backend(
         pyhf.tensorlib, pyhf.optimize.minuit_optimizer(strategy=strategy, tolerance=0.2)
     )
     spy = mocker.spy(pyhf.optimize.minuit_optimizer, '_minimize')
-    m = pyhf.simplemodels.uncorrelated_background([50.0], [100.0], [10.0])
-    data = pyhf.tensorlib.astensor([125.0] + m.config.auxdata)
+    model = pyhf.simplemodels.uncorrelated_background([50.0], [100.0], [10.0])
+    data = pyhf.tensorlib.astensor([125.0] + model.config.auxdata)

-    pyhf.infer.mle.fit(data, m)
+    pyhf.infer.mle.fit(data, model)
     assert spy.call_count == 1
     assert spy.spy_return.minuit.strategy == strategy

-    pyhf.infer.mle.fit(data, m, strategy=0)
+    pyhf.infer.mle.fit(data, model, strategy=None)
     assert spy.call_count == 2
-    assert spy.spy_return.minuit.strategy == 0
+    assert spy.spy_return.minuit.strategy == int(not pyhf.tensorlib.default_do_grad)

-    pyhf.infer.mle.fit(data, m, strategy=1)
+    pyhf.infer.mle.fit(data, model, strategy=0)
     assert spy.call_count == 3
-    assert spy.spy_return.minuit.strategy == 1
+    assert spy.spy_return.minuit.strategy == 0
+
+    pyhf.infer.mle.fit(data, model, strategy=1)
+    assert spy.call_count == 4
+    assert spy.spy_return.minuit.strategy == 1
+
+    pyhf.infer.mle.fit(data, model, strategy=2)
+    assert spy.call_count == 5
+    assert spy.spy_return.minuit.strategy == 2


 def test_set_tolerance(backend):
     m = pyhf.simplemodels.uncorrelated_background([50.0], [100.0], [10.0])
|
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
c.f. https://pyhf--2278.org.readthedocs.build/en/2278/_generated/pyhf.optimize.opt_minuit.minuit_optimizer.html for render.