Skip to content

Commit 0f52681

Browse files
pre-commit-ci[bot], kratsg, and matthewfeickert
authored
chore: [pre-commit.ci] pre-commit autoupdate (#2568)
* Update pre-commit hooks: - github.com/pre-commit/pre-commit-hooks: v5.0.0 → v6.0.0 - github.com/astral-sh/ruff-pre-commit: v0.12.0 → v0.14.3 - github.com/psf/black-pre-commit-mirror: 24.10.0 → 25.9.0 - github.com/adamchainz/blacken-docs: 1.19.1 → 1.20.0 - github.com/pre-commit/mirrors-mypy: v1.13.0 → v1.18.2 - github.com/codespell-project/codespell: v2.3.0 → v2.4.1 - github.com/python-jsonschema/check-jsonschema: 0.30.0 → 0.34.1 * Apply fixes for RUF059: unused-unpacked-variable - c.f. https://docs.astral.sh/ruff/rules/unused-unpacked-variable/ * Apply fixes for RUF043: pytest-raises-ambiguous-pattern - c.f. https://docs.astral.sh/ruff/rules/pytest-raises-ambiguous-pattern/ * Add validation and test of pyhf.readxml.process_channel's ElementTree argument being not None. Co-authored-by: Giordon Stark <[email protected]> Co-authored-by: Matthew Feickert <[email protected]>
1 parent 239f765 commit 0f52681

File tree

18 files changed

+50
-124
lines changed

18 files changed

+50
-124
lines changed

.pre-commit-config.yaml

Lines changed: 7 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -4,7 +4,7 @@ ci:
44

55
repos:
66
- repo: https://github.com/pre-commit/pre-commit-hooks
7-
rev: v5.0.0
7+
rev: v6.0.0
88
hooks:
99
- id: check-added-large-files
1010
- id: check-case-conflict
@@ -35,25 +35,25 @@ repos:
3535
- id: rst-inline-touching-normal
3636

3737
- repo: https://github.com/astral-sh/ruff-pre-commit
38-
rev: "v0.12.0"
38+
rev: "v0.14.3"
3939
hooks:
4040
- id: ruff-check
4141
args: ["--fix", "--show-fixes"]
4242

4343
- repo: https://github.com/psf/black-pre-commit-mirror
44-
rev: 24.10.0
44+
rev: 25.9.0
4545
hooks:
4646
- id: black-jupyter
4747
types_or: [python, pyi, jupyter]
4848

4949
- repo: https://github.com/adamchainz/blacken-docs
50-
rev: 1.19.1
50+
rev: 1.20.0
5151
hooks:
5252
- id: blacken-docs
5353
additional_dependencies: [black==24.10.0]
5454

5555
- repo: https://github.com/pre-commit/mirrors-mypy
56-
rev: v1.13.0
56+
rev: v1.18.2
5757
# check the oldest and newest supported Pythons
5858
# except skip python 3.9 for numpy, due to poor typing
5959
hooks:
@@ -69,14 +69,14 @@ repos:
6969
args: ["--python-version=3.13"]
7070

7171
- repo: https://github.com/codespell-project/codespell
72-
rev: v2.3.0
72+
rev: v2.4.1
7373
hooks:
7474
- id: codespell
7575
files: ^.*\.(py|md|rst)$
7676
args: ["-w", "-L", "hist,gaus"]
7777

7878
- repo: https://github.com/python-jsonschema/check-jsonschema
79-
rev: 0.30.0
79+
rev: 0.34.1
8080
hooks:
8181
- id: check-readthedocs
8282
args: ["--verbose"]

docs/development.rst

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -255,7 +255,7 @@ final deployment to PyPI_ can be done by creating a GitHub Release:
255255
#. Select the release tag that was just pushed, and set the release title to be the tag
256256
(e.g. ``v1.2.3``).
257257
#. Use the "Auto-generate release notes" button to generate a skeleton of the release
258-
notes and then augment them with the preprepared release notes the release maintainer
258+
notes and then augment them with the prepared release notes the release maintainer
259259
has written.
260260
#. Select "This is a pre-release" if the release is a release candidate.
261261
#. Select "Create a discussion for this release" if the release is a stable release.

docs/examples/notebooks/ImpactPlot.ipynb

Lines changed: 2 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -143,24 +143,10 @@
143143
},
144144
{
145145
"cell_type": "code",
146-
"execution_count": 5,
146+
"execution_count": null,
147147
"metadata": {},
148148
"outputs": [],
149-
"source": [
150-
"def calc_impact(idx, b, e, i, width, poi_index):\n",
151-
" _, _, bb, ee = fitresults([(idx, b + e)])\n",
152-
" poi_up_post = bb[poi_index]\n",
153-
"\n",
154-
" _, _, bb, ee = fitresults([(idx, b - e)])\n",
155-
" poi_dn_post = bb[poi_index]\n",
156-
"\n",
157-
" _, _, bb, ee = fitresults([(idx, b + width)])\n",
158-
" poi_up_pre = bb[poi_index]\n",
159-
"\n",
160-
" _, _, bb, ee = fitresults([(idx, b - width)])\n",
161-
" poi_dn_pre = bb[poi_index]\n",
162-
" return np.asarray([poi_dn_post, poi_up_post, poi_dn_pre, poi_up_pre])"
163-
]
149+
"source": "def calc_impact(idx, b, e, i, width, poi_index):\n _, _, bb, _ = fitresults([(idx, b + e)])\n poi_up_post = bb[poi_index]\n\n _, _, bb, _ = fitresults([(idx, b - e)])\n poi_dn_post = bb[poi_index]\n\n _, _, bb, _ = fitresults([(idx, b + width)])\n poi_up_pre = bb[poi_index]\n\n _, _, bb, _ = fitresults([(idx, b - width)])\n poi_dn_pre = bb[poi_index]\n return np.asarray([poi_dn_post, poi_up_post, poi_dn_pre, poi_up_pre])"
164150
},
165151
{
166152
"cell_type": "code",

docs/examples/notebooks/binderexample/StatisticalAnalysis.ipynb

Lines changed: 2 additions & 74 deletions
Original file line numberDiff line numberDiff line change
@@ -171,82 +171,10 @@
171171
},
172172
{
173173
"cell_type": "code",
174-
"execution_count": 7,
174+
"execution_count": null,
175175
"metadata": {},
176176
"outputs": [],
177-
"source": [
178-
"def get_mc_counts(pars):\n",
179-
" deltas, factors = pdf.modifications(pars)\n",
180-
" allsum = pyhf.tensorlib.concatenate(\n",
181-
" deltas + [pyhf.tensorlib.astensor(pdf.nominal_rates)]\n",
182-
" )\n",
183-
" nom_plus_delta = pyhf.tensorlib.sum(allsum, axis=0)\n",
184-
" nom_plus_delta = pyhf.tensorlib.reshape(\n",
185-
" nom_plus_delta, (1,) + pyhf.tensorlib.shape(nom_plus_delta)\n",
186-
" )\n",
187-
" allfac = pyhf.tensorlib.concatenate(factors + [nom_plus_delta])\n",
188-
" return pyhf.tensorlib.product(allfac, axis=0)\n",
189-
"\n",
190-
"\n",
191-
"animate_plot_pieces = None\n",
192-
"\n",
193-
"\n",
194-
"def init_plot(fig, ax, par_settings):\n",
195-
" global animate_plot_pieces\n",
196-
"\n",
197-
" nbins = sum(list(pdf.config.channel_nbins.values()))\n",
198-
" x = np.arange(nbins)\n",
199-
" data = np.zeros(nbins)\n",
200-
" items = []\n",
201-
" for i in [3, 2, 1, 0]:\n",
202-
" items.append(ax.bar(x, data, 1, alpha=1.0))\n",
203-
" animate_plot_pieces = (\n",
204-
" items,\n",
205-
" ax.scatter(\n",
206-
" x, workspace.data(pdf, include_auxdata=False), c=\"k\", alpha=1.0, zorder=99\n",
207-
" ),\n",
208-
" )\n",
209-
"\n",
210-
"\n",
211-
"def animate(ax=None, fig=None, **par_settings):\n",
212-
" global animate_plot_pieces\n",
213-
" items, obs = animate_plot_pieces\n",
214-
" pars = pyhf.tensorlib.astensor(pdf.config.suggested_init())\n",
215-
" for k, v in par_settings.items():\n",
216-
" pars[par_name_dict[k]] = v\n",
217-
"\n",
218-
" mc_counts = get_mc_counts(pars)\n",
219-
" rectangle_collection = zip(*map(lambda x: x.patches, items))\n",
220-
"\n",
221-
" for rectangles, binvalues in zip(rectangle_collection, mc_counts[:, 0].T):\n",
222-
" offset = 0\n",
223-
" for sample_index in [3, 2, 1, 0]:\n",
224-
" rect = rectangles[sample_index]\n",
225-
" binvalue = binvalues[sample_index]\n",
226-
" rect.set_y(offset)\n",
227-
" rect.set_height(binvalue)\n",
228-
" offset += rect.get_height()\n",
229-
"\n",
230-
" fig.canvas.draw()\n",
231-
"\n",
232-
"\n",
233-
"def plot(ax=None, order=[3, 2, 1, 0], **par_settings):\n",
234-
" pars = pyhf.tensorlib.astensor(pdf.config.suggested_init())\n",
235-
" for k, v in par_settings.items():\n",
236-
" pars[par_name_dict[k]] = v\n",
237-
"\n",
238-
" mc_counts = get_mc_counts(pars)\n",
239-
" bottom = None\n",
240-
" # nb: bar_data[0] because evaluating only one parset\n",
241-
" for i, sample_index in enumerate(order):\n",
242-
" data = mc_counts[sample_index][0]\n",
243-
" x = np.arange(len(data))\n",
244-
" ax.bar(x, data, 1, bottom=bottom, alpha=1.0)\n",
245-
" bottom = data if i == 0 else bottom + data\n",
246-
" ax.scatter(\n",
247-
" x, workspace.data(pdf, include_auxdata=False), c=\"k\", alpha=1.0, zorder=99\n",
248-
" )"
249-
]
177+
"source": "def get_mc_counts(pars):\n deltas, factors = pdf.modifications(pars)\n allsum = pyhf.tensorlib.concatenate(\n deltas + [pyhf.tensorlib.astensor(pdf.nominal_rates)]\n )\n nom_plus_delta = pyhf.tensorlib.sum(allsum, axis=0)\n nom_plus_delta = pyhf.tensorlib.reshape(\n nom_plus_delta, (1,) + pyhf.tensorlib.shape(nom_plus_delta)\n )\n allfac = pyhf.tensorlib.concatenate(factors + [nom_plus_delta])\n return pyhf.tensorlib.product(allfac, axis=0)\n\n\nanimate_plot_pieces = None\n\n\ndef init_plot(fig, ax, par_settings):\n global animate_plot_pieces\n\n nbins = sum(list(pdf.config.channel_nbins.values()))\n x = np.arange(nbins)\n data = np.zeros(nbins)\n items = []\n for i in [3, 2, 1, 0]:\n items.append(ax.bar(x, data, 1, alpha=1.0))\n animate_plot_pieces = (\n items,\n ax.scatter(\n x, workspace.data(pdf, include_auxdata=False), c=\"k\", alpha=1.0, zorder=99\n ),\n )\n\n\ndef animate(ax=None, fig=None, **par_settings):\n global animate_plot_pieces\n items, _ = animate_plot_pieces\n pars = pyhf.tensorlib.astensor(pdf.config.suggested_init())\n for k, v in par_settings.items():\n pars[par_name_dict[k]] = v\n\n mc_counts = get_mc_counts(pars)\n rectangle_collection = zip(*map(lambda x: x.patches, items))\n\n for rectangles, binvalues in zip(rectangle_collection, mc_counts[:, 0].T):\n offset = 0\n for sample_index in [3, 2, 1, 0]:\n rect = rectangles[sample_index]\n binvalue = binvalues[sample_index]\n rect.set_y(offset)\n rect.set_height(binvalue)\n offset += rect.get_height()\n\n fig.canvas.draw()\n\n\ndef plot(ax=None, order=[3, 2, 1, 0], **par_settings):\n pars = pyhf.tensorlib.astensor(pdf.config.suggested_init())\n for k, v in par_settings.items():\n pars[par_name_dict[k]] = v\n\n mc_counts = get_mc_counts(pars)\n bottom = None\n # nb: bar_data[0] because evaluating only one parset\n for i, sample_index in enumerate(order):\n data = mc_counts[sample_index][0]\n x = np.arange(len(data))\n ax.bar(x, data, 1, bottom=bottom, alpha=1.0)\n bottom = data if i == 0 else bottom + data\n ax.scatter(\n x, workspace.data(pdf, include_auxdata=False), c=\"k\", alpha=1.0, zorder=99\n )"
250178
},
251179
{
252180
"cell_type": "markdown",

docs/likelihood.rst

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -115,7 +115,7 @@ constraint terms are derived implicitly as each type of modifier
115115
unambiguously defines the constraint terms it requires. Correlated shape
116116
modifiers and normalisation uncertainties have compatible constraint
117117
terms and thus modifiers can be declared that *share* parameters by
118-
re-using a name [1]_ for multiple modifiers. That is, a variation of a
118+
reusing a name [1]_ for multiple modifiers. That is, a variation of a
119119
single parameter causes a shift within sample rates due to both shape
120120
and normalisation variations.
121121

src/pyhf/infer/__init__.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -177,7 +177,7 @@ def hypotest(
177177
teststat, sig_plus_bkg_distribution, bkg_only_distribution
178178
)
179179
)
180-
CLsb_exp, CLb_exp, CLs_exp = calc.expected_pvalues(
180+
CLsb_exp, _CLb_exp, CLs_exp = calc.expected_pvalues(
181181
sig_plus_bkg_distribution, bkg_only_distribution
182182
)
183183

src/pyhf/infer/calculators.py

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -524,7 +524,6 @@ def expected_pvalues(self, sig_plus_bkg_distribution, bkg_only_distribution):
524524
:math:`\mathrm{CL}_{b}`, and :math:`\mathrm{CL}_{s}`.
525525
"""
526526
# Calling pvalues is easier then repeating the CLs calculation here
527-
tb, _ = get_backend()
528527
return list(
529528
map(
530529
list,

src/pyhf/infer/test_statistics.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -23,7 +23,7 @@ def _qmu_like(
2323
If the lower bound of the POI is 0 this automatically implements
2424
qmu_tilde. Otherwise this is qmu (no tilde).
2525
"""
26-
tensorlib, optimizer = get_backend()
26+
tensorlib, _ = get_backend()
2727
tmu_like_stat, (mubhathat, muhatbhat) = _tmu_like(
2828
mu, data, pdf, init_pars, par_bounds, fixed_params, return_fitted_pars=True
2929
)
@@ -44,7 +44,7 @@ def _tmu_like(
4444
If the lower bound of the POI is 0 this automatically implements
4545
tmu_tilde. Otherwise this is tmu (no tilde).
4646
"""
47-
tensorlib, optimizer = get_backend()
47+
tensorlib, _ = get_backend()
4848
mubhathat, fixed_poi_fit_lhood_val = fixed_poi_fit(
4949
mu, data, pdf, init_pars, par_bounds, fixed_params, return_fitted_val=True
5050
)
@@ -515,7 +515,7 @@ def q0(mu, data, pdf, init_pars, par_bounds, fixed_params, return_fitted_pars=Fa
515515
)
516516
mu = 0.0
517517

518-
tensorlib, optimizer = get_backend()
518+
tensorlib, _ = get_backend()
519519

520520
tmu_like_stat, (mubhathat, muhatbhat) = _tmu_like(
521521
mu, data, pdf, init_pars, par_bounds, fixed_params, return_fitted_pars=True

src/pyhf/optimize/opt_jax.py

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -45,7 +45,6 @@ def wrap_objective(objective, data, pdf, stitch_pars, do_grad=False, jit_pieces=
4545
Returns:
4646
objective_and_grad (:obj:`func`): tensor backend wrapped objective,gradient pair
4747
"""
48-
tensorlib, _ = get_backend()
4948
# NB: tuple arguments that need to be hashable (static_argnums)
5049
if do_grad:
5150

src/pyhf/readxml.py

Lines changed: 6 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -283,9 +283,13 @@ def process_data(
283283

284284

285285
def process_channel(
286-
channelxml: ET.ElementTree, resolver: ResolverType, track_progress: bool = False
286+
channelxml: ET.ElementTree[ET.Element[str]],
287+
resolver: ResolverType,
288+
track_progress: bool = False,
287289
) -> tuple[str, list[float], list[Sample], list[Parameter]]:
288290
channel = channelxml.getroot()
291+
if channel is None:
292+
raise RuntimeError("Root element of ElementTree is missing.")
289293

290294
inputfile = channel.attrib.get('InputFile', '')
291295
histopath = channel.attrib.get('HistoPath', '')
@@ -316,7 +320,7 @@ def process_channel(
316320

317321

318322
def process_measurements(
319-
toplvl: ET.ElementTree,
323+
toplvl: ET.ElementTree[ET.Element[str]],
320324
other_parameter_configs: Sequence[Parameter] | None = None,
321325
) -> list[Measurement]:
322326
"""

0 commit comments

Comments
 (0)