From 73b8d96035695b1fa91e5b6705720d1e72d64d82 Mon Sep 17 00:00:00 2001 From: Amy Thompson <52806925+amyjaynethompson@users.noreply.github.com> Date: Tue, 18 Jun 2024 10:36:58 +0100 Subject: [PATCH 01/40] With clustering tables (#2671) migrated over the rest of the clustering code from xia2.multiplex and added cluster tables to output --- newsfragments/2671.feature | 1 + src/dials/algorithms/correlation/analysis.py | 87 ++++++++++++++++++-- src/dials/algorithms/correlation/cluster.py | 53 ++++++++++++ src/dials/command_line/correlation_matrix.py | 2 + src/dials/templates/clusters.html | 6 ++ 5 files changed, 144 insertions(+), 5 deletions(-) create mode 100644 newsfragments/2671.feature create mode 100644 src/dials/algorithms/correlation/cluster.py diff --git a/newsfragments/2671.feature b/newsfragments/2671.feature new file mode 100644 index 0000000000..8daf56a644 --- /dev/null +++ b/newsfragments/2671.feature @@ -0,0 +1 @@ +``dials.correlation_matrix``: Add tables with cluster information to html output diff --git a/src/dials/algorithms/correlation/analysis.py b/src/dials/algorithms/correlation/analysis.py index cb73896eb1..cf41589e0e 100644 --- a/src/dials/algorithms/correlation/analysis.py +++ b/src/dials/algorithms/correlation/analysis.py @@ -1,5 +1,6 @@ from __future__ import annotations +import copy import json import logging import sys @@ -12,11 +13,14 @@ import iotbx.phil from dxtbx.model import ExperimentList from libtbx.phil import scope_extract +from scitbx.array_family import flex +from dials.algorithms.correlation.cluster import ClusterInfo from dials.algorithms.correlation.plots import linkage_matrix_to_dict, to_plotly_json from dials.algorithms.symmetry.cosym import CosymAnalysis from dials.algorithms.symmetry.cosym.plots import plot_coords, plot_rij_histogram from dials.array_family.flex import reflection_table +from dials.util import tabulate from dials.util.exclude_images import get_selection_for_valid_image_ranges from dials.util.filter_reflections import filtered_arrays_from_experiments_reflections from dials.util.multi_dataset_handling import select_datasets_on_identifiers @@ -53,6 +57,7 @@ def __init__( experiments: ExperimentList, reflections: list[reflection_table], params: scope_extract = None, + ids_to_identifiers_map: dict = None, ): """ Set up the required cosym preparations for determining the correlation matricies @@ -70,6 +75,7 @@ def __init__( params = phil_scope.extract() self.params = params self._reflections = [] + self.ids_to_identifiers_map = ids_to_identifiers_map if len(reflections) == len(experiments): for refl, expt in zip(reflections, experiments): @@ -87,9 +93,14 @@ def __init__( ) # Used for optional json creation that is in a format friendly for import and analysis (for future development) - self.ids_to_identifiers_map = {} - for table in self._reflections: - self.ids_to_identifiers_map.update(table.experiment_identifiers()) + # Also to retain dataset ids when used in multiplex + if self.ids_to_identifiers_map is None: + self.ids_to_identifiers_map = {} + for table in self._reflections: + self.ids_to_identifiers_map.update(table.experiment_identifiers()) + + self.labels = list(dict.fromkeys(self.ids_to_identifiers_map)) + self._labels_all = flex.size_t(self.labels) # Filter reflections that do not meet partiality threshold or default I/Sig(I) criteria @@ -100,6 +111,8 @@ def __init__( partiality_threshold=params.partiality_threshold, ) + self.unmerged_datasets = datasets + # Merge intensities to prepare for cosym analysis self.datasets = 
self._merge_intensities(datasets) @@ -183,10 +196,26 @@ def calculate_matrices(self): ) = self.compute_correlation_coefficient_matrix( self.cosym_analysis.target.rij_matrix ) + + self.correlation_clusters = self.cluster_info( + linkage_matrix_to_dict(self.cc_linkage_matrix) + ) + + logger.info("\nIntensity correlation clustering summary:") + self.cc_table = ClusterInfo.as_table(self.correlation_clusters) + logger.info(tabulate(self.cc_table, headers="firstrow", tablefmt="rst")) self.cos_angle, self.cos_linkage_matrix = self.compute_cos_angle_matrix( self.cosym_analysis.coords ) + self.cos_angle_clusters = self.cluster_info( + linkage_matrix_to_dict(self.cos_linkage_matrix) + ) + + logger.info("\nCos(angle) clustering summary:") + self.cos_table = ClusterInfo.as_table(self.cos_angle_clusters) + logger.info(tabulate(self.cos_table, headers="firstrow", tablefmt="rst")) + @staticmethod def compute_correlation_coefficient_matrix( correlation_matrix: np.ndarray, @@ -203,7 +232,7 @@ def compute_correlation_coefficient_matrix( """ - logger.info("\nCalculating Correlation Matrix (rij matrix - see dials.cosym)\n") + logger.info("\nCalculating Correlation Matrix (rij matrix - see dials.cosym)") # Make diagonals equal to 1 (each dataset correlated with itself) np.fill_diagonal(correlation_matrix, 1) @@ -243,7 +272,7 @@ def compute_cos_angle_matrix( """ logger.info( - "Calculating Cos Angle Matrix from optimised cosym coordinates (see dials.cosym)\n" + "\nCalculating Cos Angle Matrix from optimised cosym coordinates (see dials.cosym)" ) # Convert coordinates to cosine distances and then reversed so closer cosine distances have higher values to match CC matrix @@ -255,6 +284,52 @@ def compute_cos_angle_matrix( return cos_angle, cos_linkage_matrix + def cluster_info(self, cluster_dict: dict) -> list: + """ + Generate list of cluster objects with associated statistics. + Args: + cluster_dict(dict): dictionary of clusters (generated from linkage_matrix_to_dict) + + Returns: + info(list): list of ClusterInfo objects to describe all clusters of a certain type (ie correlation or cos angle) + """ + + info = [] + for cluster_id, cluster in cluster_dict.items(): + uc_params = [flex.double() for i in range(6)] + for j in cluster["datasets"]: + uc_j = self.datasets[j - 1].unit_cell().parameters() + for i in range(6): + uc_params[i].append(uc_j[i]) + average_uc = [flex.mean(uc_params[i]) for i in range(6)] + intensities_cluster = [] + labels_cluster = [] + ids = [self._labels_all[id - 1] for id in cluster["datasets"]] + for idx, k in zip(self._labels_all, self.unmerged_datasets): + if idx in ids: + intensities_cluster.append(k) + labels_cluster.append(idx) + merged = None + for d in intensities_cluster: + if merged is None: + merged = copy.deepcopy(d) + else: + merged = merged.concatenate(d, assert_is_similar_symmetry=False) + merging = merged.merge_equivalents() + merged_intensities = merging.array() + multiplicities = merging.redundancies() + info.append( + ClusterInfo( + cluster_id, + labels_cluster, + flex.mean(multiplicities.data().as_double()), + merged_intensities.completeness(), + unit_cell=average_uc, + height=cluster.get("height"), + ) + ) + return info + def convert_to_html_json(self): """ Prepares the required dataset tables and converts analysis into the required format for HTML output. 
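For context, the ``cluster_info`` method added above consumes the dictionary returned by ``linkage_matrix_to_dict``; its layout is inferred here from the usage in the hunk (cluster ids mapped to 1-based dataset numbers under "datasets", plus the dendrogram cut "height"). A minimal sketch of that consumption pattern, with purely illustrative values:

# Toy stand-in for linkage_matrix_to_dict output, as consumed by cluster_info.
cluster_dict = {
    1: {"datasets": [1, 2], "height": 0.15},  # dataset numbers are 1-based
    2: {"datasets": [1, 2, 3], "height": 0.42},
}
for cluster_id, cluster in cluster_dict.items():
    # The 1-based numbering is why the hunk indexes self.datasets[j - 1]
    # and self._labels_all[id - 1]
    zero_based = [j - 1 for j in cluster["datasets"]]
    print(cluster_id, zero_based, cluster.get("height"))

Each entry then drives the per-cluster unit-cell averaging and the merging of the matching unmerged intensity arrays into the multiplicity and completeness statistics stored on each ClusterInfo.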
@@ -265,11 +340,13 @@ def convert_to_html_json(self): self.cc_json = to_plotly_json( self.correlation_matrix, self.cc_linkage_matrix, + labels=self.labels, matrix_type="correlation", ) self.cos_json = to_plotly_json( self.cos_angle, self.cos_linkage_matrix, + labels=self.labels, matrix_type="cos_angle", ) diff --git a/src/dials/algorithms/correlation/cluster.py b/src/dials/algorithms/correlation/cluster.py new file mode 100644 index 0000000000..ca271481ba --- /dev/null +++ b/src/dials/algorithms/correlation/cluster.py @@ -0,0 +1,53 @@ +from __future__ import annotations + +from libtbx.str_utils import wordwrap + + +class ClusterInfo: + def __init__( + self, cluster_id, labels, multiplicity, completeness, unit_cell, height=None + ): + self.cluster_id = cluster_id + self.labels = labels + self.multiplicity = multiplicity + self.completeness = completeness + self.unit_cell = unit_cell + self.height = height + + def __str__(self): + lines = [ + "Cluster %i" % self.cluster_id, + " Number of datasets: %i" % len(self.labels), + " Completeness: %.1f %%" % (self.completeness * 100), + " Multiplicity: %.2f" % self.multiplicity, + " Datasets:" + ",".join("%s" % s for s in self.labels), + ] + if self.height is not None: + lines.append(" height: %f" % self.height) + return "\n".join(lines) + + @staticmethod + def as_table(cluster_info): + headers = [ + "Cluster", + "No. datasets", + "Datasets", + "Height", + "Multiplicity", + "Completeness", + ] + rows = [] + for info in cluster_info: + rows.append( + [ + "%i" % info.cluster_id, + "%i" % len(info.labels), + wordwrap(" ".join("%s" % l for l in info.labels)), + "%.2g" % info.height, + "%.1f" % info.multiplicity, + "%.2f" % info.completeness, + ] + ) + + rows.insert(0, headers) + return rows diff --git a/src/dials/command_line/correlation_matrix.py b/src/dials/command_line/correlation_matrix.py index 1248a79230..d19c78c65e 100644 --- a/src/dials/command_line/correlation_matrix.py +++ b/src/dials/command_line/correlation_matrix.py @@ -132,7 +132,9 @@ def run(args=None): html = template.stream( page_title="DIALS Correlation Matrix", cc_cluster_json=matrices.cc_json, + cc_cluster_table=matrices.cc_table, cos_angle_cluster_json=matrices.cos_json, + cos_angle_cluster_table=matrices.cos_table, image_range_tables=[matrices.table_list], cosym_graphs=matrices.rij_graphs, ) diff --git a/src/dials/templates/clusters.html b/src/dials/templates/clusters.html index d3f0fd1352..92a8d0cdf9 100644 --- a/src/dials/templates/clusters.html +++ b/src/dials/templates/clusters.html @@ -41,6 +41,9 @@

+ {{ macros.table(cc_cluster_table, + has_column_header=true, + has_row_header=true) }} {{ macros.plotly_graph("cc_cluster", cc_cluster_json, style="dendrogram-plot") }}
@@ -57,6 +60,9 @@

+ {{ macros.table(cos_angle_cluster_table, + has_column_header=true, + has_row_header=true) }} {{ macros.plotly_graph("cos_angle_cluster", cos_angle_cluster_json, style="dendrogram-plot") }}
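The tables added by this patch flow from ``ClusterInfo.as_table`` (header as row 0) through ``dials.util.tabulate`` into both the log output and the HTML template above. A standalone sketch of that rendering, assuming ``dials.util.tabulate`` behaves like the tabulate package, with made-up cluster values:

from tabulate import tabulate

# Rows in the shape ClusterInfo.as_table returns: header first, then one
# pre-formatted string row per cluster (illustrative values only).
rows = [
    ["Cluster", "No. datasets", "Datasets", "Height", "Multiplicity", "Completeness"],
    ["1", "2", "0 1", "0.14", "3.5", "0.87"],
    ["2", "3", "0 1 2", "0.41", "5.1", "0.95"],
]
print(tabulate(rows, headers="firstrow", tablefmt="rst"))

This prints the same reStructuredText-style grid emitted by the ``logger.info(tabulate(self.cc_table, headers="firstrow", tablefmt="rst"))`` calls in ``analysis.py``.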
From dcfd0fac6fed25603f5cd5d49314020489f40435 Mon Sep 17 00:00:00 2001 From: Nicholas Devenish Date: Tue, 18 Jun 2024 14:39:10 +0100 Subject: [PATCH 02/40] MNT: Add libGLU to linux dependencies Although the conda-forge CDT takes care of the GL dependency, we were still relying on libGLU to be present/usable from the conda environment. If this wasn't, then gltbx would fall back to including system libraries, which caused failures due to conflicting versions of other libraries. --- .conda-envs/linux.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/.conda-envs/linux.txt b/.conda-envs/linux.txt index aa9f3300a8..c4b5eeabbc 100644 --- a/.conda-envs/linux.txt +++ b/.conda-envs/linux.txt @@ -17,6 +17,7 @@ conda-forge::iota conda-forge::jinja2 conda-forge::libboost-devel conda-forge::libboost-python-devel +conda-forge::libglu conda-forge::matplotlib-base>=3.0.2 conda-forge::mesa-libgl-devel-cos7-x86_64 conda-forge::mrcfile From a513fee11dd4bb02c5d47241f93ec03d6dcb1266 Mon Sep 17 00:00:00 2001 From: James Beilsten-Edmands <30625594+jbeilstenedmands@users.noreply.github.com> Date: Tue, 18 Jun 2024 15:31:08 +0100 Subject: [PATCH 03/40] Don't modify in place for flex.reflection_table.concat (#2679) --- newsfragments/2679.bugfix | 1 + src/dials/array_family/flex_ext.py | 8 ++++---- tests/array_family/test_reflection_table.py | 9 +++++---- 3 files changed, 10 insertions(+), 8 deletions(-) create mode 100644 newsfragments/2679.bugfix diff --git a/newsfragments/2679.bugfix b/newsfragments/2679.bugfix new file mode 100644 index 0000000000..2d9a35c7b1 --- /dev/null +++ b/newsfragments/2679.bugfix @@ -0,0 +1 @@ +Fix flex.reflection_table.concat to not modify in-place. diff --git a/src/dials/array_family/flex_ext.py b/src/dials/array_family/flex_ext.py index cf40979054..164474a346 100644 --- a/src/dials/array_family/flex_ext.py +++ b/src/dials/array_family/flex_ext.py @@ -531,10 +531,10 @@ def concat( from dials.util.multi_dataset_handling import renumber_table_id_columns tables = renumber_table_id_columns(tables) - first = tables[0] - for table in tables[1:]: - first.extend(table) - return first + new = dials_array_family_flex_ext.reflection_table() + for table in tables: + new.extend(table) + return new def match_with_reference(self, other): """ diff --git a/tests/array_family/test_reflection_table.py b/tests/array_family/test_reflection_table.py index c63dc70801..1fbf2c9223 100644 --- a/tests/array_family/test_reflection_table.py +++ b/tests/array_family/test_reflection_table.py @@ -1602,11 +1602,12 @@ def test_concat(): ids2[0] = "c" ids2[1] = "d" - table1 = flex.reflection_table.concat([table1, table2]) + table3 = flex.reflection_table.concat([table1, table2]) + ids3 = dict(table3.experiment_identifiers()) - assert list(table1["id"]) == [0, 0, 1, 1, 2, 2, 3, 3] - assert list(ids1.keys()) == [0, 1, 2, 3] - assert list(ids1.values()) == ["a", "b", "c", "d"] + assert list(table3["id"]) == [0, 0, 1, 1, 2, 2, 3, 3] + assert list(ids3.keys()) == [0, 1, 2, 3] + assert list(ids3.values()) == ["a", "b", "c", "d"] # test empty tables table1 = flex.reflection_table() From a592df1169541030a4c8707e12d7695d1604e39b Mon Sep 17 00:00:00 2001 From: Nicholas Devenish Date: Tue, 18 Jun 2024 16:41:12 +0100 Subject: [PATCH 04/40] MNT: Disable broken 3d-threaded-integrator test --- tests/command_line/test_integrate.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/tests/command_line/test_integrate.py b/tests/command_line/test_integrate.py index 200f8816bc..36147348c0 100644 --- 
a/tests/command_line/test_integrate.py +++ b/tests/command_line/test_integrate.py @@ -143,10 +143,7 @@ def test_basic_blocking_options(dials_data, tmp_path, block_size, block_units): assert not result.returncode and not result.stderr -@pytest.mark.skipif( - (datetime.date.today() < datetime.date(2024, 6, 5)), - reason="Temporary skip for test that started to fail on Azure pipelines", -) +@pytest.mark.skip(reason="3d threaded integrator") def test_basic_threaded_integrate(dials_data, tmp_path): """Test the threaded integrator on single imageset data.""" From 64e6b8e3498f0b6297a1fdeeef1417964ca6f814 Mon Sep 17 00:00:00 2001 From: Nicholas Devenish Date: Tue, 18 Jun 2024 22:24:41 +0100 Subject: [PATCH 05/40] MNT: Restrict wxPython build version on macOS This is non-ideal, but build_number=6 dropped support for older macOS systems (earlier than Big Sur). Pin this here, and work on dropping other platforms after this release. --- .conda-envs/macos.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.conda-envs/macos.txt b/.conda-envs/macos.txt index 3977f7ad20..108cf8d281 100644 --- a/.conda-envs/macos.txt +++ b/.conda-envs/macos.txt @@ -52,6 +52,6 @@ conda-forge::sqlite conda-forge::tabulate conda-forge::tqdm conda-forge::urllib3 -conda-forge::wxpython>=4.2.0 +conda-forge::wxpython>=4.2.0=*_5 conda-forge::xz conda-forge::zlib From 89c917f72bfa6a239c4188e1bd6a6268cca1f331 Mon Sep 17 00:00:00 2001 From: Kevin Dalton Date: Wed, 19 Jun 2024 09:46:33 -0400 Subject: [PATCH 06/40] Fix Pinkindexer Bug (#2680) Fix a bug in the PinkIndexer lattice search code which was preventing it from indexing Micro ED test data. * format black * News * Rename newsfragments/xxx.bugfix to newsfragments/2680.bugfix --------- Co-authored-by: Kevin Dalton Co-authored-by: David Waterman Co-authored-by: DiamondLightSource-build-server --- newsfragments/2680.bugfix | 1 + .../indexing/lattice_search/pinkindexer.py | 69 ++++++++----------- tests/command_line/test_integrate.py | 1 - 3 files changed, 28 insertions(+), 43 deletions(-) create mode 100644 newsfragments/2680.bugfix diff --git a/newsfragments/2680.bugfix b/newsfragments/2680.bugfix new file mode 100644 index 0000000000..2d97359538 --- /dev/null +++ b/newsfragments/2680.bugfix @@ -0,0 +1 @@ +``dials.index``: Fix a bug in the ``pink_indexer`` method that caused failures for images with electron diffraction geometry. 
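The fix in the diff below reorders quaternion components from scalar-first ``(w, x, y, z)`` to scalar-last ``(x, y, z, w)``, matching the convention of scipy's ``Rotation`` class that this module uses elsewhere. A quick consistency check of the patched ordering (illustrative rotation vector):

import numpy as np
from scipy.spatial.transform import Rotation

rotvec = np.array([0.1, -0.2, 0.3])
q_scipy = Rotation.from_rotvec(rotvec).as_quat()  # scalar-last: (x, y, z, w)

# Manual construction following the patched rotvec_to_quaternion ordering
alpha = np.linalg.norm(rotvec)
axis = rotvec / alpha
q_manual = np.array([*(np.sin(alpha / 2) * axis), np.cos(alpha / 2)])
assert np.allclose(q_scipy, q_manual)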
diff --git a/src/dials/algorithms/indexing/lattice_search/pinkindexer.py b/src/dials/algorithms/indexing/lattice_search/pinkindexer.py index 4a11c029e2..b653e23767 100644 --- a/src/dials/algorithms/indexing/lattice_search/pinkindexer.py +++ b/src/dials/algorithms/indexing/lattice_search/pinkindexer.py @@ -50,11 +50,12 @@ def rotvec_to_quaternion(rotvec, deg=False, eps=1e-32): if deg: alpha = np.deg2rad(alpha) a2 = 0.5 * alpha + sa2 = np.sin(a2) w = np.cos(a2) - x = np.sin(a2) * ax[..., 0] - y = np.sin(a2) * ax[..., 1] - z = np.sin(a2) * ax[..., 2] - return np.stack((w, x, y, z), axis=-1) + x = sa2 * ax[..., 0] + y = sa2 * ax[..., 1] + z = sa2 * ax[..., 2] + return np.stack((x, y, z, w), axis=-1) def quaternion_multiply(a, b): @@ -62,24 +63,24 @@ def quaternion_multiply(a, b): Multiply two quaternions, return a*b """ # Expand a - aw = a[..., 0] - ax = a[..., 1] - ay = a[..., 2] - az = a[..., 3] + ax = a[..., 0] + ay = a[..., 1] + az = a[..., 2] + aw = a[..., 3] # Expand b - bw = b[..., 0] - bx = b[..., 1] - by = b[..., 2] - bz = b[..., 3] + bx = b[..., 0] + by = b[..., 1] + bz = b[..., 2] + bw = b[..., 3] # Calculate result - w = aw * bw - ax * bx - ay * by - az * bz x = aw * bx + ax * bw + ay * bz - az * by y = aw * by - ax * bz + ay * bw + az * bx z = aw * bz + ax * by - ay * bx + az * bw + w = aw * bw - ax * bx - ay * by - az * bz - return np.dstack((w, x, y, z)) + return np.dstack((x, y, z, w)) def norm2(array, axis=-1, keepdims=False): @@ -288,12 +289,13 @@ def index_pink( # Discretize scaled rotvecs scale_max = np.arctan(np.pi / 4.0) bins = np.linspace(-scale_max, scale_max, voxel_grid_points) - # This is how you would calculate the bin centers if you cared to extract the rotation matrix directly - # bin_centers = np.concatenate( - # (bins[[0]], 0.5 * (bins[1:] + bins[:-1]), bins[[-1]]) - # ) idx = np.digitize(scaled_rotvec, bins) + # This is how to calculate the bin centers to extract the rotation matrix from the voxel grid + bin_centers = np.concatenate( + (bins[[0]], 0.5 * (bins[1:] + bins[:-1]), bins[[-1]]) + ) + # Map discretized rotvecs into voxel grid n = voxel_grid_points + 1 voxels = np.zeros((n, n, n), dtype=int_dtype) @@ -319,30 +321,13 @@ def index_pink( cutoff = np.sort(voxels.flatten())[-min_lattices] peaks = np.column_stack(np.where(voxels >= cutoff)) for peak in peaks: - assignment = np.zeros_like(mask) - assignment[i, j] = (idx == peak).all(-1).any(-1) - refl_id, miller_id = np.where(assignment) - - # Use a bootstrap approach to estimate the UB matrix - n = len(refl_id) - num_bootstraps = 100 - bs_idx = np.random.choice(n, (num_bootstraps, n)) - - refl_id = refl_id[bs_idx] - miller_id = miller_id[bs_idx] - - h = Hall[miller_id] - s = s1_hat[refl_id] - - # Assign everything to be the nominal wavelength - wav = self.wav_peak - k = np.reciprocal(wav) - UB = ( - k - * (s - s0_hat).transpose(0, 2, 1) - @ np.linalg.pinv(h.transpose(0, 2, 1)) - ) - yield UB.mean(0) + v = bin_centers[peak] + l = norm2(v) + theta = np.tan(l) * 4.0 + rotvec = theta * v / l + U = Rotation.from_rotvec(rotvec).as_matrix() + UB = U @ self.B + yield UB class PinkIndexer(Strategy): diff --git a/tests/command_line/test_integrate.py b/tests/command_line/test_integrate.py index 36147348c0..530ef990de 100644 --- a/tests/command_line/test_integrate.py +++ b/tests/command_line/test_integrate.py @@ -1,6 +1,5 @@ from __future__ import annotations -import datetime import json import math import os From d7af803a43f1086509920d535a8051245fc26e82 Mon Sep 17 00:00:00 2001 From: DiamondLightSource-build-server 
Date: Wed, 19 Jun 2024 08:59:24 +0100 Subject: [PATCH 07/40] DIALS 3.20.0 Changelog towncrier --name=DIALS --version='3.20.0' --- CHANGELOG.rst | 33 +++++++++++++++++++++++++++++++++ newsfragments/2613.misc | 1 - newsfragments/2616.misc | 1 - newsfragments/2632.feature | 1 - newsfragments/2641.misc | 1 - newsfragments/2651.feature | 1 - newsfragments/2654.feature | 1 - newsfragments/2658.bugfix | 1 - newsfragments/2660.bugfix | 1 - newsfragments/2664.bugfix | 1 - newsfragments/2666.feature | 1 - newsfragments/2667.misc | 1 - newsfragments/2668.bugfix | 1 - newsfragments/2670.bugfix | 1 - newsfragments/2671.feature | 1 - newsfragments/2672.feature | 1 - newsfragments/2673.bugfix | 1 - newsfragments/2674.bugfix | 1 - newsfragments/2679.bugfix | 1 - 19 files changed, 33 insertions(+), 18 deletions(-) delete mode 100644 newsfragments/2613.misc delete mode 100644 newsfragments/2616.misc delete mode 100644 newsfragments/2632.feature delete mode 100644 newsfragments/2641.misc delete mode 100644 newsfragments/2651.feature delete mode 100644 newsfragments/2654.feature delete mode 100644 newsfragments/2658.bugfix delete mode 100644 newsfragments/2660.bugfix delete mode 100644 newsfragments/2664.bugfix delete mode 100644 newsfragments/2666.feature delete mode 100644 newsfragments/2667.misc delete mode 100644 newsfragments/2668.bugfix delete mode 100644 newsfragments/2670.bugfix delete mode 100644 newsfragments/2671.feature delete mode 100644 newsfragments/2672.feature delete mode 100644 newsfragments/2673.bugfix delete mode 100644 newsfragments/2674.bugfix delete mode 100644 newsfragments/2679.bugfix diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 19d271eed8..24d6876586 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -1,3 +1,36 @@ +DIALS 3.20.0 (2024-06-19) +========================= + +Features +-------- + +- New tool: ``dials.correlation_matrix``: A new command-line tool for correlation and cosine similarity clustering of multi-crystal datasets, independent of ``xia2.multiplex``. It provides HTML output, including clustering heatmaps, dendrograms and corresponding ``dials.cosym`` graphs. (`#2632 `_) +- DIALS is now compatible with Python 3.12. (`#2651 `_) +- ``dials.scale``: Add filtering options to default basic error model to allow error modelling of stills data. (`#2654 `_) +- ``dials.cosym``: Add alternative weighting during cosym CC calculation with a new ``cc_weights=`` option. (`#2666 `_) +- ``dials.correlation_matrix``: Add tables with cluster information to html output. (`#2671 `_) +- New tool: ``dials.split_still_data`` for splitting dials-processed still data based on image number (e.g. dose series). (`#2672 `_) + + +Bugfixes +-------- + +- ``dials.find_rotation_axis``: Correctly set the orientation of the rotation axis for a multi-axis goniometer. (`#2658 `_) +- Fix ``dials.show`` beam checks for time of flight experiments. (`#2660 `_) +- When masking, raise an error if ``d_min > d_max`` (where no spots would be found). (`#2664 `_) +- ``dials.cosym``: Make function return structure correctly, in the recently added ``cc_weights=`` option. (`#2668 `_) +- ``dials.find_rotation_axis``: Add reflection selection criteria, to avoid runs that use a very large amount of memory. (`#2670 `_) +- ``dials.cosym``: For ``cc_weights=sigma``, ensure correct filtering based on min_pairs parameters. (`#2673 `_) +- ``dials.cosym``: Fix to give more accurate cc calculation when running with a ``space_group=`` set. (`#2674 `_) +- Fix ``flex.reflection_table.concat``, to not modify in-place. 
(`#2679 `_) + + +Misc +---- + +- `#2613 `_, `#2616 `_, `#2641 `_, `#2667 `_ + + DIALS 3.19.1 (2024-05-23) ========================= diff --git a/newsfragments/2613.misc b/newsfragments/2613.misc deleted file mode 100644 index 45974be105..0000000000 --- a/newsfragments/2613.misc +++ /dev/null @@ -1 +0,0 @@ -Update cctbx-base dependency to 2024 versions. diff --git a/newsfragments/2616.misc b/newsfragments/2616.misc deleted file mode 100644 index 8cc6b8c058..0000000000 --- a/newsfragments/2616.misc +++ /dev/null @@ -1 +0,0 @@ -``dials.import``: add a test for distance overrides for a multi-panel detector. diff --git a/newsfragments/2632.feature b/newsfragments/2632.feature deleted file mode 100644 index 1d4515a7b8..0000000000 --- a/newsfragments/2632.feature +++ /dev/null @@ -1 +0,0 @@ -``dials.correlation_matrix``: A new command-line tool for correlation and cosine similarity clustering of multi-crystal datasets, independent of ``xia2.multiplex``. It provides HTML output, including clustering heatmaps, dendrograms and corresponding ``dials.cosym`` graphs. diff --git a/newsfragments/2641.misc b/newsfragments/2641.misc deleted file mode 100644 index e7fce4ab53..0000000000 --- a/newsfragments/2641.misc +++ /dev/null @@ -1 +0,0 @@ -Remove find_spots_server_client tests. diff --git a/newsfragments/2651.feature b/newsfragments/2651.feature deleted file mode 100644 index 312d214171..0000000000 --- a/newsfragments/2651.feature +++ /dev/null @@ -1 +0,0 @@ -DIALS is now compatible with Python 3.12. diff --git a/newsfragments/2654.feature b/newsfragments/2654.feature deleted file mode 100644 index a46d4d9535..0000000000 --- a/newsfragments/2654.feature +++ /dev/null @@ -1 +0,0 @@ -``dials.scale``: Add filtering options to default basic error model to allow error modelling of stills data diff --git a/newsfragments/2658.bugfix b/newsfragments/2658.bugfix deleted file mode 100644 index 6e9e6f86a0..0000000000 --- a/newsfragments/2658.bugfix +++ /dev/null @@ -1 +0,0 @@ -``dials.find_rotation_axis``: Correctly set the orientation of the rotation axis for a multi-axis goniometer. diff --git a/newsfragments/2660.bugfix b/newsfragments/2660.bugfix deleted file mode 100644 index 5e29f576d1..0000000000 --- a/newsfragments/2660.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix `dials.show` beam checks for time of flight experiments. 
diff --git a/newsfragments/2664.bugfix b/newsfragments/2664.bugfix deleted file mode 100644 index 99af372143..0000000000 --- a/newsfragments/2664.bugfix +++ /dev/null @@ -1 +0,0 @@ -d_min, d_max in masking: raise error if d_min > d_max so no spots will be found diff --git a/newsfragments/2666.feature b/newsfragments/2666.feature deleted file mode 100644 index 01b33456da..0000000000 --- a/newsfragments/2666.feature +++ /dev/null @@ -1 +0,0 @@ -``dials.cosym``: Add alternative weighting during cosym CC calculation diff --git a/newsfragments/2667.misc b/newsfragments/2667.misc deleted file mode 100644 index 6ea1b19665..0000000000 --- a/newsfragments/2667.misc +++ /dev/null @@ -1 +0,0 @@ -Reset ids in image grouping code to enable use in scripts diff --git a/newsfragments/2668.bugfix b/newsfragments/2668.bugfix deleted file mode 100644 index 00980f672f..0000000000 --- a/newsfragments/2668.bugfix +++ /dev/null @@ -1 +0,0 @@ -``dials.cosym``: Make function return structure correct in recently added cc_weights option diff --git a/newsfragments/2670.bugfix b/newsfragments/2670.bugfix deleted file mode 100644 index 2f76fa6ed4..0000000000 --- a/newsfragments/2670.bugfix +++ /dev/null @@ -1 +0,0 @@ -``dials.find_rotation_axis``: Add reflection selection criteria to avoid runs that use a very large amount of memory. diff --git a/newsfragments/2671.feature b/newsfragments/2671.feature deleted file mode 100644 index 8daf56a644..0000000000 --- a/newsfragments/2671.feature +++ /dev/null @@ -1 +0,0 @@ -``dials.correlation_matrix``: Add tables with cluster information to html output diff --git a/newsfragments/2672.feature b/newsfragments/2672.feature deleted file mode 100644 index e410954029..0000000000 --- a/newsfragments/2672.feature +++ /dev/null @@ -1 +0,0 @@ -XXX.feature: Add dials.split_still_data for splitting dials-processed still data based on image number (e.g. dose series) diff --git a/newsfragments/2673.bugfix b/newsfragments/2673.bugfix deleted file mode 100644 index 22cb041c81..0000000000 --- a/newsfragments/2673.bugfix +++ /dev/null @@ -1 +0,0 @@ -``dials.cosym``: For cc_weights=sigma, ensure correct filtering based on min_pairs parameters diff --git a/newsfragments/2674.bugfix b/newsfragments/2674.bugfix deleted file mode 100644 index 28100bbc86..0000000000 --- a/newsfragments/2674.bugfix +++ /dev/null @@ -1 +0,0 @@ -``dials.cosym``: Fix to give more accurate cc calculation when running with a space_group set diff --git a/newsfragments/2679.bugfix b/newsfragments/2679.bugfix deleted file mode 100644 index 2d9a35c7b1..0000000000 --- a/newsfragments/2679.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix flex.reflection_table.concat to not modify in-place. From db15ca229f8b832e24a6a2161fd3ef5afb2b4f49 Mon Sep 17 00:00:00 2001 From: David Waterman Date: Mon, 24 Jun 2024 19:00:42 +0100 Subject: [PATCH 08/40] Docstrings and type hints for `reindex_experiments` and `reindex_reflections` (#2683) Add docstrings and type hints for reindex_experiments and reindex_reflections. 
Fixes #2682 --- newsfragments/2683.bugfix | 3 +++ src/dials/util/reindex.py | 41 +++++++++++++++++++++++++++++++++++---- 2 files changed, 40 insertions(+), 4 deletions(-) create mode 100644 newsfragments/2683.bugfix diff --git a/newsfragments/2683.bugfix b/newsfragments/2683.bugfix new file mode 100644 index 0000000000..c9ffb58523 --- /dev/null +++ b/newsfragments/2683.bugfix @@ -0,0 +1,3 @@ +Docstrings and type hints are added to the ``reindex_experiments`` and +``reindex_reflections`` functions to make it easier to use these +outside the ``dials.reindex`` program. diff --git a/src/dials/util/reindex.py b/src/dials/util/reindex.py index 03fef9c76e..8342cb7bb4 100644 --- a/src/dials/util/reindex.py +++ b/src/dials/util/reindex.py @@ -4,6 +4,7 @@ import logging from cctbx import sgtbx +from dxtbx.model import ExperimentList from rstbx.symmetry.constraints import parameter_reduction from dials.algorithms.scaling.scaling_library import determine_best_unit_cell @@ -91,9 +92,24 @@ def change_of_basis_op_against_reference( return change_of_basis_op -def reindex_experiments(experiments, cb_op, space_group=None): - reindexed_experiments = copy.deepcopy(experiments) +def reindex_experiments( + experiments: ExperimentList, + cb_op: sgtbx.change_of_basis_op, + space_group: sgtbx.space_group | None = None, +) -> ExperimentList: + """ + Reindexes the given experiment list using the provided change of basis operator, and optionally set the space group. + + Args: + experiments (ExperimentList): The list of experiments to reindex. + cb_op (sgtbx.change_of_basis_op): The change of basis operator. + space_group (sgtbx.space_group | None, optional): The space group to set after reindexing. Defaults to None. + + Returns: + ExperimentList: The reindexed experiments. + """ + reindexed_experiments = copy.deepcopy(experiments) for crystal in reindexed_experiments.crystals(): cryst_reindexed = copy.deepcopy(crystal) if space_group is not None: @@ -123,9 +139,26 @@ def reindex_experiments(experiments, cb_op, space_group=None): return reindexed_experiments -def reindex_reflections(reflections, change_of_basis_op, hkl_offset=None): - reflections = flex.reflection_table.concat(reflections) +def reindex_reflections( + reflections: list[flex.reflection_table], + change_of_basis_op: sgtbx.change_of_basis_op, + hkl_offset: list[int] | None = None, +) -> flex.reflection_table: + """ + Reindexes reflection tables based on a change of basis operation and an optional HKL offset. + Args: + reflections (list[flex.reflection_table]): A list of reflection tables to be reindexed. + change_of_basis_op (sgtbx.change_of_basis_op): The change of basis operation to apply. + hkl_offset (list[int] | None, optional): An optional HKL offset to apply. Defaults to None. + + Returns: + flex.reflection_table: The reindexed reflection table. + + Removes reflections whose change of basis results in non-integral indices. + """ + + reflections = flex.reflection_table.concat(reflections) miller_indices = reflections["miller_index"] if hkl_offset is not None: From d04235dcaff221df5ca8f40f6d074aaf642b72ce Mon Sep 17 00:00:00 2001 From: David McDonagh <60879630+toastisme@users.noreply.github.com> Date: Mon, 24 Jun 2024 23:14:21 +0100 Subject: [PATCH 09/40] Enable time-of-flight indexing and Laue/ToF refinement (#2662) * Enable time-of-flight indexing and Laue/time-of-flight refinement. 
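For background on the conversions this patch depends on: for neutron time-of-flight data, wavelength and flight time are related by the de Broglie relation lambda = h * t / (m_n * L), where L is the flight path. The patch itself uses ``dxtbx.model.tof_helpers`` (``tof_from_wavelength``, ``tof_to_frame_interpolator``); the sketch below only illustrates the underlying physics using scipy's physical constants:

import scipy.constants as sc

def wavelength_from_tof(L_m: float, t_s: float) -> float:
    """Neutron wavelength in Angstroms for flight path L_m (m) and ToF t_s (s)."""
    return sc.h * t_s / (sc.m_n * L_m) * 1e10

print(wavelength_from_tof(10.0, 0.0025))  # ~0.99 Angstrom

This is why ``TOFExperimentsPredictor`` below adds the per-reflection ``s1`` norms to the sample-to-source distance (both converted to metres) before converting wavelength to time of flight: L must be the full source-to-detector path for each reflection.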
--- newsfragments/2662.feature | 1 + src/dials/algorithms/indexing/indexer.py | 36 +- .../indexing/lattice_search/__init__.py | 2 +- src/dials/algorithms/indexing/max_cell.py | 2 + .../algorithms/indexing/model_evaluation.py | 4 +- .../algorithms/indexing/nearest_neighbor.py | 18 +- .../parameterisation/beam_parameters.py | 142 +++++ .../refinement/parameterisation/configure.py | 28 +- .../parameterisation/prediction_parameters.py | 159 ++++- .../prediction/managed_predictors.py | 72 ++- src/dials/algorithms/refinement/refiner.py | 21 +- .../refinement/reflection_manager.py | 549 ++++++++++++++++-- src/dials/algorithms/refinement/target.py | 188 +++++- .../refinement/weighting_strategies.py | 46 ++ .../algorithms/spot_prediction/__init__.py | 16 + .../boost_python/ray_predictor.cc | 30 + .../boost_python/reflection_predictor.cc | 35 ++ .../boost_python/spot_prediction_ext.cc | 2 + .../spot_prediction/ray_predictor.h | 61 +- .../spot_prediction/reflection_predictor.h | 328 +++++++++++ src/dials/array_family/flex_ext.py | 43 +- .../refinement/test_finite_diffs.py | 388 ++++++++++--- .../refinement/test_orientation_refinement.py | 315 ++++++++-- .../refinement/test_prediction_parameters.py | 283 +++++++-- 24 files changed, 2513 insertions(+), 256 deletions(-) create mode 100644 newsfragments/2662.feature diff --git a/newsfragments/2662.feature b/newsfragments/2662.feature new file mode 100644 index 0000000000..5ae13cb41f --- /dev/null +++ b/newsfragments/2662.feature @@ -0,0 +1 @@ +Add classes to support time-of-flight and Laue indexing and refinement. diff --git a/src/dials/algorithms/indexing/indexer.py b/src/dials/algorithms/indexing/indexer.py index 3b3c1c4bb0..219963e7ae 100644 --- a/src/dials/algorithms/indexing/indexer.py +++ b/src/dials/algorithms/indexing/indexer.py @@ -8,7 +8,7 @@ import iotbx.phil import libtbx from cctbx import sgtbx -from dxtbx.model import ExperimentList, ImageSequence +from dxtbx.model import ExperimentList, ImageSequence, tof_helpers import dials.util from dials.algorithms.indexing import ( @@ -873,7 +873,7 @@ def _xyzcal_mm_to_px(self, experiments, reflections): refined_reflections = reflections.select(imgset_sel) panel_numbers = flex.size_t(refined_reflections["panel"]) xyzcal_mm = refined_reflections["xyzcal.mm"] - x_mm, y_mm, z_rad = xyzcal_mm.parts() + x_mm, y_mm, z = xyzcal_mm.parts() xy_cal_mm = flex.vec2_double(x_mm, y_mm) xy_cal_px = flex.vec2_double(len(xy_cal_mm)) for i_panel in range(len(expt.detector)): @@ -884,10 +884,18 @@ def _xyzcal_mm_to_px(self, experiments, reflections): ) x_px, y_px = xy_cal_px.parts() if expt.scan is not None: - z_px = expt.scan.get_array_index_from_angle(z_rad, deg=False) + if expt.scan.has_property("time_of_flight"): + tof = expt.scan.get_property("time_of_flight") + frames = list(range(len(tof))) + tof_to_frame = tof_helpers.tof_to_frame_interpolator(tof, frames) + z.set_selected(z < min(tof), min(tof)) + z.set_selected(z > max(tof), max(tof)) + z_px = flex.double(tof_to_frame(z)) + else: + z_px = expt.scan.get_array_index_from_angle(z, deg=False) else: # must be a still image, z centroid not meaningful - z_px = z_rad + z_px = z xyzcal_px = flex.vec3_double(x_px, y_px, z_px) reflections["xyzcal.px"].set_selected(imgset_sel, xyzcal_px) @@ -941,6 +949,25 @@ def find_max_cell(self): self.params.max_cell = params.multiplier * max(uc_params[:3]) logger.info("Using max_cell: %.1f Angstrom", self.params.max_cell) else: + + convert_reflections_z_to_deg = True + all_tof_experiments = False + for expt in self.experiments: + if 
expt.scan is not None and expt.scan.has_property( + "time_of_flight" + ): + all_tof_experiments = True + elif all_tof_experiments: + raise ValueError( + "Cannot find max cell for ToF and non-ToF experiments at the same time" + ) + + if all_tof_experiments: + if params.step_size < 100: + logger.info("Setting default ToF step size to 500 usec") + params.step_size = 500 + convert_reflections_z_to_deg = False + self.params.max_cell = find_max_cell( self.reflections, max_cell_multiplier=params.multiplier, @@ -952,6 +979,7 @@ def find_max_cell(self): filter_ice=params.filter_ice, filter_overlaps=params.filter_overlaps, overlaps_border=params.overlaps_border, + convert_reflections_z_to_deg=convert_reflections_z_to_deg, ).max_cell logger.info("Found max_cell: %.1f Angstrom", self.params.max_cell) diff --git a/src/dials/algorithms/indexing/lattice_search/__init__.py b/src/dials/algorithms/indexing/lattice_search/__init__.py index f579e2bdfc..c4b9ffc8c2 100644 --- a/src/dials/algorithms/indexing/lattice_search/__init__.py +++ b/src/dials/algorithms/indexing/lattice_search/__init__.py @@ -194,7 +194,7 @@ def choose_best_orientation_matrix(self, candidate_orientation_matrices): experiments = ExperimentList() for i_expt, expt in enumerate(self.experiments): # XXX Not sure if we still need this loop over self.experiments - if expt.scan is not None: + if expt.scan is not None and expt.scan.has_property("oscillation"): start, end = expt.scan.get_oscillation_range() if (end - start) > 360: # only use reflections from the first 360 degrees of the scan diff --git a/src/dials/algorithms/indexing/max_cell.py b/src/dials/algorithms/indexing/max_cell.py index 7c45cc6d5d..dccb542abf 100644 --- a/src/dials/algorithms/indexing/max_cell.py +++ b/src/dials/algorithms/indexing/max_cell.py @@ -22,6 +22,7 @@ def find_max_cell( filter_ice=True, filter_overlaps=True, overlaps_border=0, + convert_reflections_z_to_deg=True, ): logger.debug("Finding suitable max_cell based on %i reflections", len(reflections)) # Exclude potential ice-ring spots from nearest neighbour analysis if needed @@ -63,6 +64,7 @@ def find_max_cell( percentile=nearest_neighbor_percentile, histogram_binning=histogram_binning, nn_per_bin=nn_per_bin, + convert_reflections_z_to_deg=convert_reflections_z_to_deg, ) except AssertionError as e: raise DialsIndexError("Failure in nearest neighbour analysis:\n" + str(e)) diff --git a/src/dials/algorithms/indexing/model_evaluation.py b/src/dials/algorithms/indexing/model_evaluation.py index 8e6008dc41..b31801603f 100644 --- a/src/dials/algorithms/indexing/model_evaluation.py +++ b/src/dials/algorithms/indexing/model_evaluation.py @@ -223,7 +223,7 @@ def score_by_volume(self, reverse=False): def score_by_rmsd_xy(self, reverse=False): # smaller rmsds = better rmsd_x, rmsd_y, rmsd_z = flex.vec3_double( - s.rmsds for s in self.all_solutions + s.rmsds[:3] for s in self.all_solutions ).parts() rmsd_xy = flex.sqrt(flex.pow2(rmsd_x) + flex.pow2(rmsd_y)) score = flex.log(rmsd_xy) / math.log(2) @@ -275,7 +275,7 @@ def __str__(self): perm = flex.sort_permutation(combined_scores) rmsd_x, rmsd_y, rmsd_z = flex.vec3_double( - s.rmsds for s in self.all_solutions + s.rmsds[:3] for s in self.all_solutions ).parts() rmsd_xy = flex.sqrt(flex.pow2(rmsd_x) + flex.pow2(rmsd_y)) diff --git a/src/dials/algorithms/indexing/nearest_neighbor.py b/src/dials/algorithms/indexing/nearest_neighbor.py index 4d5de82d88..2e5c0af2e1 100644 --- a/src/dials/algorithms/indexing/nearest_neighbor.py +++ b/src/dials/algorithms/indexing/nearest_neighbor.py 
@@ -13,6 +13,7 @@ def __init__( percentile=None, histogram_binning="linear", nn_per_bin=5, + convert_reflections_z_to_deg=True, ): self.tolerance = tolerance # Margin of error for max unit cell estimate from scitbx.array_family import flex @@ -28,7 +29,10 @@ def __init__( else: entering_flags = flex.bool(reflections.size(), True) rs_vectors = reflections["rlp"] - phi_deg = reflections["xyzobs.mm.value"].parts()[2] * (180 / math.pi) + + z = reflections["xyzobs.mm.value"].parts()[2] + if convert_reflections_z_to_deg: + z = z * (180 / math.pi) d_spacings = flex.double() # nearest neighbor analysis @@ -38,16 +42,16 @@ def __init__( sel_imageset = reflections["imageset_id"] == imageset_id if sel_imageset.count(True) == 0: continue - phi_min = flex.min(phi_deg.select(sel_imageset)) - phi_max = flex.max(phi_deg.select(sel_imageset)) - d_phi = phi_max - phi_min - n_steps = max(int(math.ceil(d_phi / step_size)), 1) + z_min = flex.min(z.select(sel_imageset)) + z_max = flex.max(z.select(sel_imageset)) + d_z = z_max - z_min + n_steps = max(int(math.ceil(d_z / step_size)), 1) for n in range(n_steps): sel_step = ( sel_imageset - & (phi_deg >= (phi_min + n * step_size)) - & (phi_deg < (phi_min + (n + 1) * step_size)) + & (z >= (z_min + n * step_size)) + & (z < (z_min + (n + 1) * step_size)) ) for entering in (True, False): diff --git a/src/dials/algorithms/refinement/parameterisation/beam_parameters.py b/src/dials/algorithms/refinement/parameterisation/beam_parameters.py index c0531e82fe..ce1deeff3c 100644 --- a/src/dials/algorithms/refinement/parameterisation/beam_parameters.py +++ b/src/dials/algorithms/refinement/parameterisation/beam_parameters.py @@ -156,3 +156,145 @@ def get_state(self): # only a single beam exists, so no multi_state_elt argument is allowed return matrix.col(self._model.get_s0()) + + +class LaueBeamMixin: + """Mix-in class defining some functionality unique to Laue beam parameterisations + that can be shared by static and scan-varying versions""" + + @staticmethod + def _build_p_list(unit_s0, goniometer, parameter_type=Parameter): + """Build the list of parameters, using the parameter_type callback to + select between versions of the Parameter class""" + + # Set up the parameters + if goniometer: + spindle = matrix.col(goniometer.get_rotation_axis()) + unit_s0_plane_dir2 = unit_s0.cross(spindle).normalize() + unit_s0_plane_dir1 = unit_s0_plane_dir2.cross(unit_s0).normalize() + else: + unit_s0_plane_dir1 = unit_s0.ortho().normalize() + unit_s0_plane_dir2 = unit_s0.cross(unit_s0_plane_dir1).normalize() + + # rotation around unit_s0_plane_dir1 + mu1 = parameter_type(0.0, unit_s0_plane_dir1, "angle (mrad)", "Mu1") + # rotation around unit_s0_plane_dir2 + mu2 = parameter_type(0.0, unit_s0_plane_dir2, "angle (mrad)", "Mu2") + + # build the parameter list in a specific, maintained order + p_list = [mu1, mu2] + + return p_list + + @staticmethod + def _compose_core(is0, ipn, mu1, mu2, mu1_axis, mu2_axis): + + # convert angles to radians + mu1rad, mu2rad = mu1 / 1000.0, mu2 / 1000.0 + + # compose rotation matrices and their first order derivatives + Mu1 = (mu1_axis).axis_and_angle_as_r3_rotation_matrix(mu1rad, deg=False) + dMu1_dmu1 = dR_from_axis_and_angle(mu1_axis, mu1rad, deg=False) + + Mu2 = (mu2_axis).axis_and_angle_as_r3_rotation_matrix(mu2rad, deg=False) + dMu2_dmu2 = dR_from_axis_and_angle(mu2_axis, mu2rad, deg=False) + + # compose new state + Mu21 = Mu2 * Mu1 + unit_s0 = (Mu21 * is0).normalize() + pn_new_dir = (Mu21 * ipn).normalize() + + # calculate derivatives of the beam 
direction wrt angles: + # 1) derivative wrt mu1 + dMu21_dmu1 = Mu2 * dMu1_dmu1 + dunit_s0_new_dir_dmu1 = dMu21_dmu1 * is0 + + # 2) derivative wrt mu2 + dMu21_dmu2 = dMu2_dmu2 * Mu1 + dunit_s0_new_dir_dmu2 = dMu21_dmu2 * is0 + + # calculate derivatives of the attached beam vector, converting + # parameters back to mrad + dunit_s0_dval = [ + dunit_s0_new_dir_dmu1 / 1000.0, + dunit_s0_new_dir_dmu2 / 1000.0, + unit_s0, + ] + + return (unit_s0, pn_new_dir), dunit_s0_dval + + +class LaueBeamParameterisation(ModelParameterisation, LaueBeamMixin): + """A parameterisation of a Laue Beam model, where wavelength is ignored. + + The Beam direction is parameterised using angles expressed in + mrad. A goniometer can be provided (if + present in the experiment) to ensure a consistent definition of the beam + rotation angles with respect to the spindle-beam plane.""" + + def __init__(self, beam, goniometer=None, experiment_ids=None): + """Initialise the BeamParameterisation object + + Args: + beam: A dxtbx PolychromaticBeam object to be parameterised. + goniometer: An optional dxtbx Goniometer object. Defaults to None. + experiment_ids (list): The experiment IDs affected by this + parameterisation. Defaults to None, which is replaced by [0]. + """ + # The state of the beam model consists of the unit s0 vector that it is + # modelling. The initial state is the direction of this vector at the point + # of initialisation, plus the direction of the orthogonal polarization + # normal vector. Future states are composed by rotations around axes + # perpendicular to that direction. + + # Set up the initial state + if experiment_ids is None: + experiment_ids = [0] + unit_s0 = matrix.col(beam.get_unit_s0()) + istate = { + "unit_s0": matrix.col(unit_s0), + "polarization_normal": matrix.col(beam.get_polarization_normal()), + } + + # build the parameter list + p_list = self._build_p_list(unit_s0, goniometer) + + # set up the base class + ModelParameterisation.__init__( + self, beam, istate, p_list, experiment_ids=experiment_ids + ) + + # call compose to calculate all the derivatives + self.compose() + + return + + def compose(self): + + # extract direction from the initial state + ius0 = self._initial_state["unit_s0"] + ipn = self._initial_state["polarization_normal"] + + # extract parameters from the internal list + mu1, mu2 = self._param + + # calculate new s0 and derivatives + (unit_s0, pn), self._dstate_dp = self._compose_core( + ius0, + ipn, + mu1.value, + mu2.value, + mu1_axis=mu1.axis, + mu2_axis=mu2.axis, + ) + + # now update the model with its new s0 and polarization vector + self._model.set_unit_s0(unit_s0) + self._model.set_polarization_normal(pn) + + return + + def get_state(self): + + # only a single beam exists, so no multi_state_elt argument is allowed + return matrix.col(self._model.get_unit_s0()) diff --git a/src/dials/algorithms/refinement/parameterisation/configure.py b/src/dials/algorithms/refinement/parameterisation/configure.py index 2a86f82dd9..91b1c46d84 100644 --- a/src/dials/algorithms/refinement/parameterisation/configure.py +++ b/src/dials/algorithms/refinement/parameterisation/configure.py @@ -4,6 +4,7 @@ import re import libtbx +from dxtbx.model import PolychromaticBeam from libtbx.phil import parse from dials.algorithms.refinement import DialsRefineConfigError @@ -15,11 +16,12 @@ phil_str as sv_phil_str, ) from dials.algorithms.refinement.refinement_helpers import string_sel +from dials.algorithms.refinement.reflection_manager import LaueReflectionManager from 
dials.algorithms.refinement.restraints.restraints_parameterisation import ( uc_phil_str as uc_restraints_phil_str, ) -from .beam_parameters import BeamParameterisation +from .beam_parameters import BeamParameterisation, LaueBeamParameterisation from .crystal_parameters import ( CrystalOrientationParameterisation, CrystalUnitCellParameterisation, @@ -31,6 +33,7 @@ ) from .goniometer_parameters import GoniometerParameterisation from .prediction_parameters import ( + LauePredictionParameterisation, XYPhiPredictionParameterisation, XYPhiPredictionParameterisationSparse, ) @@ -433,8 +436,15 @@ def _parameterise_beams(options, experiments, analysis): experiment_ids=exp_ids, ) else: - # Parameterise scan static beam, passing the goniometer - beam_param = BeamParameterisation(beam, goniometer, experiment_ids=exp_ids) + if isinstance(beam, PolychromaticBeam): + beam_param = LaueBeamParameterisation( + beam, goniometer, experiment_ids=exp_ids + ) + else: + # Parameterise scan static beam, passing the goniometer + beam_param = BeamParameterisation( + beam, goniometer, experiment_ids=exp_ids + ) # Set the model identifier to name the parameterisation beam_param.model_identifier = f"Beam{ibeam + 1}" @@ -454,7 +464,9 @@ def _parameterise_beams(options, experiments, analysis): fix_list.append("Mu1") if "out_spindle_plane" in options.beam.fix: fix_list.append("Mu2") - if "wavelength" in options.beam.fix: + if "wavelength" in options.beam.fix and not isinstance( + beam, PolychromaticBeam + ): fix_list.append("nu") if fix_list: @@ -818,10 +830,16 @@ def build_prediction_parameterisation( analysis = _centroid_analysis(options, experiments, reflection_manager) # Parameterise each unique model - beam_params = _parameterise_beams(options, experiments, analysis) xl_ori_params, xl_uc_params = _parameterise_crystals(options, experiments, analysis) det_params = _parameterise_detectors(options, experiments, analysis) gon_params = _parameterise_goniometers(options, experiments, analysis) + beam_params = _parameterise_beams(options, experiments, analysis) + + if isinstance(reflection_manager, LaueReflectionManager): + PredParam = LauePredictionParameterisation + return PredParam( + experiments, det_params, beam_params, xl_ori_params, xl_uc_params + ) # Build the prediction equation parameterisation if do_stills: # doing stills diff --git a/src/dials/algorithms/refinement/parameterisation/prediction_parameters.py b/src/dials/algorithms/refinement/parameterisation/prediction_parameters.py index d5b421faa2..f85be0ed11 100644 --- a/src/dials/algorithms/refinement/parameterisation/prediction_parameters.py +++ b/src/dials/algorithms/refinement/parameterisation/prediction_parameters.py @@ -418,8 +418,13 @@ def _get_model_data_for_experiment(self, experiment, reflections): sel = panels == ipanel D.set_selected(sel, D_mat) + if "s0" in reflections: + s0 = reflections["s0"] + else: + s0 = experiment.beam.get_s0() + result = { - "s0": experiment.beam.get_s0(), + "s0": s0, "U": matrix.sqr(experiment.crystal.get_U()), "B": matrix.sqr(experiment.crystal.get_B()), "D": D, @@ -967,3 +972,155 @@ class XYPhiPredictionParameterisationSparse( @staticmethod def _extend_gradient_vectors(results, m, n, keys=("dX_dp", "dY_dp", "dZ_dp")): return SparseGradientVectorMixin._extend_gradient_vectors(results, m, n, keys) + + +class LauePredictionParameterisation(PredictionParameterisation): + """A basic extension to PredictionParameterisation for Laue data, + where gradients for the wavelength of each reflection are also considered.""" + + 
_grad_names = ("dX_dp", "dY_dp", "dwavelength_dp") + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + return + + def _local_setup(self, reflections): + self._wavelength = reflections["wavelength_cal"] + self._r = self._setting_rotation * self._fixed_rotation * self._UB * self._h + self._s0 = reflections["s0_cal"] + self._e_X_r = (self._setting_rotation * self._axis).cross(self._r) + self._e_r_s0 = (self._e_X_r).dot(self._s0) + self._ds0_dbeam_p = None + return + + def _beam_derivatives( + self, isel, parameterisation=None, ds0_dbeam_p=None, reflections=None + ): + """helper function to extend the derivatives lists by derivatives of the + beam parameterisations.""" + + # Get required data + r = self._r.select(isel) + e_X_r = self._e_X_r.select(isel) + e_r_s0 = self._e_r_s0.select(isel) + D = self._D.select(isel) + + if ds0_dbeam_p is None: + + # get the derivatives of the beam vector wrt the parameters + ds0_dbeam_p = parameterisation.get_ds_dp(use_none_as_null=True) + + ds0_dbeam_p = [ + None if e is None else flex.vec3_double(len(r), e.elems) + for e in ds0_dbeam_p + ] + self._ds0_dbeam_p = ds0_dbeam_p + + dphi_dp = [] + dpv_dp = [] + + # loop through the parameters + for der in ds0_dbeam_p: + + if der is None: + dphi_dp.append(None) + dpv_dp.append(None) + continue + + # calculate the derivative of phi for this parameter + dphi = (der.dot(r) / e_r_s0) * -1.0 + dphi_dp.append(dphi) + + # calculate the derivative of pv for this parameter + dpv_dp.append(D * (e_X_r * dphi + der)) + + return dpv_dp, dphi_dp + + def _xl_derivatives(self, isel, derivatives, b_matrix, parameterisation=None): + """helper function to extend the derivatives lists by derivatives of + generic parameterisations.""" + + # Get required data + h = self._h.select(isel) + if b_matrix: + B = self._B.select(isel) + else: + U = self._U.select(isel) + D = self._D.select(isel) + wavelength = self._wavelength.select(isel) + fixed_rotation = self._fixed_rotation.select(isel) + setting_rotation = self._setting_rotation.select(isel) + r = self._r.select(isel) + + if derivatives is None: + # get derivatives of the B/U matrix wrt the parameters + derivatives = [ + None if der is None else flex.mat3_double(len(isel), der.elems) + for der in parameterisation.get_ds_dp(use_none_as_null=True) + ] + + dpv_dp = [] + dwavelength_dp = [] + + # loop through the parameters + for idx, der in enumerate(derivatives): + if der is None: + dpv_dp.append(None) + dwavelength_dp.append(None) + continue + + # calculate the derivative of r for this parameter + if b_matrix: + dr = setting_rotation * fixed_rotation * der * B * h + else: + dr = setting_rotation * fixed_rotation * U * der * h + + unit_s0 = flex.vec3_double(len(dr), self._experiments[0].beam.get_unit_s0()) + r_dot_r = r.dot(r) + dwavelength = -2 * unit_s0.dot( + ((dr * r_dot_r) - ((2 * r) * dr.dot(r))) / (r_dot_r**2) + ) + dwavelength_dp.append(dwavelength) + + # calculate the derivative of pv for this parameter + dpv_dp.append(D * (dr - (unit_s0 / wavelength**2) * dwavelength)) + + return dpv_dp, dwavelength_dp + + def _xl_orientation_derivatives( + self, isel, parameterisation=None, dU_dxlo_p=None, reflections=None + ): + """helper function to extend the derivatives lists by derivatives of the + crystal orientation parameterisations""" + return self._xl_derivatives( + isel, dU_dxlo_p, b_matrix=True, parameterisation=parameterisation + ) + + def _xl_unit_cell_derivatives( + self, isel, parameterisation=None, dB_dxluc_p=None, reflections=None + ): + """helper 
function to extend the derivatives lists by + derivatives of the crystal unit cell parameterisations""" + return self._xl_derivatives( + isel, dB_dxluc_p, b_matrix=False, parameterisation=parameterisation + ) + + @staticmethod + def _calc_dX_dp_and_dY_dp_from_dpv_dp(w_inv, u_w_inv, v_w_inv, dpv_dp): + """helper function to calculate positional derivatives from + dpv_dp using the quotient rule""" + + dX_dp = [] + dY_dp = [] + + for der in dpv_dp: + if der is None: + dX_dp.append(None) + dY_dp.append(None) + else: + du_dp, dv_dp, dw_dp = der.parts() + + dX_dp.append(w_inv * (du_dp - dw_dp * u_w_inv)) + dY_dp.append(w_inv * (dv_dp - dw_dp * v_w_inv)) + + return dX_dp, dY_dp diff --git a/src/dials/algorithms/refinement/prediction/managed_predictors.py b/src/dials/algorithms/refinement/prediction/managed_predictors.py index 97833e73f4..a4f5ca6e35 100644 --- a/src/dials/algorithms/refinement/prediction/managed_predictors.py +++ b/src/dials/algorithms/refinement/prediction/managed_predictors.py @@ -11,9 +11,13 @@ from math import pi +from dxtbx.model import tof_helpers from scitbx.array_family import flex -from dials.algorithms.spot_prediction import ScanStaticRayPredictor +from dials.algorithms.spot_prediction import ( + LaueReflectionPredictor, + ScanStaticRayPredictor, +) from dials.algorithms.spot_prediction import ScanStaticReflectionPredictor as sc from dials.algorithms.spot_prediction import ScanVaryingReflectionPredictor as sv from dials.algorithms.spot_prediction import StillsReflectionPredictor as st @@ -83,6 +87,7 @@ def __call__(self, reflections): refs = reflections.select(sel) self._predict_one_experiment(e, refs) + refs = self._post_predict_one_experiment(e, refs) # write predictions back to overall reflections reflections.set_selected(sel, refs) @@ -95,6 +100,9 @@ def _predict_one_experiment(self, experiment, reflections): raise NotImplementedError() + def _post_predict_one_experiment(self, experiment, reflections): + return reflections + def _post_prediction(self, reflections): """Perform tasks on the whole reflection list after prediction before returning.""" @@ -165,6 +173,52 @@ def _predict_one_experiment(self, experiment, reflections): predictor.for_reflection_table(reflections, UB) +class LaueExperimentsPredictor(ExperimentsPredictor): + def _predict_one_experiment(self, experiment, reflections): + + min_s0_idx = min( + range(len(reflections["wavelength"])), + key=reflections["wavelength"].__getitem__, + ) + + if "s0" not in reflections: + unit_s0 = experiment.beam.get_unit_s0() + wl = reflections["wavelength"][min_s0_idx] + min_s0 = (unit_s0[0] / wl, unit_s0[1] / wl, unit_s0[2] / wl) + else: + min_s0 = reflections["s0"][min_s0_idx] + + dmin = experiment.detector.get_max_resolution(min_s0) + predictor = LaueReflectionPredictor(experiment, dmin) + UB = experiment.crystal.get_A() + predictor.for_reflection_table(reflections, UB) + + +class TOFExperimentsPredictor(LaueExperimentsPredictor): + def _post_predict_one_experiment(self, experiment, reflections): + + # Add ToF to xyzcal.mm + wavelength_cal = reflections["wavelength_cal"] + distance = experiment.beam.get_sample_to_source_distance() * 10**-3 + distance = distance + (reflections["s1"].norms() * 10**-3) + tof_cal = tof_helpers.tof_from_wavelength(distance, wavelength_cal) # (s) + x, y, z = reflections["xyzcal.mm"].parts() + tof_cal = tof_cal * 1e6 # (usec) + reflections["xyzcal.mm"] = flex.vec3_double(x, y, tof_cal) + + # Add frame to xyzcal.px + expt_tof = experiment.scan.get_property("time_of_flight") # (usec) + frames 
= list(range(len(expt_tof))) + tof_to_frame = tof_helpers.tof_to_frame_interpolator(expt_tof, frames) + tof_cal.set_selected(tof_cal < min(expt_tof), min(expt_tof)) + tof_cal.set_selected(tof_cal > max(expt_tof), max(expt_tof)) + reflection_frames = flex.double(tof_to_frame(tof_cal)) + px, py, pz = reflections["xyzcal.px"].parts() + reflections["xyzcal.px"] = flex.vec3_double(px, py, reflection_frames) + + return reflections + + class ExperimentsPredictorFactory: @staticmethod def from_experiments(experiments, force_stills=False, spherical_relp=False): @@ -180,7 +234,21 @@ def from_experiments(experiments, force_stills=False, spherical_relp=False): if force_stills: predictor = StillsExperimentsPredictor(experiments) predictor.spherical_relp_model = spherical_relp + else: - predictor = ScansExperimentsPredictor(experiments) + + all_tof_experiments = False + for expt in experiments: + if expt.scan is not None and expt.scan.has_property("time_of_flight"): + all_tof_experiments = True + elif all_tof_experiments: + raise ValueError( + "Cannot create ExperimentsPredictor for ToF and non-ToF experiments at the same time" + ) + + if all_tof_experiments: + predictor = TOFExperimentsPredictor(experiments) + else: + predictor = ScansExperimentsPredictor(experiments) return predictor diff --git a/src/dials/algorithms/refinement/refiner.py b/src/dials/algorithms/refinement/refiner.py index a61b6204f5..5bb05ef715 100644 --- a/src/dials/algorithms/refinement/refiner.py +++ b/src/dials/algorithms/refinement/refiner.py @@ -244,6 +244,9 @@ def _filter_reflections(reflections): "flags", "shoebox", "delpsical.weights", + "wavelength", + "wavelength_cal", + "s0", ] # NB xyzobs.px.value & xyzcal.px required by SauterPoon outlier rejector # NB delpsical.weights is used by ExternalDelPsiWeightingStrategy @@ -383,6 +386,7 @@ def _build_reflection_manager_and_predictor(cls, params, reflections, experiment obs["x_resid"] = x_calc - x_obs obs["y_resid"] = y_calc - y_obs obs["phi_resid"] = phi_calc - phi_obs + refman.update_residuals() # determine whether to do basic centroid analysis to automatically # determine outlier rejection block @@ -832,6 +836,9 @@ def print_step_table(self): if units == "mm": header.append(name + "\n(mm)") rmsd_multipliers.append(1.0) + elif units == "A": + header.append(name + "\n(A)") + rmsd_multipliers.append(1.0) elif units == "rad": # convert radians to degrees for reporting header.append(name + "\n(deg)") rmsd_multipliers.append(RAD2DEG) @@ -907,6 +914,8 @@ def calc_exp_rmsd_table(self): # will convert other angles in radians to degrees (e.g. 
for # RMSD_DeltaPsi and RMSD_2theta) header.append(name + "\n(deg)") + elif name == "RMSD_wavelength" and units == "frame": + header.append(name + "\n(frame)") else: # skip other/unknown RMSDs pass @@ -928,7 +937,7 @@ def calc_exp_rmsd_table(self): scan = exp.scan try: images_per_rad = 1.0 / abs(scan.get_oscillation(deg=False)[1]) - except (AttributeError, ZeroDivisionError): + except (AttributeError, ZeroDivisionError, RuntimeError): images_per_rad = None raw_rmsds = self._target.rmsds_for_experiment(iexp) @@ -945,6 +954,8 @@ def calc_exp_rmsd_table(self): rmsds.append(rmsd * px_per_mm[1]) elif name == "RMSD_Phi" and units == "rad": rmsds.append(rmsd * images_per_rad) + elif name == "RMSD_wavelength" and units == "frame": + rmsds.append(rmsd) elif units == "rad": rmsds.append(rmsd * RAD2DEG) rows.append([str(iexp), str(num)] + [f"{r:.5g}" for r in rmsds]) @@ -967,14 +978,14 @@ def print_exp_rmsd_table(self): def print_panel_rmsd_table(self): """print useful output about refinement steps in the form of a simple table""" - if len(self._experiments.scans()) > 1: + if len(self._experiments.scans()) > 1 and not self._experiments.all_tof(): logger.warning( "Multiple scans present. Only the first scan will be used " "to determine the image width for reporting RMSDs" ) scan = self._experiments.scans()[0] images_per_rad = None - if scan: + if scan and scan.has_property("oscillation"): if scan.get_oscillation(deg=False)[1] != 0.0: images_per_rad = 1.0 / abs(scan.get_oscillation(deg=False)[1]) @@ -995,6 +1006,8 @@ def print_panel_rmsd_table(self): name == "RMSD_DeltaPsi" and units == "rad" ): # convert radians to degrees for reporting of stills header.append(name + "\n(deg)") + elif name == "RMSD_wavelength" and units == "frame": + header.append(name + "\n(frame)") else: # skip RMSDs that cannot be expressed in image/scan space pass @@ -1021,6 +1034,8 @@ def print_panel_rmsd_table(self): rmsds.append(rmsd * images_per_rad) elif name == "RMSD_DeltaPsi" and units == "rad": rmsds.append(rmsd * RAD2DEG) + elif name == "RMSD_wavelength" and units == "frame": + rmsds.append(rmsd) rows.append([str(ipanel), str(num)] + [f"{r:.5g}" for r in rmsds]) if len(rows) > 0: diff --git a/src/dials/algorithms/refinement/reflection_manager.py b/src/dials/algorithms/refinement/reflection_manager.py index 89f042cf55..2f3c01790d 100644 --- a/src/dials/algorithms/refinement/reflection_manager.py +++ b/src/dials/algorithms/refinement/reflection_manager.py @@ -8,6 +8,8 @@ import random import libtbx +from dxtbx.model import tof_helpers +from dxtbx.model.experiment_list import ExperimentList from libtbx.phil import parse from scitbx import matrix from scitbx.math import five_number_summary @@ -102,6 +104,10 @@ "whether the case is for stills or scans. The default gives" "unit weighting." 
.type = floats(size = 3, value_min = 0) + wavelength_weight = 1e4 + .help = "Weight for the wavelength term in the target function for" + "Laue refinement" + .type = float(value_min = 0) } %(outlier_phil)s @@ -228,32 +234,93 @@ def from_parameters_reflections_experiments( flex.set_random_seed(params.random_seed) logger.debug("Random seed set to %d", params.random_seed) - # check whether we deal with stills or scans - if do_stills: - refman = StillsReflectionManager - # check incompatible weighting strategy - if params.weighting_strategy.override == "statistical": - raise DialsRefineConfigError( - 'The "statistical" weighting strategy is not compatible ' - "with stills refinement" - ) + if "wavelength" in reflections: + return ReflectionManagerFactory.laue_manager( + experiments, reflections, params + ) + + elif do_stills: + return ReflectionManagerFactory.stills_manager( + experiments, reflections, params + ) + else: - refman = ReflectionManager - # check incompatible weighting strategy - if params.weighting_strategy.override in ["stills", "external_deltapsi"]: - msg = ( - 'The "{}" weighting strategy is not compatible with ' - "scan refinement" - ).format(params.weighting_strategy.override) - raise DialsRefineConfigError(msg) - - # set automatic outlier rejection options + return ReflectionManagerFactory.rotation_scan_manager( + experiments, reflections, params + ) + + @staticmethod + def stills_manager( + experiments: ExperimentList, + reflections: flex.reflection_table, + params: libtbx.phil.scope_extract, + ) -> StillsReflectionManager: + + refman = StillsReflectionManager + + ## Outlier detection + if params.outlier.algorithm in ("auto", libtbx.Auto): - if do_stills: - params.outlier.algorithm = "sauter_poon" - else: - params.outlier.algorithm = "mcd" + params.outlier.algorithm = "sauter_poon" + if params.outlier.sauter_poon.px_sz is libtbx.Auto: + # get this from the first panel of the first detector + params.outlier.sauter_poon.px_sz = experiments.detectors()[0][ + 0 + ].get_pixel_size() + + if params.outlier.algorithm in ("null", None): + outlier_detector = None + else: + colnames = ["x_resid", "y_resid"] + params.outlier.block_width = None + from dials.algorithms.refinement.outlier_detection import ( + CentroidOutlierFactory, + ) + + outlier_detector = CentroidOutlierFactory.from_parameters_and_colnames( + params, colnames + ) + + ## Weighting strategy + + # check incompatible weighting strategy + if params.weighting_strategy.override == "statistical": + raise DialsRefineConfigError( + 'The "statistical" weighting strategy is not compatible ' + "with stills refinement" + ) + if params.weighting_strategy.override == "constant": + params.weighting_strategy.override = "constant_stills" + + weighting_strategy = ReflectionManagerFactory.get_weighting_strategy_override( + params + ) + + return refman( + reflections=reflections, + experiments=experiments, + nref_per_degree=params.reflections_per_degree, + max_sample_size=params.maximum_sample_size, + min_sample_size=params.minimum_sample_size, + close_to_spindle_cutoff=params.close_to_spindle_cutoff, + scan_margin=params.scan_margin, + outlier_detector=outlier_detector, + weighting_strategy_override=weighting_strategy, + ) + + @staticmethod + def rotation_scan_manager( + experiments: ExperimentList, + reflections: flex.reflection_table, + params: libtbx.phil.scope_extract, + ) -> ReflectionManager: + + refman = ReflectionManager + + ## Outlier detection + if params.outlier.algorithm in ("auto", libtbx.Auto): + params.outlier.algorithm = 
"mcd" if params.outlier.algorithm == "sauter_poon": if params.outlier.sauter_poon.px_sz is libtbx.Auto: # get this from the first panel of the first detector @@ -261,15 +328,19 @@ def from_parameters_reflections_experiments( 0 ].get_pixel_size() - # do outlier rejection? + ## Weighting strategy + + # check incompatible weighting strategy + if params.weighting_strategy.override in ["stills", "external_deltapsi"]: + msg = ( + 'The "{}" weighting strategy is not compatible with ' "scan refinement" + ).format(params.weighting_strategy.override) + raise DialsRefineConfigError(msg) + if params.outlier.algorithm in ("null", None): outlier_detector = None else: - if do_stills: - colnames = ["x_resid", "y_resid"] - params.outlier.block_width = None - else: - colnames = ["x_resid", "y_resid", "phi_resid"] + colnames = ["x_resid", "y_resid", "phi_resid"] from dials.algorithms.refinement.outlier_detection import ( CentroidOutlierFactory, ) @@ -278,36 +349,84 @@ def from_parameters_reflections_experiments( params, colnames ) - # override default weighting strategy? - weighting_strategy = None - if params.weighting_strategy.override == "statistical": - from dials.algorithms.refinement.weighting_strategies import ( - StatisticalWeightingStrategy, - ) + weighting_strategy = ReflectionManagerFactory.get_weighting_strategy_override( + params + ) - weighting_strategy = StatisticalWeightingStrategy() - elif params.weighting_strategy.override == "stills": - from dials.algorithms.refinement.weighting_strategies import ( - StillsWeightingStrategy, - ) + return refman( + reflections=reflections, + experiments=experiments, + nref_per_degree=params.reflections_per_degree, + max_sample_size=params.maximum_sample_size, + min_sample_size=params.minimum_sample_size, + close_to_spindle_cutoff=params.close_to_spindle_cutoff, + scan_margin=params.scan_margin, + outlier_detector=outlier_detector, + weighting_strategy_override=weighting_strategy, + ) - weighting_strategy = StillsWeightingStrategy( - params.weighting_strategy.delpsi_constant + @staticmethod + def laue_manager( + experiments: ExperimentList, + reflections: flex.reflection_table, + params: libtbx.phil.scope_extract, + ) -> LaueReflectionManager: + + all_tof_experiments = False + for expt in experiments: + if expt.scan is not None and expt.scan.has_property("time_of_flight"): + all_tof_experiments = True + elif all_tof_experiments: + raise ValueError( + "Cannot refine ToF and non-ToF experiments at the same time" + ) + + if all_tof_experiments: + refman = TOFReflectionManager + else: + refman = LaueReflectionManager + + ## Outlier detection + if params.outlier.algorithm in ("auto", libtbx.Auto): + params.outlier.algorithm = "mcd" + if params.outlier.sauter_poon.px_sz is libtbx.Auto: + # get this from the first panel of the first detector + params.outlier.sauter_poon.px_sz = experiments.detectors()[0][ + 0 + ].get_pixel_size() + + if params.outlier.algorithm in ("null", None): + outlier_detector = None + else: + colnames = ["x_resid", "y_resid"] + params.outlier.block_width = None + from dials.algorithms.refinement.outlier_detection import ( + CentroidOutlierFactory, ) - elif params.weighting_strategy.override == "external_deltapsi": - from dials.algorithms.refinement.weighting_strategies import ( - ExternalDelPsiWeightingStrategy, + + outlier_detector = CentroidOutlierFactory.from_parameters_and_colnames( + params, colnames ) - weighting_strategy = ExternalDelPsiWeightingStrategy() + ## Weighting strategy + + if params.weighting_strategy.override == 
"statistical": + params.weighting_strategy.override = "statistical_laue" elif params.weighting_strategy.override == "constant": - from dials.algorithms.refinement.weighting_strategies import ( - ConstantWeightingStrategy, - ) + params.weighting_strategy.override = "constant_laue" + + if params.weighting_strategy.override is not None: + if params.weighting_strategy.override not in [ + "constant_laue", + "statistical_laue", + ]: + raise ValueError( + f"{params.weighting_strategy.override} not compatible with Laue data" + ) - weighting_strategy = ConstantWeightingStrategy( - *params.weighting_strategy.constants, stills=do_stills - ) + weighting_strategy = ReflectionManagerFactory.get_weighting_strategy_override( + params + ) return refman( reflections=reflections, @@ -319,8 +438,47 @@ def from_parameters_reflections_experiments( scan_margin=params.scan_margin, outlier_detector=outlier_detector, weighting_strategy_override=weighting_strategy, + wavelength_weight=params.weighting_strategy.wavelength_weight, ) + @staticmethod + def get_weighting_strategy_override( + params: libtbx.phil.scope_extract, + ) -> weighting_strategies.StatisticalWeightingStrategy | weighting_strategies.ConstantWeightingStrategy: + + if params.weighting_strategy.override == "statistical": + return weighting_strategies.StatisticalWeightingStrategy() + + elif params.weighting_strategy.override == "stills": + return weighting_strategies.StillsWeightingStrategy( + params.weighting_strategy.delpsi_constant + ) + + elif params.weighting_strategy.override == "external_deltapsi": + return weighting_strategies.ExternalDelPsiWeightingStrategy() + + elif params.weighting_strategy.override == "constant": + return weighting_strategies.ConstantWeightingStrategy( + *params.weighting_strategy.constants + ) + + elif params.weighting_strategy.override == "constant_stills": + return weighting_strategies.ConstantStillsWeightingStrategy( + *params.weighting_strategy.constants + ) + + elif params.weighting_strategy.override == "statistical_laue": + return weighting_strategies.LaueStatisticalWeightingStrategy( + params.weighting_strategy.wavelength_weight, + ) + + elif params.weighting_strategy.override == "constant_laue": + return weighting_strategies.LaueMixedWeightingStrategy( + params.weighting_strategy.wavelength_weight, + ) + + return None + class ReflectionManager: """A class to maintain information about observed and predicted @@ -607,7 +765,11 @@ def _create_working_set(self): nrefs = sample_size = len(isel) # set sample size according to nref_per_degree (per experiment) - if exp.scan and self._nref_per_degree: + if ( + exp.scan + and exp.scan.has_property("oscillation") + and self._nref_per_degree + ): sequence_range_rad = exp.scan.get_oscillation_range(deg=False) width = abs(sequence_range_rad[1] - sequence_range_rad[0]) * RAD2DEG if self._nref_per_degree is libtbx.Auto: @@ -731,6 +893,13 @@ def filter_obs(self, sel): self._reflections = self._reflections.select(sel) return self._reflections + def update_residuals(self): + x_obs, y_obs, phi_obs = self._reflections["xyzobs.mm.value"].parts() + x_calc, y_calc, phi_calc = self._reflections["xyzcal.mm"].parts() + self._reflections["x_resid"] = x_calc - x_obs + self._reflections["y_resid"] = y_calc - y_obs + self._reflections["phi_resid"] = phi_calc - phi_obs + class StillsReflectionManager(ReflectionManager): """Overloads for a Reflection Manager that does not exclude @@ -802,3 +971,277 @@ def print_stats_on_matches(self): ) logger.info(msg) logger.info(dials.util.tabulate(rows, 
header) + "\n") + + +class LaueReflectionManager(ReflectionManager): + + _weighting_strategy = weighting_strategies.LaueStatisticalWeightingStrategy() + experiment_type = "laue" + + def __init__( + self, + reflections, + experiments, + nref_per_degree=None, + max_sample_size=None, + min_sample_size=0, + close_to_spindle_cutoff=0.02, + scan_margin=0.0, + outlier_detector=None, + weighting_strategy_override=None, + wavelength_weight=1e7, + ): + + if len(reflections) == 0: + raise ValueError("Empty reflections table provided to ReflectionManager") + + # keep track of models + self._experiments = experiments + goniometers = [e.goniometer for e in self._experiments] + self._axes = [ + matrix.col(g.get_rotation_axis()) if g else None for g in goniometers + ] + + # unset the refinement flags (creates flags field if needed) + reflections.unset_flags( + flex.size_t_range(len(reflections)), + flex.reflection_table.flags.used_in_refinement, + ) + + # check that the observed beam vectors are stored: if not, compute them + n_s1_set = set_obs_s1(reflections, experiments) + if n_s1_set > 0: + logger.debug("Set scattering vectors for %d reflections", n_s1_set) + + # keep track of the original indices of the reflections + reflections["iobs"] = flex.size_t_range(len(reflections)) + + # Check for monotonically increasing value range. If not, ref_table isn't sorted, + # and proceed to sort by id and panel. This is required for the C++ extension + # modules to allow for nlogn subselection of values used in refinement. + l_id = reflections["id"] + id0 = l_id[0] + for id_x in l_id[1:]: + if id0 <= id_x: + id0 = id_x + else: + reflections.sort("id") # Ensuring the ref_table is sorted by id + reflections.subsort( + "id", "panel" + ) # Ensuring that within each sorted id block, sorting is next performed by panel + break + + # set up the reflection inclusion criteria + self._close_to_spindle_cutoff = close_to_spindle_cutoff # close to spindle + self._scan_margin = DEG2RAD * scan_margin # close to the scan edge + self._outlier_detector = outlier_detector # for outlier rejection + self._nref_per_degree = nref_per_degree # random subsets + self._max_sample_size = max_sample_size # sample size ceiling + self._min_sample_size = min_sample_size # sample size floor + + # exclude reflections that fail some inclusion criteria + refs_to_keep = self._id_refs_to_keep(reflections) + self._accepted_refs_size = len(refs_to_keep) + + # set entering flags for all reflections + reflections.calculate_entering_flags(self._experiments) + + # reset all use flags + self.reset_accepted_reflections(reflections) + + # put full list of indexed reflections aside and select only the reflections + # that were not excluded to manage + self._indexed = reflections + self._reflections = reflections.select(refs_to_keep) + + # set exclusion flag for reflections that failed the tests + refs_to_excl = flex.bool(len(self._indexed), True) + refs_to_excl.set_selected(refs_to_keep, False) + self._indexed.set_flags( + refs_to_excl, self._indexed.flags.excluded_for_refinement + ) + + # set weights for all kept reflections + if weighting_strategy_override is not None: + self._weighting_strategy = weighting_strategy_override + else: + self._weighting_strategy = ( + weighting_strategies.LaueStatisticalWeightingStrategy(wavelength_weight) + ) + self._weighting_strategy.calculate_weights(self._reflections) + + # not known until the manager is finalised + self._sample_size = None + + def _id_refs_to_keep(self, obs_data): + """Create a selection of observations that 
pass certain conditions. + Laue-specific version removes checks relevant only to experiments + with a rotation axis.""" + + # first exclude reflections with miller index set to 0,0,0 + sel1 = obs_data["miller_index"] != (0, 0, 0) + + # exclude reflections with overloads, as these have worse centroids + sel2 = ~obs_data.get_flags(obs_data.flags.overloaded) + + # combine selections + sel = sel1 & sel2 + inc = flex.size_t_range(len(obs_data)).select(sel) + + return inc + + def print_stats_on_matches(self): + """Print some basic statistics on the matches""" + + matches = self.get_matches() + nref = len(matches) + if nref == 0: + logger.warning( + "Unable to calculate summary statistics for zero observations" + ) + return + + try: + x_resid = matches["x_resid"] + y_resid = matches["y_resid"] + wavelength_resid = matches["wavelength_resid"] + w_x, w_y, w_z = matches["xyzobs.mm.weights"].parts() + except KeyError: + return + + header = ["", "Min", "Q1", "Med", "Q3", "Max"] + rows = [] + row_data = five_number_summary(x_resid) + rows.append(["Xc - Xo (mm)"] + [f"{e:.4g}" for e in row_data]) + row_data = five_number_summary(y_resid) + rows.append(["Yc - Yo (mm)"] + [f"{e:.4g}" for e in row_data]) + row_data = five_number_summary(wavelength_resid) + rows.append(["Wavelengthc - Wavelengtho (A)"] + [f"{e:.4g}" for e in row_data]) + row_data = five_number_summary(w_x) + rows.append(["X weights"] + [f"{e:.4g}" for e in row_data]) + row_data = five_number_summary(w_y) + rows.append(["Y weights"] + [f"{e:.4g}" for e in row_data]) + row_data = five_number_summary(w_z) + rows.append(["Wavelength weights"] + [f"{e:.4g}" for e in row_data]) + + msg = ( + f"\nSummary statistics for {nref} observations" + " matched to predictions:" + ) + logger.info(msg) + logger.info(dials.util.tabulate(rows, header) + "\n") + + def update_residuals(self): + x_obs, y_obs, _ = self._reflections["xyzobs.mm.value"].parts() + x_calc, y_calc, _ = self._reflections["xyzcal.mm"].parts() + wavelength_obs = self._reflections["wavelength"] + wavelength_cal = self._reflections["wavelength_cal"] + self._reflections["x_resid"] = x_calc - x_obs + self._reflections["y_resid"] = y_calc - y_obs + self._reflections["wavelength_resid"] = wavelength_cal - wavelength_obs + self._reflections["wavelength_resid2"] = ( + self._reflections["wavelength_resid"] ** 2 + ) + + +class TOFReflectionManager(LaueReflectionManager): + def __init__( + self, + reflections, + experiments, + nref_per_degree=None, + max_sample_size=None, + min_sample_size=0, + close_to_spindle_cutoff=0.02, + scan_margin=0.0, + outlier_detector=None, + weighting_strategy_override=None, + wavelength_weight=1e7, + ): + + super().__init__( + reflections=reflections, + experiments=experiments, + nref_per_degree=nref_per_degree, + max_sample_size=max_sample_size, + min_sample_size=min_sample_size, + close_to_spindle_cutoff=close_to_spindle_cutoff, + scan_margin=scan_margin, + outlier_detector=outlier_detector, + weighting_strategy_override=weighting_strategy_override, + wavelength_weight=wavelength_weight, + ) + + tof_to_frame_interpolators = [] + sample_to_source_distances = [] + tof_ranges = [] + for expt in self._experiments: + tof = expt.scan.get_property("time_of_flight") # (usec) + tof_range = (min(tof), max(tof)) + tof_ranges.append(tof_range) + frames = list(range(len(tof))) + tof_to_frame = tof_helpers.tof_to_frame_interpolator(tof, frames) + tof_to_frame_interpolators.append(tof_to_frame) + sample_to_source_distances.append( +
expt.beam.get_sample_to_source_distance() * 10**-3 # (m) + ) + + self._tof_to_frame_interpolators = tof_to_frame_interpolators + self._sample_to_source_distances = sample_to_source_distances + self._tof_ranges = tof_ranges + + def update_residuals(self): + x_obs, y_obs, _ = self._reflections["xyzobs.mm.value"].parts() + x_calc, y_calc, _ = self._reflections["xyzcal.mm"].parts() + wavelength_obs = self._reflections["wavelength"] + wavelength_cal = self._reflections["wavelength_cal"] + L2 = self._reflections["s1"].norms() * 10**-3 + self._reflections["x_resid"] = x_calc - x_obs + self._reflections["y_resid"] = y_calc - y_obs + self._reflections["wavelength_resid"] = wavelength_cal - wavelength_obs + self._reflections["wavelength_resid2"] = ( + self._reflections["wavelength_resid"] ** 2 + ) + + frame_resid = flex.double(len(self._reflections)) + frame_resid2 = flex.double(len(self._reflections)) + for idx, expt in enumerate(self._experiments): + if "imageset_id" in self._reflections: + r_expt = self._reflections["imageset_id"] == idx + else: + r_expt = self._reflections["id"] == idx + + L_expt = self._sample_to_source_distances[idx] + L2.select(r_expt) + + tof_obs_expt = ( + tof_helpers.tof_from_wavelength(L_expt, wavelength_obs.select(r_expt)) + * 10**6 + ) # (usec) + tof_obs_expt.set_selected( + tof_obs_expt < self._tof_ranges[idx][0], self._tof_ranges[idx][0] + ) + tof_obs_expt.set_selected( + tof_obs_expt > self._tof_ranges[idx][1], self._tof_ranges[idx][1] + ) + + tof_cal_expt = ( + tof_helpers.tof_from_wavelength(L_expt, wavelength_cal.select(r_expt)) + * 10**6 + ) # (usec) + tof_cal_expt.set_selected( + tof_cal_expt < self._tof_ranges[idx][0], self._tof_ranges[idx][0] + ) + tof_cal_expt.set_selected( + tof_cal_expt > self._tof_ranges[idx][1], self._tof_ranges[idx][1] + ) + + tof_to_frame = self._tof_to_frame_interpolators[idx] + frame_resid_expt = flex.double( + tof_to_frame(tof_cal_expt) - tof_to_frame(tof_obs_expt) + ) + frame_resid.set_selected(r_expt, frame_resid_expt) + frame_resid2.set_selected(r_expt, frame_resid_expt**2) + + self._reflections["frame_resid"] = frame_resid + self._reflections["frame_resid2"] = frame_resid2 diff --git a/src/dials/algorithms/refinement/target.py b/src/dials/algorithms/refinement/target.py index 5d470eb46b..3ecb1d2758 100644 --- a/src/dials/algorithms/refinement/target.py +++ b/src/dials/algorithms/refinement/target.py @@ -5,7 +5,7 @@ from __future__ import annotations import math -from typing import Any, Tuple, Union +from typing import Any, Optional, Tuple, Union from libtbx.phil import parse from scitbx import sparse @@ -77,9 +77,23 @@ def from_parameters_and_experiments( + " not recognised" ) + all_tof_experiments = False + for expt in experiments: + if expt.scan is not None and expt.scan.has_property("time_of_flight"): + all_tof_experiments = True + elif all_tof_experiments: + raise ValueError( + "Cannot refine ToF and non-ToF experiments at the same time" + ) + + if all_tof_experiments: + from dials.algorithms.refinement.target import ( + TOFLeastSquaresResidualWithRmsdCutoff as targ, + ) + # Determine whether the target is in X, Y, Phi space or just X, Y to choose # the right Target to instantiate - if do_stills: + elif do_stills: if do_sparse: from dials.algorithms.refinement.target_stills import ( LeastSquaresStillsResidualWithRmsdCutoffSparse as targ, @@ -233,7 +247,11 @@ def predict_for_reflection_table(self, reflections, skip_derivatives=False): sel = reflections["id"] == iexp # keep all reflections if there is no rotation axis - if 
exp.goniometer is None: + if ( + exp.goniometer is None + or exp.scan is None + or not exp.scan.has_property("oscillation") + ): to_keep.set_selected(sel, True) continue @@ -698,3 +716,167 @@ class LeastSquaresPositionalResidualWithRmsdCutoffSparse( large number of Experiments""" pass + + +class LaueLeastSquaresResidualWithRmsdCutoff(Target): + + """A Laue implementation of the target class providing a least squares + residual in terms of detector impact position X, Y, and observed + wavelength""" + + _grad_names = ["dX_dp", "dY_dp", "dwavelength_dp"] + rmsd_names = ["RMSD_X", "RMSD_Y", "RMSD_wavelength"] + rmsd_units = ["mm", "mm", "A"] + + def __init__( + self, + experiments, + predictor, + reflection_manager, + prediction_parameterisation, + restraints_parameterisation, + frac_binsize_cutoff: float = 0.33333, + absolute_cutoffs: Optional[list] = None, + gradient_calculation_blocksize=None, + ): + + Target.__init__( + self, + experiments, + predictor, + reflection_manager, + prediction_parameterisation, + restraints_parameterisation, + gradient_calculation_blocksize, + ) + + """ + Set up the RMSD achieved criterion. + For simplicity, we take models from the first Experiment only. + If this is not appropriate for refinement over all experiments + then absolute cutoffs should be used instead. + """ + + detector = experiments[0].detector + + if not absolute_cutoffs: + # Pixel cutoffs + pixel_sizes = [p.get_pixel_size() for p in detector] + min_px_size_x = min(e[0] for e in pixel_sizes) + min_px_size_y = min(e[1] for e in pixel_sizes) + self._binsize_cutoffs = [ + min_px_size_x * frac_binsize_cutoff, + min_px_size_y * frac_binsize_cutoff, + ] + # Wavelength cutoff + self._binsize_cutoffs.append(0) + else: + assert len(absolute_cutoffs) == 3 + self._binsize_cutoffs = absolute_cutoffs + + @staticmethod + def _extract_residuals_and_weights(matches): + + # return residuals and weights as 1d flex.double vectors + residuals = flex.double.concatenate(matches["x_resid"], matches["y_resid"]) + + residuals = flex.double.concatenate(residuals, matches["wavelength_resid"]) + + weights, w_y, w_z = matches["xyzobs.mm.weights"].parts() + weights.extend(w_y) + weights.extend(w_z) + + return residuals, weights + + @staticmethod + def _extract_squared_residuals(matches): + + return flex.double.concatenate( + matches["x_resid2"], + flex.double.concatenate(matches["y_resid2"], matches["wavelength_resid2"]), + ) + + def _rmsds_core(self, reflections): + + """calculate unweighted RMSDs for the specified reflections""" + + resid_x = flex.sum(reflections["x_resid2"]) + resid_y = flex.sum(reflections["y_resid2"]) + resid_wavelength = flex.sum(reflections["wavelength_resid2"]) + n = len(reflections) + + rmsds = ( + math.sqrt(resid_x / n), + math.sqrt(resid_y / n), + math.sqrt(abs(resid_wavelength) / n), + ) + return rmsds + + def _predict_core(self, reflections, skip_derivatives=False): + """perform prediction for the specified reflections""" + + # If the prediction parameterisation has a compose method (true for the scan + # varying case) then call it. Prefer hasattr to try-except duck typing to + # avoid masking AttributeErrors that could be raised within the method. + if hasattr(self._prediction_parameterisation, "compose"): + self._prediction_parameterisation.compose(reflections, skip_derivatives) + + # do prediction (updates reflection table in situ). 
Scan-varying prediction + # is done automatically if the crystal has scan-points (assuming reflections + # have ub_matrix set) + self._reflection_predictor(reflections) + + x_obs, y_obs, _ = reflections["xyzobs.mm.value"].parts() + x_calc, y_calc, _ = reflections["xyzcal.mm"].parts() + + # calculate residuals and assign columns + reflections["x_resid"] = x_calc - x_obs + reflections["x_resid2"] = reflections["x_resid"] ** 2 + reflections["y_resid"] = y_calc - y_obs + reflections["y_resid2"] = reflections["y_resid"] ** 2 + wavelength_obs = reflections["wavelength"] + wavelength_cal = reflections["wavelength_cal"] + reflections["wavelength_resid"] = wavelength_cal - wavelength_obs + reflections["wavelength_resid2"] = reflections["wavelength_resid"] ** 2 + + return reflections + + def achieved(self): + """RMSD criterion for target achieved""" + r = self._rmsds if self._rmsds else self.rmsds() + + # reset cached rmsds to avoid getting out of step + self._rmsds = None + + if ( + r[0] < self._binsize_cutoffs[0] + and r[1] < self._binsize_cutoffs[1] + and r[2] < self._binsize_cutoffs[2] + ): + return True + return False + + +class TOFLeastSquaresResidualWithRmsdCutoff(LaueLeastSquaresResidualWithRmsdCutoff): + + _grad_names = ["dX_dp", "dY_dp", "dwavelength_dp"] + rmsd_names = ["RMSD_X", "RMSD_Y", "RMSD_wavelength", "RMSD_wavelength"] + rmsd_units = ["mm", "mm", "A", "frame"] + + def _rmsds_core(self, reflections): + + """calculate unweighted RMSDs for the specified reflections""" + + resid_x = flex.sum(reflections["x_resid2"]) + resid_y = flex.sum(reflections["y_resid2"]) + resid_wavelength = flex.sum(reflections["wavelength_resid2"]) + resid_frame = flex.sum(reflections["frame_resid2"]) + n = len(reflections) + + rmsds = ( + math.sqrt(resid_x / n), + math.sqrt(resid_y / n), + math.sqrt(abs(resid_wavelength) / n), + math.sqrt(abs(resid_frame) / n), + ) + return rmsds diff --git a/src/dials/algorithms/refinement/weighting_strategies.py b/src/dials/algorithms/refinement/weighting_strategies.py index e50db4a4f8..595a9de71b 100644 --- a/src/dials/algorithms/refinement/weighting_strategies.py +++ b/src/dials/algorithms/refinement/weighting_strategies.py @@ -96,3 +96,49 @@ def calculate_weights(self, reflections): reflections["xyzobs.mm.weights"] = flex.vec3_double(wx, wy, wz) return reflections + + +class LaueStatisticalWeightingStrategy(StatisticalWeightingStrategy): + + """ + Variance in z estimated from sqrt(x^2+y^2) + """ + + def __init__( + self, + wavelength_weight: float = 1e7, + ): + self._wavelength_weight = wavelength_weight + + def calculate_weights(self, reflections): + + reflections = super().calculate_weights(reflections) + + wx, wy, _ = reflections["xyzobs.mm.weights"].parts() + wz = flex.sqrt(wx * wx + wy * wy) * self._wavelength_weight + reflections["xyzobs.mm.weights"] = flex.vec3_double(wx, wy, wz) + + return reflections + + +class LaueMixedWeightingStrategy(StatisticalWeightingStrategy): + + """ + Use statistical weighting for x and y, and constant weighting for z + """ + + def __init__( + self, + wavelength_weight: float = 1e7, + ): + self._wavelength_weight = wavelength_weight + + def calculate_weights(self, reflections): + + reflections = super().calculate_weights(reflections) + + wx, wy, wz = reflections["xyzobs.mm.weights"].parts() + wz = wz * 0 + self._wavelength_weight + reflections["xyzobs.mm.weights"] = flex.vec3_double(wx, wy, wz) + + return reflections diff --git a/src/dials/algorithms/spot_prediction/__init__.py b/src/dials/algorithms/spot_prediction/__init__.py index 
b3e0b35b4c..8b4cb9cd92 100644 --- a/src/dials/algorithms/spot_prediction/__init__.py +++ b/src/dials/algorithms/spot_prediction/__init__.py @@ -3,6 +3,7 @@ import dials_algorithms_spot_prediction_ext from dials_algorithms_spot_prediction_ext import ( IndexGenerator, + LaueRayPredictor, NaveStillsReflectionPredictor, PixelLabeller, PixelToMillerIndex, @@ -32,6 +33,8 @@ "StillsDeltaPsiReflectionPredictor", "StillsRayPredictor", "StillsReflectionPredictor", + "LaueRayPredictor", + "LaueReflectionPredictor", ] @@ -155,3 +158,16 @@ def StillsReflectionPredictor(experiment, dmin=None, spherical_relp=False, **kwa experiment.crystal.get_space_group().type(), dmin, ) + + +def LaueReflectionPredictor(experiment, dmin: float): + + return dials_algorithms_spot_prediction_ext.LaueReflectionPredictor( + experiment.beam, + experiment.detector, + experiment.goniometer, + experiment.crystal.get_A(), + experiment.crystal.get_unit_cell(), + experiment.crystal.get_space_group().type(), + dmin, + ) diff --git a/src/dials/algorithms/spot_prediction/boost_python/ray_predictor.cc b/src/dials/algorithms/spot_prediction/boost_python/ray_predictor.cc index fd3494cc2a..e22b437853 100644 --- a/src/dials/algorithms/spot_prediction/boost_python/ray_predictor.cc +++ b/src/dials/algorithms/spot_prediction/boost_python/ray_predictor.cc @@ -43,6 +43,28 @@ namespace dials { namespace algorithms { namespace boost_python { return result; } + static af::reflection_table laue_with_miller_index_array( + LaueRayPredictor &self, + const af::const_ref<cctbx::miller::index<> > &h, + const mat3<double> &UB) { + af::reflection_table result; + af::shared<cctbx::miller::index<> > hkl = result["miller_index"]; + af::shared<vec3<double> > s1 = result["s1"]; + af::shared<bool> entering = result["entering"]; + af::shared<double> wavelength = result["wavelength"]; + af::shared<vec3<double> > s0 = result["s0"]; + for (std::size_t i = 0; i < h.size(); ++i) { + Ray ray = self(h[i], UB); + hkl.push_back(h[i]); + s1.push_back(ray.s1); + entering.push_back(ray.entering); + wavelength.push_back(self.get_wavelength()); + s0.push_back(self.get_s0()); + } + DIALS_ASSERT(result.is_consistent()); + return result; + } + void export_ray_predictor() { // Create and return the wrapper for the spot predictor object class_<ScanStaticRayPredictor>("ScanStaticRayPredictor", no_init) @@ -59,4 +81,12 @@ namespace dials { namespace algorithms { namespace boost_python { .def("__call__", &call_with_miller_index_array); } + void export_laue_ray_predictor() { + class_<LaueRayPredictor>("LaueRayPredictor", no_init) + .def(init<vec3<double>, mat3<double>, mat3<double> >( + (arg("unit_s0"), arg("fixed_rotation"), arg("setting_rotation")))) + .def("__call__", &LaueRayPredictor::operator(), (arg("miller_index"), arg("UB"))) + .def("__call__", &laue_with_miller_index_array); + } + }}} // namespace dials::algorithms::boost_python diff --git a/src/dials/algorithms/spot_prediction/boost_python/reflection_predictor.cc b/src/dials/algorithms/spot_prediction/boost_python/reflection_predictor.cc index 3d7c481f4c..f16db816a5 100644 --- a/src/dials/algorithms/spot_prediction/boost_python/reflection_predictor.cc +++ b/src/dials/algorithms/spot_prediction/boost_python/reflection_predictor.cc @@ -156,12 +156,47 @@ namespace dials { namespace algorithms { namespace boost_python { .def("for_reflection_table", &Predictor::for_reflection_table_with_individual_ub); } + void export_laue_reflection_predictor() { + typedef LaueReflectionPredictor Predictor; + + af::reflection_table (Predictor::*predict_all)() const = &Predictor::operator(); + + af::reflection_table (Predictor::*predict_observed)( + const af::const_ref<cctbx::miller::index<> >&) = &Predictor::operator(); + +
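// Boost.Python cannot resolve the overloaded operator() by itself, so each + // overload is first bound to a distinct member-function pointer (as above + // and below) and the pointers are then exposed as separate __call__ defs. +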
af::reflection_table (Predictor::*predict_observed_with_panel)( + const af::const_ref<cctbx::miller::index<> >&, std::size_t) = + &Predictor::operator(); + + af::reflection_table (Predictor::*predict_observed_with_panel_list)( + const af::const_ref<cctbx::miller::index<> >&, + const af::const_ref<std::size_t>&) = &Predictor::operator(); + + class_<Predictor>("LaueReflectionPredictor", no_init) + .def(init<const PolychromaticBeam&, + const Detector&, + const Goniometer&, + mat3<double>, + const cctbx::uctbx::unit_cell&, + const cctbx::sgtbx::space_group_type&, + const double&>()) + .def("__call__", predict_all) + .def("for_ub", &Predictor::for_ub) + .def("__call__", predict_observed) + .def("__call__", predict_observed_with_panel) + .def("__call__", predict_observed_with_panel_list) + .def("for_reflection_table", &Predictor::for_reflection_table) + .def("all_reflections_for_asu", &Predictor::all_reflections_for_asu) + .def("for_reflection_table", &Predictor::for_reflection_table_with_individual_ub); + } + void export_reflection_predictor() { export_scan_static_reflection_predictor(); export_scan_varying_reflection_predictor(); export_stills_delta_psi_reflection_predictor(); export_nave_stills_reflection_predictor(); export_spherical_relp_stills_reflection_predictor(); + export_laue_reflection_predictor(); } }}} // namespace dials::algorithms::boost_python diff --git a/src/dials/algorithms/spot_prediction/boost_python/spot_prediction_ext.cc b/src/dials/algorithms/spot_prediction/boost_python/spot_prediction_ext.cc index b398ccb885..1d251f347d 100644 --- a/src/dials/algorithms/spot_prediction/boost_python/spot_prediction_ext.cc +++ b/src/dials/algorithms/spot_prediction/boost_python/spot_prediction_ext.cc @@ -24,6 +24,7 @@ namespace dials { namespace algorithms { namespace boost_python { void export_rotation_angles(); void export_ray_predictor(); void export_scan_varying_ray_predictor(); + void export_laue_ray_predictor(); void export_stills_ray_predictor(); void export_ray_intersection(); void export_reflection_predictor(); @@ -40,6 +41,7 @@ namespace dials { namespace algorithms { namespace boost_python { export_ray_predictor(); export_scan_varying_ray_predictor(); export_stills_ray_predictor(); + export_laue_ray_predictor(); export_ray_intersection(); export_reflection_predictor(); export_pixel_labeller(); diff --git a/src/dials/algorithms/spot_prediction/ray_predictor.h b/src/dials/algorithms/spot_prediction/ray_predictor.h index 30d2e307f3..9a225f3251 100644 --- a/src/dials/algorithms/spot_prediction/ray_predictor.h +++ b/src/dials/algorithms/spot_prediction/ray_predictor.h @@ -88,7 +88,7 @@ namespace dials { namespace algorithms { vec2<double> phi; try { phi = calculate_rotation_angles_(pstar0); - } catch (error const&) { + } catch (error const &) { return rays; } @@ -124,7 +124,7 @@ namespace dials { namespace algorithms { vec2<double> phi; try { phi = calculate_rotation_angles_(pstar0); - } catch (error const&) { + } catch (error const &) { return rays; } @@ -250,6 +250,63 @@ namespace dials { namespace algorithms { vec3<double> s0_m2_plane; }; + /** + * Class to predict s1 rays for Laue data + */ + class LaueRayPredictor { + public: + typedef cctbx::miller::index<> miller_index; + + LaueRayPredictor(const vec3<double> unit_s0, + mat3<double> fixed_rotation, + mat3<double> setting_rotation) + : unit_s0_(unit_s0), + fixed_rotation_(fixed_rotation), + setting_rotation_(setting_rotation) { + DIALS_ASSERT(unit_s0_.length() > 0.0); + } + + /** + * For a given miller index and UB matrix, calculates the predicted s1 ray. + * The LaueRayPredictor wavelength and s0 variables are updated during the + * calculation, so that they can be monitored for convergence.
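+ * (Derivation, for reference: elastic scattering requires |s1| = |s0| with + * s1 = s0 + q, so q.q + 2 q.s0 = 0; substituting s0 = unit_s0 / wavelength + * gives wavelength = -2 (unit_s0 . q) / (q . q), the expression used below.)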
+ * @param h The miller index + * @param ub The UB matrix + * @returns Ray + */ + Ray operator()(const miller_index &h, const mat3<double> &ub) { + // Calculate the reciprocal lattice vector + vec3<double> q = setting_rotation_ * fixed_rotation_ * ub * h; + + // Calculate the wavelength required to meet the diffraction condition + // (starting from q.q + 2q.s0 = 0) + wavelength_ = -2 * ((unit_s0_ * q) / (q * q)); + s0_ = unit_s0_ / wavelength_; + DIALS_ASSERT(s0_.length() > 0); + + // Calculate the Ray (default zero angle and 'entering' as false) + vec3<double> s1 = s0_ + q; + return Ray(s1, 0.0, false); + } + + double get_wavelength() const { + return wavelength_; + } + + vec3<double> get_s0() const { + return s0_; + } + + private: + const vec3<double> unit_s0_; + double wavelength_; + vec3<double> s0_; + mat3<double> fixed_rotation_; + mat3<double> setting_rotation_; + }; + }} // namespace dials::algorithms #endif // DIALS_ALGORITHMS_SPOT_PREDICTION_RAY_PREDICTOR_H diff --git a/src/dials/algorithms/spot_prediction/reflection_predictor.h b/src/dials/algorithms/spot_prediction/reflection_predictor.h index 88026ea313..83d9e615b5 100644 --- a/src/dials/algorithms/spot_prediction/reflection_predictor.h +++ b/src/dials/algorithms/spot_prediction/reflection_predictor.h @@ -28,6 +28,7 @@ #include #include #include +#include namespace dials { namespace algorithms { @@ -38,6 +39,7 @@ namespace dials { namespace algorithms { using dxtbx::model::is_angle_in_range; using dxtbx::model::Panel; using dxtbx::model::plane_ray_intersection; + using dxtbx::model::PolychromaticBeam; using dxtbx::model::Scan; using scitbx::constants::pi; using scitbx::constants::pi_180; @@ -75,6 +77,16 @@ namespace dials { namespace algorithms { } }; + struct laue_prediction_data : prediction_data { + af::shared<double> wavelength_cal; + af::shared<vec3<double> > s0_cal; + + laue_prediction_data(af::reflection_table &table) : prediction_data(table) { + wavelength_cal = table.get<double>("wavelength_cal"); + s0_cal = table.get<vec3<double> >("s0_cal"); + } + }; + /** + * A reflection predictor for scan static prediction. */ @@ -1309,6 +1321,322 @@ namespace dials { namespace algorithms { SphericalRelpStillsRayPredictor spherical_relp_predict_ray_; }; + /** + * A class to do Laue reflection prediction. + * Uses LaueRayPredictor to make predictions, and adds additional + * wavelength_cal and s0_cal columns to the predicted reflection table. + */ + class LaueReflectionPredictor { + public: + typedef cctbx::miller::index<> miller_index; + + /** + * Initialise the predictor + */ + LaueReflectionPredictor(const PolychromaticBeam &beam, + const Detector &detector, + const Goniometer &goniometer, + mat3<double> ub, + const cctbx::uctbx::unit_cell &unit_cell, + const cctbx::sgtbx::space_group_type &space_group_type, + const double &dmin) + : beam_(beam), + detector_(detector), + goniometer_(goniometer), + ub_(ub), + unit_cell_(unit_cell), + space_group_type_(space_group_type), + dmin_(dmin), + predict_ray_(beam.get_unit_s0(), + goniometer.get_fixed_rotation(), + goniometer.get_setting_rotation()) {} + + /** + * Predict all reflections. + * @returns reflection table. 
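+ * (This overload is deliberately a stub: the body below throws. Laue + * callers use the specific entry points instead, e.g. all_reflections_for_asu, + * as in the finite-difference test further down, or for_reflection_table.)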
+ */ + af::reflection_table operator()() const { + throw DIALS_ERROR("Not implemented"); + return af::reflection_table(); + } + + af::reflection_table all_reflections_for_asu(double phi) { + mat3<double> fixed_rotation = goniometer_.get_fixed_rotation(); + mat3<double> setting_rotation = goniometer_.get_setting_rotation(); + vec3<double> rotation_axis = goniometer_.get_rotation_axis(); + mat3<double> rotation = + scitbx::math::r3_rotation::axis_and_angle_as_matrix(rotation_axis, phi); + vec3<double> unit_s0 = beam_.get_unit_s0(); + vec2<double> wavelength_range = beam_.get_wavelength_range(); + + cctbx::miller::index_generator indices = + cctbx::miller::index_generator(unit_cell_, space_group_type_, false, dmin_); + + af::shared<miller_index> indices_arr = indices.to_array(); + + af::reflection_table table; + af::shared<double> wavelength_column; + table["wavelength_cal"] = wavelength_column; + af::shared<vec3<double> > s0_column; + table["s0_cal"] = s0_column; + laue_prediction_data predictions(table); + + for (std::size_t i = 0; i < indices_arr.size(); ++i) { + miller_index h = indices_arr[i]; + + vec3<double> q = setting_rotation * rotation * fixed_rotation * ub_ * h; + + // Calculate the wavelength required to meet the diffraction condition + double wavelength = -2 * ((unit_s0 * q) / (q * q)); + if (wavelength < wavelength_range[0] || wavelength > wavelength_range[1]) { + continue; + } + vec3<double> s0 = unit_s0 / wavelength; + DIALS_ASSERT(s0.length() > 0); + + // Calculate the Ray (default zero angle and 'entering' as false) + vec3<double> s1 = s0 + q; + + int panel = detector_.get_panel_intersection(s1); + if (panel == -1) { + continue; + } + + Detector::coord_type coord; + coord.first = panel; + coord.second = detector_[panel].get_ray_intersection(s1); + vec2<double> mm = coord.second; + vec2<double> px = detector_[panel].millimeter_to_pixel(mm); + + // Add the reflections to the table + predictions.hkl.push_back(h); + predictions.enter.push_back(false); + predictions.s1.push_back(s1); + predictions.xyz_mm.push_back(vec3<double>(mm[0], mm[1], 0.0)); + predictions.xyz_px.push_back(vec3<double>(px[0], px[1], 0.0)); + predictions.panel.push_back(panel); + predictions.flags.push_back(af::Predicted); + predictions.wavelength_cal.push_back(wavelength); + predictions.s0_cal.push_back(s0); + } + + // Return the reflection table + return table; + } + + /** + * Predict reflections for UB. Also filters based on Ewald sphere proximity. + * @param ub The UB matrix + * @returns A reflection table. + */ + af::reflection_table for_ub(const mat3<double> &ub) { + // Create the reflection table and the local container + af::reflection_table table; + laue_prediction_data predictions(table); + + // Create the index generator and loop through the indices. For each index, + // predict the rays and append to the reflection table + IndexGenerator indices(unit_cell_, space_group_type_, dmin_); + for (;;) { + miller_index h = indices.next(); + if (h.is_zero()) { + break; + } + + Ray ray; + ray = predict_ray_(h, ub); + append_for_index(predictions, ub, h); + } + + // Return the reflection table + return table; + } + + /** + * Predict the reflections with given Miller indices. + * @param h The miller index + * @returns The reflection table + */ + af::reflection_table operator()(const af::const_ref<miller_index> &h) { + af::reflection_table table; + laue_prediction_data predictions(table); + for (std::size_t i = 0; i < h.size(); ++i) { + append_for_index(predictions, ub_, h[i]); + } + return table; + } + + /** + * Predict for given Miller indices on a single panel.
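+ * (A thin convenience overload: the single panel index is broadcast into a + * panel array and the panel-list overload below does the work.)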
+ * @param h The array of Miller indices + * @param panel The panel index + * @returns The reflection table + */ + af::reflection_table operator()(const af::const_ref<miller_index> &h, + std::size_t panel) { + af::shared<std::size_t> panels(h.size(), panel); + return (*this)(h, panels.const_ref()); + } + + /** + * Predict for given Miller indices for specific panels. + * @param h The array of Miller indices + * @param panel The array of panel indices + * @returns The reflection table + */ + af::reflection_table operator()(const af::const_ref<miller_index> &h, + const af::const_ref<std::size_t> &panel) { + DIALS_ASSERT(h.size() == panel.size()); + af::reflection_table table; + laue_prediction_data predictions(table); + for (std::size_t i = 0; i < h.size(); ++i) { + append_for_index(predictions, ub_, h[i], (int)panel[i]); + } + return table; + } + + /** + * Predict reflections for specific Miller indices, panels and individual + * UB matrices + * @param h The array of miller indices + * @param panel The array of panels + * @param ub The array of setting matrices + * @returns A reflection table. + */ + af::reflection_table for_hkl_with_individual_ub( + const af::const_ref<miller_index> &h, + const af::const_ref<std::size_t> &panel, + const af::const_ref<mat3<double> > &ub) { + DIALS_ASSERT(ub.size() == h.size()); + DIALS_ASSERT(ub.size() == panel.size()); + af::reflection_table table; + af::shared<double> wavelength_column; + table["wavelength_cal"] = wavelength_column; + af::shared<vec3<double> > s0_column; + table["s0_cal"] = s0_column; + laue_prediction_data predictions(table); + for (std::size_t i = 0; i < h.size(); ++i) { + append_for_index(predictions, ub[i], h[i], panel[i]); + } + DIALS_ASSERT(table.nrows() == h.size()); + return table; + } + + /** + * Predict reflections and add to the entries in the table for a single UB + * matrix + * @param table The reflection table + * @param ub The ub matrix + */ + void for_reflection_table(af::reflection_table table, const mat3<double> &ub) { + af::shared<mat3<double> > uba(table.nrows(), ub); + for_reflection_table_with_individual_ub(table, uba.const_ref()); + } + + /** + * Predict reflections and add to the entries in the table for an array of + * UB matrices + * @param table The reflection table + */ + void for_reflection_table_with_individual_ub( + af::reflection_table table, + const af::const_ref<mat3<double> > &ub) { + DIALS_ASSERT(ub.size() == table.nrows()); + af::reflection_table new_table = + for_hkl_with_individual_ub(table["miller_index"], table["panel"], ub); + DIALS_ASSERT(new_table.nrows() == table.nrows()); + table["miller_index"] = new_table["miller_index"]; + table["panel"] = new_table["panel"]; + table["s1"] = new_table["s1"]; + table["xyzcal.px"] = new_table["xyzcal.px"]; + table["xyzcal.mm"] = new_table["xyzcal.mm"]; + table["wavelength_cal"] = new_table["wavelength_cal"]; + table["s0_cal"] = new_table["s0_cal"]; + + af::shared<std::size_t> flags = table["flags"]; + af::shared<std::size_t> new_flags = new_table["flags"]; + for (std::size_t i = 0; i < flags.size(); ++i) { + flags[i] &= ~af::Predicted; + flags[i] |= new_flags[i]; + } + DIALS_ASSERT(table.is_consistent()); + } + + protected: + /** + * Predict for the given Miller index, UB matrix and panel number + * @param p The reflection data + * @param ub The UB matrix + * @param h The miller index + * @param panel The panel index + */ + virtual void append_for_index(laue_prediction_data &p, + const mat3<double> ub, + const miller_index &h, + int panel = -1) { + Ray ray; + ray = predict_ray_(h, ub); + double wavelength = predict_ray_.get_wavelength(); + vec3<double> s0 = predict_ray_.get_s0(); + append_for_ray(p, h, ray, panel, wavelength, s0); + } + + void append_for_ray(laue_prediction_data &p, + const miller_index &h, + const Ray &ray, + int panel, + double wavelength, + vec3<double> s0) const { + try { + // Get the impact on the detector + Detector::coord_type impact = get_ray_intersection(ray.s1, panel); + std::size_t panel = impact.first; + vec2<double> mm = impact.second; + vec2<double> px = detector_[panel].millimeter_to_pixel(mm); + + // Add the reflections to the table + p.hkl.push_back(h); + p.enter.push_back(ray.entering); + p.s1.push_back(ray.s1); + p.xyz_mm.push_back(vec3<double>(mm[0], mm[1], 0.0)); + p.xyz_px.push_back(vec3<double>(px[0], px[1], 0.0)); + p.panel.push_back(panel); + p.flags.push_back(af::Predicted); + p.wavelength_cal.push_back(wavelength); + p.s0_cal.push_back(s0); + + } catch (dxtbx::error const &) { + // do nothing + } + } + + private: + /** + * Helper function to do ray intersection with/without panel set. + */ + Detector::coord_type get_ray_intersection(vec3<double> s1, int panel) const { + Detector::coord_type coord; + if (panel < 0) { + coord = detector_.get_ray_intersection(s1); + } else { + coord.first = panel; + coord.second = detector_[panel].get_ray_intersection(s1); + } + return coord; + } + + protected: + PolychromaticBeam beam_; + Detector detector_; + Goniometer goniometer_; + Scan scan_; + mat3<double> ub_; + cctbx::uctbx::unit_cell unit_cell_; + cctbx::sgtbx::space_group_type space_group_type_; + const double dmin_; + LaueRayPredictor predict_ray_; + }; + }} // namespace dials::algorithms #endif // DIALS_ALGORITHMS_SPOT_PREDICTION_REFLECTION_PREDICTOR_H diff --git a/src/dials/array_family/flex_ext.py b/src/dials/array_family/flex_ext.py index 164474a346..72a02daecc 100644 --- a/src/dials/array_family/flex_ext.py +++ b/src/dials/array_family/flex_ext.py @@ -1321,17 +1321,29 @@ def map_centroids_to_reciprocal_space( sel_expt = self["id"] == i for i_panel in range(len(expt.detector)): + sel = sel_expt & (panel_numbers == i_panel) if calculated: - x, y, rot_angle = self["xyzcal.mm"].select(sel).parts() + x, y, z = self["xyzcal.mm"].select(sel).parts() else: - x, y, rot_angle = self["xyzobs.mm.value"].select(sel).parts() + x, y, z = self["xyzobs.mm.value"].select(sel).parts() s1 = expt.detector[i_panel].get_lab_coord( cctbx.array_family.flex.vec2_double(x, y) ) - s1 = s1 / s1.norms() * (1 / expt.beam.get_wavelength()) + + if calculated and "wavelength_cal" in self and "s0_cal" in self: + wavelength = self["wavelength_cal"].select(sel) + s0 = self["s0_cal"].select(sel) + elif "wavelength" in self and "s0" in self: + wavelength = self["wavelength"].select(sel) + s0 = self["s0"].select(sel) + else: + wavelength = expt.beam.get_wavelength() + s0 = expt.beam.get_s0() + + s1 = s1 / s1.norms() * (1 / wavelength) self["s1"].set_selected(sel, s1) - S = s1 - expt.beam.get_s0() + S = s1 - s0 if expt.goniometer is not None: setting_rotation = matrix.sqr( expt.goniometer.get_setting_rotation() @@ -1342,12 +1354,13 @@ sample_rotation *= matrix.sqr(expt.crystal.get_U()) self["rlp"].set_selected(sel, tuple(setting_rotation.inverse()) * S) - self["rlp"].set_selected( - sel, - self["rlp"] - .select(sel) - .rotate_around_origin(rotation_axis, -rot_angle), - ) + if expt.scan is not None and expt.scan.has_property("oscillation"): + self["rlp"].set_selected( + sel, + self["rlp"] + .select(sel) + .rotate_around_origin(rotation_axis, -z), + ) self["rlp"].set_selected( sel, tuple(sample_rotation.inverse()) * self["rlp"].select(sel) ) @@ -1381,8 +1394,14 @@ def calculate_entering_flags(self, experiments): if not experiment.goniometer:
continue axis = matrix.col(experiment.goniometer.get_rotation_axis()) - s0 = matrix.col(experiment.beam.get_s0()) - vec = s0.cross(axis) + if "s0" in self: + s0 = self["s0"] + vec = cctbx.array_family.flex.vec3_double(len(self)) + for i in range(len(s0)): + vec[i] = matrix.col(s0[i]).cross(axis) + else: + s0 = matrix.col(experiment.beam.get_s0()) + vec = s0.cross(axis) sel = self["id"] == iexp enterings.set_selected(sel, self["s1"].dot(vec) < 0.0) diff --git a/tests/algorithms/refinement/test_finite_diffs.py b/tests/algorithms/refinement/test_finite_diffs.py index 93b638e0e1..f47ff26441 100644 --- a/tests/algorithms/refinement/test_finite_diffs.py +++ b/tests/algorithms/refinement/test_finite_diffs.py @@ -1,60 +1,119 @@ +from __future__ import annotations + +# Python and cctbx imports +import random +from math import pi +from os.path import join + +from cctbx.sgtbx import space_group, space_group_symbols +from dxtbx.format.FormatISISSXD import FormatISISSXD + +# We will set up a mock scan and a mock experiment list +from dxtbx.model import CrystalFactory, ScanFactory +from dxtbx.model.experiment_list import Experiment, ExperimentList +from libtbx.phil import parse +from libtbx.test_utils import approx_equal +from scitbx import matrix +from scitbx.array_family import flex + +from dials.algorithms.refinement.parameterisation.beam_parameters import ( + BeamParameterisation, +) +from dials.algorithms.refinement.parameterisation.crystal_parameters import ( + CrystalOrientationParameterisation, + CrystalUnitCellParameterisation, +) + +# Model parameterisations +from dials.algorithms.refinement.parameterisation.detector_parameters import ( + DetectorParameterisationHierarchical, + DetectorParameterisationSinglePanel, +) + +# Parameterisation of the prediction equation +from dials.algorithms.refinement.parameterisation.prediction_parameters import ( + LauePredictionParameterisation, + XYPhiPredictionParameterisation, +) +from dials.algorithms.refinement.prediction.managed_predictors import ( + LaueExperimentsPredictor, + ScansExperimentsPredictor, + ScansRayPredictor, +) +from dials.algorithms.refinement.reflection_manager import ( + LaueReflectionManager, + ReflectionManager, +) + +# Imports for the target function +from dials.algorithms.refinement.target import ( + LaueLeastSquaresResidualWithRmsdCutoff, + LeastSquaresPositionalResidualWithRmsdCutoff, + TOFLeastSquaresResidualWithRmsdCutoff, +) + +# Reflection prediction +from dials.algorithms.spot_prediction import ( + IndexGenerator, + LaueReflectionPredictor, + ray_intersection, +) + +from . import geometry_phil + +# Experimental model builder +from .setup_geometry import Extract + """Test analytical calculation of gradients of the target function versus finite difference calculations""" +# function for calculating finite difference gradients of the target function +def get_fd_gradients(target, pred_param, deltas): + """Calculate centered finite difference gradients for each of the + parameters of the target function. -from __future__ import annotations + "deltas" must be a sequence of the same length as the parameter list, and + contains the step size for the difference calculations for each parameter. 
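+ + A typical call, mirroring the tests below (a step of 1.0e-7 per parameter): + + fd_grad, fd_curvs = get_fd_gradients( + target, pred_param, [1.0e-7] * len(pred_param) + )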
+ """ + p_vals = pred_param.get_param_vals() + assert len(deltas) == len(p_vals) + fd_grad = [] + fd_curvs = [] -def test(args=[]): - # Python and cctbx imports - import random - from math import pi - - from cctbx.sgtbx import space_group, space_group_symbols - - # We will set up a mock scan and a mock experiment list - from dxtbx.model import ScanFactory - from dxtbx.model.experiment_list import Experiment, ExperimentList - from libtbx.phil import parse - from libtbx.test_utils import approx_equal - from scitbx import matrix - from scitbx.array_family import flex - - from dials.algorithms.refinement.parameterisation.beam_parameters import ( - BeamParameterisation, - ) - from dials.algorithms.refinement.parameterisation.crystal_parameters import ( - CrystalOrientationParameterisation, - CrystalUnitCellParameterisation, - ) + for i in range(len(deltas)): + val = p_vals[i] - # Model parameterisations - from dials.algorithms.refinement.parameterisation.detector_parameters import ( - DetectorParameterisationSinglePanel, - ) + p_vals[i] -= deltas[i] / 2.0 + pred_param.set_param_vals(p_vals) + target.predict() - # Parameterisation of the prediction equation - from dials.algorithms.refinement.parameterisation.prediction_parameters import ( - XYPhiPredictionParameterisation, - ) - from dials.algorithms.refinement.prediction.managed_predictors import ( - ScansExperimentsPredictor, - ScansRayPredictor, - ) - from dials.algorithms.refinement.reflection_manager import ReflectionManager + rev_state = target.compute_functional_gradients_and_curvatures() - # Imports for the target function - from dials.algorithms.refinement.target import ( - LeastSquaresPositionalResidualWithRmsdCutoff, - ) + p_vals[i] += deltas[i] + pred_param.set_param_vals(p_vals) + + target.predict() + + fwd_state = target.compute_functional_gradients_and_curvatures() + + # finite difference estimation of first derivatives + fd_grad.append((fwd_state[0] - rev_state[0]) / deltas[i]) + + # finite difference estimation of curvatures, using the analytical + # first derivatives + fd_curvs.append((fwd_state[1][i] - rev_state[1][i]) / deltas[i]) + + # set parameter back to centred value + p_vals[i] = val - # Reflection prediction - from dials.algorithms.spot_prediction import IndexGenerator, ray_intersection + # return to the initial state + pred_param.set_param_vals(p_vals) - from . import geometry_phil + return fd_grad, fd_curvs - # Experimental model builder - from .setup_geometry import Extract + +def test(args=[]): # Local functions def random_direction_close_to(vector, sd=0.5): @@ -217,57 +276,232 @@ def random_direction_close_to(vector, sd=0.5): # Do FD calculation for comparison # #################################### - # function for calculating finite difference gradients of the target function - def get_fd_gradients(target, pred_param, deltas): - """Calculate centered finite difference gradients for each of the - parameters of the target function. + # test normalised differences between FD and analytical calculations + fdgrads = get_fd_gradients(mytarget, pred_param, [1.0e-7] * len(pred_param)) + diffs = [a - b for a, b in zip(dL_dp, fdgrads[0])] + norm_diffs = tuple([a / b for a, b in zip(diffs, fdgrads[0])]) + for e in norm_diffs: + assert abs(e) < 0.001 # check differences less than 0.1% + + # test normalised differences between FD curvatures and analytical least + # squares approximation. 
We don't expect this to be especially close + if curvs: + diffs = [a - b for a, b in zip(curvs, fdgrads[1])] + norm_diffs = tuple([a / b for a, b in zip(diffs, fdgrads[1])]) + for e in norm_diffs: + assert abs(e) < 0.1 # check differences less than 10% + + +def test_laue_target_function(dials_data): + fmt = FormatISISSXD( + join(dials_data("isis_sxd_example_data", pathlib=True), "sxd_nacl_run.nxs") + ) + beam = fmt.get_beam() + detector = fmt.get_detector() + goniometer = fmt.get_goniometer() + scan = fmt.get_scan() + crystal = CrystalFactory.from_dict( + { + "__id__": "crystal", + "real_space_a": ( + 0.5681647125795644, + -2.9735716012061135, + -2.707784412005687, + ), + "real_space_b": ( + -2.4994848902125884, + -2.3900344014694066, + 2.091613643314567, + ), + "real_space_c": ( + -1.2771711635863638, + 3.676428861690809, + -1.226011051463438, + ), + "space_group_hall_symbol": " P 1", + "B_covariance": ( + 2.618491627225783e-13, + -2.4190170785778272e-30, + 2.7961382012436816e-30, + 1.4283218313839273e-13, + 8.110824693143866e-15, + 2.7961382012436816e-30, + -1.922218398881239e-13, + -1.1641948761717081e-14, + 2.2832201114561855e-14, + -2.419017078577827e-30, + 1.3543505986455804e-44, + -8.081590630292518e-46, + -4.202632560757537e-29, + -5.437640708903305e-29, + -8.081590630292518e-46, + 3.330706229067803e-30, + 5.621471188408899e-29, + -6.599119546892406e-30, + 2.7961382012436816e-30, + -8.08159063029252e-46, + 9.550033948814972e-46, + 5.487666450546843e-30, + 2.7096475027184553e-30, + 9.550033948814972e-46, + -3.935814660390771e-30, + -3.889472044173952e-30, + 7.798194512461942e-30, + 1.428321831383927e-13, + -4.2026325607575364e-29, + 5.487666450546843e-30, + 7.789867544667339e-13, + 1.4101250207277487e-13, + 5.487666450546843e-30, + -2.0005409484272627e-13, + -2.021584892435437e-13, + 4.481019714719027e-14, + 8.110824693143867e-15, + -5.437640708903304e-29, + 2.7096475027184553e-30, + 1.4101250207277487e-13, + 2.5553690436147e-13, + 2.7096475027184553e-30, + -1.1167612085554417e-14, + -1.8848015530742402e-13, + 2.2125950964841596e-14, + 2.7961382012436816e-30, + -8.08159063029252e-46, + 9.550033948814972e-46, + 5.487666450546843e-30, + 2.7096475027184553e-30, + 9.550033948814972e-46, + -3.935814660390771e-30, + -3.889472044173952e-30, + 7.798194512461942e-30, + -1.922218398881239e-13, + 3.330706229067804e-30, + -3.93581466039077e-30, + -2.000540948427263e-13, + -1.1167612085554417e-14, + -3.93581466039077e-30, + 2.7092227778026175e-13, + 1.6029668235488112e-14, + -3.2138365634328507e-14, + -1.1641948761717081e-14, + 5.621471188408898e-29, + -3.889472044173952e-30, + -2.021584892435437e-13, + -1.88480155307424e-13, + -3.889472044173952e-30, + 1.6029668235488112e-14, + 2.7054780216756276e-13, + -3.175994945548343e-14, + 2.2832201114561858e-14, + -6.599119546892407e-30, + 7.79819451246194e-30, + 4.4810197147190265e-14, + 2.2125950964841592e-14, + 7.79819451246194e-30, + -3.2138365634328507e-14, + -3.175994945548343e-14, + 6.36770905528953e-14, + ), + } + ) + + experiments = ExperimentList() + experiments.append( + Experiment( + beam=beam, + detector=detector, + goniometer=goniometer, + scan=scan, + crystal=crystal, + imageset=None, + ) + ) + + det_param = DetectorParameterisationHierarchical(detector) + xlo_param = CrystalOrientationParameterisation(crystal) + xluc_param = CrystalUnitCellParameterisation(crystal) - "deltas" must be a sequence of the same length as the parameter list, and - contains the step size for the difference calculations for each parameter. 
- """ + pred_param = LauePredictionParameterisation( + experiments, + detector_parameterisations=[det_param], + beam_parameterisations=[], + xl_orientation_parameterisations=[xlo_param], + xl_unit_cell_parameterisations=[xluc_param], + ) - p_vals = pred_param.get_param_vals() - assert len(deltas) == len(p_vals) - fd_grad = [] - fd_curvs = [] + # shift detector by 0.2 mm each translation and 2 mrad each rotation + det_p_vals = det_param.get_param_vals() + p_vals = [a + b for a, b in zip(det_p_vals, [2.0, 2.0, 2.0, 2.0, 2.0, 2.0])] + det_param.set_param_vals(p_vals) - for i in range(len(deltas)): - val = p_vals[i] + # rotate crystal a bit (=2 mrad each rotation) + xlo_p_vals = xlo_param.get_param_vals() + p_vals = [a + b for a, b in zip(xlo_p_vals, [2.0, 2.0, 2.0])] + xlo_param.set_param_vals(p_vals) - p_vals[i] -= deltas[i] / 2.0 - pred_param.set_param_vals(p_vals) - target.predict() + reflection_predictor = LaueReflectionPredictor(experiments[0], 1.0) + obs_refs = reflection_predictor.all_reflections_for_asu(0.0) - rev_state = target.compute_functional_gradients_and_curvatures() + # Set 'observed' centroids from the predicted ones + obs_refs["xyzobs.mm.value"] = obs_refs["xyzcal.mm"] + obs_refs["s0"] = obs_refs["s0_cal"] + obs_refs["wavelength"] = obs_refs["wavelength_cal"] + obs_refs["id"] = flex.int(len(obs_refs), 0) - p_vals[i] += deltas[i] - pred_param.set_param_vals(p_vals) + # Invent some variances for the centroid positions of the simulated data + px_size = detector[0].get_pixel_size() + var_x = flex.double(len(obs_refs), (px_size[0] / 2.0) ** 2) + var_y = flex.double(len(obs_refs), (px_size[1] / 2.0) ** 2) + var_z = flex.double(len(obs_refs), 0.0) + obs_refs["xyzobs.mm.variance"] = flex.vec3_double(var_x, var_y, var_z) - target.predict() + # Undo known parameter shifts + det_param.set_param_vals(det_p_vals) + xlo_param.set_param_vals(xlo_p_vals) - fwd_state = target.compute_functional_gradients_and_curvatures() + refman = LaueReflectionManager(obs_refs, experiments, outlier_detector=None) + refman.finalise() - # finite difference estimation of first derivatives - fd_grad.append((fwd_state[0] - rev_state[0]) / deltas[i]) + # Redefine the reflection predictor to use the type expected by the Target class + ref_predictor = LaueExperimentsPredictor(experiments) - # finite difference estimation of curvatures, using the analytical - # first derivatives - fd_curvs.append((fwd_state[1][i] - rev_state[1][i]) / deltas[i]) + mytarget = LaueLeastSquaresResidualWithRmsdCutoff( + experiments, ref_predictor, refman, pred_param, restraints_parameterisation=None + ) - # set parameter back to centred value - p_vals[i] = val + # get the functional and gradients + mytarget.predict() + L, dL_dp, curvs = mytarget.compute_functional_gradients_and_curvatures() - # return to the initial state - pred_param.set_param_vals(p_vals) + # test normalised differences between FD and analytical calculations + fdgrads = get_fd_gradients(mytarget, pred_param, [1.0e-7] * len(pred_param)) + diffs = [a - b for a, b in zip(dL_dp, fdgrads[0])] + norm_diffs = tuple([a / b for a, b in zip(diffs, fdgrads[0])]) + for e in norm_diffs: + assert abs(e) < 0.002 # check differences less than 0.2% + + # test normalised differences between FD curvatures and analytical least + # squares approximation. 
We don't expect this to be especially close + if curvs: + diffs = [a - b for a, b in zip(curvs, fdgrads[1])] + norm_diffs = tuple([a / b for a, b in zip(diffs, fdgrads[1])]) + for e in norm_diffs: + assert abs(e) < 0.1 # check differences less than 10% - return fd_grad, fd_curvs + mytarget = TOFLeastSquaresResidualWithRmsdCutoff( + experiments, ref_predictor, refman, pred_param, restraints_parameterisation=None + ) + + # get the functional and gradients + mytarget.predict() + L, dL_dp, curvs = mytarget.compute_functional_gradients_and_curvatures() # test normalised differences between FD and analytical calculations fdgrads = get_fd_gradients(mytarget, pred_param, [1.0e-7] * len(pred_param)) diffs = [a - b for a, b in zip(dL_dp, fdgrads[0])] norm_diffs = tuple([a / b for a, b in zip(diffs, fdgrads[0])]) for e in norm_diffs: - assert abs(e) < 0.001 # check differences less than 0.1% + assert abs(e) < 0.002 # check differences less than 0.2% # test normalised differences between FD curvatures and analytical least # squares approximation. We don't expect this to be especially close diff --git a/tests/algorithms/refinement/test_orientation_refinement.py b/tests/algorithms/refinement/test_orientation_refinement.py index dc6c79405c..361c553205 100644 --- a/tests/algorithms/refinement/test_orientation_refinement.py +++ b/tests/algorithms/refinement/test_orientation_refinement.py @@ -13,58 +13,76 @@ from __future__ import annotations import sys +from math import pi +from os.path import join + +from cctbx.sgtbx import space_group, space_group_symbols + +# Symmetry constrained parameterisation for the unit cell +from cctbx.uctbx import unit_cell +from dxtbx.format.FormatISISSXD import FormatISISSXD + +# We will set up a mock scan and a mock experiment list +from dxtbx.model import CrystalFactory, ScanFactory +from dxtbx.model.experiment_list import Experiment, ExperimentList +from libtbx.phil import parse +from libtbx.test_utils import approx_equal +from rstbx.symmetry.constraints.parameter_reduction import symmetrize_reduce_enlarge +from scitbx import matrix +from scitbx.array_family import flex + +from dials.algorithms.refinement.parameterisation.beam_parameters import ( + BeamParameterisation, +) +from dials.algorithms.refinement.parameterisation.crystal_parameters import ( + CrystalOrientationParameterisation, + CrystalUnitCellParameterisation, +) + +# Model parameterisations +from dials.algorithms.refinement.parameterisation.detector_parameters import ( + DetectorParameterisationHierarchical, + DetectorParameterisationSinglePanel, +) +from dials.algorithms.refinement.parameterisation.parameter_report import ( + ParameterReporter, +) + +# Parameterisation of the prediction equation +from dials.algorithms.refinement.parameterisation.prediction_parameters import ( + LauePredictionParameterisation, + XYPhiPredictionParameterisation, +) +from dials.algorithms.refinement.prediction.managed_predictors import ( + LaueExperimentsPredictor, + ScansExperimentsPredictor, + ScansRayPredictor, +) +from dials.algorithms.refinement.refiner import Refiner, RefinerFactory +from dials.algorithms.refinement.reflection_manager import ( + LaueReflectionManager, + ReflectionManager, +) + +# Imports for the target function +from dials.algorithms.refinement.target import ( + LaueLeastSquaresResidualWithRmsdCutoff, + LeastSquaresPositionalResidualWithRmsdCutoff, +) + +# Reflection prediction +from dials.algorithms.spot_prediction import ( + IndexGenerator, + LaueReflectionPredictor, + ray_intersection, +) +from 
dials.command_line.refine import phil_scope + +# Get modules to build models and minimiser using PHIL +from . import geometry_phil, minimiser_phil, setup_geometry, setup_minimiser def test(args=[]): - from math import pi - - from cctbx.sgtbx import space_group, space_group_symbols - - # Symmetry constrained parameterisation for the unit cell - from cctbx.uctbx import unit_cell - - # We will set up a mock scan and a mock experiment list - from dxtbx.model import ScanFactory - from dxtbx.model.experiment_list import Experiment, ExperimentList - from libtbx.phil import parse - from libtbx.test_utils import approx_equal - from rstbx.symmetry.constraints.parameter_reduction import symmetrize_reduce_enlarge - from scitbx import matrix - from scitbx.array_family import flex - - from dials.algorithms.refinement.parameterisation.beam_parameters import ( - BeamParameterisation, - ) - from dials.algorithms.refinement.parameterisation.crystal_parameters import ( - CrystalOrientationParameterisation, - CrystalUnitCellParameterisation, - ) - - # Model parameterisations - from dials.algorithms.refinement.parameterisation.detector_parameters import ( - DetectorParameterisationSinglePanel, - ) - - # Parameterisation of the prediction equation - from dials.algorithms.refinement.parameterisation.prediction_parameters import ( - XYPhiPredictionParameterisation, - ) - from dials.algorithms.refinement.prediction.managed_predictors import ( - ScansExperimentsPredictor, - ScansRayPredictor, - ) - from dials.algorithms.refinement.reflection_manager import ReflectionManager - - # Imports for the target function - from dials.algorithms.refinement.target import ( - LeastSquaresPositionalResidualWithRmsdCutoff, - ) - - # Reflection prediction - from dials.algorithms.spot_prediction import IndexGenerator, ray_intersection - - # Get modules to build models and minimiser using PHIL - from . 
import geometry_phil, minimiser_phil, setup_geometry, setup_minimiser ############################# # Setup experimental models # @@ -267,5 +285,204 @@ def test(args=[]): print(mycrystal) +def test_laue_refinement(dials_data): + fmt = FormatISISSXD( + join(dials_data("isis_sxd_example_data", pathlib=True), "sxd_nacl_run.nxs") + ) + beam = fmt.get_beam() + detector = fmt.get_detector() + goniometer = fmt.get_goniometer() + scan = fmt.get_scan() + crystal = CrystalFactory.from_dict( + { + "__id__": "crystal", + "real_space_a": ( + 0.5681647125795644, + -2.9735716012061135, + -2.707784412005687, + ), + "real_space_b": ( + -2.4994848902125884, + -2.3900344014694066, + 2.091613643314567, + ), + "real_space_c": ( + -1.2771711635863638, + 3.676428861690809, + -1.226011051463438, + ), + "space_group_hall_symbol": " P 1", + "B_covariance": ( + 2.618491627225783e-13, + -2.4190170785778272e-30, + 2.7961382012436816e-30, + 1.4283218313839273e-13, + 8.110824693143866e-15, + 2.7961382012436816e-30, + -1.922218398881239e-13, + -1.1641948761717081e-14, + 2.2832201114561855e-14, + -2.419017078577827e-30, + 1.3543505986455804e-44, + -8.081590630292518e-46, + -4.202632560757537e-29, + -5.437640708903305e-29, + -8.081590630292518e-46, + 3.330706229067803e-30, + 5.621471188408899e-29, + -6.599119546892406e-30, + 2.7961382012436816e-30, + -8.08159063029252e-46, + 9.550033948814972e-46, + 5.487666450546843e-30, + 2.7096475027184553e-30, + 9.550033948814972e-46, + -3.935814660390771e-30, + -3.889472044173952e-30, + 7.798194512461942e-30, + 1.428321831383927e-13, + -4.2026325607575364e-29, + 5.487666450546843e-30, + 7.789867544667339e-13, + 1.4101250207277487e-13, + 5.487666450546843e-30, + -2.0005409484272627e-13, + -2.021584892435437e-13, + 4.481019714719027e-14, + 8.110824693143867e-15, + -5.437640708903304e-29, + 2.7096475027184553e-30, + 1.4101250207277487e-13, + 2.5553690436147e-13, + 2.7096475027184553e-30, + -1.1167612085554417e-14, + -1.8848015530742402e-13, + 2.2125950964841596e-14, + 2.7961382012436816e-30, + -8.08159063029252e-46, + 9.550033948814972e-46, + 5.487666450546843e-30, + 2.7096475027184553e-30, + 9.550033948814972e-46, + -3.935814660390771e-30, + -3.889472044173952e-30, + 7.798194512461942e-30, + -1.922218398881239e-13, + 3.330706229067804e-30, + -3.93581466039077e-30, + -2.000540948427263e-13, + -1.1167612085554417e-14, + -3.93581466039077e-30, + 2.7092227778026175e-13, + 1.6029668235488112e-14, + -3.2138365634328507e-14, + -1.1641948761717081e-14, + 5.621471188408898e-29, + -3.889472044173952e-30, + -2.021584892435437e-13, + -1.88480155307424e-13, + -3.889472044173952e-30, + 1.6029668235488112e-14, + 2.7054780216756276e-13, + -3.175994945548343e-14, + 2.2832201114561858e-14, + -6.599119546892407e-30, + 7.79819451246194e-30, + 4.4810197147190265e-14, + 2.2125950964841592e-14, + 7.79819451246194e-30, + -3.2138365634328507e-14, + -3.175994945548343e-14, + 6.36770905528953e-14, + ), + } + ) + + experiments = ExperimentList() + experiments.append( + Experiment( + beam=beam, + detector=detector, + goniometer=goniometer, + scan=scan, + crystal=crystal, + imageset=None, + ) + ) + + det_param = DetectorParameterisationHierarchical(detector) + xlo_param = CrystalOrientationParameterisation(crystal) + xluc_param = CrystalUnitCellParameterisation(crystal) + + pred_param = LauePredictionParameterisation( + experiments, + detector_parameterisations=[det_param], + beam_parameterisations=[], + xl_orientation_parameterisations=[xlo_param], + xl_unit_cell_parameterisations=[xluc_param], + ) + + # shift 
detector by 0.2 mm each translation and 2 mrad each rotation + det_p_vals = det_param.get_param_vals() + p_vals = [a + b for a, b in zip(det_p_vals, [2.0, 2.0, 2.0, 2.0, 2.0, 2.0])] + det_param.set_param_vals(p_vals) + + # rotate crystal a bit (=2 mrad each rotation) + xlo_p_vals = xlo_param.get_param_vals() + p_vals = [a + b for a, b in zip(xlo_p_vals, [2.0, 2.0, 2.0])] + xlo_param.set_param_vals(p_vals) + + reflection_predictor = LaueReflectionPredictor(experiments[0], 1.0) + obs_refs = reflection_predictor.all_reflections_for_asu(0.0) + + # Set 'observed' centroids from the predicted ones + obs_refs["xyzobs.mm.value"] = obs_refs["xyzcal.mm"] + obs_refs["s0"] = obs_refs["s0_cal"] + obs_refs["wavelength"] = obs_refs["wavelength_cal"] + obs_refs["id"] = flex.int(len(obs_refs), 0) + + # Invent some variances for the centroid positions of the simulated data + px_size = detector[0].get_pixel_size() + var_x = flex.double(len(obs_refs), (px_size[0] / 2.0) ** 2) + var_y = flex.double(len(obs_refs), (px_size[1] / 2.0) ** 2) + var_z = flex.double(len(obs_refs), 0.0) + obs_refs["xyzobs.mm.variance"] = flex.vec3_double(var_x, var_y, var_z) + + # Undo known parameter shifts + det_param.set_param_vals(det_p_vals) + xlo_param.set_param_vals(xlo_p_vals) + + refman = LaueReflectionManager(obs_refs, experiments, outlier_detector=None) + + # Redefine the reflection predictor to use the type expected by the Target class + ref_predictor = LaueExperimentsPredictor(experiments) + + target = LaueLeastSquaresResidualWithRmsdCutoff( + experiments, ref_predictor, refman, pred_param, restraints_parameterisation=None + ) + + params = phil_scope.extract() + param_reporter = ParameterReporter( + pred_param.get_detector_parameterisations(), + pred_param.get_beam_parameterisations(), + pred_param.get_crystal_orientation_parameterisations(), + pred_param.get_crystal_unit_cell_parameterisations(), + pred_param.get_goniometer_parameterisations(), + ) + refinery = RefinerFactory.config_refinery(params, target, pred_param, None) + refiner = Refiner(experiments, pred_param, param_reporter, refman, target, refinery) + + print("Prior to refinement the experimental model is:") + print(detector) + print(crystal) + + refiner.run() + + print() + print("Refinement has completed with the following geometry:") + print(detector) + print(crystal) + + if __name__ == "__main__": test(sys.argv[1:]) diff --git a/tests/algorithms/refinement/test_prediction_parameters.py b/tests/algorithms/refinement/test_prediction_parameters.py index 91b9620c41..e319c7efcc 100644 --- a/tests/algorithms/refinement/test_prediction_parameters.py +++ b/tests/algorithms/refinement/test_prediction_parameters.py @@ -1,48 +1,52 @@ from __future__ import annotations import math +from os.path import join import pytest +from cctbx.sgtbx import space_group, space_group_symbols +from dxtbx.format.FormatISISSXD import FormatISISSXD +from dxtbx.model import CrystalFactory +from dxtbx.model.experiment_list import Experiment, ExperimentList +from libtbx.phil import parse +from scitbx.array_family import flex + +from dials.algorithms.refinement.parameterisation.beam_parameters import ( + BeamParameterisation, +) +from dials.algorithms.refinement.parameterisation.crystal_parameters import ( + CrystalOrientationParameterisation, + CrystalUnitCellParameterisation, +) +from dials.algorithms.refinement.parameterisation.detector_parameters import ( + DetectorParameterisationHierarchical, + DetectorParameterisationSinglePanel, +) +from 
dials.algorithms.refinement.parameterisation.goniometer_parameters import ( + GoniometerParameterisation, +) +from dials.algorithms.refinement.parameterisation.prediction_parameters import ( + LauePredictionParameterisation, + XYPhiPredictionParameterisation, +) +from dials.algorithms.refinement.prediction.managed_predictors import ( + LaueExperimentsPredictor, + ScansExperimentsPredictor, + ScansRayPredictor, +) +from dials.algorithms.refinement.reflection_manager import LaueReflectionManager +from dials.algorithms.spot_prediction import ( + IndexGenerator, + LaueReflectionPredictor, + ray_intersection, +) + from . import geometry_phil +from .setup_geometry import Extract def test(): - from cctbx.sgtbx import space_group, space_group_symbols - from dxtbx.model.experiment_list import Experiment, ExperimentList - from libtbx.phil import parse - from scitbx.array_family import flex - - from dials.algorithms.refinement.parameterisation.beam_parameters import ( - BeamParameterisation, - ) - from dials.algorithms.refinement.parameterisation.crystal_parameters import ( - CrystalOrientationParameterisation, - CrystalUnitCellParameterisation, - ) - from dials.algorithms.refinement.parameterisation.detector_parameters import ( - DetectorParameterisationSinglePanel, - ) - from dials.algorithms.refinement.parameterisation.goniometer_parameters import ( - GoniometerParameterisation, - ) - - #### Import model parameterisations - from dials.algorithms.refinement.parameterisation.prediction_parameters import ( - XYPhiPredictionParameterisation, - ) - from dials.algorithms.refinement.prediction.managed_predictors import ( - ScansExperimentsPredictor, - ScansRayPredictor, - ) - - ##### Imports for reflection prediction - from dials.algorithms.spot_prediction import IndexGenerator, ray_intersection - - ##### Import model builder - from .setup_geometry import Extract - - #### Create models overrides = """geometry.parameters.crystal.a.length.range = 10 50 geometry.parameters.crystal.b.length.range = 10 50 @@ -186,3 +190,212 @@ def test(): # return to the initial state pred_param.set_param_vals(p_vals) + + +def test_laue_prediction_parameters(dials_data): + fmt = FormatISISSXD( + join(dials_data("isis_sxd_example_data", pathlib=True), "sxd_nacl_run.nxs") + ) + beam = fmt.get_beam() + detector = fmt.get_detector() + goniometer = fmt.get_goniometer() + scan = fmt.get_scan() + crystal = CrystalFactory.from_dict( + { + "__id__": "crystal", + "real_space_a": ( + 0.5681647125795644, + -2.9735716012061135, + -2.707784412005687, + ), + "real_space_b": ( + -2.4994848902125884, + -2.3900344014694066, + 2.091613643314567, + ), + "real_space_c": ( + -1.2771711635863638, + 3.676428861690809, + -1.226011051463438, + ), + "space_group_hall_symbol": " P 1", + "B_covariance": ( + 2.618491627225783e-13, + -2.4190170785778272e-30, + 2.7961382012436816e-30, + 1.4283218313839273e-13, + 8.110824693143866e-15, + 2.7961382012436816e-30, + -1.922218398881239e-13, + -1.1641948761717081e-14, + 2.2832201114561855e-14, + -2.419017078577827e-30, + 1.3543505986455804e-44, + -8.081590630292518e-46, + -4.202632560757537e-29, + -5.437640708903305e-29, + -8.081590630292518e-46, + 3.330706229067803e-30, + 5.621471188408899e-29, + -6.599119546892406e-30, + 2.7961382012436816e-30, + -8.08159063029252e-46, + 9.550033948814972e-46, + 5.487666450546843e-30, + 2.7096475027184553e-30, + 9.550033948814972e-46, + -3.935814660390771e-30, + -3.889472044173952e-30, + 7.798194512461942e-30, + 1.428321831383927e-13, + -4.2026325607575364e-29, + 
5.487666450546843e-30, + 7.789867544667339e-13, + 1.4101250207277487e-13, + 5.487666450546843e-30, + -2.0005409484272627e-13, + -2.021584892435437e-13, + 4.481019714719027e-14, + 8.110824693143867e-15, + -5.437640708903304e-29, + 2.7096475027184553e-30, + 1.4101250207277487e-13, + 2.5553690436147e-13, + 2.7096475027184553e-30, + -1.1167612085554417e-14, + -1.8848015530742402e-13, + 2.2125950964841596e-14, + 2.7961382012436816e-30, + -8.08159063029252e-46, + 9.550033948814972e-46, + 5.487666450546843e-30, + 2.7096475027184553e-30, + 9.550033948814972e-46, + -3.935814660390771e-30, + -3.889472044173952e-30, + 7.798194512461942e-30, + -1.922218398881239e-13, + 3.330706229067804e-30, + -3.93581466039077e-30, + -2.000540948427263e-13, + -1.1167612085554417e-14, + -3.93581466039077e-30, + 2.7092227778026175e-13, + 1.6029668235488112e-14, + -3.2138365634328507e-14, + -1.1641948761717081e-14, + 5.621471188408898e-29, + -3.889472044173952e-30, + -2.021584892435437e-13, + -1.88480155307424e-13, + -3.889472044173952e-30, + 1.6029668235488112e-14, + 2.7054780216756276e-13, + -3.175994945548343e-14, + 2.2832201114561858e-14, + -6.599119546892407e-30, + 7.79819451246194e-30, + 4.4810197147190265e-14, + 2.2125950964841592e-14, + 7.79819451246194e-30, + -3.2138365634328507e-14, + -3.175994945548343e-14, + 6.36770905528953e-14, + ), + } + ) + + experiments = ExperimentList() + experiments.append( + Experiment( + beam=beam, + detector=detector, + goniometer=goniometer, + scan=scan, + crystal=crystal, + imageset=None, + ) + ) + + det_param = DetectorParameterisationHierarchical(detector) + xlo_param = CrystalOrientationParameterisation(crystal) + xluc_param = CrystalUnitCellParameterisation(crystal) + + pred_param = LauePredictionParameterisation( + experiments, + detector_parameterisations=[det_param], + beam_parameterisations=[], + xl_orientation_parameterisations=[xlo_param], + xl_unit_cell_parameterisations=[xluc_param], + ) + + reflection_predictor = LaueReflectionPredictor(experiments[0], 1.0) + obs_refs = reflection_predictor.all_reflections_for_asu(0.0) + + # Set 'observed' centroids from the predicted ones + obs_refs["xyzobs.mm.value"] = obs_refs["xyzcal.mm"] + obs_refs["s0"] = obs_refs["s0_cal"] + obs_refs["wavelength"] = obs_refs["wavelength_cal"] + obs_refs["id"] = flex.int(len(obs_refs), 0) + + # Invent some variances for the centroid positions of the simulated data + px_size = detector[0].get_pixel_size() + var_x = flex.double(len(obs_refs), (px_size[0] / 2.0) ** 2) + var_y = flex.double(len(obs_refs), (px_size[1] / 2.0) ** 2) + var_z = flex.double(len(obs_refs), 0.0) + obs_refs["xyzobs.mm.variance"] = flex.vec3_double(var_x, var_y, var_z) + + # use a ReflectionManager to exclude reflections too close to the spindle + + refman = LaueReflectionManager(obs_refs, experiments, outlier_detector=None) + refman.finalise() + + # Redefine the reflection predictor to use the type expected by the Target class + ref_predictor = LaueExperimentsPredictor(experiments) + + # keep only those reflections that pass inclusion criteria and have predictions + reflections = refman.get_matches() + + # get analytical gradients + an_grads = pred_param.get_gradients(reflections) + + # get finite difference gradients + p_vals = pred_param.get_param_vals() + deltas = [1.0e-7] * len(p_vals) + + for i, delta in enumerate(deltas): + val = p_vals[i] + + p_vals[i] -= delta / 2.0 + pred_param.set_param_vals(p_vals) + + ref_predictor(reflections) + + rev_state = reflections["xyzcal.mm"].deep_copy() + rev_wavelengths = 
reflections["wavelength_cal"].deep_copy() + + p_vals[i] += delta + pred_param.set_param_vals(p_vals) + + ref_predictor(reflections) + + fwd_state = reflections["xyzcal.mm"].deep_copy() + fwd_wavelengths = reflections["wavelength_cal"].deep_copy() + p_vals[i] = val + + fd = fwd_state - rev_state + wavelength_grads = fwd_wavelengths - rev_wavelengths + x_grads, y_grads, _ = fd.parts() + x_grads /= delta + y_grads /= delta + wavelength_grads /= delta + + # compare with analytical calculation + assert x_grads == pytest.approx(an_grads[i]["dX_dp"], abs=5.0e-5) + assert y_grads == pytest.approx(an_grads[i]["dY_dp"], abs=5.0e-5) + assert wavelength_grads == pytest.approx( + an_grads[i]["dwavelength_dp"], abs=5.0e-7 + ) + + # return to the initial state + pred_param.set_param_vals(p_vals) From 97e2b7327236666bd025bbbbc98eb383683fce17 Mon Sep 17 00:00:00 2001 From: Amy Thompson <52806925+amyjaynethompson@users.noreply.github.com> Date: Tue, 25 Jun 2024 09:45:15 +0100 Subject: [PATCH 10/40] Data id fix (#2681) fix data ids output by json file --- newsfragments/2681.bugfix | 1 + src/dials/algorithms/correlation/analysis.py | 3 +- tests/algorithms/correlation/test_analysis.py | 47 ++++++++++++++++++- 3 files changed, 49 insertions(+), 2 deletions(-) create mode 100644 newsfragments/2681.bugfix diff --git a/newsfragments/2681.bugfix b/newsfragments/2681.bugfix new file mode 100644 index 0000000000..4b113a304a --- /dev/null +++ b/newsfragments/2681.bugfix @@ -0,0 +1 @@ +``dials.correlation_matrix``: Correctly select datasets for output json after filtering when used by multiplex. diff --git a/src/dials/algorithms/correlation/analysis.py b/src/dials/algorithms/correlation/analysis.py index cf41589e0e..5a50e33f29 100644 --- a/src/dials/algorithms/correlation/analysis.py +++ b/src/dials/algorithms/correlation/analysis.py @@ -377,7 +377,8 @@ def convert_to_importable_json(self, linkage_matrix: np.ndarray) -> OrderedDict: linkage_mat_as_dict = linkage_matrix_to_dict(linkage_matrix) for d in linkage_mat_as_dict.values(): # Difference in indexing between linkage_mat_as_dict and datasets, so have i-1 - d["datasets"] = [self.ids_to_identifiers_map[i - 1] for i in d["datasets"]] + real_num = [self.labels[i - 1] for i in d["datasets"]] + d["datasets"] = [self.ids_to_identifiers_map[i] for i in real_num] return linkage_mat_as_dict diff --git a/tests/algorithms/correlation/test_analysis.py b/tests/algorithms/correlation/test_analysis.py index 3f0f2ec9c4..a79944a30b 100644 --- a/tests/algorithms/correlation/test_analysis.py +++ b/tests/algorithms/correlation/test_analysis.py @@ -1,7 +1,10 @@ from __future__ import annotations +import json import pathlib +import pytest + from dials.algorithms.correlation.analysis import CorrelationMatrix from dials.command_line.correlation_matrix import phil_scope from dials.util.multi_dataset_handling import ( @@ -11,7 +14,8 @@ from dials.util.options import ArgumentParser, reflections_and_experiments_from_files -def test_corr_mat(dials_data, run_in_tmp_path): +@pytest.fixture() +def proteinase_k(dials_data): mcp = dials_data("vmxi_proteinase_k_sweeps", pathlib=True) params = phil_scope.extract() input_data = [] @@ -41,7 +45,48 @@ def test_corr_mat(dials_data, run_in_tmp_path): assert len(experiments) == len(reflections) assert len(experiments) > 1 experiments, reflections = assign_unique_identifiers(experiments, reflections) + yield experiments, reflections, params + + +def test_corr_mat(proteinase_k, run_in_tmp_path): + experiments, reflections, params = proteinase_k matrices = 
CorrelationMatrix(experiments, reflections, params) matrices.calculate_matrices() matrices.output_json() assert pathlib.Path("dials.correlation_matrix.json").is_file() + + +def test_filtered_corr_mat(proteinase_k, run_in_tmp_path): + experiments, reflections, params = proteinase_k + ids_to_identifiers_map = {} + for table in reflections: + ids_to_identifiers_map.update(table.experiment_identifiers()) + + # Simulate filtered dataset by multiplex + id_to_remove = [ids_to_identifiers_map[2]] + ids_to_identifiers_map.pop(2) + reflections.pop(2) + experiments.remove_on_experiment_identifiers(id_to_remove) + + matrices = CorrelationMatrix( + experiments, reflections, params, ids_to_identifiers_map + ) + matrices.calculate_matrices() + matrices.output_json() + assert pathlib.Path("dials.correlation_matrix.json").is_file() + + expected_ids = [[1, 3], [0, 1, 3]] + + # Check main algorithm correct with filtering + for i, j in zip(matrices.correlation_clusters, expected_ids): + assert i.labels == j + + # Check json output also correct + with open(pathlib.Path("dials.correlation_matrix.json")) as f: + data = json.load(f) + + assert len(data["correlation_matrix_clustering"]) == len(expected_ids) + for i, j in zip(data["correlation_matrix_clustering"], expected_ids): + assert len(data["correlation_matrix_clustering"][i]["datasets"]) == len(j) + for a, e in zip(data["correlation_matrix_clustering"][i]["datasets"], j): + assert a == ids_to_identifiers_map[e] From 263e099f40cd07822e9ebeab780ce7d7effd2b6c Mon Sep 17 00:00:00 2001 From: David Waterman Date: Thu, 4 Jul 2024 09:29:16 +0100 Subject: [PATCH 11/40] In the scans indexer, do not convert mm to px when `indexing.refinement_protocol.mode==None` (#2687) * Check required column is present before converting mm to px. This is what is done by the StillsIndexer when refinement_protocol.mode is None, so do the same here. --- newsfragments/2687.bugfix | 2 ++ src/dials/algorithms/indexing/indexer.py | 3 ++- 2 files changed, 4 insertions(+), 1 deletion(-) create mode 100644 newsfragments/2687.bugfix diff --git a/newsfragments/2687.bugfix b/newsfragments/2687.bugfix new file mode 100644 index 0000000000..bdd4866ab6 --- /dev/null +++ b/newsfragments/2687.bugfix @@ -0,0 +1,2 @@ +``dials.index``: Avoid mm to px conversion when the ``refinement_protocol`` +is set to do no refinement, as the required data are not available. 
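For context on the fix below: DIALS reflection tables support membership tests on column names, so the indexer can simply skip the mm-to-px conversion when refinement never produced an "xyzcal.mm" column. A minimal sketch of the guard pattern (toy data, assuming a DIALS installation):

    from dials.array_family import flex

    refl = flex.reflection_table()
    refl["xyzobs.px.value"] = flex.vec3_double([(1.0, 2.0, 3.0)])

    # With refinement_protocol.mode=None the refiner adds no "xyzcal.mm" column,
    # so any step that depends on it must be guarded:
    if "xyzcal.mm" in refl:
        pass  # mm -> px conversion would go here
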
diff --git a/src/dials/algorithms/indexing/indexer.py b/src/dials/algorithms/indexing/indexer.py
index 219963e7ae..d551bc472e 100644
--- a/src/dials/algorithms/indexing/indexer.py
+++ b/src/dials/algorithms/indexing/indexer.py
@@ -783,7 +783,8 @@ def index(self):
                     rotation_matrix_differences(self.refined_experiments.crystals())
                 )
 
-            self._xyzcal_mm_to_px(self.refined_experiments, self.refined_reflections)
+            if "xyzcal.mm" in self.refined_reflections:
+                self._xyzcal_mm_to_px(self.refined_experiments, self.refined_reflections)
 
     def _unit_cell_volume_sanity_check(self, original_experiments, refined_experiments):
         # sanity check for unrealistic unit cell volume increase during refinement

From 1848e040e6c715aee099cdb6648c2abd1456af6d Mon Sep 17 00:00:00 2001
From: Nicholas Devenish
Date: Thu, 11 Jul 2024 09:53:51 +0100
Subject: [PATCH 12/40] Increase image_viewer ring tool maximum size (#2698)

Fixes #2697
---
 newsfragments/2697.bugfix                             | 1 +
 src/dials/util/image_viewer/slip_viewer/ring_frame.py | 2 +-
 2 files changed, 2 insertions(+), 1 deletion(-)
 create mode 100644 newsfragments/2697.bugfix

diff --git a/newsfragments/2697.bugfix b/newsfragments/2697.bugfix
new file mode 100644
index 0000000000..878a91204f
--- /dev/null
+++ b/newsfragments/2697.bugfix
@@ -0,0 +1 @@
+``dials.image_viewer``: Increase the maximum radius for the ring tool. The previous limit was too small for new detectors.
diff --git a/src/dials/util/image_viewer/slip_viewer/ring_frame.py b/src/dials/util/image_viewer/slip_viewer/ring_frame.py
index 8cf326b8a4..b2b83d519a 100644
--- a/src/dials/util/image_viewer/slip_viewer/ring_frame.py
+++ b/src/dials/util/image_viewer/slip_viewer/ring_frame.py
@@ -59,7 +59,7 @@ def __init__(self, *args, **kwds):
         # have a non-linear slider.
         self._radius = 100
         self._center = [0, 0]
-        radius_max = 2000
+        radius_max = 3000
         radius_min = 10
 
         # Radius controls.

From 153f5a70a5aaaf1225781228c06f6922e2f8ffd8 Mon Sep 17 00:00:00 2001
From: James Beilsten-Edmands <30625594+jbeilstenedmands@users.noreply.github.com>
Date: Fri, 12 Jul 2024 16:11:08 +0200
Subject: [PATCH 13/40] Fix potential crash in indexing max cell estimation if all spots are at ice-ring resolution (#2699)

---
 newsfragments/2699.bugfix                 | 1 +
 src/dials/algorithms/indexing/max_cell.py | 4 ++++
 2 files changed, 5 insertions(+)
 create mode 100644 newsfragments/2699.bugfix

diff --git a/newsfragments/2699.bugfix b/newsfragments/2699.bugfix
new file mode 100644
index 0000000000..0bb10f4497
--- /dev/null
+++ b/newsfragments/2699.bugfix
@@ -0,0 +1 @@
+``dials.index``: Fix potential crash in max_cell estimation when all spots are at ice-ring resolution
diff --git a/src/dials/algorithms/indexing/max_cell.py b/src/dials/algorithms/indexing/max_cell.py
index dccb542abf..86c56c1087 100644
--- a/src/dials/algorithms/indexing/max_cell.py
+++ b/src/dials/algorithms/indexing/max_cell.py
@@ -33,6 +33,10 @@ def find_max_cell(
         logger.debug(
             "Rejecting %i reflections at ice ring resolution", ice_sel.count(True)
         )
+        if not reflections.size():
+            raise DialsIndexError(
+                "No spots left for max cell analysis after ice-ring filtering"
+            )
 
     # need bounding box in reflections to find overlaps; this is not there if
     # spots are from XDS (for example)

From b3ed4609c8a340f67de8b887b15d971e97af3934 Mon Sep 17 00:00:00 2001
From: David McDonagh <60879630+toastisme@users.noreply.github.com>
Date: Mon, 22 Jul 2024 09:44:28 +0100
Subject: [PATCH 14/40] Add additional reflection properties to save after indexing. (#2702)

* Add additional reflection properties to save after indexing/refinement.
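This patch boils down to a copy-what-exists loop, since different experiment types predict different columns (rotation data have no "wavelength_cal"; time-of-flight data add "tof_cal"). A standalone sketch of the pattern, with plain dicts standing in for the real reflection tables:

    # Hypothetical stand-ins for the predicted and indexed reflection tables:
    predicted = {"xyzcal.mm": [(0.1, 0.2, 0.3)], "entering": [True], "wavelength_cal": [1.04]}
    reflections = {}

    for name in ["xyzcal.mm", "entering", "wavelength_cal", "s0_cal", "tof_cal"]:
        if name in predicted:  # copy only what this predictor actually produced
            reflections[name] = predicted[name]
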
--- newsfragments/2702.misc | 1 + src/dials/algorithms/indexing/indexer.py | 16 ++++++++++++++-- 2 files changed, 15 insertions(+), 2 deletions(-) create mode 100644 newsfragments/2702.misc diff --git a/newsfragments/2702.misc b/newsfragments/2702.misc new file mode 100644 index 0000000000..a415426835 --- /dev/null +++ b/newsfragments/2702.misc @@ -0,0 +1 @@ +Add additional reflection properties to save after indexing. diff --git a/src/dials/algorithms/indexing/indexer.py b/src/dials/algorithms/indexing/indexer.py index d551bc472e..13354e9c1f 100644 --- a/src/dials/algorithms/indexing/indexer.py +++ b/src/dials/algorithms/indexing/indexer.py @@ -995,12 +995,24 @@ def index_reflections(self, experiments, reflections): def refine(self, experiments, reflections): from dials.algorithms.indexing.refinement import refine + properties_to_save = [ + "xyzcal.mm", + "entering", + "wavelength_cal", + "s0_cal", + "tof_cal", + ] + refiner, refined, outliers = refine(self.all_params, reflections, experiments) if outliers is not None: reflections["id"].set_selected(outliers, -1) + predicted = refiner.predict_for_indexed() - reflections["xyzcal.mm"] = predicted["xyzcal.mm"] - reflections["entering"] = predicted["entering"] + + for i in properties_to_save: + if i in predicted: + reflections[i] = predicted[i] + reflections.unset_flags( flex.bool(len(reflections), True), reflections.flags.centroid_outlier ) From b9d3fd4ccd4a4fed2bf58a2695be6c5ada6b7d77 Mon Sep 17 00:00:00 2001 From: David Waterman Date: Tue, 30 Jul 2024 09:54:15 +0100 Subject: [PATCH 15/40] dials.symmetry: allow free selection of `significance_level` in the range [0,1] (#2696) * Change significance_level to be a float in [0,1] --- newsfragments/2696.feature | 1 + src/dials/algorithms/symmetry/absences/screw_axes.py | 5 ++--- src/dials/command_line/symmetry.py | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) create mode 100644 newsfragments/2696.feature diff --git a/newsfragments/2696.feature b/newsfragments/2696.feature new file mode 100644 index 0000000000..7d85003dd8 --- /dev/null +++ b/newsfragments/2696.feature @@ -0,0 +1 @@ +``dials.symmetry``: allow free selection of ``significance_level`` in the range [0,1] diff --git a/src/dials/algorithms/symmetry/absences/screw_axes.py b/src/dials/algorithms/symmetry/absences/screw_axes.py index 58010d2c8e..77aed0cee6 100644 --- a/src/dials/algorithms/symmetry/absences/screw_axes.py +++ b/src/dials/algorithms/symmetry/absences/screw_axes.py @@ -209,7 +209,7 @@ def score_axis_fourier(self, reflection_table, significance_level=0.95): def score_axis_direct(self, reflection_table, significance_level=0.95): """Score the axis given a reflection table of data.""" - assert significance_level in [0.95, 0.975, 0.99] + self.get_all_suitable_reflections(reflection_table) expected_sel = self.miller_axis_vals.iround() % self.axis_repeat == 0 @@ -241,8 +241,7 @@ def score_axis_direct(self, reflection_table, significance_level=0.95): # sanity check - is most of intensity in 'expected' channel? 
intensity_test = self.mean_I_sigma > (20.0 * self.mean_I_sigma_abs) - cutoffs = {0.95: 1.645, 0.975: 1.960, 0.99: 2.326} - cutoff = cutoffs[significance_level] + cutoff = norm.ppf(significance_level) if z_score_absent > cutoff and not intensity_test: # z > 1.65 in only 5% of cases for normal dist diff --git a/src/dials/command_line/symmetry.py b/src/dials/command_line/symmetry.py index e593258f45..9485f27f10 100644 --- a/src/dials/command_line/symmetry.py +++ b/src/dials/command_line/symmetry.py @@ -105,8 +105,8 @@ .help = "Use fourier analysis or direct analysis of I/sigma to determine" "likelihood of systematic absences" - significance_level = *0.95 0.975 0.99 - .type = choice + significance_level = 0.95 + .type = float(value_min=0, value_max=1) .help = "Significance to use when testing whether axial reflections are " "different to zero (absences and reflections in reflecting condition)." From b6bd34850364e7f897c60d2e5be9039f0789eb99 Mon Sep 17 00:00:00 2001 From: David Waterman Date: Wed, 31 Jul 2024 15:37:15 +0100 Subject: [PATCH 16/40] Updates for the small molecule tutorial (#2692) * Fix error in text - wrong viewer * Add note about dials.symmetry catering more for MX * Use dials.export format=shelx --- .../tutorials/small_molecule_tutorial.rst | 15 ++++++--------- newsfragments/2692.bugfix | 1 + 2 files changed, 7 insertions(+), 9 deletions(-) create mode 100644 newsfragments/2692.bugfix diff --git a/doc/sphinx/documentation/tutorials/small_molecule_tutorial.rst b/doc/sphinx/documentation/tutorials/small_molecule_tutorial.rst index a3d58298e3..76a4f8eaa1 100644 --- a/doc/sphinx/documentation/tutorials/small_molecule_tutorial.rst +++ b/doc/sphinx/documentation/tutorials/small_molecule_tutorial.rst @@ -117,7 +117,7 @@ which will integrate each sweep in sequence, again using all available cores. Af .. code-block:: bash - dials.reciprocal_lattice_viewer integrated.refl integrated.expt + dials.image_viewer integrated.refl integrated.expt @@ -132,6 +132,8 @@ So far the data were processed with a triclinic unit cell, which is usually OK b This will look at the shape of the unit cell and determine the maximum possible symmetry based on the cell parameters, with some tolerance. Each of the possible symmetry operations will be individually tested and scored, and those operations identified as being present will be composed into the point group to be assigned to the data. An attempt is then made to estimate the space group from the presence or absence of axial reflections: this is rather less reliable than the point group determination but also less important for the scaling. After the point group has been determined the reflections will be reindexed automatically to match the correct setting, ensuring that the data are correctly prepared for scaling. +.. note:: ``dials.symmetry`` will only suggest one of the 65 Sohncke space groups relevant for chiral molecules. It will not detect mirrors or glide planes. + Scaling ------- @@ -225,19 +227,14 @@ However these may be useful in later structure refinement. Exporting --------- -The output data are by default saved in the standard DIALS reflection format, which is not particularly useful. In MX, a standard format is MTZ which includes the unit cell and symmetry information with the reflection data. This is created with +The output data are by default saved in the standard DIALS reflection format, which is not particularly useful. DIALS is able to convert this to SHELX format though. This can be done by .. 
code-block:: bash - dials.export scaled.refl scaled.expt - -And there is a useful "jiffy" included with xia2 to convert this to SHELX format and generate .ins and .hkl files for structure solution and refinement viz: - -.. code-block:: bash + dials.export scaled.refl scaled.expt format=shelx shelx.ins=lcys.ins shelx.hklout=lcys.hkl composition=CHNOS - xia2.to_shelx scaled.mtz lcys CHNOS -Such that you can then run +So that you can then run .. code-block:: bash diff --git a/newsfragments/2692.bugfix b/newsfragments/2692.bugfix new file mode 100644 index 0000000000..d392a22846 --- /dev/null +++ b/newsfragments/2692.bugfix @@ -0,0 +1 @@ +Improvements to the small molecule tutorial. From 7cf56ea6cd8cb252e140b3b44e37f64c154b0ca9 Mon Sep 17 00:00:00 2001 From: James Beilsten-Edmands <30625594+jbeilstenedmands@users.noreply.github.com> Date: Fri, 2 Aug 2024 09:46:33 +0100 Subject: [PATCH 17/40] Updates to mmcif export (#2709) For mmcif export, add a few extra items so that the output can be understood with gemmi (and sortmtz), and make sure it works for still data too. --- newsfragments/2709.feature | 1 + src/dials/command_line/export.py | 8 ++++ src/dials/util/export_mmcif.py | 52 +++++++++++++++++++++--- tests/command_line/test_export.py | 11 +++++ tests/command_line/test_ssx_reduction.py | 19 +++++++++ 5 files changed, 86 insertions(+), 5 deletions(-) create mode 100644 newsfragments/2709.feature diff --git a/newsfragments/2709.feature b/newsfragments/2709.feature new file mode 100644 index 0000000000..6af2c983c7 --- /dev/null +++ b/newsfragments/2709.feature @@ -0,0 +1 @@ +``dials.export``: Add support for exporting still data in mmcif format, that can be understood with gemmi diff --git a/src/dials/command_line/export.py b/src/dials/command_line/export.py index 3eecb8ca2e..a7668cc28c 100644 --- a/src/dials/command_line/export.py +++ b/src/dials/command_line/export.py @@ -220,6 +220,14 @@ "mmcif file should comply with. v5_next adds support for" "recording unmerged data as well as additional scan metadata" "and statistics, however writing can be slow for large datasets." + scale = True + .type = bool + .help = "If True, apply a scale such that the minimum intensity is greater" + "than (less negative than) the mmcif.min_scale value below." + min_scale = -999999.0 + .type = float + .help = "If mmcif.scale is True, scale all negative intensities such that" + "they are less negative than this value." } mosflm { diff --git a/src/dials/util/export_mmcif.py b/src/dials/util/export_mmcif.py index bf846c1b6a..ea66e11a1a 100644 --- a/src/dials/util/export_mmcif.py +++ b/src/dials/util/export_mmcif.py @@ -17,6 +17,7 @@ from scitbx.array_family import flex import dials.util.version +from dials.algorithms.symmetry import median_unit_cell from dials.util.filter_reflections import filter_reflection_table logger = logging.getLogger(__name__) @@ -89,7 +90,12 @@ def write(self, experiments, reflections): def make_cif_block(self, experiments, reflections): """Write the data to a cif block""" # Select reflections - selection = reflections.get_flags(reflections.flags.integrated, all=True) + # if rotation, get reflections integrated by both integration methods + # else if stills, only summation integrated reflections are available. 
+ if all(e.scan and e.scan.get_oscillation()[1] != 0.0 for e in experiments): + selection = reflections.get_flags(reflections.flags.integrated, all=True) + else: + selection = reflections.get_flags(reflections.flags.integrated, all=False) reflections = reflections.select(selection) # Filter out bad variances and other issues, but don't filter on ice rings @@ -210,7 +216,8 @@ def make_cif_block(self, experiments, reflections): epochs = [] for exp in experiments: wls.append(round(exp.beam.get_wavelength(), 5)) - epochs.append(exp.scan.get_epochs()[0]) + if exp.scan: + epochs.append(exp.scan.get_epochs()[0]) unique_wls = set(wls) cif_block["_exptl_crystal.id"] = 1 # links to crystal_id cif_block["_diffrn.id"] = 1 # links to diffrn_id @@ -225,10 +232,34 @@ def make_cif_block(self, experiments, reflections): # _diffrn_detector.pdbx_collection_date = (Date of collection yyyy-mm-dd) # _diffrn_detector.type = (full name of detector e.g. DECTRIS PILATUS3 2M) # One date is required, so if multiple just use the first date. - min_epoch = min(epochs) - date_str = time.strftime("%Y-%m-%d", time.gmtime(min_epoch)) cif_block["_diffrn_detector.diffrn_id"] = 1 - cif_block["_diffrn_detector.pdbx_collection_date"] = date_str + if epochs: # some still expts have scans, but some don't + min_epoch = min(epochs) + date_str = time.strftime("%Y-%m-%d", time.gmtime(min_epoch)) + cif_block["_diffrn_detector.pdbx_collection_date"] = date_str + + # add some symmetry information + sginfo = experiments[0].crystal.get_space_group().info() + symbol = sginfo.type().universal_hermann_mauguin_symbol() + number = sginfo.type().number() + symmetry_block = iotbx.cif.model.block() + symmetry_block["_symmetry.entry_id"] = "DIALS" + symmetry_block["_symmetry.space_group_name_H-M"] = symbol + symmetry_block["_symmetry.Int_Tables_number"] = number + cif_block.update(symmetry_block) + + # add a loop with cell values (median if multi-crystal) + median_cell = median_unit_cell(experiments) + a, b, c, al, be, ga = median_cell.parameters() + cell_block = iotbx.cif.model.block() + cell_block["_cell.entry_id"] = "DIALS" + cell_block["_cell.length_a"] = f"{a:.4f}" + cell_block["_cell.length_b"] = f"{b:.4f}" + cell_block["_cell.length_c"] = f"{c:.4f}" + cell_block["_cell.angle_alpha"] = f"{al:.4f}" + cell_block["_cell.angle_beta"] = f"{be:.4f}" + cell_block["_cell.angle_gamma"] = f"{ga:.4f}" + cif_block.update(cell_block) # Write reflection data # Required columns @@ -315,6 +346,17 @@ def make_cif_block(self, experiments, reflections): reflections["angle"] = reflections["xyzcal.mm"].parts()[2] * RAD2DEG variables_present.extend(["angle"]) + if self.params.mmcif.scale and "intensity.scale.value" in reflections: + min_val = min(reflections["intensity.scale.value"]) + if min_val <= self.params.mmcif.min_scale: + # reduce the range of data, for e.g. 
sortmtz analysis + divisor = abs(min_val) / abs(self.params.mmcif.min_scale) + n = len(str(divisor).split(".")[0]) + divisor = float("1" + int(n) * "0") + reflections["intensity.scale.value"] /= divisor + reflections["intensity.scale.sigma"] /= divisor + reflections["scales"] /= divisor + if self.params.mmcif.pdb_version == "v5_next": if "partiality" in reflections: variables_present.extend(["partiality"]) diff --git a/tests/command_line/test_export.py b/tests/command_line/test_export.py index 51f0c9a78a..2e18096a0d 100644 --- a/tests/command_line/test_export.py +++ b/tests/command_line/test_export.py @@ -319,6 +319,17 @@ def test_mmcif_on_scaled_data(dials_data, tmp_path, pdb_version): model = iotbx.cif.reader(file_path=str(tmp_path / "scaled.mmcif")).model() if pdb_version == "v5": assert "_pdbx_diffrn_data_section.id" not in model["dials"].keys() + # check that gemmi can understand the output + cmd = [ + shutil.which("gemmi"), + "cif2mtz", + tmp_path / "scaled.mmcif", + tmp_path / "test.mtz", + ] + result = subprocess.run(cmd, cwd=tmp_path, capture_output=True) + assert not result.returncode and not result.stderr + assert (tmp_path / "test.mtz").is_file() + elif pdb_version == "v5_next": assert "_pdbx_diffrn_data_section.id" in model["dials"].keys() diff --git a/tests/command_line/test_ssx_reduction.py b/tests/command_line/test_ssx_reduction.py index 09599afdea..3c9a83ad50 100644 --- a/tests/command_line/test_ssx_reduction.py +++ b/tests/command_line/test_ssx_reduction.py @@ -68,6 +68,25 @@ def test_ssx_reduction(dials_data, tmp_path): assert not result.returncode and not result.stderr assert (tmp_path / "scaled.mtz").is_file() + # now try running mmcif export + result = subprocess.run( + [shutil.which("dials.export"), scale_expts, scale_refls, "format=mmcif"], + cwd=tmp_path, + capture_output=True, + ) + assert not result.returncode and not result.stderr + assert (tmp_path / "scaled.cif").is_file() + # check that gemmi can understand the output cif + cmd = [ + shutil.which("gemmi"), + "cif2mtz", + tmp_path / "scaled.cif", + tmp_path / "test.mtz", + ] + result = subprocess.run(cmd, cwd=tmp_path, capture_output=True) + assert not result.returncode and not result.stderr + assert (tmp_path / "test.mtz").is_file() + result = subprocess.run( [shutil.which("dials.merge"), scale_expts, scale_refls], cwd=tmp_path, From 769dab72b7257bf8b945b3857674998d7ed86e64 Mon Sep 17 00:00:00 2001 From: David Waterman Date: Tue, 6 Aug 2024 09:39:47 +0100 Subject: [PATCH 18/40] Convert merged MTZ export to use gemmi (#2700) Add a gemmi-fied merged MTZ creator, called MergedMTZCreator, and deprecate the old MTZWriterBase version. 
--------- Co-authored-by: Daniel Tchon --- newsfragments/2700.bugfix | 2 + src/dials/algorithms/merging/merge.py | 279 ++++++++++++++++++++++++- src/dials/command_line/merge.py | 9 +- src/dials/command_line/scale.py | 8 +- src/dials/util/export_mtz.py | 19 +- tests/algorithms/merging/test_merge.py | 2 +- 6 files changed, 294 insertions(+), 25 deletions(-) create mode 100644 newsfragments/2700.bugfix diff --git a/newsfragments/2700.bugfix b/newsfragments/2700.bugfix new file mode 100644 index 0000000000..ca2ba74688 --- /dev/null +++ b/newsfragments/2700.bugfix @@ -0,0 +1,2 @@ +``dials.merge``: Use gemmi to output merged MTZs for consistency with ``dials.export`` + diff --git a/src/dials/algorithms/merging/merge.py b/src/dials/algorithms/merging/merge.py index 1757dac567..4b905286e5 100644 --- a/src/dials/algorithms/merging/merge.py +++ b/src/dials/algorithms/merging/merge.py @@ -3,20 +3,31 @@ from __future__ import annotations import logging +import time from contextlib import contextmanager from io import StringIO from typing import List, Optional, Tuple import numpy as np +import pandas as pd import iotbx.mtz -from cctbx import miller, r_free_utils +from cctbx import miller, r_free_utils, sgtbx, uctbx +from dxtbx import flumpy from dxtbx.model import ExperimentList from iotbx import mtz, phil from iotbx.reflection_file_editor import is_rfree_array from iotbx.reflection_file_utils import get_r_free_flags_scores +from libtbx import env from mmtbx.scaling import data_statistics +from dials.util.version import dials_version + +try: + import gemmi +except ModuleNotFoundError: + gemmi = None + from dials.algorithms.merging.reporting import ( MergeJSONCollector, MergingStatisticsData, @@ -196,6 +207,251 @@ def __init__( self.merged_half_datasets = merged_half_datasets +class MergedMTZCreator: + """Creates a gemmi.Mtz object for merged data.""" + + def __init__( + self, space_group: sgtbx.space_group, unit_cell: uctbx.unit_cell + ) -> None: + """ + Initializes the MergedMTZCreator with the provided space group and unit cell. + + Args: + space_group: The sgtbx.space_group object. + unit_cell: The uctbx.unit_cell object with cell parameters. + + Returns: + None + """ + + self.space_group = space_group + self.unit_cell = unit_cell + + mtz = gemmi.Mtz(with_base=True) + mtz.title = f"From {env.dispatcher_name}" + date_str = time.strftime("%Y-%m-%d at %H:%M:%S %Z") + if time.strftime("%Z") != "GMT": + date_str += time.strftime(" (%Y-%m-%d at %H:%M:%S %Z)", time.gmtime()) + mtz.history += [ + f"From {dials_version()}, run on {date_str}", + ] + hall = space_group.type().hall_symbol() + ops = gemmi.symops_from_hall(hall) + mtz.spacegroup = gemmi.find_spacegroup_by_ops(ops) + + if unit_cell: + mtz.set_cell_for_all(gemmi.UnitCell(*unit_cell.parameters())) + + self.type_to_sig_type = {"J": "Q", "F": "Q", "G": "L", "K": "M", "D": "Q"} + + self.mtz = mtz + + def add_data( + self, mtz_datasets: List[MTZDataClass], r_free_array: miller.array = None + ) -> None: + """ + Adds data to the MTZ object based on the provided datasets and optional R-free array. + + Args: + mtz_datasets (List[MTZDataClass]): The list of MTZDataClass objects containing the datasets. + r_free_array (miller.array, optional): The optional R-free array to add. 
+ + Returns: + None + """ + + self._initialise_data_array(mtz_datasets) + + if r_free_array: + self._add_column(r_free_array, label="FreeR_flag", type_char="I") + + if len(mtz_datasets) > 1: + suffixes = [f"_WAVE{i+1}" for i in range(len(mtz_datasets))] + else: + suffixes = [""] + + for i, dataset in enumerate(mtz_datasets): + if dataset.dataset_name is None: + dataset.dataset_name = "FROMDIALS" + if dataset.crystal_name is None: + dataset.crystal_name = f"crystal_{i+2}" + if dataset.project_name is None: + dataset.project_name = "DIALS" + d = self.mtz.add_dataset(dataset.dataset_name) + d.crystal_name = dataset.crystal_name + d.project_name = dataset.project_name + d.wavelength = dataset.wavelength + + suffix = suffixes[i] + if dataset.merged_array: + self._add_column(dataset.merged_array, "IMEAN" + suffix, "J") + if dataset.multiplicities: + self._add_column(dataset.multiplicities, "N" + suffix, "I") + if dataset.amplitudes: + self._add_column(dataset.amplitudes, "F" + suffix, "F") + if dataset.merged_anomalous_array: + i_plus, i_minus = self._separate_anomalous( + dataset.merged_anomalous_array + ) + self._add_column(i_plus, "I" + suffix + "(+)", "K") + self._add_column(i_minus, "I" + suffix + "(-)", "K") + if dataset.anomalous_multiplicities: + n_plus, n_minus = self._separate_anomalous( + dataset.anomalous_multiplicities + ) + self._add_column(n_plus, "N" + suffix + "(+)", "I") + self._add_column(n_minus, "N" + suffix + "(-)", "I") + if dataset.anomalous_amplitudes: + f_plus, f_minus = self._separate_anomalous(dataset.anomalous_amplitudes) + self._add_column(f_plus, "F" + suffix + "(+)", "G") + self._add_column(f_minus, "F" + suffix + "(-)", "G") + if dataset.dano: + self._add_column(dataset.dano, "DANO" + suffix, "D") + if dataset.merged_half_datasets: + self._add_column( + dataset.merged_half_datasets.data1, "IHALF1" + suffix, "J" + ) + self._add_column( + dataset.merged_half_datasets.data2, "IHALF2" + suffix, "J" + ) + self._add_column( + dataset.merged_half_datasets.multiplicity1, "NHALF1" + suffix, "R" + ) + self._add_column( + dataset.merged_half_datasets.multiplicity2, "NHALF2" + suffix, "R" + ) + self.mtz.set_data(self.mtz_data) + + def _initialise_data_array(self, mtz_datasets): + """ + Sets the Miller indices for the data array based on the given MTZ datasets. + + Args: + mtz_datasets (List[MTZDataClass]): A list of MTZDataClass objects + + Returns: + None + + This function creates a merged miller set by combining the indices of all the datasets. + The indices are kept for use by the _add_column() function. A pandas DataFrame `mtz_data` + is created with the indices of the merged set as rows and the columns "H", "K", and "L". + Other columns will be added to the DataFrame by the _add_column() function. + """ + + miller_set = miller.set( + crystal_symmetry=mtz_datasets[0].merged_array.crystal_symmetry(), + indices=mtz_datasets[0].merged_array.indices().deep_copy(), + anomalous_flag=False, + ) + for dataset in mtz_datasets[1:]: + indices = dataset.merged_array.indices() + missing_isel = miller.match_indices(miller_set.indices(), indices).singles( + 1 + ) + miller_set.indices().extend(indices.select(missing_isel)) + + self.indices = miller_set.indices() + + self.mtz_data = pd.DataFrame( + flumpy.to_numpy(self.indices).astype("float32"), + columns=["H", "K", "L"], + ) + + def _add_column(self, column: miller.array, label: str, type_char: str): + """ + Add a data column (and associated sigmas if available) to the MTZ object. 
+
+        Args:
+            column (miller.array): The column to add to the MTZ object.
+            label (str): The label for the column.
+            type_char (str): The type character for the column.
+
+        This function adds column data to the MTZ object. It first adds the column
+        to the most recently added dataset of the MTZ object. Then, it matches the
+        Miller indices of the column data to those in the MTZ object to find the
+        pairs of matching indices. It selects the data from the column based on the
+        pairs of matching indices and inserts them into the `mtz_data` DataFrame.
+        If the column has sigmas, the steps are repeated for them too.
+
+        Returns:
+            None
+        """
+        self.mtz.add_column(label, type_char)
+
+        indices = column.indices()
+
+        matches = miller.match_indices(self.indices, indices)
+        pairs = matches.pairs()
+        isel_i = pairs.column(0)
+        isel_j = pairs.column(1)
+
+        data = flex.double(len(self.indices), float("nan"))
+        data.set_selected(isel_i, column.data().as_double().select(isel_j))
+
+        self.mtz_data.insert(
+            len(self.mtz_data.columns),
+            label,
+            flumpy.to_numpy(data).astype("float32"),
+        )
+
+        if column.sigmas() is None:
+            return
+
+        type_char = self.type_to_sig_type[type_char]
+        self.mtz.add_column("SIG" + label, type_char)
+
+        sigmas = flex.double(len(self.indices), float("nan"))
+        sigmas.set_selected(isel_i, column.sigmas().as_double().select(isel_j))
+
+        self.mtz_data.insert(
+            len(self.mtz_data.columns),
+            "SIG" + label,
+            flumpy.to_numpy(sigmas).astype("float32"),
+        )
+
+    def _separate_anomalous(
+        self, miller_array: miller.array
+    ) -> Tuple[miller.array, miller.array]:
+        """
+        Separates the anomalous pairs from a given Miller array to produce
+        two arrays: one for the positive and one for the negative hemisphere.
+
+        Args:
+            miller_array (miller.array): The input Miller array.
+
+        Returns:
+            Tuple[miller.array, miller.array]: A tuple containing the positive
+            and negative anomalous arrays.
+        """
+        asu, matches = miller_array.match_bijvoet_mates()
+
+        sel = matches.pairs_hemisphere_selection("+")
+        sel.extend(matches.singles_hemisphere_selection("+"))
+
+        indices = asu.indices().select(sel)
+        data = asu.data().select(sel)
+        sigmas = None
+        if asu.sigmas() is not None:
+            sigmas = asu.sigmas().select(sel)
+
+        miller_set = miller.set(miller_array.crystal_symmetry(), indices)
+        plus_array = miller.array(miller_set, data, sigmas)
+
+        sel = matches.pairs_hemisphere_selection("-")
+        sel.extend(matches.singles_hemisphere_selection("-"))
+
+        indices = -asu.indices().select(sel)
+        data = asu.data().select(sel)
+        sigmas = None
+        if asu.sigmas() is not None:
+            sigmas = asu.sigmas().select(sel)
+
+        miller_set = miller.set(miller_array.crystal_symmetry(), indices)
+        minus_array = miller.array(miller_set, data, sigmas)
+
+        return plus_array, minus_array
+
+
 def make_merged_mtz_file(mtz_datasets, r_free_array: miller.array = None):
     """
     Make an mtz file object for the data, adding the date, time and program.
@@ -207,9 +463,12 @@ def make_merged_mtz_file(mtz_datasets, r_free_array: miller.array = None):
         experiment.
 
     Returns:
-        An iotbx mtz file object.
+        A gemmi.Mtz object or an iotbx mtz object, if gemmi is not available.
""" + if gemmi: + return make_merged_mtz_file_with_gemmi(mtz_datasets, r_free_array) + if len(mtz_datasets) > 1: writer = MADMergedMTZWriter else: @@ -248,6 +507,22 @@ def make_merged_mtz_file(mtz_datasets, r_free_array: miller.array = None): return mtz_writer.mtz_file +def make_merged_mtz_file_with_gemmi(mtz_datasets, r_free_array=None): + + # XXX This should replace the code in make_merged_mtz_file when + # MergedMTZWriter and MADMergedMTZWriter are removed + writer = MergedMTZCreator + + mtz_writer = writer( + mtz_datasets[0].merged_array.space_group(), + mtz_datasets[0].merged_array.unit_cell(), + ) + + mtz_writer.add_data(mtz_datasets, r_free_array) + + return mtz_writer.mtz + + def merge_scaled_array( experiments, scaled_array, diff --git a/src/dials/command_line/merge.py b/src/dials/command_line/merge.py index 11e15a7ff5..296cf59c74 100644 --- a/src/dials/command_line/merge.py +++ b/src/dials/command_line/merge.py @@ -7,7 +7,6 @@ import json import logging import sys -from io import StringIO from typing import List, Tuple from dxtbx.model import ExperimentList @@ -30,7 +29,7 @@ exclude_image_ranges_from_scans, get_selection_for_valid_image_ranges, ) -from dials.util.export_mtz import match_wavelengths +from dials.util.export_mtz import log_summary, match_wavelengths from dials.util.options import ArgumentParser, reflections_and_experiments_from_files from dials.util.version import dials_version @@ -359,10 +358,8 @@ def run(args=None): raise Sorry(e) logger.info("\nWriting reflections to %s", (params.output.mtz)) - out = StringIO() - mtz_file.show_summary(out=out) - logger.info(out.getvalue()) - mtz_file.write(params.output.mtz) + log_summary(mtz_file) + mtz_file.write_to_file(params.output.mtz) if params.output.json: with open(params.output.json, "w", encoding="utf-8") as f: diff --git a/src/dials/command_line/scale.py b/src/dials/command_line/scale.py index a3284a908a..b53cbef7d8 100644 --- a/src/dials/command_line/scale.py +++ b/src/dials/command_line/scale.py @@ -40,12 +40,12 @@ import logging import sys -from io import StringIO from libtbx import phil from dials.algorithms.scaling.algorithm import ScaleAndFilterAlgorithm, ScalingAlgorithm from dials.util import Sorry, log, show_mail_handle_errors +from dials.util.export_mtz import log_summary from dials.util.options import ArgumentParser, reflections_and_experiments_from_files from dials.util.version import dials_version @@ -135,10 +135,8 @@ def _export_merged_mtz(params, experiments, joint_table): mtz_file = merge_data_to_mtz(merge_params, experiments, [joint_table]) logger.disabled = False logger.info("\nWriting merged data to %s", (params.output.merged_mtz)) - out = StringIO() - mtz_file.show_summary(out=out) - logger.info(out.getvalue()) - mtz_file.write(params.output.merged_mtz) + log_summary(mtz_file) + mtz_file.write_to_file(params.output.merged_mtz) def _export_unmerged_mtz(params, experiments, reflection_table): diff --git a/src/dials/util/export_mtz.py b/src/dials/util/export_mtz.py index 28514ed4bd..f94f96f61c 100644 --- a/src/dials/util/export_mtz.py +++ b/src/dials/util/export_mtz.py @@ -2,21 +2,17 @@ import logging import time +import warnings from collections import Counter from copy import deepcopy from dataclasses import dataclass, field from math import isclose from typing import List, Optional +import gemmi import numpy as np import pandas as pd -try: - import gemmi -except ModuleNotFoundError as e: - gemmi = None - gemmi_import_error = e - from cctbx import uctbx from dxtbx import flumpy from iotbx import mtz 
@@ -53,6 +49,11 @@ class MTZWriterBase: def __init__(self, space_group, unit_cell=None): """If a unit cell is provided, will be used as default unless specified for each crystal.""" + warnings.warn( + "MTZWriterBase classes (MergedMTZWriter and MADMergedMTZWriter) are deprecated. Use MergedMTZCreator instead.\n", + DeprecationWarning, + stacklevel=2, + ) mtz_file = mtz.object() mtz_file.set_title(f"From {env.dispatcher_name}") date_str = time.strftime("%Y-%m-%d at %H:%M:%S %Z") @@ -315,8 +316,6 @@ def add_batch_list( if max_batch_number > batch_offset: batch_offset = max_batch_number - if gemmi is None: - raise gemmi_import_error batch = gemmi.Mtz.Batch() # Setting fields that are the same for all batches @@ -677,8 +676,6 @@ def export_mtz( ) # Create the mtz file - if gemmi is None: - raise gemmi_import_error mtz = gemmi.Mtz(with_base=True) mtz.title = f"From {env.dispatcher_name}" date_str = time.strftime("%Y-%m-%d at %H:%M:%S %Z") @@ -828,7 +825,7 @@ def log_summary(mtz): for col in mtz.columns: # col.min_value and col.max_value are not set, so we have to calculate them here logger.info( - f"{col.label:<12s} {col.type} {col.dataset_id:2d} {col.array.min():12.6g} {col.array.max():10.6g}" + f"{col.label:<12s} {col.type} {col.dataset_id:2d} {np.nanmin(col.array):12.6g} {np.nanmax(col.array):10.6g}" ) logger.info(f"History ({len(mtz.history)} lines):") for line in mtz.history: diff --git a/tests/algorithms/merging/test_merge.py b/tests/algorithms/merging/test_merge.py index 40739de3fa..f5de41d830 100644 --- a/tests/algorithms/merging/test_merge.py +++ b/tests/algorithms/merging/test_merge.py @@ -85,7 +85,7 @@ def test_r_free_flags_from_reference(tmp_path): params = phil_scope.extract() r_free_flags = generate_r_free_flags(params, mtz_datasets) mtz = make_merged_mtz_file(mtz_datasets, r_free_array=r_free_flags) - mtz.write(str(mtz_file)) + mtz.write_to_file(str(mtz_file)) # Now actually test r_free_flags_from_reference params.r_free_flags.reference = str(mtz_file) From 32ad28bd2248814f09856d0399856e6c2d9ca13f Mon Sep 17 00:00:00 2001 From: David Waterman Date: Wed, 7 Aug 2024 14:10:13 +0100 Subject: [PATCH 19/40] Fix matplotlib v3.9 colormaps (#2688) Fix for "matplotlib.cm has no attribute get_cmap" --- newsfragments/2688.bugfix | 1 + src/dials/algorithms/refinement/corrgram.py | 3 +-- src/dials/command_line/reference_profile_viewer.py | 4 ++-- src/dials/command_line/stereographic_projection.py | 2 +- 4 files changed, 5 insertions(+), 5 deletions(-) create mode 100644 newsfragments/2688.bugfix diff --git a/newsfragments/2688.bugfix b/newsfragments/2688.bugfix new file mode 100644 index 0000000000..d8eaa47e9b --- /dev/null +++ b/newsfragments/2688.bugfix @@ -0,0 +1 @@ +Avoid deprecated ``matplotlib.cm.get_cmap`` calls diff --git a/src/dials/algorithms/refinement/corrgram.py b/src/dials/algorithms/refinement/corrgram.py index 56a4116f05..350476d34f 100644 --- a/src/dials/algorithms/refinement/corrgram.py +++ b/src/dials/algorithms/refinement/corrgram.py @@ -63,7 +63,6 @@ def corrgram(corrmat, labels): import matplotlib matplotlib.use("Agg") - import matplotlib.cm as cm import matplotlib.pyplot as plt except ImportError as e: logger.info("matplotlib modules not available " + str(e), exc_info=True) @@ -71,7 +70,7 @@ def corrgram(corrmat, labels): plt.figure(1) ax = plt.subplot(1, 1, 1, aspect="equal") - clrmap = cm.get_cmap("bwr") + clrmap = matplotlib.colormaps["bwr"] for x in range(nr): for y in range(nr): diff --git a/src/dials/command_line/reference_profile_viewer.py 
b/src/dials/command_line/reference_profile_viewer.py index 5833989c30..f058c1f0af 100644 --- a/src/dials/command_line/reference_profile_viewer.py +++ b/src/dials/command_line/reference_profile_viewer.py @@ -154,9 +154,9 @@ def draw_figure(self): # the direction through the Ewald sphere vals2D = profile["data"].sum(axis=0) cmap = copy.copy( - matplotlib.cm.get_cmap( + matplotlib.colormaps[ self.cmap_choice.GetString(self.cmap_choice.GetSelection()) - ) + ] ) # If any X, Y position is masked down the summed Z stack then mask diff --git a/src/dials/command_line/stereographic_projection.py b/src/dials/command_line/stereographic_projection.py index 8a3c72f208..0457739cad 100644 --- a/src/dials/command_line/stereographic_projection.py +++ b/src/dials/command_line/stereographic_projection.py @@ -357,7 +357,7 @@ def plot_projections( epochs = flex.double(epochs) epochs -= flex.min(epochs) epochs /= flex.max(epochs) - cmap = matplotlib.cm.get_cmap(colour_map) + cmap = matplotlib.colormaps[colour_map] colours = [cmap(e) for e in epochs] elif colours is None or len(colours) == 0: colours = ["b"] * len(projections_all) From 5d8c9bfb29be9dfc89d57e35c4e6bcf926aa38b2 Mon Sep 17 00:00:00 2001 From: David McDonagh <60879630+toastisme@users.noreply.github.com> Date: Wed, 7 Aug 2024 22:59:06 +0100 Subject: [PATCH 20/40] Modified branching to Laue refinement methods (#2715) * Modified branching to Laue refinement methods to depend on ExperimentType. --- newsfragments/2715.bugfix | 1 + .../algorithms/refinement/reflection_manager.py | 2 +- src/dials/array_family/flex_ext.py | 17 +++++++++++------ 3 files changed, 13 insertions(+), 7 deletions(-) create mode 100644 newsfragments/2715.bugfix diff --git a/newsfragments/2715.bugfix b/newsfragments/2715.bugfix new file mode 100644 index 0000000000..deda002998 --- /dev/null +++ b/newsfragments/2715.bugfix @@ -0,0 +1 @@ +Modified branching to Laue refinement methods to check for ExperimentType first. 
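[Editor's note] The fix in the next two files replaces a duck-typed test (does the reflection table contain a "wavelength" column?) with an explicit check of the declared experiment type. An illustrative sketch of the dispatch rule, not part of the patch:

from dxtbx.model import ExperimentType

def use_laue_path(experiment):
    # Laue and time-of-flight experiments carry per-reflection wavelengths,
    # so they are routed to the Laue refinement machinery.
    return experiment.get_type() in (ExperimentType.LAUE, ExperimentType.TOF)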
diff --git a/src/dials/algorithms/refinement/reflection_manager.py b/src/dials/algorithms/refinement/reflection_manager.py index 2f3c01790d..a90ecd36e8 100644 --- a/src/dials/algorithms/refinement/reflection_manager.py +++ b/src/dials/algorithms/refinement/reflection_manager.py @@ -234,7 +234,7 @@ def from_parameters_reflections_experiments( flex.set_random_seed(params.random_seed) logger.debug("Random seed set to %d", params.random_seed) - if "wavelength" in reflections: + if experiments.all_laue() or experiments.all_tof(): return ReflectionManagerFactory.laue_manager( experiments, reflections, params ) diff --git a/src/dials/array_family/flex_ext.py b/src/dials/array_family/flex_ext.py index 72a02daecc..65d978922f 100644 --- a/src/dials/array_family/flex_ext.py +++ b/src/dials/array_family/flex_ext.py @@ -23,6 +23,7 @@ import cctbx.array_family.flex import cctbx.miller import libtbx.smart_open +from dxtbx.model import ExperimentType from scitbx import matrix import dials.extensions.glm_background_ext @@ -1331,12 +1332,16 @@ def map_centroids_to_reciprocal_space( cctbx.array_family.flex.vec2_double(x, y) ) - if calculated and "wavelength_cal" in self and "s0_cal" in self: - wavelength = self["wavelength_cal"].select(sel) - s0 = self["s0_cal"].select(sel) - elif "wavelength" in self and "s0" in self: - wavelength = self["wavelength"].select(sel) - s0 = self["s0"].select(sel) + if ( + expt.get_type() == ExperimentType.LAUE + or expt.get_type() == ExperimentType.TOF + ): + if calculated and "wavelength_cal" in self and "s0_cal" in self: + wavelength = self["wavelength_cal"].select(sel) + s0 = self["s0_cal"].select(sel) + elif "wavelength" in self and "s0" in self: + wavelength = self["wavelength"].select(sel) + s0 = self["s0"].select(sel) else: wavelength = expt.beam.get_wavelength() s0 = expt.beam.get_s0() From b9aae978b6d818a7dc17e7b89b66890c2e040a4e Mon Sep 17 00:00:00 2001 From: David Waterman Date: Thu, 8 Aug 2024 16:13:23 +0100 Subject: [PATCH 21/40] Fix `dials.reciprocal_lattice_viewer` middle mouse drag to translate (#2707) Adds pyopengl dependency as part of this --- .conda-envs/linux.txt | 1 + .conda-envs/macos.txt | 1 + .conda-envs/windows.txt | 1 + newsfragments/2707.bugfix | 1 + src/dials/util/wx_viewer.py | 21 ++++++--------------- 5 files changed, 10 insertions(+), 15 deletions(-) create mode 100644 newsfragments/2707.bugfix diff --git a/.conda-envs/linux.txt b/.conda-envs/linux.txt index c4b5eeabbc..23de941a8d 100644 --- a/.conda-envs/linux.txt +++ b/.conda-envs/linux.txt @@ -33,6 +33,7 @@ conda-forge::pint conda-forge::pip conda-forge::psutil conda-forge::pybind11 +conda-forge::pyopengl conda-forge::pyrtf conda-forge::pytest conda-forge::pytest-forked diff --git a/.conda-envs/macos.txt b/.conda-envs/macos.txt index 108cf8d281..20e9269339 100644 --- a/.conda-envs/macos.txt +++ b/.conda-envs/macos.txt @@ -33,6 +33,7 @@ conda-forge::pip conda-forge::psutil conda-forge::pthread-stubs conda-forge::pybind11 +conda-forge::pyopengl conda-forge::pyrtf conda-forge::pytest conda-forge::pytest-forked diff --git a/.conda-envs/windows.txt b/.conda-envs/windows.txt index 4b8d404841..2ec6d88e13 100644 --- a/.conda-envs/windows.txt +++ b/.conda-envs/windows.txt @@ -31,6 +31,7 @@ conda-forge::pint conda-forge::pip conda-forge::psutil conda-forge::pybind11 +conda-forge::pyopengl conda-forge::pyrtf conda-forge::pytest conda-forge::pytest-forked diff --git a/newsfragments/2707.bugfix b/newsfragments/2707.bugfix new file mode 100644 index 0000000000..03028e0ab6 --- /dev/null +++ 
b/newsfragments/2707.bugfix @@ -0,0 +1 @@ +``dials.reciprocal_lattice_viewer``: fix middle mouse drag to translate function. diff --git a/src/dials/util/wx_viewer.py b/src/dials/util/wx_viewer.py index fa282abdc3..b335f79fb6 100644 --- a/src/dials/util/wx_viewer.py +++ b/src/dials/util/wx_viewer.py @@ -600,28 +600,19 @@ def OnTranslate(self, event): model = gltbx.util.get_gl_modelview_matrix() proj = gltbx.util.get_gl_projection_matrix() view = gltbx.util.get_gl_viewport() - winx = [] - winy = [] - winz = [] rc = self.rotation_center rc_eye = gltbx.util.object_as_eye_coordinates(rc) - assert glu.gluProject(rc[0], rc[1], rc[2], model, proj, view, winx, winy, winz) - objx = [] - objy = [] - objz = [] + winx, winy, winz = glu.gluProject(rc[0], rc[1], rc[2], model, proj, view) win_height = max(1, self.w) - assert glu.gluUnProject( - winx[0], - winy[0] + 0.5 * win_height, - winz[0], + objx, objy, objz = glu.gluUnProject( + winx, + winy + 0.5 * win_height, + winz, model, proj, view, - objx, - objy, - objz, ) - dist = v3distsq((objx[0], objy[0], objz[0]), rc) ** 0.5 + dist = v3distsq((objx, objy, objz), rc) ** 0.5 scale = abs(dist / (0.5 * win_height)) x, y = event.GetX(), event.GetY() gltbx.util.translate_object(scale, x, y, self.xmouse, self.ymouse) From fd7985296fb2dc78852af21f46f3db5be5bd9046 Mon Sep 17 00:00:00 2001 From: Graeme Winter Date: Fri, 9 Aug 2024 14:59:42 +0100 Subject: [PATCH 22/40] Tidy warning around joint_indexing (#2714) Tidy warning around joint_indexing Fixes #2713 --- newsfragments/2714.misc | 2 ++ src/dials/command_line/index.py | 7 ++++--- 2 files changed, 6 insertions(+), 3 deletions(-) create mode 100644 newsfragments/2714.misc diff --git a/newsfragments/2714.misc b/newsfragments/2714.misc new file mode 100644 index 0000000000..eb586aaa39 --- /dev/null +++ b/newsfragments/2714.misc @@ -0,0 +1,2 @@ +Tidy output of ``dials.index`` around the defaults for ``joint_index`` + diff --git a/src/dials/command_line/index.py b/src/dials/command_line/index.py index 02179f8a35..08d11cb7f5 100644 --- a/src/dials/command_line/index.py +++ b/src/dials/command_line/index.py @@ -171,13 +171,14 @@ def index(experiments, reflections, params): if params.indexing.joint_indexing is Auto: if all(e.is_still() for e in experiments): params.indexing.joint_indexing = False - logger.info("joint_indexing=False has been set for stills experiments") + logger.info("Disabling joint_indexing for still data") elif all(not e.is_still() for e in experiments): params.indexing.joint_indexing = True - logger.info("joint_indexing=True has been set for scans experiments") + if len(experiments) > 1: + logger.info("Enabling joint_indexing for rotation data") else: raise ValueError( - "Unable to set joint_indexing automatically for a mixture of stills and scans experiments" + "Unable to set joint_indexing automatically for a mixture of still and rotation data" ) if len(experiments) == 1 or params.indexing.joint_indexing: From a2f8f714a3a2d7a257119ce00baacb5bd51cf4ed Mon Sep 17 00:00:00 2001 From: Daniel Paley Date: Fri, 9 Aug 2024 14:41:26 -0400 Subject: [PATCH 23/40] Reflection table performance improvement (#2718) Eliminate a double loop in selections from reflection tables. Speedup of 12 min per call for a table containing 165k experiment identifiers. 
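[Editor's note] The C++ change below replaces a nested scan of the experiment-identifier map (one full traversal per selected id) with a single find() per id. The same idea expressed in Python, illustrative only:

def select_identifiers(new_ids, identifiers):
    # One dictionary lookup per id instead of walking the whole map each time
    return {i: identifiers[i] for i in new_ids if i in identifiers}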
Co-authored-by: David Mittan-Moreau Co-authored-by: Aaron Brewster --- newsfragments/2718.bugfix | 3 +++ .../boost_python/reflection_table_suite.h | 11 ++++------- 2 files changed, 7 insertions(+), 7 deletions(-) create mode 100644 newsfragments/2718.bugfix diff --git a/newsfragments/2718.bugfix b/newsfragments/2718.bugfix new file mode 100644 index 0000000000..06ae9674d6 --- /dev/null +++ b/newsfragments/2718.bugfix @@ -0,0 +1,3 @@ +Performance improvement for selections from large reflection tables. For a +table containing 165k experiment identifiers the speedup is 1000x (12 minutes +per call). diff --git a/src/dials/array_family/boost_python/reflection_table_suite.h b/src/dials/array_family/boost_python/reflection_table_suite.h index 5af6bcbb9d..c5b4a44930 100644 --- a/src/dials/array_family/boost_python/reflection_table_suite.h +++ b/src/dials/array_family/boost_python/reflection_table_suite.h @@ -32,12 +32,9 @@ namespace dials { namespace af { namespace boost_python { // Copy across identifiers for ids in new table typedef typename T::experiment_map_type::const_iterator const_iterator; for (std::set::iterator i = new_ids.begin(); i != new_ids.end(); ++i) { - for (const_iterator it = self.experiment_identifiers()->begin(); - it != self.experiment_identifiers()->end(); - ++it) { - if (it->first == *i) { - (*new_table.experiment_identifiers())[it->first] = it->second; - } + const_iterator found = self.experiment_identifiers()->find(*i); + if (found != self.experiment_identifiers()->end()) { + (*new_table.experiment_identifiers())[found->first] = found->second; } } } @@ -262,4 +259,4 @@ namespace dials { namespace af { namespace boost_python { }}}} // namespace dials::af::boost_python::reflection_table_suite -#endif // DIALS_ARRAY_FAMILY_BOOST_PYTHON_REFLECTION_TABLE_SUITE_H \ No newline at end of file +#endif // DIALS_ARRAY_FAMILY_BOOST_PYTHON_REFLECTION_TABLE_SUITE_H From cc9babe37f0fe286b4fce3681328c9bd88d6f4cb Mon Sep 17 00:00:00 2001 From: Irakli Sikharulidze Date: Mon, 19 Aug 2024 09:10:15 +0000 Subject: [PATCH 24/40] Build dials image from rockylinux:9 (#2689) CentOS 7 is EOL and the repositories have been disabled. Since we're refreshing the base image, let's switch to using CMake as the release basis. Fixes #2690. Co-authored-by: Nicholas Devenish --- Dockerfile | 29 +++++++++++++++++++---------- docker-entrypoint.sh | 6 ------ newsfragments/2689.bugfix | 1 + 3 files changed, 20 insertions(+), 16 deletions(-) delete mode 100755 docker-entrypoint.sh create mode 100644 newsfragments/2689.bugfix diff --git a/Dockerfile b/Dockerfile index 392429c530..63efca8a40 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,16 +1,25 @@ -# Build dials -FROM centos:7 as builder +FROM rockylinux:9 as builder -RUN yum install -y git +RUN dnf install -y 'dnf-command(config-manager)' && \ + dnf config-manager --enable crb && \ + dnf install -y git python3 mesa-libGL-devel ninja-build WORKDIR /dials COPY installer/bootstrap.py . -RUN python bootstrap.py +ENV PIP_ROOT_USER_ACTION=ignore +ENV CMAKE_GENERATOR=Ninja +RUN python3 bootstrap.py --cmake +RUN /dials/conda_base/bin/cmake --install build +RUN /dials/conda_base/bin/python3 -mpip install modules/dxtbx modules/dials modules/xia2 # Copy to final image -FROM centos:7 -COPY ./docker-entrypoint.sh . 
COPY --from=builder /dials /dials
-RUN chmod 0755 /docker-entrypoint.sh
+FROM rockylinux:9
+RUN dnf install -y glibc-locale-source
+RUN localedef -i en_US -f UTF-8 en_US.UTF-8
+RUN echo "LANG=\"en_US.UTF-8\"" > /etc/locale.conf
+ENV LANG en_US.UTF-8
-ENTRYPOINT ["/docker-entrypoint.sh"]
-CMD ["dials.version"]
\ No newline at end of file
+RUN mkdir /dials
+COPY --from=builder /dials/conda_base /dials/conda_base
+COPY --from=builder /dials/dials /dials
+ENV PATH="/dials/conda_base/bin:$PATH"
+CMD ["dials.version"]
diff --git a/docker-entrypoint.sh b/docker-entrypoint.sh
deleted file mode 100755
index da2c80dde5..0000000000
--- a/docker-entrypoint.sh
+++ /dev/null
@@ -1,6 +0,0 @@
-#!/bin/bash
-
-source /dials/dials
-
-# Start service
-exec "$@"
\ No newline at end of file
diff --git a/newsfragments/2689.bugfix b/newsfragments/2689.bugfix
new file mode 100644
index 0000000000..dc0f17fd05
--- /dev/null
+++ b/newsfragments/2689.bugfix
@@ -0,0 +1 @@
+Change Docker base image to rockylinux:9 as centos:7 is EOL
\ No newline at end of file
From 2fc58132e222c48484d806839935e39657f9574c Mon Sep 17 00:00:00 2001
From: Nicholas Devenish
Date: Mon, 19 Aug 2024 10:47:47 +0100
Subject: [PATCH 25/40] Switch linting/formatting to ruff (#2675)

... and generally update pre-commits. ruff is much faster, allows us to drop setup.cfg, gives us many more options for linting, and enables pre-commit to autofix many of the standard issues (only safe fixes are applied by default, however). We had not upgraded black for some time, so the resulting reformatting is included in this commit, as is an update to clang-format.
---
 .azure-pipelines/azure-pipelines.yml | 6 +-
 ...lake8-validation.py => lint-validation.py} | 24 +++----
 .clang-format | 1 +
 .pre-commit-config.yaml | 43 +++-------
 CONTRIBUTING.md | 8 +--
 build.py | 1 +
 doc/sphinx/button.py | 1 +
 doc/sphinx/conf.py | 4 +-
 installer/bootstrap.py | 11 +---
 installer/dials_installer.py | 4 +-
 libtbx_refresh.py | 10 +--
 newsfragments/2675.misc | 1 +
 pyproject.toml | 64 ++++++++++++++++---
 setup.cfg | 16 -----
 src/dials/__init__.py | 4 +-
 .../algorithms/background/gmodel/model.h | 2 +-
 .../algorithms/background/gmodel/modeller.py | 2 -
 src/dials/algorithms/background/modeller.py | 1 -
 src/dials/algorithms/centroid/__init__.py | 2 -
 .../centroid/generate_bias_lookup_table.py | 2 -
 .../basis_vector_search/combinations.py | 1 -
 .../indexing/basis_vector_search/fft3d.py | 1 -
 .../indexing/basis_vector_search/strategy.py | 1 -
 src/dials/algorithms/indexing/indexer.py | 10 +--
 .../indexing/lattice_search/__init__.py | 5 --
 .../lattice_search/low_res_spot_match.py | 4 --
 .../indexing/lattice_search/strategy.py | 1 -
 src/dials/algorithms/indexing/max_cell.py | 1 -
 .../algorithms/indexing/model_evaluation.py | 2 +-
 .../indexing/non_primitive_basis.py | 1 -
 src/dials/algorithms/indexing/ssx/analysis.py | 58 +++++++++--------
 .../algorithms/indexing/ssx/processing.py | 8 +--
 .../algorithms/indexing/stills_indexer.py | 23 ++++---
 src/dials/algorithms/indexing/symmetry.py | 8 +--
 .../integration/boost_python/kapton_ext.cc | 2 +-
 .../integration/image_integrator.py | 11 ++--
 .../algorithms/integration/integrator.py | 12 ++--
 .../integration/kapton_2019_correction.py | 18 ++++-
 .../integration/kapton_correction.py | 18 ++++-
 .../integration/parallel_integrator.h | 3 +-
 .../integration/parallel_integrator.py | 6 +-
 src/dials/algorithms/integration/processor.py | 11 ++--
 src/dials/algorithms/integration/report.py | 2 -
 .../integration/ssx/ellipsoid_integrate.py | 1 -
.../integration/ssx/ssx_integrate.py | 4 -- .../integration/ssx/stills_integrate.py | 2 - .../integration/sublattice_helper.py | 1 - src/dials/algorithms/merging/french_wilson.py | 2 - src/dials/algorithms/merging/merge.py | 7 +- src/dials/algorithms/merging/reporting.py | 32 +++++----- .../profile_model/ellipsoid/algorithm.py | 1 - .../profile_model/ellipsoid/model.py | 5 -- .../ellipsoid/parameterisation.py | 12 ++-- .../profile_model/ellipsoid/refiner.py | 3 - .../profile_model/gaussian_rs/algorithm.py | 1 - .../profile_model/gaussian_rs/calculator.py | 4 +- src/dials/algorithms/refinement/__init__.py | 2 +- .../refinement/analysis/centroid_analysis.py | 2 - .../algorithms/refinement/constraints.py | 8 --- src/dials/algorithms/refinement/corrgram.py | 2 - src/dials/algorithms/refinement/engine.py | 11 ---- .../refinement/outlier_detection/mcd.py | 2 - .../outlier_detection/outlier_base.py | 5 -- .../outlier_detection/sauter_poon.py | 2 - .../refinement/outlier_detection/tukey.py | 2 - .../refinement/parameterisation/autoreduce.py | 6 +- .../parameterisation/beam_parameters.py | 6 -- .../refinement/parameterisation/configure.py | 1 - .../parameterisation/crystal_parameters.py | 11 +--- .../parameterisation/detector_parameters.py | 20 ++---- .../parameterisation/goniometer_parameters.py | 3 - .../parameterisation/model_parameters.py | 2 - .../parameterisation/parameter_report.py | 4 -- .../parameterisation/prediction_parameters.py | 14 ---- .../prediction_parameters_stills.py | 6 -- .../scan_varying_beam_parameters.py | 7 +- .../scan_varying_crystal_parameters.py | 7 +- .../scan_varying_detector_parameters.py | 13 ++-- .../scan_varying_goniometer_parameters.py | 5 +- .../scan_varying_model_parameters.py | 3 - .../scan_varying_prediction_parameters.py | 18 ------ .../prediction/managed_predictors.py | 11 ---- .../refinement/refinement_helpers.py | 2 - src/dials/algorithms/refinement/refiner.py | 25 +++----- .../refinement/reflection_manager.py | 17 ++--- .../restraints/restraints_helpers.h | 2 +- .../restraints/restraints_parameterisation.py | 2 - src/dials/algorithms/refinement/rtmats.h | 2 +- .../algorithms/refinement/sparse_engine.py | 8 +-- src/dials/algorithms/refinement/target.py | 15 +---- .../algorithms/refinement/target_stills.py | 3 - .../refinement/two_theta_refiner.py | 11 ---- .../refinement/weighting_strategies.py | 5 -- src/dials/algorithms/rs_mapper/__init__.py | 2 +- src/dials/algorithms/scaling/Ih_table.py | 25 +++----- .../scaling/active_parameter_managers.py | 5 +- src/dials/algorithms/scaling/algorithm.py | 11 ++-- .../cross_validation/cross_validate.py | 1 - .../algorithms/scaling/error_model/engine.py | 3 +- .../scaling/error_model/error_model.py | 12 +--- .../scaling/error_model/error_model_target.py | 4 -- .../model/components/analytical_component.py | 1 - .../model/components/scale_components.py | 8 +-- .../components/smooth_scale_components.py | 14 ++-- src/dials/algorithms/scaling/model/model.py | 5 +- src/dials/algorithms/scaling/observers.py | 6 +- .../algorithms/scaling/outlier_rejection.py | 8 +-- .../algorithms/scaling/parameter_handler.py | 1 - src/dials/algorithms/scaling/plots.py | 1 - .../scaling/reflection_selection.py | 3 +- src/dials/algorithms/scaling/scaling_helper.h | 2 +- .../algorithms/scaling/scaling_library.py | 9 +-- .../algorithms/scaling/scaling_refiner.py | 5 +- .../algorithms/scaling/scaling_utilities.py | 1 - src/dials/algorithms/shoebox/__init__.py | 4 +- .../simulation/generate_test_reflections.py | 1 - 
src/dials/algorithms/spot_finding/factory.py | 3 +- src/dials/algorithms/spot_finding/finder.py | 6 -- .../algorithms/spot_prediction/__init__.py | 1 - .../spot_prediction/reflection_predictor.py | 4 +- .../spot_prediction/rotation_angles.h | 2 +- .../statistics/cc_half_algorithm.py | 6 +- .../algorithms/statistics/delta_cchalf.py | 1 - src/dials/algorithms/statistics/fast_mcd.py | 7 -- src/dials/algorithms/symmetry/__init__.py | 5 +- .../algorithms/symmetry/absences/plots.py | 10 +-- .../symmetry/absences/run_absences_checks.py | 1 + .../algorithms/symmetry/cosym/__init__.py | 1 - src/dials/algorithms/symmetry/cosym/engine.py | 1 - src/dials/algorithms/symmetry/cosym/plots.py | 1 - src/dials/algorithms/symmetry/cosym/target.py | 8 +-- src/dials/algorithms/symmetry/laue_group.py | 1 - src/dials/algorithms/symmetry/origin.py | 2 - .../array_family/boost_python/flex_int6.cc | 2 +- src/dials/array_family/flex_ext.py | 2 - src/dials/command_line/align_crystal.py | 4 +- src/dials/command_line/anvil_correction.py | 1 - src/dials/command_line/cluster_unit_cell.py | 1 - src/dials/command_line/combine_experiments.py | 3 +- src/dials/command_line/cosym.py | 4 +- src/dials/command_line/damage_analysis.py | 17 +++-- src/dials/command_line/dials_import.py | 5 -- src/dials/command_line/find_spots.py | 2 - src/dials/command_line/frame_orientations.py | 1 - src/dials/command_line/generate_mask.py | 1 - src/dials/command_line/geometry_viewer.py | 2 - src/dials/command_line/import_xds.py | 1 - src/dials/command_line/integrate.py | 3 - src/dials/command_line/merge_cbf.py | 10 +-- src/dials/command_line/model_background.py | 1 - src/dials/command_line/modify_geometry.py | 2 - src/dials/command_line/plot_Fo_vs_Fc.py | 5 +- src/dials/command_line/plot_reflections.py | 1 - .../command_line/plot_scan_varying_model.py | 3 - src/dials/command_line/refine.py | 2 - .../command_line/refine_bravais_settings.py | 1 - src/dials/command_line/reflection_viewer.py | 1 - src/dials/command_line/reindex.py | 2 - src/dials/command_line/report.py | 12 +--- .../command_line/search_beam_position.py | 7 +- src/dials/command_line/sequence_to_stills.py | 2 - src/dials/command_line/show.py | 6 -- src/dials/command_line/show_extensions.py | 2 - src/dials/command_line/simple_integrate.py | 2 - src/dials/command_line/slice_sequence.py | 5 +- src/dials/command_line/split_experiments.py | 1 - src/dials/command_line/ssx_integrate.py | 9 +-- src/dials/command_line/stills_process.py | 22 +------ .../dispersion_spotfinder_threshold_ext.py | 1 - src/dials/model/data/__init__.py | 2 +- src/dials/nexus/__init__.py | 2 +- src/dials/pychef/__init__.py | 2 - src/dials/pychef/damage_series.py | 2 - src/dials/report/analysis.py | 4 +- src/dials/report/html_report.py | 20 ++---- src/dials/report/plots.py | 57 +++++++++-------- src/dials/util/ascii_art.py | 6 +- src/dials/util/batch_handling.py | 8 ++- .../util/boost_python/streambuf_test_ext.cpp | 2 +- src/dials/util/cluster_map.py | 3 - src/dials/util/combine_experiments.py | 1 - src/dials/util/command_line.py | 4 -- src/dials/util/export_mtz.py | 3 - src/dials/util/export_pets.py | 2 - src/dials/util/export_sadabs.py | 2 - src/dials/util/export_shelx.py | 2 +- src/dials/util/filter_reflections.py | 1 - src/dials/util/image_grouping.py | 28 ++++---- src/dials/util/image_viewer/mask_frame.py | 4 -- src/dials/util/image_viewer/rstbx_frame.py | 4 +- .../slip_viewer/calibration_frame.py | 2 +- .../image_viewer/slip_viewer/flex_image.py | 1 - .../util/image_viewer/slip_viewer/frame.py | 2 +- 
.../util/image_viewer/slip_viewer/pyslip.py | 57 ++++++++--------- .../image_viewer/slip_viewer/ring_frame.py | 3 +- .../image_viewer/slip_viewer/score_frame.py | 2 +- .../slip_viewer/tile_generation.py | 1 - .../util/image_viewer/slip_viewer/uc_frame.py | 2 +- .../util/image_viewer/spotfinder_frame.py | 13 +--- src/dials/util/image_viewer/viewer_tools.py | 1 - src/dials/util/installer.py | 1 - src/dials/util/log.py | 1 + src/dials/util/masking.h | 2 +- src/dials/util/masking.py | 2 - src/dials/util/multi_dataset_handling.py | 5 +- src/dials/util/napari_rlv/viewer.py | 2 - src/dials/util/nexus/nx_mx.py | 9 +-- src/dials/util/pprint.py | 1 - src/dials/util/pycbf_extra.py | 1 - src/dials/util/reindex.py | 1 - src/dials/util/resolution_analysis.py | 15 +++-- src/dials/util/show_version.py | 1 - src/dials/util/slice.py | 1 - src/dials/viewer/bitmap_from_array.py | 2 - .../viewer/flex_3d_array_viewer_tests.py | 1 - src/dials/viewer/from_flex_to_wxbitmap.py | 3 - src/dials/viewer/mask_bmp_2D.h | 2 +- src/dials/viewer/slice_viewer.py | 3 - src/dials/viewer/viewer_low_level_util.py | 5 -- .../test_connected_components.py | 2 - .../algorithms/image/threshold/test_local.py | 2 - .../basis_vector_search/test_combinations.py | 1 - .../indexing/test_assign_indices.py | 1 - .../indexing/test_non_primitive_basis.py | 1 - tests/algorithms/indexing/test_symmetry.py | 3 - .../profile/test_profile_fitting.py | 5 -- .../integration/test_filter_overlaps.py | 11 ++-- .../integration/test_parallel_integrator.py | 3 - .../algorithms/polygon/clip/test_clipping.py | 11 ---- .../ellipsoid/test_derivatives.py | 8 --- .../profile_model/ellipsoid/test_model.py | 7 -- .../ellipsoid/test_parameterisation.py | 9 --- .../profile_model/ellipsoid/test_refiner.py | 16 +---- tests/algorithms/refinement/setup_geometry.py | 17 ----- .../algorithms/refinement/setup_minimiser.py | 4 -- tests/algorithms/refinement/sim_images.py | 2 - .../test_angle_derivatives_wrt_vector_elts.py | 1 - .../refinement/test_beam_parameters.py | 1 - .../refinement/test_centroid_outlier.py | 1 - .../refinement/test_crystal_parameters.py | 1 - .../refinement/test_detector_parameters.py | 4 -- .../refinement/test_finite_diffs.py | 2 +- .../test_multi_experiment_refinement.py | 1 - ...t_multi_panel_detector_parameterisation.py | 4 -- .../refinement/test_orientation_refinement.py | 2 - .../test_parameter_auto_reduction.py | 4 -- .../refinement/test_prediction_parameters.py | 1 - .../test_ref_passage_categorisation.py | 1 - .../refinement/test_refine_multi_wedges.py | 1 - .../refinement/test_refinement_regression.py | 1 - .../refinement/test_refiner_config.py | 3 - .../refinement/test_refiner_units.py | 1 - .../refinement/test_reflection_manager.py | 1 - .../refinement/test_restraints_gradients.py | 2 - .../test_restraints_parameterisation.py | 1 - .../refinement/test_rotation_decomposition.py | 4 -- .../test_scan_varying_block_calculation.py | 4 -- .../test_scan_varying_model_parameters.py | 1 - ...test_scan_varying_prediction_parameters.py | 13 +--- .../test_stills_prediction_parameters.py | 3 - .../refinement/test_stills_refinement.py | 1 - .../test_stills_spherical_relp_derivatives.py | 11 ---- .../refinement/test_two_theta_refinement.py | 3 - .../test_coordinate_system.py | 5 +- .../reflection_basis/test_map_frames.py | 3 - .../reflection_basis/test_transform.py | 3 - .../scaling/test_outlier_rejection.py | 2 - tests/algorithms/scaling/test_plots.py | 1 - .../scaling/test_scale_and_filter.py | 1 + .../scaling/test_scaling_restraints.py | 5 +- 
.../scaling/test_target_function.py | 4 +- .../spot_prediction/test_ray_predictor.py | 1 - .../test_reeke_index_generator.py | 3 - .../spot_prediction/test_rotation_angles.py | 2 - .../test_scan_varying_predictor.py | 3 +- .../spot_prediction/test_spot_prediction.py | 2 - .../statistics/test_binned_statistics.py | 2 - tests/algorithms/statistics/test_fast_mcd.py | 2 - .../array_family/test_identifiers_handling.py | 1 + tests/command_line/test_anvil_correction.py | 1 - .../command_line/test_combine_experiments.py | 2 - tests/command_line/test_export_bitmaps.py | 1 - tests/command_line/test_export_mosflm.py | 8 +-- tests/command_line/test_export_xds.py | 4 +- tests/command_line/test_import.py | 4 +- tests/command_line/test_merge.py | 1 - tests/command_line/test_powder_calibrate.py | 7 +- tests/command_line/test_refine.py | 1 - tests/command_line/test_report.py | 1 + tests/command_line/test_ssx_integrate.py | 3 +- tests/command_line/test_symmetry.py | 5 +- tests/command_line/test_two_theta_refine.py | 1 - tests/model/data/test_pixel_list.py | 2 - tests/report/test_plots.py | 1 - tests/util/__init__.py | 1 + tests/util/test_export_mtz.py | 1 - tests/util/test_image_grouping.py | 2 - tests/util/test_log.py | 2 - tests/util/test_render_3d.py | 4 +- 299 files changed, 497 insertions(+), 1164 deletions(-) rename .azure-pipelines/{flake8-validation.py => lint-validation.py} (60%) create mode 100644 newsfragments/2675.misc delete mode 100644 setup.cfg diff --git a/.azure-pipelines/azure-pipelines.yml b/.azure-pipelines/azure-pipelines.yml index c205a96375..89b16441cf 100644 --- a/.azure-pipelines/azure-pipelines.yml +++ b/.azure-pipelines/azure-pipelines.yml @@ -37,10 +37,10 @@ stages: - bash: | set -eux - pip install flake8 + pip install ruff cd repository - python .azure-pipelines/flake8-validation.py - displayName: Flake8 validation + python .azure-pipelines/lint-validation.py + displayName: Ruff validation # Set up constants for further build steps - bash: | diff --git a/.azure-pipelines/flake8-validation.py b/.azure-pipelines/lint-validation.py similarity index 60% rename from .azure-pipelines/flake8-validation.py rename to .azure-pipelines/lint-validation.py index 6381853eb1..50a18bad80 100644 --- a/.azure-pipelines/flake8-validation.py +++ b/.azure-pipelines/lint-validation.py @@ -3,18 +3,14 @@ import os import subprocess -# Flake8 validation -known_bad = { - "src/dials/algorithms/rs_mapper/__init__.py": {"F401", "F403"}, - "src/dials/algorithms/shoebox/__init__.py": {"F401", "F403"}, - "src/dials/nexus/__init__.py": {"F401", "F403"}, - "src/dials/test/command_line/test_generate_distortion_maps.py": {"F841"}, -} +# Ruff validation +known_bad = {} failures = 0 try: - flake8 = subprocess.run( + process = subprocess.run( [ - "flake8", + "ruff", + "check", "--exit-zero", ], capture_output=True, @@ -24,14 +20,14 @@ ) except (subprocess.CalledProcessError, subprocess.TimeoutExpired) as e: print( - "##vso[task.logissue type=error;]flake8 validation failed with", + "##vso[task.logissue type=error;]Ruff validation failed with", str(e.__class__.__name__), ) print(e.stdout) print(e.stderr) - print("##vso[task.complete result=Failed;]flake8 validation failed") + print("##vso[task.complete result=Failed;]Ruff validation failed") exit() -for line in flake8.stdout.split("\n"): +for line in process.stdout.split("\n"): if ":" not in line: continue filename, lineno, column, error = line.split(":", maxsplit=3) @@ -47,5 +43,5 @@ ) if failures: - print(f"##vso[task.logissue type=warning]Found {failures} flake8 
violation(s)") - print(f"##vso[task.complete result=Failed;]Found {failures} flake8 violation(s)") + print(f"##vso[task.logissue type=warning]Found {failures} Ruff violation(s)") + print(f"##vso[task.complete result=Failed;]Found {failures} Ruff violation(s)") diff --git a/.clang-format b/.clang-format index 0b02d50f7c..4b79758e1e 100644 --- a/.clang-format +++ b/.clang-format @@ -26,4 +26,5 @@ IndentCaseLabels: false NamespaceIndentation: Inner # Don't sort includes SortIncludes: false +AllowShortLoopsOnASingleLine: false --- diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index a4e0a07838..a8a2bd7eab 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,7 +1,7 @@ repos: # Syntax validation and some basic sanity checks - repo: https://github.com/pre-commit/pre-commit-hooks - rev: v4.1.0 + rev: v4.6.0 hooks: - id: check-merge-conflict - id: check-ast @@ -14,46 +14,21 @@ repos: - id: no-commit-to-branch name: "Don't commit to 'main' directly" -# Automatically sort imports -- repo: https://github.com/PyCQA/isort - rev: 5.12.0 +- repo: https://github.com/charliermarsh/ruff-pre-commit + rev: v0.6.1 hooks: - - id: isort - args: [ - '-a', 'from __future__ import annotations', # 3.7-3.11 - '--rm', 'from __future__ import absolute_import', # -3.0 - '--rm', 'from __future__ import division', # -3.0 - '--rm', 'from __future__ import generator_stop', # -3.7 - '--rm', 'from __future__ import generators', # -2.3 - '--rm', 'from __future__ import nested_scopes', # -2.2 - '--rm', 'from __future__ import print_function', # -3.0 - '--rm', 'from __future__ import unicode_literals', # -3.0 - '--rm', 'from __future__ import with_statement', # -2.6 - ] - exclude: ^installer/ - -# Automatic source code formatting -- repo: https://github.com/psf/black - rev: 22.3.0 - hooks: - - id: black - args: [--safe, --quiet] - files: \.pyi?$|SConscript$|^libtbx_config$ - types: [file] + - id: ruff + args: [--fix, --exit-non-zero-on-fix, --show-fixes] + - id: ruff-format + files: \.pyi?$|SConscript$|^libtbx_config$ + types: [file] - repo: https://github.com/pre-commit/mirrors-clang-format - rev: v13.0.1 + rev: v18.1.8 hooks: - id: clang-format files: \.c(c|pp|xx)?$|\.h(pp)?$ -# Linting -- repo: https://github.com/PyCQA/flake8 - rev: 4.0.1 - hooks: - - id: flake8 - additional_dependencies: ['flake8-comprehensions==3.8.0'] - # Give a specific warning for added image files - repo: local hooks: diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 66c52af4e8..6099c9f0b5 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -101,7 +101,7 @@ message that the code in question is special and care should be taken. install` - if in the libtbx ecosystem - or manually install the hooks with `pre-commit install`. These use the [pre-commit] tool and ensure that various sanity checks are run before commit, including import order, - formatting, syntax compatibility, basic flake8 checks, lack of conflict + formatting, syntax compatibility, basic Ruff checks, lack of conflict markers and file size limits. Basically, most of the essential rules will be checked automatically by this. - **We format python code with [black]**. This means that while writing code @@ -110,11 +110,11 @@ message that the code in question is special and care should be taken. black (the pre-commit hook will help do this for you), but if for some reason you miss this, the whole codebase is auto-cleaned once a week. Most IDEs and editors have support for running formatters like black automatically. 
-- **Avoid introducing new flake8 warnings** - if you feel that it's appropriate
+- **Avoid introducing new Ruff warnings** - if you feel that it's appropriate
   to ignore a warning, mark it up explicitly with a [noqa] comment. The most
   important subset of checks are run as part of the pre-commit checks, but
   please try to resolve any other valid warnings shown with a normal run of
-  flake8. The configuration in the repository turns off any warnings that
+  Ruff. The configuration in the repository turns off any warnings that
   disagree with our standard practice.
 - **We format C++ code with [clang-format]**. We use a configuration for style
   broadly compatible with what our existing prevailing style was. We don't
@@ -127,7 +127,7 @@ message that the code in question is special and care should be taken.
 [black]: https://github.com/psf/black
 [isort]: https://github.com/PyCQA/isort
 [clang-format]: https://clang.llvm.org/docs/ClangFormat.html
-[noqa]: http://flake8.pycqa.org/en/3.7.7/user/violations.html#in-line-ignoring-errors
+[noqa]: https://docs.astral.sh/ruff/linter/#error-suppression
 [PEP8]: https://www.python.org/dev/peps/pep-0008
 [Google-style]: https://sphinxcontrib-napoleon.readthedocs.io/en/latest/example_google.html
 [Zen of Python]: https://www.python.org/dev/peps/pep-0020/#the-zen-of-python
diff --git a/build.py b/build.py
index 496571f86f..da3b4ac16c 100644
--- a/build.py
+++ b/build.py
@@ -5,6 +5,7 @@
 out of a setup.py, but mainly because at the moment it's how poetry offloads
 the unresolved build phases.
 """
+
 from __future__ import annotations
 
 import ast
diff --git a/doc/sphinx/button.py b/doc/sphinx/button.py
index 7d5fce3910..4231e07f88 100644
--- a/doc/sphinx/button.py
+++ b/doc/sphinx/button.py
@@ -9,6 +9,7 @@
     '{{ text }}'
 )
 
+
 # placeholder node for document graph
 class button_node(nodes.General, nodes.Element):
     pass
diff --git a/doc/sphinx/conf.py b/doc/sphinx/conf.py
index e2fac55052..8c1f65afc2 100644
--- a/doc/sphinx/conf.py
+++ b/doc/sphinx/conf.py
@@ -17,6 +17,8 @@
 import os
 import sys
 
+import alabaster
+
 # If extensions (or modules to document with autodoc) are in another directory,
 # add these directories to sys.path here. If the directory is relative to the
 # documentation root, use os.path.abspath to make it absolute, like shown here.
@@ -121,8 +121,6 @@
 
 # -- Options for HTML output ----------------------------------------------
 
-import alabaster
-
 html_theme_path = [alabaster.get_path()]
 extensions.append("alabaster")
 html_theme = "alabaster"
diff --git a/installer/bootstrap.py b/installer/bootstrap.py
index ac1aa0a992..3b30935dda 100755
--- a/installer/bootstrap.py
+++ b/installer/bootstrap.py
@@ -172,9 +172,7 @@ def install_micromamba(python, include_cctbx, cmake):
 There was a failure in constructing the conda environment.
 Attempt {retry} of 5 will start {retry} minute(s) from {t}.
******************************************************************************* -""".format( - retry=retry, t=time.asctime() - ) +""".format(retry=retry, t=time.asctime()) ) time.sleep(retry * 60) else: @@ -1251,7 +1247,6 @@ def configure_build_cmake(): conda activate {dist_root}/conda_base """.format( dist_root=os.getcwd(), - build_lib=os.path.join(os.getcwd(), "build", "lib"), ) ) diff --git a/installer/dials_installer.py b/installer/dials_installer.py index ec02294cc9..fa8c20651d 100644 --- a/installer/dials_installer.py +++ b/installer/dials_installer.py @@ -13,8 +13,8 @@ libtbx_path = os.path.join(installer_path, "lib") if libtbx_path not in sys.path: sys.path.append(libtbx_path) -from libtbx.auto_build import install_distribution -from libtbx.auto_build import installer_utils +from libtbx.auto_build import install_distribution # noqa: E402 +from libtbx.auto_build import installer_utils # noqa: E402 class installer(install_distribution.installer): diff --git a/libtbx_refresh.py b/libtbx_refresh.py index 30f4e427fe..e6a62507cf 100644 --- a/libtbx_refresh.py +++ b/libtbx_refresh.py @@ -60,7 +60,7 @@ """ ) -import dials.precommitbx.nagger +import dials.precommitbx.nagger # noqa: E402 try: from dials.util.version import dials_version @@ -218,9 +218,7 @@ def _create_dials_env_script(): } unset LIBTBX_BUILD -""" % abs( - libtbx.env.build_path - ) +""" % abs(libtbx.env.build_path) with open(filename, "w") as fh: fh.write(script.lstrip()) @@ -311,9 +309,7 @@ def dispatcher_inner(name): Requires(ac, Dir(libtbx.env.under_build("lib"))) Depends(ac, os.path.join(libtbx.env.dist_path("dials"), "src", "dials", "util", "options.py")) Depends(ac, os.path.join(libtbx.env.dist_path("dials"), "util", "autocomplete.sh")) -""".format( - "\n".join([f' "{cmd}",' for cmd in command_list]) - ) +""".format("\n".join([f' "{cmd}",' for cmd in command_list])) ) # Generate a bash script activating command line completion for each relevant command diff --git a/newsfragments/2675.misc b/newsfragments/2675.misc new file mode 100644 index 0000000000..ec988a5462 --- /dev/null +++ b/newsfragments/2675.misc @@ -0,0 +1 @@ +Migrate pre-commit tooling to ruff. diff --git a/pyproject.toml b/pyproject.toml index a4779828b4..3ffd854b4b 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,15 +1,63 @@ -[tool.black] -include='\.pyi?$|/SConscript$|/libtbx_config$' - [tool.towncrier] package = "dials" package_dir = ".." 
filename = "CHANGELOG.rst" issue_format = "`#{issue} `_" -[tool.isort] -sections="FUTURE,STDLIB,THIRDPARTY,CCTBX,FIRSTPARTY,LOCALFOLDER" -known_firstparty="dials_*" -known_cctbx="boost,boost_adaptbx,cbflib_adaptbx,cctbx,chiltbx,clipper_adaptbx,cma_es,cootbx,crys3d,cudatbx,dxtbx,fable,fast_linalg,fftw3tbx,gltbx,iota,iotbx,libtbx,mmtbx,omptbx,prime,rstbx,scitbx,simtbx,smtbx,spotfinder,tbxx,ucif,wxtbx,xfel" -profile="black" +[tool.ruff.lint] +select = ["E", "F", "W", "C4", "I"] +unfixable = ["F841"] +# E501 line too long (handled by formatter) +# E741 Ambiguous variable name (We have lots of meaningful I, L, l) +ignore = ["E501", "E741"] + +[tool.ruff.lint.per-file-ignores] +"installer/**.py" = ["I"] +"**/__init__.py" = ["F401"] + +[tool.ruff.lint.isort] +known-first-party = ["dials_*", "dials"] +required-imports = ["from __future__ import annotations"] +section-order = [ + "future", + "standard-library", + "third-party", + "cctbx", + "first-party", + "local-folder", +] +[tool.ruff.lint.isort.sections] +"cctbx" = [ + "boost", + "boost_adaptbx", + "cbflib_adaptbx", + "cctbx", + "chiltbx", + "clipper_adaptbx", + "cma_es", + "cootbx", + "crys3d", + "cudatbx", + "dxtbx", + "fable", + "fast_linalg", + "fftw3tbx", + "gltbx", + "iota", + "iotbx", + "libtbx", + "mmtbx", + "omptbx", + "prime", + "rstbx", + "scitbx", + "serialtbx", + "simtbx", + "smtbx", + "spotfinder", + "tbxx", + "ucif", + "wxtbx", + "xfel", +] diff --git a/setup.cfg b/setup.cfg deleted file mode 100644 index 0f2786ff3d..0000000000 --- a/setup.cfg +++ /dev/null @@ -1,16 +0,0 @@ -[flake8] -# Black disagrees with flake8 on a few points. Ignore those. -ignore = E203, E266, E501, W503 -# E203 whitespace before ':' -# E266 too many leading '#' for block comment -# E501 line too long -# W503 line break before binary operator - -max-line-length = 88 - -select = - E401,E711,E712,E713,E714,E721,E722,E901, - F401,F402,F403,F405,F541,F631,F632,F633,F811,F812,F821,F822,F841,F901, - W191,W291,W292,W293,W602,W603,W604,W605,W606, - # flake8-comprehensions, https://github.com/adamchainz/flake8-comprehensions - C4, diff --git a/src/dials/__init__.py b/src/dials/__init__.py index 777551ee79..7ef2dffa48 100644 --- a/src/dials/__init__.py +++ b/src/dials/__init__.py @@ -2,14 +2,14 @@ import logging -logging.getLogger("dials").addHandler(logging.NullHandler()) - # Intercept easy_mp exceptions to extract stack traces before they are lost at # the libtbx process boundary/the easy_mp API. In the case of a subprocess # crash we print the subprocess stack trace, which will be most useful for # debugging parallelized sections of DIALS code. 
import libtbx.scheduling.stacktrace as _lss +logging.getLogger("dials").addHandler(logging.NullHandler()) + def _stacktrace_tracer(error, trace, intercepted_call=_lss.set_last_exception): """Intercepts and prints ephemeral stacktraces.""" diff --git a/src/dials/algorithms/background/gmodel/model.h b/src/dials/algorithms/background/gmodel/model.h index 381f0c23b8..f276c5fbcf 100644 --- a/src/dials/algorithms/background/gmodel/model.h +++ b/src/dials/algorithms/background/gmodel/model.h @@ -22,7 +22,7 @@ namespace dials { namespace algorithms { */ class BackgroundModel { public: - virtual ~BackgroundModel(){}; + virtual ~BackgroundModel() {}; virtual af::versa > extract(std::size_t panel, int6 bbox) const = 0; diff --git a/src/dials/algorithms/background/gmodel/modeller.py b/src/dials/algorithms/background/gmodel/modeller.py index 0914c70a45..ae496f4e69 100644 --- a/src/dials/algorithms/background/gmodel/modeller.py +++ b/src/dials/algorithms/background/gmodel/modeller.py @@ -51,7 +51,6 @@ def __init__( self.detector_mask = None def add_image(self, frame, image, mask, reflections): - height, width = image.all() _, _, _, _, z0, z1 = reflections["bbox"].parts() @@ -90,7 +89,6 @@ def compute(self): class Creator: def __init__(self, experiment, params): - self.modeller = None self.background = None self.experiment = experiment diff --git a/src/dials/algorithms/background/modeller.py b/src/dials/algorithms/background/modeller.py index 51ef303e84..293d0ff737 100644 --- a/src/dials/algorithms/background/modeller.py +++ b/src/dials/algorithms/background/modeller.py @@ -223,7 +223,6 @@ def finalize_model(self): result = [] for i in range(len(self.result)): - # Get the statistics stats = self.result.get(i) mean = stats.mean(self.min_images) diff --git a/src/dials/algorithms/centroid/__init__.py b/src/dials/algorithms/centroid/__init__.py index f78ce90bab..94953c92dc 100644 --- a/src/dials/algorithms/centroid/__init__.py +++ b/src/dials/algorithms/centroid/__init__.py @@ -49,7 +49,6 @@ def centroid_px_to_mm_panel(panel, scan, position, variance, sd_error): sd_error_mm = [sde * s for sde, s in zip(sd_error, scale2)] else: - # Convert Pixel coordinate into mm/rad x, y, z = position.parts() xy_mm = panel.pixel_to_millimeter(flex.vec2_double(x, y)) @@ -98,7 +97,6 @@ def tof_centroid_px_to_mm_panel(panel, scan, position, variance, sd_error): sd_error_mm = [sde * s for sde, s in zip(sd_error, scale2)] else: - # Convert Pixel coordinate into mm/tof x, y, z = position.parts() xy_mm = panel.pixel_to_millimeter(flex.vec2_double(x, y)) diff --git a/src/dials/algorithms/image/centroid/generate_bias_lookup_table.py b/src/dials/algorithms/image/centroid/generate_bias_lookup_table.py index b8c9bdfd4b..80a6fae6de 100644 --- a/src/dials/algorithms/image/centroid/generate_bias_lookup_table.py +++ b/src/dials/algorithms/image/centroid/generate_bias_lookup_table.py @@ -2,7 +2,6 @@ Code to generate lookup values for algorithms/image/centroid/bias.h. 
""" - from __future__ import annotations @@ -71,7 +70,6 @@ def compute_lookup_table(max_sigma=0.5, N1=1000, N2=1000, N3=50): if __name__ == "__main__": - sigma, bias_sq = compute_lookup_table() for s, b in zip(sigma, bias_sq): diff --git a/src/dials/algorithms/indexing/basis_vector_search/combinations.py b/src/dials/algorithms/indexing/basis_vector_search/combinations.py index 1e2494001e..4431f3a280 100644 --- a/src/dials/algorithms/indexing/basis_vector_search/combinations.py +++ b/src/dials/algorithms/indexing/basis_vector_search/combinations.py @@ -18,7 +18,6 @@ def candidate_orientation_matrices(basis_vectors, max_combinations=None): - # select unique combinations of input vectors to test # the order of combinations is such that combinations comprising vectors # nearer the beginning of the input list will appear before combinations diff --git a/src/dials/algorithms/indexing/basis_vector_search/fft3d.py b/src/dials/algorithms/indexing/basis_vector_search/fft3d.py index d589edccb4..d0af91a055 100644 --- a/src/dials/algorithms/indexing/basis_vector_search/fft3d.py +++ b/src/dials/algorithms/indexing/basis_vector_search/fft3d.py @@ -200,7 +200,6 @@ def find_basis_vectors(self, reciprocal_lattice_vectors): return self.candidate_basis_vectors, used_in_indexing def _fft(self, reciprocal_lattice_vectors, d_min): - ( reciprocal_space_grid, used_in_indexing, diff --git a/src/dials/algorithms/indexing/basis_vector_search/strategy.py b/src/dials/algorithms/indexing/basis_vector_search/strategy.py index fc6e9080ae..1a5cd41047 100644 --- a/src/dials/algorithms/indexing/basis_vector_search/strategy.py +++ b/src/dials/algorithms/indexing/basis_vector_search/strategy.py @@ -1,6 +1,5 @@ """Basis vector search strategies.""" - from __future__ import annotations diff --git a/src/dials/algorithms/indexing/indexer.py b/src/dials/algorithms/indexing/indexer.py index 13354e9c1f..1215b53917 100644 --- a/src/dials/algorithms/indexing/indexer.py +++ b/src/dials/algorithms/indexing/indexer.py @@ -369,7 +369,6 @@ def __init__(self, reflections, experiments, params): def from_parameters( reflections, experiments, known_crystal_models=None, params=None ): - if known_crystal_models is not None: from dials.algorithms.indexing.known_orientation import ( IndexerKnownOrientation, @@ -527,9 +526,7 @@ def index(self): if max_lattices is not None and len(experiments.crystals()) >= max_lattices: break if len(experiments) > 0: - cutoff_fraction = ( - self.params.multiple_lattice_search.recycle_unindexed_reflections_cutoff - ) + cutoff_fraction = self.params.multiple_lattice_search.recycle_unindexed_reflections_cutoff d_spacings = 1 / self.reflections["rlp"].norms() d_min_indexed = flex.min(d_spacings.select(self.indexed_reflections)) min_reflections_for_indexing = cutoff_fraction * len( @@ -944,13 +941,10 @@ def find_max_cell(self): params = self.params.max_cell_estimation if self.params.max_cell is libtbx.Auto: if self.params.known_symmetry.unit_cell is not None: - uc_params = ( - self._symmetry_handler.target_symmetry_primitive.unit_cell().parameters() - ) + uc_params = self._symmetry_handler.target_symmetry_primitive.unit_cell().parameters() self.params.max_cell = params.multiplier * max(uc_params[:3]) logger.info("Using max_cell: %.1f Angstrom", self.params.max_cell) else: - convert_reflections_z_to_deg = True all_tof_experiments = False for expt in self.experiments: diff --git a/src/dials/algorithms/indexing/lattice_search/__init__.py b/src/dials/algorithms/indexing/lattice_search/__init__.py index c4b9ffc8c2..e192763f90 
100644 --- a/src/dials/algorithms/indexing/lattice_search/__init__.py +++ b/src/dials/algorithms/indexing/lattice_search/__init__.py @@ -126,7 +126,6 @@ def __init__(self, reflections, experiments, params): break def find_candidate_crystal_models(self): - candidate_crystal_models = [] if self._lattice_search_strategy: candidate_crystal_models = ( @@ -162,7 +161,6 @@ def find_lattices(self): return experiments def choose_best_orientation_matrix(self, candidate_orientation_matrices): - from dials.algorithms.indexing import model_evaluation solution_scorer = self.params.basis_vector_combinations.solution_scorer @@ -353,7 +351,6 @@ def find_candidate_crystal_models(self): return candidate_crystal_models def find_candidate_orientation_matrices(self, candidate_basis_vectors): - candidate_crystal_models = combinations.candidate_orientation_matrices( candidate_basis_vectors, max_combinations=self.params.basis_vector_combinations.max_combinations, @@ -383,7 +380,6 @@ def find_candidate_orientation_matrices(self, candidate_basis_vectors): return candidate_crystal_models def optimise_basis_vectors(self): - optimised_basis_vectors = optimise.optimise_basis_vectors( self.reflections["rlp"].select(self._used_in_indexing), self.candidate_basis_vectors, @@ -393,7 +389,6 @@ def optimise_basis_vectors(self): ] def debug_show_candidate_basis_vectors(self): - vectors = self.candidate_basis_vectors logger.debug("Candidate basis vectors:") diff --git a/src/dials/algorithms/indexing/lattice_search/low_res_spot_match.py b/src/dials/algorithms/indexing/lattice_search/low_res_spot_match.py index 2db09ab42a..d7876d3f6b 100644 --- a/src/dials/algorithms/indexing/lattice_search/low_res_spot_match.py +++ b/src/dials/algorithms/indexing/lattice_search/low_res_spot_match.py @@ -471,7 +471,6 @@ def _pairs_with_seed(self, seed): return result def _extend_by_candidates(self, graph): - existing_ids = [e["spot_id"] for e in graph.vertices] obs_relps = [matrix.col(self.spots[e]["rlp"]) for e in existing_ids] exp_relps = [e["rlp_datum"] for e in graph.vertices] @@ -532,7 +531,6 @@ def _extend_by_candidates(self, graph): @staticmethod def _fit_U_from_superposed_points(reference, other): - # Add the origin to both sets of points reference.append((0, 0, 0)) other.append((0, 0, 0)) @@ -542,7 +540,6 @@ def _fit_U_from_superposed_points(reference, other): return fit.r def _fit_crystal_model(self, graph): - vertices = graph.vertices # Reciprocal lattice points of the observations @@ -556,7 +553,6 @@ def _fit_crystal_model(self, graph): UB = U * self.Bmat if self._params.bootstrap_crystal: - # Attempt to index the low resolution spots from dials_algorithms_indexing_ext import AssignIndices diff --git a/src/dials/algorithms/indexing/lattice_search/strategy.py b/src/dials/algorithms/indexing/lattice_search/strategy.py index 6a2c699450..30af832802 100644 --- a/src/dials/algorithms/indexing/lattice_search/strategy.py +++ b/src/dials/algorithms/indexing/lattice_search/strategy.py @@ -1,6 +1,5 @@ """Lattice search strategies.""" - from __future__ import annotations diff --git a/src/dials/algorithms/indexing/max_cell.py b/src/dials/algorithms/indexing/max_cell.py index 86c56c1087..cd8a4eeefd 100644 --- a/src/dials/algorithms/indexing/max_cell.py +++ b/src/dials/algorithms/indexing/max_cell.py @@ -27,7 +27,6 @@ def find_max_cell( logger.debug("Finding suitable max_cell based on %i reflections", len(reflections)) # Exclude potential ice-ring spots from nearest neighbour analysis if needed if filter_ice: - ice_sel = 
ice_rings_selection(reflections) reflections = reflections.select(~ice_sel) logger.debug( diff --git a/src/dials/algorithms/indexing/model_evaluation.py b/src/dials/algorithms/indexing/model_evaluation.py index b31801603f..5fcc88eb6f 100644 --- a/src/dials/algorithms/indexing/model_evaluation.py +++ b/src/dials/algorithms/indexing/model_evaluation.py @@ -36,7 +36,7 @@ def filter_doubled_cell(solutions): accepted_solutions = [] for i1, s1 in enumerate(solutions): doubled_cell = False - for (m1, m2, m3) in ( + for m1, m2, m3 in ( (2, 1, 1), (1, 2, 1), (1, 1, 2), diff --git a/src/dials/algorithms/indexing/non_primitive_basis.py b/src/dials/algorithms/indexing/non_primitive_basis.py index 25101cb153..9053147f75 100644 --- a/src/dials/algorithms/indexing/non_primitive_basis.py +++ b/src/dials/algorithms/indexing/non_primitive_basis.py @@ -45,7 +45,6 @@ def correct(experiments, reflections, assign_indices, threshold=0.9): def detect(miller_indices, threshold=0.9): - for test in tools.R: cum = tools.cpp_absence_test(miller_indices, test["mod"], test["vec"]) for counter in range(test["mod"]): diff --git a/src/dials/algorithms/indexing/ssx/analysis.py b/src/dials/algorithms/indexing/ssx/analysis.py index 794dba36d7..3bedfde9ab 100644 --- a/src/dials/algorithms/indexing/ssx/analysis.py +++ b/src/dials/algorithms/indexing/ssx/analysis.py @@ -363,16 +363,18 @@ def _generate_hist_data(rmsd_arrays, step=0.01): "yaxis": {"title": "RMSD (px)"}, }, }, - "rmsdz": { - "data": rmsdz_data, - "layout": { - "title": "RMSD (dPsi) per image", - "xaxis": {"title": "image number"}, - "yaxis": {"title": "RMSD dPsi (deg)"}, - }, - } - if rmsdz_data - else {}, + "rmsdz": ( + { + "data": rmsdz_data, + "layout": { + "title": "RMSD (dPsi) per image", + "xaxis": {"title": "image number"}, + "yaxis": {"title": "RMSD dPsi (deg)"}, + }, + } + if rmsdz_data + else {} + ), "rmsdxy_hist": { "data": [ { @@ -398,23 +400,25 @@ def _generate_hist_data(rmsd_arrays, step=0.01): "barmode": "overlay", }, }, - "rmsdz_hist": { - "data": [ - { - "x": bin_centers_z.tolist(), - "y": hist_z.tolist(), - "type": "bar", - "name": "RMSD dPsi", + "rmsdz_hist": ( + { + "data": [ + { + "x": bin_centers_z.tolist(), + "y": hist_z.tolist(), + "type": "bar", + "name": "RMSD dPsi", + }, + ], + "layout": { + "title": "Distribution of RMSDs (dPsi)", + "xaxis": {"title": "RMSD dPsi (deg)"}, + "yaxis": {"title": "Number of images"}, + "bargap": 0, }, - ], - "layout": { - "title": "Distribution of RMSDs (dPsi)", - "xaxis": {"title": "RMSD dPsi (deg)"}, - "yaxis": {"title": "Number of images"}, - "bargap": 0, - }, - } - if rmsdz_data - else {}, + } + if rmsdz_data + else {} + ), } return plots diff --git a/src/dials/algorithms/indexing/ssx/processing.py b/src/dials/algorithms/indexing/ssx/processing.py index dc6bc1a35e..d73f180f44 100644 --- a/src/dials/algorithms/indexing/ssx/processing.py +++ b/src/dials/algorithms/indexing/ssx/processing.py @@ -75,7 +75,6 @@ class IndexingResult: class manage_loggers(object): - """ A contextmanager for reducing logging levels for the underlying code of parallel ssx programs. @@ -126,7 +125,6 @@ def index_one( image_no: int, known_crystal_models: List[Crystal] = None, ) -> Union[Tuple[ExperimentList, flex.reflection_table], Tuple[bool, bool]]: - elist = ExperimentList([experiment]) params.indexing.nproc = 1 # make sure none of the processes try to spawn multiprocessing within existing multiprocessing. 
for method in method_list: @@ -228,7 +226,6 @@ def index_all_concurrent( params: phil.scope_extract, method_list: List[str], ) -> Tuple[ExperimentList, flex.reflection_table, dict]: - input_iterable = [] results_summary = { i: [] for i in range(len(experiments)) @@ -452,11 +449,11 @@ def preprocess( if n_cells > 20: centile_95_pos = int(math.floor(0.95 * n_cells)) limit = sorted_cells[centile_95_pos] - logger.info(f"Setting max cell to {limit:.1f} " + "\u212B") + logger.info(f"Setting max cell to {limit:.1f} " + "\u212b") params.indexing.max_cell = limit else: params.indexing.max_cell = sorted_cells[-1] - logger.info(f"Setting max cell to {sorted_cells[-1]:.1f} " + "\u212B") + logger.info(f"Setting max cell to {sorted_cells[-1]:.1f} " + "\u212b") # Determine which methods to try method_list = params.method @@ -476,7 +473,6 @@ def index( observed: flex.reflection_table, params: phil.scope_extract, ) -> Tuple[ExperimentList, flex.reflection_table, dict]: - if params.output.nuggets: params.output.nuggets = pathlib.Path( params.output.nuggets diff --git a/src/dials/algorithms/indexing/stills_indexer.py b/src/dials/algorithms/indexing/stills_indexer.py index e7c8f35dd7..33ae6e590a 100644 --- a/src/dials/algorithms/indexing/stills_indexer.py +++ b/src/dials/algorithms/indexing/stills_indexer.py @@ -65,10 +65,15 @@ def plot_displacements(reflections, predictions, experiments): def e_refine(params, experiments, reflections, graph_verbose=False): # Stills-specific parameters we always want - assert params.refinement.reflections.outlier.algorithm in ( - None, - "null", - ), "Cannot index, set refinement.reflections.outlier.algorithm=null" # we do our own outlier rejection + assert ( + params.refinement.reflections.outlier.algorithm + in ( + None, + "null", + ) + ), ( + "Cannot index, set refinement.reflections.outlier.algorithm=null" + ) # we do our own outlier rejection from dials.algorithms.refinement.refiner import RefinerFactory @@ -114,9 +119,7 @@ def index(self): if max_lattices is not None and len(experiments.crystals()) >= max_lattices: break if len(experiments) > 0: - cutoff_fraction = ( - self.params.multiple_lattice_search.recycle_unindexed_reflections_cutoff - ) + cutoff_fraction = self.params.multiple_lattice_search.recycle_unindexed_reflections_cutoff d_spacings = 1 / self.reflections["rlp"].norms() d_min_indexed = flex.min(d_spacings.select(self.indexed_reflections)) min_reflections_for_indexing = cutoff_fraction * len( @@ -202,9 +205,7 @@ def index(self): # Note, changes to params after initial indexing. Cannot use tie to target when fixing the unit cell. 
self.all_params.refinement.reflections.outlier.algorithm = "null" self.all_params.refinement.parameterisation.crystal.fix = "cell" - self.all_params.refinement.parameterisation.crystal.unit_cell.restraints.tie_to_target = ( - [] - ) + self.all_params.refinement.parameterisation.crystal.unit_cell.restraints.tie_to_target = [] for expt_id, experiment in enumerate(experiments): reflections = reflections_for_refinement.select( @@ -312,7 +313,6 @@ def index(self): reflections_for_refinement = isoform_reflections if self.params.refinement_protocol.mode == "repredict_only": - from dials.algorithms.indexing.nave_parameters import NaveParameters from dials.algorithms.refinement.prediction.managed_predictors import ( ExperimentsPredictorFactory, @@ -474,7 +474,6 @@ def index(self): if ( "xyzcal.mm" in self.refined_reflections ): # won't be there if refine_all_candidates = False and no isoforms - self._xyzcal_mm_to_px(self.experiments, self.refined_reflections) def experiment_list_for_crystal(self, crystal): diff --git a/src/dials/algorithms/indexing/symmetry.py b/src/dials/algorithms/indexing/symmetry.py index ac70a0592c..d79baf3f91 100644 --- a/src/dials/algorithms/indexing/symmetry.py +++ b/src/dials/algorithms/indexing/symmetry.py @@ -25,7 +25,6 @@ def metric_supergroup(group): def groups_cache(fn): class MultiClassCache(object): - "A set of caches for different bravais types" instances = {} @@ -120,7 +119,6 @@ def find_matching_symmetry( for acentric_subgroup, acentric_supergroup, cb_op_minimum_ref in zip( acentric_subgroups, acentric_supergroups, cb_ops ): - # Make symmetry object: unit-cell + space-group # The unit cell is potentially modified to be exactly compatible # with the space group symmetry. @@ -181,7 +179,6 @@ def find_matching_symmetry( class SymmetryHandler: def __init__(self, unit_cell=None, space_group=None, max_delta=5): - self._max_delta = max_delta self.target_symmetry_primitive = None self.target_symmetry_reference_setting = None @@ -193,7 +190,6 @@ def __init__(self, unit_cell=None, space_group=None, max_delta=5): target_space_group = target_space_group.build_derived_patterson_group() if unit_cell is not None: - assert ( space_group ), "space_group must be provided in combination with unit_cell" @@ -226,9 +222,7 @@ def __init__(self, unit_cell=None, space_group=None, max_delta=5): space_group=target_space_group.change_basis(self.cb_op_inp_ref) ) - cb_op_reference_to_primitive = ( - self.target_symmetry_reference_setting.change_of_basis_op_to_primitive_setting() - ) + cb_op_reference_to_primitive = self.target_symmetry_reference_setting.change_of_basis_op_to_primitive_setting() if unit_cell: self.target_symmetry_primitive = ( self.target_symmetry_reference_setting.change_basis( diff --git a/src/dials/algorithms/integration/boost_python/kapton_ext.cc b/src/dials/algorithms/integration/boost_python/kapton_ext.cc index 78895a3342..7d079c159a 100644 --- a/src/dials/algorithms/integration/boost_python/kapton_ext.cc +++ b/src/dials/algorithms/integration/boost_python/kapton_ext.cc @@ -95,7 +95,7 @@ namespace kapton { namespace boost_python { namespace { def("get_kapton_path_cpp", &kapton::get_kapton_path_cpp); } -}}} // namespace kapton::boost_python:: +}}} // namespace kapton::boost_python BOOST_PYTHON_MODULE(dials_algorithms_integration_kapton_ext) { kapton::boost_python::kapton_init_module(); diff --git a/src/dials/algorithms/integration/image_integrator.py b/src/dials/algorithms/integration/image_integrator.py index 52a639ce2c..cce7812fe4 100644 --- 
a/src/dials/algorithms/integration/image_integrator.py +++ b/src/dials/algorithms/integration/image_integrator.py @@ -188,11 +188,12 @@ def __call__(self): image = imageset.get_corrected_data(i) mask = imageset.get_mask(i) if self.params.integration.lookup.mask is not None: - assert len(mask) == len( - self.params.lookup.mask - ), "Mask/Image are incorrect size %d %d" % ( - len(mask), - len(self.params.integration.lookup.mask), + assert len(mask) == len(self.params.lookup.mask), ( + "Mask/Image are incorrect size %d %d" + % ( + len(mask), + len(self.params.integration.lookup.mask), + ) ) mask = tuple( m1 & m2 for m1, m2 in zip(self.params.integration.lookup.mask, mask) diff --git a/src/dials/algorithms/integration/integrator.py b/src/dials/algorithms/integration/integrator.py index dddb21fbd2..1568af3fd0 100644 --- a/src/dials/algorithms/integration/integrator.py +++ b/src/dials/algorithms/integration/integrator.py @@ -587,9 +587,9 @@ def _finalize_stills(reflections, experiments, params): ) # apply detector gain to summation variances - integrated[ - "intensity.sum.variance" - ] *= params.integration.summation.detector_gain + integrated["intensity.sum.variance"] *= ( + params.integration.summation.detector_gain + ) if "background.sum.value" in integrated: if (integrated["background.sum.variance"] < 0).count(True) > 0: raise Sorry( @@ -602,9 +602,9 @@ def _finalize_stills(reflections, experiments, params): ) integrated = integrated.select(integrated["background.sum.variance"] > 0) # apply detector gain to background summation variances - integrated[ - "background.sum.variance" - ] *= params.integration.summation.detector_gain + integrated["background.sum.variance"] *= ( + params.integration.summation.detector_gain + ) reflections = integrated diff --git a/src/dials/algorithms/integration/kapton_2019_correction.py b/src/dials/algorithms/integration/kapton_2019_correction.py index 8d552b1ae6..2b729d65e8 100644 --- a/src/dials/algorithms/integration/kapton_2019_correction.py +++ b/src/dials/algorithms/integration/kapton_2019_correction.py @@ -559,18 +559,22 @@ def extract_params(self): ), "Kapton param sigmas must be non-negative" self.kapton_params_maxes = [ [ - self.kapton_params[i] + self.kapton_params_sigmas[j] - if j == i - else self.kapton_params[i] + ( + self.kapton_params[i] + self.kapton_params_sigmas[j] + if j == i + else self.kapton_params[i] + ) for i in range(4) ] for j in range(4) ] self.kapton_params_mins = [ [ - max(self.kapton_params[i] - self.kapton_params_sigmas[j], 0.001) - if j == i - else self.kapton_params[i] + ( + max(self.kapton_params[i] - self.kapton_params_sigmas[j], 0.001) + if j == i + else self.kapton_params[i] + ) for i in range(3) ] + [a] @@ -657,7 +661,7 @@ def correction_and_within_spot_sigma(params_version, variance_within_spot=True): if plot: from matplotlib import pyplot as plt - for (title, data) in [("corrections", corrections), ("sigmas", sigmas)]: + for title, data in [("corrections", corrections), ("sigmas", sigmas)]: plt.hist(data, 20) plt.title(title) plt.show() diff --git a/src/dials/algorithms/integration/kapton_correction.py b/src/dials/algorithms/integration/kapton_correction.py index cccdef4fdd..15003053aa 100644 --- a/src/dials/algorithms/integration/kapton_correction.py +++ b/src/dials/algorithms/integration/kapton_correction.py @@ -460,18 +460,22 @@ def extract_params(self): ), "Kapton param sigmas must be nonnegative" self.kapton_params_maxes = [ [ - self.kapton_params[i] + self.kapton_params_sigmas[j] - if j == i - else 
self.kapton_params[i] + ( + self.kapton_params[i] + self.kapton_params_sigmas[j] + if j == i + else self.kapton_params[i] + ) for i in range(4) ] for j in range(4) ] self.kapton_params_mins = [ [ - max(self.kapton_params[i] - self.kapton_params_sigmas[j], 0.001) - if j == i - else self.kapton_params[i] + ( + max(self.kapton_params[i] - self.kapton_params_sigmas[j], 0.001) + if j == i + else self.kapton_params[i] + ) for i in range(3) ] + [a] @@ -561,7 +565,7 @@ def correction_and_within_spot_sigma(params_version, variance_within_spot=True): if plot: from matplotlib import pyplot as plt - for (title, data) in [("corrections", corrections), ("sigmas", sigmas)]: + for title, data in [("corrections", corrections), ("sigmas", sigmas)]: plt.hist(data, 20) plt.title(title) plt.show() diff --git a/src/dials/algorithms/integration/parallel_integrator.h b/src/dials/algorithms/integration/parallel_integrator.h index 1bd8374da0..a12b62fcb8 100644 --- a/src/dials/algorithms/integration/parallel_integrator.h +++ b/src/dials/algorithms/integration/parallel_integrator.h @@ -798,7 +798,8 @@ namespace dials { namespace algorithms { std::size_t i = 0; offset_.push_back(0); for (std::size_t j = 0; j < n; ++j) { - while (i < indices_.size() && bbox[indices_[i]][5] - zstart <= j + 1) i++; + while (i < indices_.size() && bbox[indices_[i]][5] - zstart <= j + 1) + i++; offset_.push_back(i); } DIALS_ASSERT(offset_.size() == n + 1); diff --git a/src/dials/algorithms/integration/parallel_integrator.py b/src/dials/algorithms/integration/parallel_integrator.py index c4dce893be..af46ec2aa1 100644 --- a/src/dials/algorithms/integration/parallel_integrator.py +++ b/src/dials/algorithms/integration/parallel_integrator.py @@ -13,9 +13,13 @@ from dials.util.mp import multi_node_parallel_map from dials.util.system import MEMORY_LIMIT +# isort: split + # Need this import first because loads extension that parallel_integrator_ext # relies on - it assumes the binding for EmpiricalProfileModeller exists -import dials.algorithms.profile_model.modeller # noqa: F401 # isort: split +import dials.algorithms.profile_model.modeller + +# isort: split from dials.constants import EPS, FULL_PARTIALITY from dials_algorithms_integration_parallel_integrator_ext import ( diff --git a/src/dials/algorithms/integration/processor.py b/src/dials/algorithms/integration/processor.py index d8fdfaae72..2b7f705217 100644 --- a/src/dials/algorithms/integration/processor.py +++ b/src/dials/algorithms/integration/processor.py @@ -469,11 +469,12 @@ def __call__(self): else: mask = imageset.get_mask(i) if self.params.lookup.mask is not None: - assert len(mask) == len( - self.params.lookup.mask - ), "Mask/Image are incorrect size %d %d" % ( - len(mask), - len(self.params.lookup.mask), + assert len(mask) == len(self.params.lookup.mask), ( + "Mask/Image are incorrect size %d %d" + % ( + len(mask), + len(self.params.lookup.mask), + ) ) mask = tuple( m1 & m2 for m1, m2 in zip(self.params.lookup.mask, mask) diff --git a/src/dials/algorithms/integration/report.py b/src/dials/algorithms/integration/report.py index ef96ce1499..8e34688230 100644 --- a/src/dials/algorithms/integration/report.py +++ b/src/dials/algorithms/integration/report.py @@ -92,7 +92,6 @@ def overall_report(data): return report def binned_report(binner, index, data): - # Create the indexers indexer_all = binner.indexer(index) indexer_sum = binner.indexer(index.select(data["sum"])) @@ -159,7 +158,6 @@ def binned_report(binner, index, data): return report def resolution_bins(experiment, hkl, nbins): 
- # Create the crystal symmetry object cs = crystal.symmetry( space_group=experiment.crystal.get_space_group(), diff --git a/src/dials/algorithms/integration/ssx/ellipsoid_integrate.py b/src/dials/algorithms/integration/ssx/ellipsoid_integrate.py index cd98b64ad7..2682eeac7f 100644 --- a/src/dials/algorithms/integration/ssx/ellipsoid_integrate.py +++ b/src/dials/algorithms/integration/ssx/ellipsoid_integrate.py @@ -78,7 +78,6 @@ def __init__(self, params, collect_data=False): self.collector = EllipsoidOutputCollector() def run(self, experiment, table): - # first set ids to zero so can integrate (this is how integration # finds the image in the imageset) ids_map = dict(table.experiment_identifiers()) diff --git a/src/dials/algorithms/integration/ssx/ssx_integrate.py b/src/dials/algorithms/integration/ssx/ssx_integrate.py index 681ded53d7..1312e4399a 100644 --- a/src/dials/algorithms/integration/ssx/ssx_integrate.py +++ b/src/dials/algorithms/integration/ssx/ssx_integrate.py @@ -29,7 +29,6 @@ def generate_html_report(plots_data, filename): class SimpleIntegrator(ABC): - """Define an interface for ssx prediction/integration processing""" def __init__(self, params): @@ -77,7 +76,6 @@ def integrate(experiment, reflection_table, *args, **kwargs): class NullCollector(object): - """ Defines a null data collector for cases where you don't want to record data during the process. @@ -103,7 +101,6 @@ def collect_after_integration(self, *args, **kwargs): class OutputCollector(object): - """ Defines a data collector to log common quantities for all algorithm choices for an individual image. @@ -148,7 +145,6 @@ def collect_after_integration(self, experiment, reflection_table): class OutputAggregator: - """ Simple aggregator class to aggregate data from all images and generate json data for output/plotting. 
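The ssx_integrate.py hunks above only reflow whitespace, but the docstrings they touch describe a small observer pattern worth making explicit: NullCollector is a no-op stand-in for when per-image data should not be recorded, OutputCollector logs common quantities for an individual image, and OutputAggregator combines the per-image records into JSON for output/plotting. The sketch below illustrates that pattern under those docstrings' description only; it is not DIALS code, and SimpleOutputCollector, integrate_one_image and aggregate are hypothetical names introduced here for illustration (collect_after_integration is the hook name visible in the diff).

    class NullCollector:
        """No-op collector: lets the integration loop run without
        recording any per-image data (per the NullCollector docstring)."""

        def collect_after_integration(self, *args, **kwargs):
            pass


    class SimpleOutputCollector:
        """Illustrative stand-in for OutputCollector: records one
        quantity for an individual image."""

        def __init__(self):
            self.data = {}

        def collect_after_integration(self, experiment, reflection_table):
            # Record a simple per-image quantity after integration.
            self.data["n_integrated"] = len(reflection_table)


    def integrate_one_image(experiment, reflections, collector=None):
        # Hypothetical driver: the caller never branches on whether data
        # collection is enabled, because both collectors expose the same
        # collect_after_integration hook.
        if collector is None:
            collector = NullCollector()
        # ... prediction and integration would happen here ...
        collector.collect_after_integration(experiment, reflections)
        return reflections


    def aggregate(collectors):
        # Mirrors OutputAggregator's stated role: combine records from
        # all images into one dict keyed by image number, ready for
        # json.dumps.
        return {i: c.data for i, c in enumerate(collectors)}

Passing NullCollector simply makes the hook a no-op, which is why the integration code paths in the patch can stay identical whether or not data collection is requested.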
diff --git a/src/dials/algorithms/integration/ssx/stills_integrate.py b/src/dials/algorithms/integration/ssx/stills_integrate.py index ba343491ec..4265b9c8ee 100644 --- a/src/dials/algorithms/integration/ssx/stills_integrate.py +++ b/src/dials/algorithms/integration/ssx/stills_integrate.py @@ -33,7 +33,6 @@ def __init__(self, params, collect_data=False): self.collector = StillsOutputCollector() def run(self, experiment, table): - # first set ids to zero so can integrate (this is how integration # finds the image in the imageset) ids_map = dict(table.experiment_identifiers()) @@ -83,7 +82,6 @@ def predict(experiment, table, params): @staticmethod def integrate(experiments, table, params): - _params = Parameters.from_phil(params.integration) experiments[0].scan = None _initialize_stills(experiments, _params, table) diff --git a/src/dials/algorithms/integration/sublattice_helper.py b/src/dials/algorithms/integration/sublattice_helper.py index decd86caa6..1a38d8d233 100644 --- a/src/dials/algorithms/integration/sublattice_helper.py +++ b/src/dials/algorithms/integration/sublattice_helper.py @@ -136,7 +136,6 @@ def integrate_coset(self, experiments, indexed): else: # Dump experiments to disk if self.params.output.coset_experiments_filename: - experiments_local.as_json(self.params.output.coset_experiments_filename) if self.params.output.coset_filename: diff --git a/src/dials/algorithms/merging/french_wilson.py b/src/dials/algorithms/merging/french_wilson.py index 2e87562135..f9c3becc2f 100644 --- a/src/dials/algorithms/merging/french_wilson.py +++ b/src/dials/algorithms/merging/french_wilson.py @@ -156,7 +156,6 @@ def compute_posterior_moments_acentric( expected_intensities: np.ndarray, h_min: int = -4, ) -> PosteriorMoments: - h = (intensities / sigmas) - np.abs(sigmas / expected_intensities) logger.debug(f"h range: {h.min():.4f} - {h.max():.4f}") i_sig_min = h_min + 0.3 @@ -204,7 +203,6 @@ def compute_posterior_moments_centric( expected_intensities: np.ndarray, h_min: int = -4, ) -> PosteriorMoments: - h = (intensities / sigmas) - np.abs(sigmas / (2 * expected_intensities)) logger.debug(f"h range: {h.min():.4f} - {h.max():.4f}") i_sig_min = h_min + 0.3 diff --git a/src/dials/algorithms/merging/merge.py b/src/dials/algorithms/merging/merge.py index 4b905286e5..baa6bcf452 100644 --- a/src/dials/algorithms/merging/merge.py +++ b/src/dials/algorithms/merging/merge.py @@ -175,7 +175,6 @@ def prepare_merged_reflection_table( class MTZDataClass: - """Container class (i.e. Python 3.7 dataclass) for per-wavelength mtz dataset.""" def __init__( @@ -508,7 +507,6 @@ def make_merged_mtz_file(mtz_datasets, r_free_array: miller.array = None): def make_merged_mtz_file_with_gemmi(mtz_datasets, r_free_array=None): - # XXX This should replace the code in make_merged_mtz_file when # MergedMTZWriter and MADMergedMTZWriter are removed writer = MergedMTZCreator @@ -717,14 +715,14 @@ def truncate( "falling back to assumption of a flat, positive prior, i.e.: " " |F| = sqrt((Io+sqrt(Io**2 +2sigma**2))/2.0)" ) - do_french_wilson = lambda ma: ma.enforce_positive_amplitudes() + do_french_wilson = lambda ma: ma.enforce_positive_amplitudes() # noqa: E731 elif n_refl < min_reflections: raise ValueError( "Insufficient reflections for French & Wilson procedure. " "Either set fallback_to_flat_prior=True or truncate=False." 
) elif implementation == "cctbx": - do_french_wilson = lambda ma: ma.french_wilson(log=out) + do_french_wilson = lambda ma: ma.french_wilson(log=out) # noqa: E731 else: do_french_wilson = french_wilson @@ -866,7 +864,6 @@ def r_free_flags_from_reference( params: phil.scope_extract, mtz_datasets: List[MTZDataClass], ) -> miller.array: - mtz = iotbx.mtz.object(params.r_free_flags.reference) r_free_arrays = [] for ma in mtz.as_miller_arrays(): diff --git a/src/dials/algorithms/merging/reporting.py b/src/dials/algorithms/merging/reporting.py index c1c6fd2382..4aec98c0cf 100644 --- a/src/dials/algorithms/merging/reporting.py +++ b/src/dials/algorithms/merging/reporting.py @@ -15,9 +15,11 @@ from dials.algorithms.clustering.observers import uc_params_from_experiments from dials.algorithms.scaling.observers import make_merging_stats_plots from dials.array_family import flex -from dials.command_line.stereographic_projection import calculate_projections +from dials.command_line.stereographic_projection import ( + calculate_projections, + projections_as_dict, +) from dials.command_line.stereographic_projection import phil_scope as stereo_phil_scope -from dials.command_line.stereographic_projection import projections_as_dict from dials.report.analysis import ( format_statistics, make_merging_statistics_summary, @@ -31,7 +33,6 @@ class MergeJSONCollector(object): - initiated = False data = {} @@ -52,9 +53,9 @@ def create_json(self): class MergingStatisticsData: experiments: ExperimentList scaled_miller_array: miller.array - reflections: Optional[ - List[flex.reflection_table] - ] = None # only needed if using this class like a script when making batch plots + reflections: Optional[List[flex.reflection_table]] = ( + None # only needed if using this class like a script when making batch plots + ) merging_statistics_result: Optional[Type[dataset_statistics]] = None anom_merging_statistics_result: Optional[Type[dataset_statistics]] = None cut_merging_statistics_result: Optional[Type[dataset_statistics]] = None @@ -71,9 +72,9 @@ def __str__(self): stats_summary += ( "\n" "Resolution limit suggested from CC" - + "\u00BD" + + "\u00bd" + " fit (limit CC" - + "\u00BD" + + "\u00bd" + f"=0.3): {d_min:.2f}\n" ) stats_summary += table_1_summary( @@ -101,7 +102,6 @@ def __repr__(self): def make_stereo_plots(experiments): - orientation_graphs = OrderedDict() # now make stereo projections params = stereo_phil_scope.extract() @@ -151,14 +151,14 @@ def generate_json_data(data: dict[float, MergingStatisticsData]) -> dict: make_dano_plots({wl: stats.anomalous_amplitudes})["dF"] ) if stats.merging_statistics_result: - json_data[wl_key][ - "merging_stats" - ] = stats.merging_statistics_result.as_dict() + json_data[wl_key]["merging_stats"] = ( + stats.merging_statistics_result.as_dict() + ) json_data[wl_key]["table_1_stats"] = stats.table_1_stats() if stats.anom_merging_statistics_result: - json_data[wl_key][ - "merging_stats_anom" - ] = stats.anom_merging_statistics_result.as_dict() + json_data[wl_key]["merging_stats_anom"] = ( + stats.anom_merging_statistics_result.as_dict() + ) if len(json_data) > 1: # create an overall summary table headers = [""] + ["Wavelength " + f"{wl:.5f}" + " Å" for wl in data.keys()] @@ -273,7 +273,7 @@ def make_dano_plots(anomalous_data): "x": d_star_sq_bins, "y": list(dFsdF), "type": "scatter", - "name": "\u03BB" + f"={wave:.4f}", + "name": "\u03bb" + f"={wave:.4f}", } ) if not data["dF"]["dano"]["data"]: diff --git a/src/dials/algorithms/profile_model/ellipsoid/algorithm.py 
b/src/dials/algorithms/profile_model/ellipsoid/algorithm.py index 19448629c8..ea91f0643f 100644 --- a/src/dials/algorithms/profile_model/ellipsoid/algorithm.py +++ b/src/dials/algorithms/profile_model/ellipsoid/algorithm.py @@ -377,7 +377,6 @@ def predict_after_ellipsoid_refinement(experiment, reflection_table): def compute_prediction_probability(experiment, reflection_table): - # Get stuff from experiment s0 = np.array([experiment.beam.get_s0()], dtype=np.float64).reshape(3, 1) s0_length = norm(s0) diff --git a/src/dials/algorithms/profile_model/ellipsoid/model.py b/src/dials/algorithms/profile_model/ellipsoid/model.py index 3d6fda0a75..7963900909 100644 --- a/src/dials/algorithms/profile_model/ellipsoid/model.py +++ b/src/dials/algorithms/profile_model/ellipsoid/model.py @@ -120,7 +120,6 @@ class EllipsoidProfileModel(ProfileModelExt): - """ An overall model class that conforms to the requirements of a dxtbx.profile_model entry point. @@ -617,7 +616,6 @@ def from_params(Class, params): class Simple1Angular1ProfileModel(AngularProfileModelBase): - name = "simple1angular1" def parameterisation(self): @@ -637,7 +635,6 @@ def from_sigma_d(Class, sigma_d): class Simple1Angular3ProfileModel(AngularProfileModelBase): - name = "simple1angular3" def parameterisation(self): @@ -662,7 +659,6 @@ def from_sigma_d(Class, sigma_d): class Simple6Angular1ProfileModel(AngularProfileModelBase): - name = "simple6angular1" def parameterisation(self): @@ -695,7 +691,6 @@ def from_sigma_d(Class, sigma_d): class Simple6Angular3ProfileModel(AngularProfileModelBase): - name = "simple6angular3" def parameterisation(self): diff --git a/src/dials/algorithms/profile_model/ellipsoid/parameterisation.py b/src/dials/algorithms/profile_model/ellipsoid/parameterisation.py index 812f41df36..e5c00e0fb8 100644 --- a/src/dials/algorithms/profile_model/ellipsoid/parameterisation.py +++ b/src/dials/algorithms/profile_model/ellipsoid/parameterisation.py @@ -282,7 +282,6 @@ def first_derivatives(self) -> np.array: return d1 def first_derivatives_angular(self): - b2 = self.params[1] d2 = np.array( [[2 * b2, 0, 0], [0, 2 * b2, 0], [0, 0, 0]], dtype=np.float64 @@ -354,7 +353,6 @@ def first_derivatives(self) -> np.array: return d1 def first_derivatives_angular(self): - b1 = self.params[1] b2 = self.params[2] b3 = self.params[3] @@ -452,7 +450,6 @@ def first_derivatives(self) -> np.array: return ds def first_derivatives_angular(self): - b2 = self.params[6] d2 = np.array( [[2 * b2, 0, 0], [0, 2 * b2, 0], [0, 0, 0]], dtype=np.float64 @@ -545,7 +542,6 @@ def first_derivatives(self) -> np.array: return ds def first_derivatives_angular(self): - b1 = self.params[6] b2 = self.params[7] b3 = self.params[8] @@ -658,7 +654,9 @@ def U_params(self) -> np.array: def U_params(self, params) -> None: try: self._U_parameterisation.set_param_vals(tuple(float(i) for i in params)) - except ValueError as e: # Rare, from rstbx parameter_reduction.py, set_orientation L38 + except ( + ValueError + ) as e: # Rare, from rstbx parameter_reduction.py, set_orientation L38 raise RuntimeError(f"Error setting U parameterisation: {e}") @property @@ -670,7 +668,9 @@ def B_params(self) -> np.array: def B_params(self, params) -> None: try: self._B_parameterisation.set_param_vals(tuple(float(i) for i in params)) - except ValueError as e: # Rare, from rstbx parameter_reduction.py, set_orientation L38 + except ( + ValueError + ) as e: # Rare, from rstbx parameter_reduction.py, set_orientation L38 raise RuntimeError(f"Error setting B parameterisation: {e}") @property diff 
--git a/src/dials/algorithms/profile_model/ellipsoid/refiner.py b/src/dials/algorithms/profile_model/ellipsoid/refiner.py index 1d665685b8..c5352a047d 100644 --- a/src/dials/algorithms/profile_model/ellipsoid/refiner.py +++ b/src/dials/algorithms/profile_model/ellipsoid/refiner.py @@ -176,7 +176,6 @@ def rotate_mat3_double(R, A): class ReflectionLikelihood(object): def __init__(self, model, s0, sp, h, ctot, mobs, sobs, panel_id=0): - # Save stuff modelstate = ReflectionModelState(model, s0, h) self.modelstate = modelstate @@ -210,7 +209,6 @@ def __init__(self, model, s0, sp, h, ctot, mobs, sobs, panel_id=0): ) def update(self): - # The s2 vector s2 = self.s0 + self.modelstate.get_r() # Rotate the mean vector @@ -395,7 +393,6 @@ class MaximumLikelihoodTarget(object): def __init__( self, model, s0, sp_list, h_list, ctot_list, mobs_list, sobs_list, panel_ids ): - # Check input assert len(h_list) == sp_list.shape[-1] assert len(h_list) == ctot_list.shape[-1] diff --git a/src/dials/algorithms/profile_model/gaussian_rs/algorithm.py b/src/dials/algorithms/profile_model/gaussian_rs/algorithm.py index 2d2fcedc3a..c0110c631b 100644 --- a/src/dials/algorithms/profile_model/gaussian_rs/algorithm.py +++ b/src/dials/algorithms/profile_model/gaussian_rs/algorithm.py @@ -102,7 +102,6 @@ def create(experiments, grid_size=5, scan_step=5, grid_method="circular_grid"): # Create the spec list spec_list = [] for experiment in experiments: - spec = TransformSpec( experiment.beam, experiment.detector, diff --git a/src/dials/algorithms/profile_model/gaussian_rs/calculator.py b/src/dials/algorithms/profile_model/gaussian_rs/calculator.py index be4bfab97c..97e3e4f138 100644 --- a/src/dials/algorithms/profile_model/gaussian_rs/calculator.py +++ b/src/dials/algorithms/profile_model/gaussian_rs/calculator.py @@ -380,7 +380,7 @@ def target(self, log_sigma): # recorded. 
# L = 0 - for (kj, nj, i0, i1) in zip( + for kj, nj, i0, i1 in zip( self.K, self.nj, self.indices[:-1], self.indices[1:] ): zj = zi[i0:i1] @@ -452,7 +452,6 @@ def __init__( """ if algorithm == "basic": - # Calculate sigma_m try: estimator = ComputeEsdReflectingRange.Estimator( @@ -636,7 +635,6 @@ def __init__( sigma_b = flex.double() sigma_m = flex.double() for i in range(z0, z1): - # Get reflections at the index reflections = reflection_list[i] diff --git a/src/dials/algorithms/refinement/__init__.py b/src/dials/algorithms/refinement/__init__.py index 5a441ca8e6..a69aa3e6f3 100644 --- a/src/dials/algorithms/refinement/__init__.py +++ b/src/dials/algorithms/refinement/__init__.py @@ -9,7 +9,7 @@ class DialsRefineRuntimeError(RuntimeError): pass -from dials.algorithms.refinement.refiner import Refiner, RefinerFactory +from dials.algorithms.refinement.refiner import Refiner, RefinerFactory # noqa: E402 __all__ = [ "DialsRefineConfigError", diff --git a/src/dials/algorithms/refinement/analysis/centroid_analysis.py b/src/dials/algorithms/refinement/analysis/centroid_analysis.py index cc29737bbb..5457eb4bc4 100644 --- a/src/dials/algorithms/refinement/analysis/centroid_analysis.py +++ b/src/dials/algorithms/refinement/analysis/centroid_analysis.py @@ -1,7 +1,6 @@ """Analysis of centroid residuals for determining suitable refinement and outlier rejection parameters automatically""" - from __future__ import annotations import math @@ -15,7 +14,6 @@ class CentroidAnalyser: def __init__(self, reflections, av_callback=flex.mean, debug=False): - # flags to indicate at what level the analysis has been performed self._average_residuals = False self._spectral_analysis = False diff --git a/src/dials/algorithms/refinement/constraints.py b/src/dials/algorithms/refinement/constraints.py index 521d983a6e..2b909c524f 100644 --- a/src/dials/algorithms/refinement/constraints.py +++ b/src/dials/algorithms/refinement/constraints.py @@ -45,7 +45,6 @@ class EqualShiftConstraint: parameterisations""" def __init__(self, indices, parameter_vector): - self.indices = indices parameter_vector = flex.double(parameter_vector) self.constrained_value = flex.mean(parameter_vector.select(indices)) @@ -60,7 +59,6 @@ def get_expanded_values(self): class ConstraintManager: def __init__(self, constraints, n_full_params): - self._constraints = [e for e in constraints if e is not None] # constraints should be a list of EqualShiftConstraint objects @@ -81,7 +79,6 @@ def get_constrained_parameter_indices(self): return [c.indices for c in self._constraints] def constrain_parameters(self, x): - assert len(x) == self._n_full_params constrained_vals = flex.double([c.constrained_value for c in self._constraints]) @@ -94,7 +91,6 @@ def constrain_parameters(self, x): return constrained_x def expand_parameters(self, constrained_x): - unconstrained_part = constrained_x[0 : self._n_unconstrained_params] constrained_part = constrained_x[self._n_unconstrained_params :] @@ -113,7 +109,6 @@ def expand_parameters(self, constrained_x): return full_x def constrain_jacobian(self, jacobian): - # set up result matrix nrow = jacobian.all()[0] ncol = self._n_unconstrained_params + len(self._constraints) @@ -139,7 +134,6 @@ def constrain_jacobian(self, jacobian): return constrained_jacobian def constrain_gradient_vector(self, grad): - # extract unconstrained gradients into the result result = [] result = list(flex.double(grad).select(self._unconstrained_idx)) @@ -184,7 +178,6 @@ class ConstraintManagerFactory: a constraints manager to be linked to the 
Refinery""" def __init__(self, refinement_phil, pred_param, sparse=False): - self._params = refinement_phil self._pred_param = pred_param @@ -242,7 +235,6 @@ def build_constraint(self, constraint_scope, parameterisation, model_type): return EqualShiftConstraint(indices, self._all_vals) def __call__(self): - # shorten options path options = self._params.refinement.parameterisation diff --git a/src/dials/algorithms/refinement/corrgram.py b/src/dials/algorithms/refinement/corrgram.py index 350476d34f..c9e0152972 100644 --- a/src/dials/algorithms/refinement/corrgram.py +++ b/src/dials/algorithms/refinement/corrgram.py @@ -1,6 +1,5 @@ """Creation of 'corrgram' correlation matrix plots""" - from __future__ import annotations import json @@ -131,7 +130,6 @@ def create_correlation_plots(refiner, params): corrmats, labels = refiner.get_parameter_correlation_matrix(step, col_select) if [corrmats, labels].count(None) == 0: - for resid_name, corrmat in corrmats.items(): plot_fname = fname_base + "_" + resid_name + ext plt = corrgram(corrmat, labels) diff --git a/src/dials/algorithms/refinement/engine.py b/src/dials/algorithms/refinement/engine.py index 8c95380e30..62bdba622c 100644 --- a/src/dials/algorithms/refinement/engine.py +++ b/src/dials/algorithms/refinement/engine.py @@ -190,7 +190,6 @@ def __init__( tracking=None, max_iterations=None, ): - # reference to PredictionParameterisation, Target and ConstraintsManager # objects self._parameters = prediction_parameterisation @@ -515,7 +514,6 @@ class AdaptLbfgs(Refinery): """Adapt Refinery for L-BFGS minimiser""" def __init__(self, *args, **kwargs): - Refinery.__init__(self, *args, **kwargs) self._termination_params = lbfgs.termination_parameters( @@ -531,7 +529,6 @@ def compute_functional_and_gradients(self): return self._f, self._g def compute_functional_gradients_and_curvatures(self): - self.prepare_for_step() # observation terms @@ -640,7 +637,6 @@ def run(self): return self.run_lbfgs(curvatures=True) def compute_functional_gradients_diag(self): - L, dL_dp, curvs = self.compute_functional_gradients_and_curvatures() self._f = L self._g = dL_dp @@ -668,7 +664,6 @@ def __init__( tracking=None, max_iterations=None, ): - Refinery.__init__( self, target, @@ -695,7 +690,6 @@ def parameter_vector_norm(self): return self.x.norm() def build_up(self, objective_only=False): - # code here to calculate the residuals. 
Rely on the target class # for this @@ -717,7 +711,6 @@ def build_up(self, objective_only=False): blocks = self._target.split_matches_into_blocks(nproc=self._nproc) if self._nproc > 1: - # ensure the jacobian is not tracked self._jacobian = None @@ -854,7 +847,6 @@ def __init__( max_iterations=20, **kwds, ): - AdaptLstbx.__init__( self, target, @@ -883,7 +875,6 @@ def run(self): return while True: - # set functional and gradients for the step (to add to the history) self._f = self.objective() self._g = -self.opposite_of_gradient() @@ -979,7 +970,6 @@ def report_progress(self, objective): pass def _run_core(self): - # add an attribute to the journal self.history.add_column("mu") self.history.add_column("nu") @@ -1010,7 +1000,6 @@ def _run_core(self): self.setup_mu() while True: - # set functional and gradients for the step self._f = self.objective() self._g = -self.opposite_of_gradient() diff --git a/src/dials/algorithms/refinement/outlier_detection/mcd.py b/src/dials/algorithms/refinement/outlier_detection/mcd.py index 08e23fa31f..e878313eec 100644 --- a/src/dials/algorithms/refinement/outlier_detection/mcd.py +++ b/src/dials/algorithms/refinement/outlier_detection/mcd.py @@ -28,7 +28,6 @@ def __init__( k3=100, threshold_probability=0.975, ): - if cols is None: cols = ["x_resid", "y_resid", "phi_resid"] CentroidOutlier.__init__( @@ -58,7 +57,6 @@ def __init__( return def _detect_outliers(self, cols): - fast_mcd = FastMCD( cols, alpha=self._alpha, diff --git a/src/dials/algorithms/refinement/outlier_detection/outlier_base.py b/src/dials/algorithms/refinement/outlier_detection/outlier_base.py index eea1809841..60084f9feb 100644 --- a/src/dials/algorithms/refinement/outlier_detection/outlier_base.py +++ b/src/dials/algorithms/refinement/outlier_detection/outlier_base.py @@ -27,7 +27,6 @@ def __init__( block_width=None, nproc=1, ): - # column names of the data in which to look for outliers if cols is None: cols = ["x_resid", "y_resid", "phi_resid"] @@ -248,7 +247,6 @@ def __call__(self, reflections): # loop over the completed jobs for i, job in enumerate(jobs3): - iexp = job["id"] ipanel = job["panel"] nref = len(job["indices"]) @@ -299,7 +297,6 @@ def _run_job(self, job, i): msg = None if nref >= self._min_num_obs: - # get the subset of data as a list of columns cols = [data[col] for col in self._cols] @@ -477,7 +474,6 @@ def _run_job(self, job, i): class CentroidOutlierFactory: @staticmethod def from_parameters_and_colnames(params, colnames): - # id the relevant scope for the requested method method = params.outlier.algorithm if method == "null": @@ -539,7 +535,6 @@ def from_parameters_and_colnames(params, colnames): if __name__ == "__main__": - # test construction params = phil_scope.extract() params.outlier.algorithm = "tukey" diff --git a/src/dials/algorithms/refinement/outlier_detection/sauter_poon.py b/src/dials/algorithms/refinement/outlier_detection/sauter_poon.py index f9c8120335..5b315aaf57 100644 --- a/src/dials/algorithms/refinement/outlier_detection/sauter_poon.py +++ b/src/dials/algorithms/refinement/outlier_detection/sauter_poon.py @@ -20,7 +20,6 @@ def __init__( verbose=False, pdf=None, ): - # here the column names are fixed by the algorithm, so what's passed in is # ignored. CentroidOutlier.__init__( @@ -41,7 +40,6 @@ def __init__( return def _detect_outliers(self, cols): - # cols is guaranteed to be a list of three flex arrays, containing miller # indices, observed pixel coordinates and calculated pixel coordinates. 
# Copy the data into matches diff --git a/src/dials/algorithms/refinement/outlier_detection/tukey.py b/src/dials/algorithms/refinement/outlier_detection/tukey.py index 7c53a88a2d..11a1d4874d 100644 --- a/src/dials/algorithms/refinement/outlier_detection/tukey.py +++ b/src/dials/algorithms/refinement/outlier_detection/tukey.py @@ -20,7 +20,6 @@ def __init__( nproc=1, iqr_multiplier=1.5, ): - if cols is None: cols = ["x_resid", "y_resid", "phi_resid"] CentroidOutlier.__init__( @@ -39,7 +38,6 @@ def __init__( return def _detect_outliers(self, cols): - from scitbx.math import five_number_summary outliers = flex.bool(len(cols[0]), False) diff --git a/src/dials/algorithms/refinement/parameterisation/autoreduce.py b/src/dials/algorithms/refinement/parameterisation/autoreduce.py index cd9e0c064d..ed62e45d8d 100644 --- a/src/dials/algorithms/refinement/parameterisation/autoreduce.py +++ b/src/dials/algorithms/refinement/parameterisation/autoreduce.py @@ -2,13 +2,13 @@ import logging -logger = logging.getLogger(__name__) - from libtbx.phil import parse from scitbx.array_family import flex from dials.algorithms.refinement import DialsRefineConfigError +logger = logging.getLogger(__name__) + phil_str = """ min_nref_per_parameter = 5 .help = "the smallest number of reflections per parameter for a" @@ -43,10 +43,10 @@ """ phil_scope = parse(phil_str) + # A callback for PredictionParameterisation.get_gradients which will give # the positions of reflections associated with a particular parameter def id_associated_refs(result): - # There are usually 3 parts to results: gradients in X, Y and Z vals = list(result.values()) try: diff --git a/src/dials/algorithms/refinement/parameterisation/beam_parameters.py b/src/dials/algorithms/refinement/parameterisation/beam_parameters.py index ce1deeff3c..1f401b0d44 100644 --- a/src/dials/algorithms/refinement/parameterisation/beam_parameters.py +++ b/src/dials/algorithms/refinement/parameterisation/beam_parameters.py @@ -43,7 +43,6 @@ def _build_p_list(s0, goniometer, parameter_type=Parameter): @staticmethod def _compose_core(is0, ipn, mu1, mu2, nu, mu1_axis, mu2_axis): - # convert angles to radians mu1rad, mu2rad = mu1 / 1000.0, mu2 / 1000.0 @@ -127,7 +126,6 @@ def __init__(self, beam, goniometer=None, experiment_ids=None): return def compose(self): - # extract direction from the initial state ius0 = self._initial_state["unit_s0"] ipn = self._initial_state["polarization_normal"] @@ -153,7 +151,6 @@ def compose(self): return def get_state(self): - # only a single beam exists, so no multi_state_elt argument is allowed return matrix.col(self._model.get_s0()) @@ -188,7 +185,6 @@ def _build_p_list(unit_s0, goniometer, parameter_type=Parameter): @staticmethod def _compose_core(is0, ipn, mu1, mu2, mu1_axis, mu2_axis): - # convert angles to radians mu1rad, mu2rad = mu1 / 1000.0, mu2 / 1000.0 @@ -270,7 +266,6 @@ def __init__(self, beam, goniometer=None, experiment_ids=None): return def compose(self): - # extract direction from the initial state ius0 = self._initial_state["unit_s0"] ipn = self._initial_state["polarization_normal"] @@ -295,6 +290,5 @@ def compose(self): return def get_state(self): - # only a single beam exists, so no multi_state_elt argument is allowed return matrix.col(self._model.get_unit_s0()) diff --git a/src/dials/algorithms/refinement/parameterisation/configure.py b/src/dials/algorithms/refinement/parameterisation/configure.py index 91b1c46d84..0bc94967b5 100644 --- a/src/dials/algorithms/refinement/parameterisation/configure.py +++ 
b/src/dials/algorithms/refinement/parameterisation/configure.py @@ -320,7 +320,6 @@ def _filter_parameter_names(parameterisation): # Helper function to perform centroid analysis def _centroid_analysis(options, experiments, reflection_manager): - analysis = None if not options.scan_varying: return analysis diff --git a/src/dials/algorithms/refinement/parameterisation/crystal_parameters.py b/src/dials/algorithms/refinement/parameterisation/crystal_parameters.py index bafe5a5eff..ed295ab4e2 100644 --- a/src/dials/algorithms/refinement/parameterisation/crystal_parameters.py +++ b/src/dials/algorithms/refinement/parameterisation/crystal_parameters.py @@ -2,8 +2,6 @@ import logging -logger = logging.getLogger(__name__) - from rstbx.symmetry.constraints.parameter_reduction import symmetrize_reduce_enlarge from scitbx import matrix @@ -13,6 +11,8 @@ ) from dials.algorithms.refinement.refinement_helpers import CrystalOrientationCompose +logger = logging.getLogger(__name__) + class CrystalOrientationMixin: """Mix-in class defining some functionality unique to crystal orientation @@ -78,7 +78,6 @@ def __init__(self, crystal, experiment_ids=None): return def compose(self): - # Extract orientation from the initial state U0 = self._initial_state @@ -99,7 +98,6 @@ def compose(self): return def get_state(self): - # only a single crystal is parameterised here, so no multi_state_elt # argument is allowed return matrix.sqr(self._model.get_U()) @@ -126,7 +124,6 @@ def _build_p_list(self, crystal, parameter_type=Parameter): return p_list def _compose_core(self, raw_vals): - # obtain metrical matrix parameters on natural scale vals = [v * 1.0e-5 for v in raw_vals] @@ -137,7 +134,6 @@ def _compose_core(self, raw_vals): try: newB = matrix.sqr(S.backward_orientation(vals).reciprocal_matrix()) except RuntimeError as e: - # write original error to debug log logger.debug("Unable to compose the crystal model") logger.debug("Original error message: %s", str(e)) @@ -201,7 +197,6 @@ def __init__(self, crystal, experiment_ids=None): return def compose(self): - # calculate new B and derivatives newB, self._dstate_dp = self._compose_core([p.value for p in self._param]) @@ -211,13 +206,11 @@ def compose(self): return def get_state(self): - # only a single crystal is parameterised here, so no multi_state_elt # argument is allowed return matrix.sqr(self._model.get_B()) def set_state_uncertainties(self, var_cov, multi_state_elt=None): - self._model.set_B_covariance(var_cov) return diff --git a/src/dials/algorithms/refinement/parameterisation/detector_parameters.py b/src/dials/algorithms/refinement/parameterisation/detector_parameters.py index 420b10ddd3..b53d50ac23 100644 --- a/src/dials/algorithms/refinement/parameterisation/detector_parameters.py +++ b/src/dials/algorithms/refinement/parameterisation/detector_parameters.py @@ -72,7 +72,6 @@ def _init_core(detector, parameter_type=Parameter): return {"istate": istate, "p_list": p_list} def _compose_core(self, dist, shift1, shift2, tau1, tau2, tau3): - # extract items from the initial state id1 = self._initial_state["d1"] id2 = self._initial_state["d2"] @@ -351,7 +350,6 @@ def __init__(self, detector, experiment_ids=None): self.compose() def compose(self): - # extract parameters from the internal list dist, shift1, shift2, tau1, tau2, tau3 = self._param @@ -367,7 +365,6 @@ def compose(self): ) def get_state(self): - # only a single panel exists, so no multi_state_elt argument is allowed panel = (self._model)[0] return matrix.sqr(panel.get_d_matrix()) @@ -498,7 +495,6 @@ def 
__init__(self, detector, beam, experiment_ids=None): self.compose() def compose(self): - # extract parameters from the internal list dist, shift1, shift2, tau1, tau2, tau3 = self._param @@ -548,7 +544,6 @@ def compose(self): ] def get_state(self, multi_state_elt=0): - # There is only one detector, but the req. panel must be specified panel = (self._model)[multi_state_elt] return matrix.sqr(panel.get_d_matrix()) @@ -561,7 +556,6 @@ class PyDetectorParameterisationMultiPanel(DetectorParameterisationMultiPanel): the base class for more details""" def compose(self): - # extract items from the initial state id1 = self._initial_state["d1"] id2 = self._initial_state["d2"] @@ -621,7 +615,6 @@ def compose(self): # now update the panels with their new position and orientation. for p, dir1, dir2, org in zip(detector, dir1s, dir2s, origins): - p.set_frame(dir1, dir2, org) # calculate derivatives of the state wrt parameters @@ -732,7 +725,6 @@ def compose(self): for panel_id, (offset, dir1_new_basis, dir2_new_basis) in enumerate( zip(self._offsets, self._dir1s, self._dir2s) ): - # Panel origin: # o = dorg + offset[0] * d1 + offset[1] * d2 + offset[2] * dn @@ -955,7 +947,6 @@ def __init__(self, detector, experiment_ids=None, level=0): # loop over the groups, collecting initial parameters and states for igp, pnl_ids in enumerate(self._panel_ids_by_group): - panel_centres_in_lab_frame = [] for i in pnl_ids: pnl = detector[i] @@ -1087,7 +1078,6 @@ def get_param_panel_groups(self): return self._group_ids_by_parameter def compose(self): - # reset the list that holds derivatives for i in range(len(self._model)): self._multi_state_derivatives[i] = [None] * len(self._dstate_dp) @@ -1096,7 +1086,6 @@ def compose(self): # parameters param = iter(self._param) for igp, pnl_ids in enumerate(self._panel_ids_by_group): - # extract parameters from the internal list dist = next(param) shift1 = next(param) @@ -1139,10 +1128,9 @@ def compose(self): # Loop over attached Panel matrices, using the helper class to calculate # derivatives of the d matrix in each case and store them. 
i = igp * 6 - for (panel_id, offset, dir1_new_basis, dir2_new_basis) in zip( + for panel_id, offset, dir1_new_basis, dir2_new_basis in zip( pnl_ids, offsets, dir1s, dir2s ): - - self._multi_state_derivatives[panel_id][ - i : (i + 6) - ] = pgc.derivatives_for_panel(offset, dir1_new_basis, dir2_new_basis) + self._multi_state_derivatives[panel_id][i : (i + 6)] = ( + pgc.derivatives_for_panel(offset, dir1_new_basis, dir2_new_basis) + ) diff --git a/src/dials/algorithms/refinement/parameterisation/goniometer_parameters.py b/src/dials/algorithms/refinement/parameterisation/goniometer_parameters.py index 4c6114b3f2..8e6821f34f 100644 --- a/src/dials/algorithms/refinement/parameterisation/goniometer_parameters.py +++ b/src/dials/algorithms/refinement/parameterisation/goniometer_parameters.py @@ -41,7 +41,6 @@ def _build_p_list(axis, beam, parameter_type=Parameter): @staticmethod def _compose_core(iS, gamma1, gamma2, gamma1_axis, gamma2_axis): - # convert angles to radians g1rad, g2rad = gamma1 / 1000.0, gamma2 / 1000.0 @@ -115,7 +114,6 @@ def __init__(self, goniometer, beam=None, experiment_ids=None): return def compose(self): - # extract setting matrix from the initial state iS = self._initial_state @@ -137,7 +135,6 @@ def compose(self): return def get_state(self): - # only a single setting matrix exists, so no multi_state_elt argument is # allowed diff --git a/src/dials/algorithms/refinement/parameterisation/model_parameters.py b/src/dials/algorithms/refinement/parameterisation/model_parameters.py index 36a7d2efa0..4a474cb89b 100644 --- a/src/dials/algorithms/refinement/parameterisation/model_parameters.py +++ b/src/dials/algorithms/refinement/parameterisation/model_parameters.py @@ -198,7 +198,6 @@ def get_params(self, only_free=True): """ if only_free: - return [x for x in self._param if not x.get_fixed()] else: @@ -214,7 +213,6 @@ def get_param_vals(self, only_free=True): """ if only_free: - return [x.value for x in self._param if not x.get_fixed()] else: diff --git a/src/dials/algorithms/refinement/parameterisation/parameter_report.py b/src/dials/algorithms/refinement/parameterisation/parameter_report.py index 03efbeb2b7..40de549198 100644 --- a/src/dials/algorithms/refinement/parameterisation/parameter_report.py +++ b/src/dials/algorithms/refinement/parameterisation/parameter_report.py @@ -25,7 +25,6 @@ def __init__( xl_unit_cell_parameterisations=None, goniometer_parameterisations=None, ): - if detector_parameterisations is None: detector_parameterisations = [] if beam_parameterisations is None: @@ -47,7 +46,6 @@ def __init__( self._length = self._len() def _len(self): - length = 0 for model in self._detector_parameterisations: length += model.num_free() @@ -69,7 +67,6 @@ def _indent(self, string): return "\n".join(" " + e for e in str(string).split("\n")) def __str__(self): - s = "Parameter Report:\n" if self._detector_parameterisations: s += "Detector parameters:\n" @@ -172,7 +169,6 @@ class TableColumn: """Bucket to store data to be used for constructing tables to print.""" def __init__(self, title, values): - self._title = title self._values = values diff --git a/src/dials/algorithms/refinement/parameterisation/prediction_parameters.py b/src/dials/algorithms/refinement/parameterisation/prediction_parameters.py index f85be0ed11..99c10504eb 100644 --- a/src/dials/algorithms/refinement/parameterisation/prediction_parameters.py +++ b/src/dials/algorithms/refinement/parameterisation/prediction_parameters.py @@ -64,7 +64,6 @@ def __init__( xl_unit_cell_parameterisations=None, 
goniometer_parameterisations=None, ): - if detector_parameterisations is None: detector_parameterisations = [] if beam_parameterisations is None: @@ -147,7 +146,6 @@ def get_goniometer_parameterisations(self): return self._goniometer_parameterisations def _len(self): - length = 0 for model in self._detector_parameterisations: length += model.num_free() @@ -328,7 +326,6 @@ def get_gradients(self, reflections, callback=None): # Populate values in these arrays for iexp, exp in enumerate(self._experiments): - sel = reflections["id"] == iexp isel = sel.iselection() self._experiment_to_idx.append(isel) @@ -450,7 +447,6 @@ def _detector_derivatives( D = self._D.select(isel) if dd_ddet_p is None: - # get the derivatives of detector d matrix for this panel dd_ddet_p = parameterisation.get_ds_dp( multi_state_elt=panel_id, use_none_as_null=True @@ -497,7 +493,6 @@ def _grads_detector_loop(self, reflections, results, callback=None): # loop over the detector parameterisations for dp in self._detector_parameterisations: - # Determine (sub)set of reflections affected by this parameterisation isel = flex.size_t() for exp_id in dp.get_experiment_ids(): @@ -516,7 +511,6 @@ def _grads_detector_loop(self, reflections, results, callback=None): # loop through the panels in this detector for panel_id, _ in enumerate(detector): - # get the right subset of array indices to set for this panel sub_isel = isel.select(panel == panel_id) if len(sub_isel) == 0: @@ -578,7 +572,6 @@ def _grads_model_loop( ): # loop over the parameterisations for p in parameterisations: - # Determine (sub)set of reflections affected by this parameterisation isel = flex.size_t() for exp_id in p.get_experiment_ids(): @@ -715,7 +708,6 @@ def _extend_gradient_vectors(results, m, n, keys=("dX_dp", "dY_dp", "dZ_dp")): class XYPhiPredictionParameterisation(PredictionParameterisation): - _grad_names = ("dX_dp", "dY_dp", "dphi_dp") def _local_setup(self, reflections): @@ -791,7 +783,6 @@ def _beam_derivatives( D = self._D.select(isel) if ds0_dbeam_p is None: - # get the derivatives of the beam vector wrt the parameters ds0_dbeam_p = parameterisation.get_ds_dp(use_none_as_null=True) @@ -805,7 +796,6 @@ def _beam_derivatives( # loop through the parameters for der in ds0_dbeam_p: - if der is None: dphi_dp.append(None) dpv_dp.append(None) @@ -908,7 +898,6 @@ def _goniometer_derivatives( D = self._D.select(isel) if dS_dgon_p is None: - # get derivatives of the setting matrix S wrt the parameters dS_dgon_p = [ None if der is None else flex.mat3_double(len(isel), der.elems) @@ -920,7 +909,6 @@ def _goniometer_derivatives( # loop through the parameters for der in dS_dgon_p: - if der is None: dphi_dp.append(None) dpv_dp.append(None) @@ -1006,7 +994,6 @@ def _beam_derivatives( D = self._D.select(isel) if ds0_dbeam_p is None: - # get the derivatives of the beam vector wrt the parameters ds0_dbeam_p = parameterisation.get_ds_dp(use_none_as_null=True) @@ -1021,7 +1008,6 @@ def _beam_derivatives( # loop through the parameters for der in ds0_dbeam_p: - if der is None: dphi_dp.append(None) dpv_dp.append(None) diff --git a/src/dials/algorithms/refinement/parameterisation/prediction_parameters_stills.py b/src/dials/algorithms/refinement/parameterisation/prediction_parameters_stills.py index 410a06fdc9..2753a31362 100644 --- a/src/dials/algorithms/refinement/parameterisation/prediction_parameters_stills.py +++ b/src/dials/algorithms/refinement/parameterisation/prediction_parameters_stills.py @@ -79,7 +79,6 @@ def _beam_derivatives(self, isel, parameterisation=None, 
reflections=None): # loop through the parameters for der in ds0_dbeam_p: - if der is None: dpv_dp.append(None) dDeltaPsi_dp.append(None) @@ -144,7 +143,6 @@ def _xl_orientation_derivatives( # loop through the parameters for der in dU_dxlo_p: - if der is None: dpv_dp.append(None) dDeltaPsi_dp.append(None) @@ -213,7 +211,6 @@ def _xl_unit_cell_derivatives(self, isel, parameterisation=None, reflections=Non # loop through the parameters for der in dB_dxluc_p: - if der is None: dpv_dp.append(None) dDeltaPsi_dp.append(None) @@ -359,7 +356,6 @@ def _beam_derivatives(self, isel, parameterisation=None, reflections=None): # loop through the parameters for der in ds0_dbeam_p: - if der is None: dpv_dp.append(None) dDeltaPsi_dp.append(None) @@ -416,7 +412,6 @@ def _xl_orientation_derivatives( # loop through the parameters for der in dU_dxlo_p: - if der is None: dpv_dp.append(None) dDeltaPsi_dp.append(None) @@ -471,7 +466,6 @@ def _xl_unit_cell_derivatives(self, isel, parameterisation=None, reflections=Non # loop through the parameters for der in dB_dxluc_p: - if der is None: dpv_dp.append(None) dDeltaPsi_dp.append(None) diff --git a/src/dials/algorithms/refinement/parameterisation/scan_varying_beam_parameters.py b/src/dials/algorithms/refinement/parameterisation/scan_varying_beam_parameters.py index 2ace88dda6..637e1d8ba3 100644 --- a/src/dials/algorithms/refinement/parameterisation/scan_varying_beam_parameters.py +++ b/src/dials/algorithms/refinement/parameterisation/scan_varying_beam_parameters.py @@ -17,7 +17,6 @@ class ScanVaryingBeamParameterisation(ScanVaryingModelParameterisation, BeamMixi def __init__( self, beam, t_range, num_intervals, goniometer=None, experiment_ids=None ): - if experiment_ids is None: experiment_ids = [0] @@ -87,13 +86,13 @@ def compose(self, t): # calculate derivatives of state wrt underlying smoother parameters ds0_dp1 = [None] * dmu1_dp.size - for (i, v) in dmu1_dp: + for i, v in dmu1_dp: ds0_dp1[i] = ds0_dval[0] * v ds0_dp2 = [None] * dmu2_dp.size - for (i, v) in dmu2_dp: + for i, v in dmu2_dp: ds0_dp2[i] = ds0_dval[1] * v ds0_dp3 = [None] * dnu_dp.size - for (i, v) in dnu_dp: + for i, v in dnu_dp: ds0_dp3[i] = ds0_dval[2] * v # store derivatives as list-of-lists diff --git a/src/dials/algorithms/refinement/parameterisation/scan_varying_crystal_parameters.py b/src/dials/algorithms/refinement/parameterisation/scan_varying_crystal_parameters.py index 6311a6e6ab..6451968a1a 100644 --- a/src/dials/algorithms/refinement/parameterisation/scan_varying_crystal_parameters.py +++ b/src/dials/algorithms/refinement/parameterisation/scan_varying_crystal_parameters.py @@ -90,13 +90,13 @@ def compose(self, t): # calculate derivatives of state wrt underlying parameters dU_dp1 = [None] * dphi1_dp.size - for (i, v) in dphi1_dp: + for i, v in dphi1_dp: dU_dp1[i] = dU_dphi1 * v dU_dp2 = [None] * dphi2_dp.size - for (i, v) in dphi2_dp: + for i, v in dphi2_dp: dU_dp2[i] = dU_dphi2 * v dU_dp3 = [None] * dphi3_dp.size - for (i, v) in dphi3_dp: + for i, v in dphi3_dp: dU_dp3[i] = dU_dphi3 * v # store derivatives as list-of-lists @@ -126,7 +126,6 @@ def __init__( experiment_ids=None, set_state_uncertainties=False, ): - self._set_state_uncertainties = set_state_uncertainties from scitbx import matrix diff --git a/src/dials/algorithms/refinement/parameterisation/scan_varying_detector_parameters.py b/src/dials/algorithms/refinement/parameterisation/scan_varying_detector_parameters.py index fa3918b03e..6d5e986fba 100644 --- 
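The compose(t) methods above evaluate the smoother at time t and then apply the chain rule: the derivative of the state with respect to each underlying smoother parameter is the state derivative with respect to the smoothed value times the (sparse) smoother weight. A small sketch, with (index, weight) pairs standing in for the sparse dmu*_dp vectors:

# ds_dval: derivative of the state w.r.t. one smoothed value (e.g. ds0_dval[0])
ds_dval = 0.25
# sparse smoother weights: only a few parameters influence the value at time t
dval_dp = [(3, 0.2), (4, 0.6), (5, 0.2)]  # (parameter index, weight)

n_params = 8
ds_dp = [None] * n_params
for i, v in dval_dp:
    ds_dp[i] = ds_dval * v  # chain rule; untouched entries remain None (structural zero)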
a/src/dials/algorithms/refinement/parameterisation/scan_varying_detector_parameters.py +++ b/src/dials/algorithms/refinement/parameterisation/scan_varying_detector_parameters.py @@ -24,7 +24,6 @@ class ScanVaryingDetectorParameterisationSinglePanel( angles expressed in mrad""" def __init__(self, detector, t_range, num_intervals, experiment_ids=None): - if experiment_ids is None: experiment_ids = [0] @@ -103,22 +102,22 @@ def compose(self, t): # calculate derivatives of state wrt underlying smoother parameters dd_dp1 = [None] * ddist_dp.size - for (i, v) in ddist_dp: + for i, v in ddist_dp: dd_dp1[i] = dd_dval[0] * v dd_dp2 = [None] * dshift1_dp.size - for (i, v) in dshift1_dp: + for i, v in dshift1_dp: dd_dp2[i] = dd_dval[1] * v dd_dp3 = [None] * dshift2_dp.size - for (i, v) in dshift2_dp: + for i, v in dshift2_dp: dd_dp3[i] = dd_dval[2] * v dd_dp4 = [None] * dtau1_dp.size - for (i, v) in dtau1_dp: + for i, v in dtau1_dp: dd_dp4[i] = dd_dval[3] * v dd_dp5 = [None] * dtau2_dp.size - for (i, v) in dtau2_dp: + for i, v in dtau2_dp: dd_dp5[i] = dd_dval[4] * v dd_dp6 = [None] * dtau3_dp.size - for (i, v) in dtau3_dp: + for i, v in dtau3_dp: dd_dp6[i] = dd_dval[5] * v # store derivatives as list-of-lists diff --git a/src/dials/algorithms/refinement/parameterisation/scan_varying_goniometer_parameters.py b/src/dials/algorithms/refinement/parameterisation/scan_varying_goniometer_parameters.py index 97b796c93e..4379c556d7 100644 --- a/src/dials/algorithms/refinement/parameterisation/scan_varying_goniometer_parameters.py +++ b/src/dials/algorithms/refinement/parameterisation/scan_varying_goniometer_parameters.py @@ -21,7 +21,6 @@ class ScanVaryingGoniometerParameterisation( def __init__( self, goniometer, t_range, num_intervals, beam=None, experiment_ids=None ): - if experiment_ids is None: experiment_ids = [0] @@ -92,10 +91,10 @@ def compose(self, t): # calculate derivatives of state wrt underlying smoother parameters dS_dp1 = [None] * dgamma1_dp.size - for (i, v) in dgamma1_dp: + for i, v in dgamma1_dp: dS_dp1[i] = dS_dval[0] * v dS_dp2 = [None] * dgamma2_dp.size - for (i, v) in dgamma2_dp: + for i, v in dgamma2_dp: dS_dp2[i] = dS_dval[1] * v # store derivatives as list-of-lists diff --git a/src/dials/algorithms/refinement/parameterisation/scan_varying_model_parameters.py b/src/dials/algorithms/refinement/parameterisation/scan_varying_model_parameters.py index 9e17462e9f..6febf98040 100644 --- a/src/dials/algorithms/refinement/parameterisation/scan_varying_model_parameters.py +++ b/src/dials/algorithms/refinement/parameterisation/scan_varying_model_parameters.py @@ -43,7 +43,6 @@ def __init__( ptype=None, name="ScanVaryingParameterSet", ): - assert num_samples >= 2 # otherwise use scan-independent parameterisation value = [value] * num_samples @@ -77,7 +76,6 @@ def name_stem(self): return self._name_stem def __str__(self): - msg = "ScanVaryingParameterSet " + self.name_stem + ":\n" try: msg += " Type: " + self.param_type + "\n" @@ -137,7 +135,6 @@ def __init__( experiment_ids, is_multi_state=False, ): - ModelParameterisation.__init__( self, model, initial_state, param_sets, experiment_ids, is_multi_state ) diff --git a/src/dials/algorithms/refinement/parameterisation/scan_varying_prediction_parameters.py b/src/dials/algorithms/refinement/parameterisation/scan_varying_prediction_parameters.py index 1608111f5a..4b4d8bfc6f 100644 --- a/src/dials/algorithms/refinement/parameterisation/scan_varying_prediction_parameters.py +++ 
b/src/dials/algorithms/refinement/parameterisation/scan_varying_prediction_parameters.py @@ -30,7 +30,6 @@ class SparseFlex: structural zeroes.""" def __init__(self, dimension, elements, indices): - if len(elements) != len(indices): raise ValueError( "The arrays of elements and indices must be of equal length" @@ -40,7 +39,6 @@ def __init__(self, dimension, elements, indices): self._indices = indices def select(self, indices): - try: indices = indices.iselection() except AttributeError: @@ -96,25 +94,21 @@ def _extract_explicit_data(self, other): return other def __mul__(self, other): - other = self._extract_explicit_data(other) return SparseFlex(self._size, self._data * other, self._indices) def __rmul__(self, other): - other = self._extract_explicit_data(other) return SparseFlex(self._size, other * self._data, self._indices) def __truediv__(self, other): - other = self._extract_explicit_data(other) return SparseFlex(self._size, self._data / other, self._indices) def __add__(self, other): - if not isinstance(other, SparseFlex): raise TypeError("Addition is only defined between two SparseFlex arrays") @@ -123,7 +117,6 @@ def __add__(self, other): return SparseFlex(self._size, self._data + other, self._indices) def __sub__(self, other): - if not isinstance(other, SparseFlex): raise TypeError("Subtraction is only defined between two SparseFlex arrays") @@ -132,13 +125,11 @@ def __sub__(self, other): return SparseFlex(self._size, self._data - other, self._indices) def dot(self, other): - other = self._extract_explicit_data(other) return SparseFlex(self._size, self._data.dot(other), self._indices) def rotate_around_origin(self, direction, angle): - angle = self._extract_explicit_data(angle) direction = self._extract_explicit_data(direction) return SparseFlex( @@ -146,7 +137,6 @@ def rotate_around_origin(self, direction, angle): ) def parts(self): - x, y, z = self._data.parts() return ( SparseFlex(self._size, x, self._indices), @@ -161,7 +151,6 @@ class StateDerivativeCache: by that derivative""" def __init__(self, parameterisations=None): - if parameterisations is None: parameterisations = [] self._cache = dict.fromkeys(parameterisations) @@ -200,7 +189,6 @@ def build_gradients(self, parameterisation, isel=None, imatch=None): # Loop over the data for each parameter for p_data in entry: - # Reconstitute full array from the cache and pack into a SparseFlex total_nelem = sum(pair.iselection.size() for pair in p_data) recon = build(total_nelem) @@ -264,7 +252,6 @@ def __init__( xl_unit_cell_parameterisations=None, goniometer_parameterisations=None, ): - if detector_parameterisations is None: detector_parameterisations = [] if beam_parameterisations is None: @@ -430,7 +417,6 @@ def compose(self, reflections, skip_derivatives=False): self._prepare_for_compose(reflections, skip_derivatives) for iexp, exp in enumerate(self._experiments): - # select the reflections of interest sel = reflections["id"] == iexp isel = sel.iselection() @@ -453,7 +439,6 @@ def compose(self, reflections, skip_derivatives=False): # get state and derivatives for each block for block in range(flex.min(blocks), flex.max(blocks) + 1): - # determine the subset of reflections this affects subsel = isel.select(blocks == block) if len(subsel) == 0: @@ -497,10 +482,8 @@ def compose(self, reflections, skip_derivatives=False): # set states and derivatives for this detector if dp is not None: # detector is parameterised if dp.is_multi_state(): # parameterised detector is multi panel - # loop through the panels in this detector for 
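The SparseFlex wrapper above stores only the elements at given indices and keeps arithmetic closed over that structure. A simplified pure-Python sketch of the same idea (scalars only, no flex types; unlike the real class, addition here is restricted to identical index structure for brevity):

class SparseVec:
    """Fixed-size vector that stores values only at explicit indices."""

    def __init__(self, size, data, indices):
        if len(data) != len(indices):
            raise ValueError("The arrays of elements and indices must be of equal length")
        self._size, self._data, self._indices = size, list(data), list(indices)

    def __mul__(self, other):
        # scale the stored elements only; structural zeroes stay zero
        return SparseVec(self._size, [d * other for d in self._data], self._indices)

    def __add__(self, other):
        if not isinstance(other, SparseVec) or other._indices != self._indices:
            raise TypeError("Addition requires two vectors with the same structure")
        return SparseVec(
            self._size,
            [a + b for a, b in zip(self._data, other._data)],
            self._indices,
        )

v = SparseVec(100, [1.0, 2.0], [10, 90])
w = (v * 3.0) + v  # stored elements become 4.0 and 8.0; the other 98 entries stay zero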
panel_id, _ in enumerate(exp.detector): - # get the right subset of array indices to set for this panel subsel2 = subsel.select(panels == panel_id) if len(subsel2) == 0: @@ -543,7 +526,6 @@ def compose(self, reflections, skip_derivatives=False): else: # set states for unparameterised detector (dp is None) # loop through the panels in this detector for panel_id, _ in enumerate(exp.detector): - # get the right subset of array indices to set for this panel subsel2 = subsel.select(panels == panel_id) if len(subsel2) == 0: diff --git a/src/dials/algorithms/refinement/prediction/managed_predictors.py b/src/dials/algorithms/refinement/prediction/managed_predictors.py index a4f5ca6e35..215a56e542 100644 --- a/src/dials/algorithms/refinement/prediction/managed_predictors.py +++ b/src/dials/algorithms/refinement/prediction/managed_predictors.py @@ -6,7 +6,6 @@ the naive assumption that the relp is already in reflecting position """ - from __future__ import annotations from math import pi @@ -81,7 +80,6 @@ def __call__(self, reflections): """Predict for all reflections at the current model geometry""" for iexp, e in enumerate(self._experiments): - # select the reflections for this experiment only sel = reflections["id"] == iexp refs = reflections.select(sel) @@ -97,7 +95,6 @@ def __call__(self, reflections): return reflections def _predict_one_experiment(self, experiment, reflections): - raise NotImplementedError() def _post_predict_one_experiment(self, experiment, reflections): @@ -112,7 +109,6 @@ def _post_prediction(self, reflections): class ScansExperimentsPredictor(ExperimentsPredictor): def _predict_one_experiment(self, experiment, reflections): - # scan-varying if "ub_matrix" in reflections: predictor = sv(experiment) @@ -128,7 +124,6 @@ def _predict_one_experiment(self, experiment, reflections): predictor.for_reflection_table(reflections, UB) def _post_prediction(self, reflections): - if "xyzobs.mm.value" in reflections: reflections = self._match_full_turns(reflections) @@ -163,11 +158,9 @@ def _match_full_turns(self, reflections): class StillsExperimentsPredictor(ExperimentsPredictor): - spherical_relp_model = False def _predict_one_experiment(self, experiment, reflections): - predictor = st(experiment, spherical_relp=self.spherical_relp_model) UB = experiment.crystal.get_A() predictor.for_reflection_table(reflections, UB) @@ -175,7 +168,6 @@ def _predict_one_experiment(self, experiment, reflections): class LaueExperimentsPredictor(ExperimentsPredictor): def _predict_one_experiment(self, experiment, reflections): - min_s0_idx = min( range(len(reflections["wavelength"])), key=reflections["wavelength"].__getitem__, @@ -196,7 +188,6 @@ def _predict_one_experiment(self, experiment, reflections): class TOFExperimentsPredictor(LaueExperimentsPredictor): def _post_predict_one_experiment(self, experiment, reflections): - # Add ToF to xyzcal.mm wavelength_cal = reflections["wavelength_cal"] distance = experiment.beam.get_sample_to_source_distance() * 10**-3 @@ -222,7 +213,6 @@ def _post_predict_one_experiment(self, experiment, reflections): class ExperimentsPredictorFactory: @staticmethod def from_experiments(experiments, force_stills=False, spherical_relp=False): - # Determine whether or not to use a stills predictor if not force_stills: for exp in experiments: @@ -236,7 +226,6 @@ def from_experiments(experiments, force_stills=False, spherical_relp=False): predictor.spherical_relp_model = spherical_relp else: - all_tof_experiments = False for expt in experiments: if expt.scan is not None and 
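TOFExperimentsPredictor above converts each predicted wavelength back to a time of flight over the total flight path. A sketch of the underlying de Broglie relation, t = m·L·λ/h for a neutron travelling path length L (the DIALS code additionally handles per-panel detector distances and unit conversions):

# de Broglie: lambda = h / (m * v)  =>  t = L / v = m * L * lambda / h
H_PLANCK = 6.62607015e-34      # J s
M_NEUTRON = 1.67492749804e-27  # kg

def tof_seconds(wavelength_angstrom, path_length_m):
    wavelength_m = wavelength_angstrom * 1e-10
    return M_NEUTRON * path_length_m * wavelength_m / H_PLANCK

# e.g. a 2 Angstrom neutron over a 10 m source-to-sample-to-detector path
print(tof_seconds(2.0, 10.0))  # ~5.1e-3 s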
expt.scan.has_property("time_of_flight"): diff --git a/src/dials/algorithms/refinement/refinement_helpers.py b/src/dials/algorithms/refinement/refinement_helpers.py index 51ea4b7b35..696c936335 100644 --- a/src/dials/algorithms/refinement/refinement_helpers.py +++ b/src/dials/algorithms/refinement/refinement_helpers.py @@ -1,6 +1,5 @@ """Auxiliary functions for the refinement package""" - from __future__ import annotations import logging @@ -182,7 +181,6 @@ def get_fd_gradients(mp, deltas, multi_state_elt=None): fd_grad = [] for i in range(len(deltas)): - val = p_vals[i] p_vals[i] -= deltas[i] / 2.0 diff --git a/src/dials/algorithms/refinement/refiner.py b/src/dials/algorithms/refinement/refiner.py index 5bb05ef715..275520e500 100644 --- a/src/dials/algorithms/refinement/refiner.py +++ b/src/dials/algorithms/refinement/refiner.py @@ -1,7 +1,6 @@ """Refiner is the refinement module public interface. RefinerFactory is what should usually be used to construct a Refiner.""" - from __future__ import annotations import copy @@ -172,7 +171,6 @@ def _trim_scans_to_observations(experiments, reflections): shoebox = None for iexp, exp in enumerate(experiments): - sel = reflections["id"] == iexp isel = sel.iselection() if obs_z is not None: @@ -263,7 +261,6 @@ def _filter_reflections(reflections): @classmethod def from_parameters_data_experiments(cls, params, reflections, experiments): - # copy the experiments experiments = _copy_experiments_for_refining(experiments) @@ -283,7 +280,6 @@ def from_parameters_data_experiments(cls, params, reflections, experiments): @classmethod def reflections_after_outlier_rejection(cls, params, reflections, experiments): - # copy the experiments experiments = _copy_experiments_for_refining(experiments) @@ -298,7 +294,6 @@ def reflections_after_outlier_rejection(cls, params, reflections, experiments): @classmethod def _build_reflection_manager_and_predictor(cls, params, reflections, experiments): - # Currently a refinement job can only have one parameterisation of the # prediction equation. 
This can either be of the XYDelPsi (stills) type, the # XYPhi (scans) type or the scan-varying XYPhi type with a varying crystal @@ -681,7 +676,6 @@ def config_target( do_stills, do_sparse, ): - target = TargetFactory.from_parameters_and_experiments( params, experiments, @@ -806,14 +800,13 @@ def get_parameter_correlation_matrix(self, step, col_select=None): return None, None for k, corrmat in corrmats.items(): - assert corrmat.is_square_matrix() idx = flex.bool(sel).iselection() sub_corrmat = flex.double(flex.grid(num_cols, num_cols)) - for (i, x) in enumerate(idx): - for (j, y) in enumerate(idx): + for i, x in enumerate(idx): + for j, y in enumerate(idx): sub_corrmat[i, j] = corrmat[x, y] corrmats[k] = sub_corrmat @@ -832,7 +825,7 @@ def print_step_table(self): rmsd_multipliers = [] header = ["Step", "Nref"] - for (name, units) in zip(self._target.rmsd_names, self._target.rmsd_units): + for name, units in zip(self._target.rmsd_names, self._target.rmsd_units): if units == "mm": header.append(name + "\n(mm)") rmsd_multipliers.append(1.0) @@ -875,7 +868,7 @@ def print_out_of_sample_rmsd_table(self): rmsd_multipliers = [] header = ["Step", "Nref"] - for (name, units) in zip(self._target.rmsd_names, self._target.rmsd_units): + for name, units in zip(self._target.rmsd_names, self._target.rmsd_units): if units == "mm": header.append(name + "\n(mm)") rmsd_multipliers.append(1.0) @@ -904,7 +897,7 @@ def calc_exp_rmsd_table(self): return self._exp_rmsd_table_data header = ["Exp\nid", "Nref"] - for (name, units) in zip(self._target.rmsd_names, self._target.rmsd_units): + for name, units in zip(self._target.rmsd_names, self._target.rmsd_units): if name == "RMSD_X" or name == "RMSD_Y" and units == "mm": header.append(name + "\n(px)") elif name == "RMSD_Phi" and units == "rad": @@ -945,7 +938,7 @@ def calc_exp_rmsd_table(self): continue # skip experiments where rmsd cannot be calculated num = self._target.get_num_matches_for_experiment(iexp) rmsds = [] - for (name, units, rmsd) in zip( + for name, units, rmsd in zip( self._target.rmsd_names, self._target.rmsd_units, raw_rmsds ): if name == "RMSD_X" and units == "mm": @@ -995,7 +988,7 @@ def print_panel_rmsd_table(self): logger.info("\nDetector %s RMSDs by panel:", idetector + 1) header = ["Panel\nid", "Nref"] - for (name, units) in zip(self._target.rmsd_names, self._target.rmsd_units): + for name, units in zip(self._target.rmsd_names, self._target.rmsd_units): if name == "RMSD_X" or name == "RMSD_Y" and units == "mm": header.append(name + "\n(px)") elif ( @@ -1013,7 +1006,6 @@ def print_panel_rmsd_table(self): rows = [] for ipanel, panel in enumerate(detector): - px_size = panel.get_pixel_size() px_per_mm = [1.0 / e for e in px_size] num = self._target.get_num_matches_for_panel(ipanel) @@ -1023,7 +1015,7 @@ def print_panel_rmsd_table(self): if raw_rmsds is None: continue # skip panels where rmsd cannot be calculated rmsds = [] - for (name, units, rmsd) in zip( + for name, units, rmsd in zip( self._target.rmsd_names, self._target.rmsd_units, raw_rmsds ): if name == "RMSD_X" and units == "mm": @@ -1174,7 +1166,6 @@ def _update_models(self): # Calculate scan-varying errors if requested if self._pred_param.set_scan_varying_errors: - # get state covariance matrices the whole range of images. 
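get_parameter_correlation_matrix above extracts the rows and columns for the selected free parameters from each full correlation matrix. With numpy the same double loop over idx collapses to a single fancy-index expression (a sketch, not the flex-based code):

import numpy as np

corrmat = np.random.rand(6, 6)
idx = np.array([0, 2, 5])                # free-parameter columns requested via col_select
sub_corrmat = corrmat[np.ix_(idx, idx)]  # equivalent to the nested i, j loop over idx
assert sub_corrmat.shape == (3, 3)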
We select # the first element of this at each image because crystal scan-varying # parameterisations are not multi-state diff --git a/src/dials/algorithms/refinement/reflection_manager.py b/src/dials/algorithms/refinement/reflection_manager.py index a90ecd36e8..7603ece79a 100644 --- a/src/dials/algorithms/refinement/reflection_manager.py +++ b/src/dials/algorithms/refinement/reflection_manager.py @@ -124,7 +124,6 @@ class BlockCalculator: the centre of the block""" def __init__(self, experiments, reflections): - self._experiments = experiments self._reflections = reflections @@ -150,7 +149,6 @@ def per_width(self, width, deg=True): phi_obs = self._reflections["xyzobs.mm.value"].parts()[2] for iexp, exp in enumerate(self._experiments): - sel = self._reflections["id"] == iexp isel = sel.iselection() exp_phi = phi_obs.select(isel) @@ -192,7 +190,6 @@ def per_image(self): phi_obs = self._reflections["xyzobs.mm.value"].parts()[2] for iexp, exp in enumerate(self._experiments): - sel = self._reflections["id"] == iexp isel = sel.iselection() exp_phi = phi_obs.select(isel) @@ -255,7 +252,6 @@ def stills_manager( reflections: flex.reflection_table, params: libtbx.phil.scope_extract, ) -> StillsReflectionManager: - refman = StillsReflectionManager ## Outlier detection @@ -314,7 +310,6 @@ def rotation_scan_manager( reflections: flex.reflection_table, params: libtbx.phil.scope_extract, ) -> ReflectionManager: - refman = ReflectionManager ## Outlier detection @@ -371,7 +366,6 @@ def laue_manager( reflections: flex.reflection_table, params: libtbx.phil.scope_extract, ) -> LaueReflectionManager: - all_tof_experiments = False for expt in experiments: if expt.scan is not None and expt.scan.has_property("time_of_flight"): @@ -444,8 +438,10 @@ def laue_manager( @staticmethod def get_weighting_strategy_override( params: libtbx.phil.scope_extract, - ) -> weighting_strategies.StatisticalWeightingStrategy | weighting_strategies.ConstantWeightingStrategy: - + ) -> ( + weighting_strategies.StatisticalWeightingStrategy + | weighting_strategies.ConstantWeightingStrategy + ): if params.weighting_strategy.override == "statistical": return weighting_strategies.StatisticalWeightingStrategy() @@ -504,7 +500,6 @@ def __init__( outlier_detector=None, weighting_strategy_override=None, ): - if len(reflections) == 0: raise ValueError("Empty reflections table provided to ReflectionManager") @@ -758,7 +753,6 @@ def _create_working_set(self): working_isel = flex.size_t() for iexp, exp in enumerate(self._experiments): - sel = self._reflections["id"] == iexp isel = sel.iselection() # refs = self._reflections.select(sel) @@ -974,7 +968,6 @@ def print_stats_on_matches(self): class LaueReflectionManager(ReflectionManager): - _weighting_strategy = weighting_strategies.LaueStatisticalWeightingStrategy() experiment_type = "laue" @@ -991,7 +984,6 @@ def __init__( weighting_strategy_override=None, wavelength_weight=1e7, ): - if len(reflections) == 0: raise ValueError("Empty reflections table provided to ReflectionManager") @@ -1159,7 +1151,6 @@ def __init__( weighting_strategy_override=None, wavelength_weight=1e7, ): - super().__init__( reflections=reflections, experiments=experiments, diff --git a/src/dials/algorithms/refinement/restraints/restraints_helpers.h b/src/dials/algorithms/refinement/restraints/restraints_helpers.h index fc72ee75d0..087ea96441 100644 --- a/src/dials/algorithms/refinement/restraints/restraints_helpers.h +++ b/src/dials/algorithms/refinement/restraints/restraints_helpers.h @@ -12,7 +12,7 @@ #define 
DIALS_REFINEMENT_RESTRAINTS_HELPERS_H #ifndef RAD2DEG -#define RAD2DEG(x) ((x)*57.29577951308232087721) +#define RAD2DEG(x) ((x) * 57.29577951308232087721) #endif #include diff --git a/src/dials/algorithms/refinement/restraints/restraints_parameterisation.py b/src/dials/algorithms/refinement/restraints/restraints_parameterisation.py index 5ed7a88d52..f463cfedd4 100644 --- a/src/dials/algorithms/refinement/restraints/restraints_parameterisation.py +++ b/src/dials/algorithms/refinement/restraints/restraints_parameterisation.py @@ -86,7 +86,6 @@ def __init__( xl_unit_cell_parameterisations=None, goniometer_parameterisations=None, ): - if detector_parameterisations is None: detector_parameterisations = [] if beam_parameterisations is None: @@ -163,7 +162,6 @@ def add_restraints_to_target_xl_unit_cell(self, experiment_id, values, sigma): self._param_to_restraint.add(param_i.parameterisation) def add_restraints_to_group_xl_unit_cell(self, target, experiment_ids, sigma): - # select the right parameterisations, if they exist if experiment_ids == "all": param_indices = list(self._exp_to_xluc_param.values()) diff --git a/src/dials/algorithms/refinement/rtmats.h b/src/dials/algorithms/refinement/rtmats.h index 448392982b..cdf921a5e8 100644 --- a/src/dials/algorithms/refinement/rtmats.h +++ b/src/dials/algorithms/refinement/rtmats.h @@ -12,7 +12,7 @@ #define DIALS_REFINEMENT_RTMATS_H #ifndef DEG2RAD -#define DEG2RAD(x) ((x)*0.01745329251994329575) +#define DEG2RAD(x) ((x) * 0.01745329251994329575) #endif #include diff --git a/src/dials/algorithms/refinement/sparse_engine.py b/src/dials/algorithms/refinement/sparse_engine.py index 45652ae8f7..5fee37c16e 100644 --- a/src/dials/algorithms/refinement/sparse_engine.py +++ b/src/dials/algorithms/refinement/sparse_engine.py @@ -16,11 +16,13 @@ from dials.algorithms.refinement import DialsRefineConfigError from dials.algorithms.refinement.engine import AdaptLstbx as AdaptLstbxBase -from dials.algorithms.refinement.engine import DisableMPmixin +from dials.algorithms.refinement.engine import ( + DisableMPmixin, + LevenbergMarquardtIterations, +) from dials.algorithms.refinement.engine import ( GaussNewtonIterations as GaussNewtonIterationsBase, ) -from dials.algorithms.refinement.engine import LevenbergMarquardtIterations try: from scitbx.examples.bevington import non_linear_ls_eigen_wrapper @@ -47,7 +49,6 @@ def __init__( tracking=None, max_iterations=None, ): - AdaptLstbxBase.__init__( self, target, @@ -74,7 +75,6 @@ def __init__( max_iterations=20, **kwds, ): - AdaptLstbxSparse.__init__( self, target, diff --git a/src/dials/algorithms/refinement/target.py b/src/dials/algorithms/refinement/target.py index 3ecb1d2758..6c4f2687df 100644 --- a/src/dials/algorithms/refinement/target.py +++ b/src/dials/algorithms/refinement/target.py @@ -1,7 +1,6 @@ """Contains classes used to construct a target function for refinement, principally Target and ReflectionManager.""" - from __future__ import annotations import math @@ -156,7 +155,6 @@ def __init__( restraints_parameterisation=None, gradient_calculation_blocksize=None, ): - self._experiments = experiments self._reflection_predictor = predictor self._reflection_manager = reflection_manager @@ -399,7 +397,8 @@ def split_matches_into_blocks(self, nproc: int = 1): """Return a list of the matches, split into blocks according to the gradient_calculation_blocksize parameter and the number of processes (if relevant). 
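As the split_matches_into_blocks docstring at this point spells out, blocks are sized so that nproc concurrent blocks never hold more than gradient_calculation_blocksize reflections in total. A sketch of one way to honour that contract (the formula is an assumption for illustration, not taken from the DIALS source):

import math

def split_into_blocks(n_matches, blocksize, nproc=1):
    per_block = max(1, blocksize // nproc)  # nproc concurrent blocks <= blocksize total
    n_blocks = math.ceil(n_matches / per_block)
    bounds = [i * n_matches // n_blocks for i in range(n_blocks + 1)]
    return list(zip(bounds[:-1], bounds[1:]))  # half-open (start, end) ranges

print(split_into_blocks(1000, 300, nproc=2))  # 7 blocks of <= 150 reflections each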
The number of blocks will be set such that the total number of reflections - being processed by concurrent processes does not exceed gradient_calculation_blocksize""" + being processed by concurrent processes does not exceed gradient_calculation_blocksize + """ self.update_matches() @@ -584,7 +583,6 @@ def __init__( absolute_cutoffs=None, gradient_calculation_blocksize=None, ): - Target.__init__( self, experiments, @@ -619,7 +617,6 @@ def __init__( @staticmethod def _extract_residuals_and_weights(matches) -> Tuple[flex.double, Any]: - # return residuals and weights as 1d flex.double vectors residuals = flex.double.concatenate(matches["x_resid"], matches["y_resid"]) residuals.extend(matches["phi_resid"]) @@ -632,7 +629,6 @@ def _extract_residuals_and_weights(matches) -> Tuple[flex.double, Any]: @staticmethod def _extract_squared_residuals(matches): - residuals2 = flex.double.concatenate(matches["x_resid2"], matches["y_resid2"]) residuals2.extend(matches["phi_resid2"]) @@ -719,7 +715,6 @@ class LeastSquaresPositionalResidualWithRmsdCutoffSparse( class LaueLeastSquaresResidualWithRmsdCutoff(Target): - """A Laue implementation of the target class providing a least squares residual in terms of detector impact position X, Y, and observed wavelength""" @@ -739,7 +734,6 @@ def __init__( absolute_cutoffs: Optional[list] = None, gradient_calculation_blocksize=None, ): - Target.__init__( self, experiments, @@ -776,7 +770,6 @@ def __init__( @staticmethod def _extract_residuals_and_weights(matches): - # return residuals and weights as 1d flex.double vectors residuals = flex.double.concatenate(matches["x_resid"], matches["y_resid"]) @@ -790,14 +783,12 @@ def _extract_residuals_and_weights(matches): @staticmethod def _extract_squared_residuals(matches): - return flex.double.concatenate( matches["x_resid2"], flex.double.concatenate(matches["y_resid2"], matches["wavelength_resid2"]), ) def _rmsds_core(self, reflections): - """calculate unweighted RMSDs for the specified reflections""" resid_x = flex.sum(reflections["x_resid2"]) @@ -858,13 +849,11 @@ def achieved(self): class TOFLeastSquaresResidualWithRmsdCutoff(LaueLeastSquaresResidualWithRmsdCutoff): - _grad_names = ["dX_dp", "dY_dp", "dwavelength_dp"] rmsd_names = ["RMSD_X", "RMSD_Y", "RMSD_wavelength", "RMSD_wavelength"] rmsd_units = ["mm", "mm", "A", "frame"] def _rmsds_core(self, reflections): - """calculate unweighted RMSDs for the specified reflections""" resid_x = flex.sum(reflections["x_resid2"]) diff --git a/src/dials/algorithms/refinement/target_stills.py b/src/dials/algorithms/refinement/target_stills.py index fcf4745d29..e0e9552405 100644 --- a/src/dials/algorithms/refinement/target_stills.py +++ b/src/dials/algorithms/refinement/target_stills.py @@ -31,7 +31,6 @@ def __init__( absolute_cutoffs=None, gradient_calculation_blocksize=None, ): - Target.__init__( self, experiments, @@ -86,7 +85,6 @@ def predict_for_reflection_table(self, reflections, skip_derivatives=False): @staticmethod def _extract_residuals_and_weights(matches): - # return residuals and weights as 1d flex.double vectors residuals = flex.double.concatenate(matches["x_resid"], matches["y_resid"]) residuals.extend(matches["delpsical.rad"]) @@ -100,7 +98,6 @@ def _extract_residuals_and_weights(matches): @staticmethod def _extract_squared_residuals(matches): - residuals2 = flex.double.concatenate(matches["x_resid2"], matches["y_resid2"]) residuals2.extend(matches["delpsical2"]) diff --git a/src/dials/algorithms/refinement/two_theta_refiner.py 
b/src/dials/algorithms/refinement/two_theta_refiner.py index 0d6efffa9a..401fb41823 100644 --- a/src/dials/algorithms/refinement/two_theta_refiner.py +++ b/src/dials/algorithms/refinement/two_theta_refiner.py @@ -1,6 +1,5 @@ """Versions of refinement classes for two theta refinement of the unit cell""" - from __future__ import annotations import logging @@ -29,7 +28,6 @@ class ConstantTwoThetaWeightingStrategy: def calculate_weights(self, reflections): - reflections["2theta.weights"] = flex.double(len(reflections), 1) return reflections @@ -59,7 +57,6 @@ class TwoThetaReflectionManager(ReflectionManager): _weighting_strategy = ConstantTwoThetaWeightingStrategy() def __init__(self, *args, **kwargs): - # call base __init__ super().__init__(*args, **kwargs) @@ -74,7 +71,6 @@ def __init__(self, *args, **kwargs): return def print_stats_on_matches(self): - l = self.get_matches() nref = len(l) if nref == 0: @@ -103,7 +99,6 @@ def print_stats_on_matches(self): class TwoThetaExperimentsPredictor(ExperimentsPredictor): def _predict_one_experiment(self, experiment, reflections): - B = flex.mat3_double(len(reflections), experiment.crystal.get_B()) r0 = B * reflections["miller_index"].as_vec3_double() r0len = r0.norms() @@ -172,7 +167,6 @@ def predict(self): @staticmethod def _extract_residuals_and_weights(matches): - # return residuals and weights as 1d flex.double vectors residuals = matches["2theta_resid"] @@ -182,7 +176,6 @@ def _extract_residuals_and_weights(matches): @staticmethod def _extract_squared_residuals(matches): - residuals2 = matches["2theta_resid2"] return residuals2 @@ -221,14 +214,12 @@ def __init__(self, *args, **kwargs): return def _local_setup(self, reflections): - # we want the wavelength self._wavelength = 1.0 / self._s0.norms() return def _xl_unit_cell_derivatives(self, isel, parameterisation=None, reflections=None): - # Get required data h = self._h.select(isel) B = self._B.select(isel) @@ -244,7 +235,6 @@ def _xl_unit_cell_derivatives(self, isel, parameterisation=None, reflections=Non # loop through the parameters for der in dB_dxluc_p: - if der is None: d2theta_dp.append(None) continue @@ -269,7 +259,6 @@ def _grads_xl_unit_cell_loop(self, reflections, results, callback=None): # loop over the crystal unit cell parameterisations for xlucp in self._xl_unit_cell_parameterisations: - # Determine (sub)set of reflections affected by this parameterisation isel = flex.size_t() for exp_id in xlucp.get_experiment_ids(): diff --git a/src/dials/algorithms/refinement/weighting_strategies.py b/src/dials/algorithms/refinement/weighting_strategies.py index 595a9de71b..d28a2e5a8a 100644 --- a/src/dials/algorithms/refinement/weighting_strategies.py +++ b/src/dials/algorithms/refinement/weighting_strategies.py @@ -65,7 +65,6 @@ def calculate_weights(self, reflections): reflections = super().calculate_weights(reflections) if "delpsical.weights" not in reflections: - raise DialsRefineConfigError( 'The key "delpsical.weights" is expected within the input reflections' ) @@ -99,7 +98,6 @@ def calculate_weights(self, reflections): class LaueStatisticalWeightingStrategy(StatisticalWeightingStrategy): - """ Variance in z estimated from sqrt(x^2+y^2) """ @@ -111,7 +109,6 @@ def __init__( self._wavelength_weight = wavelength_weight def calculate_weights(self, reflections): - reflections = super().calculate_weights(reflections) wx, wy, _ = reflections["xyzobs.mm.weights"].parts() @@ -122,7 +119,6 @@ def calculate_weights(self, reflections): class LaueMixedWeightingStrategy(StatisticalWeightingStrategy): 
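TwoThetaExperimentsPredictor above needs only |r0| = |B·h| = 1/d to predict a scattering angle, via Bragg's law λ = 2d·sin θ. A numpy sketch of that prediction (a worked illustration, not the flex-based implementation):

import numpy as np

def predicted_two_theta(B, hkl, wavelength):
    r0 = np.asarray(B) @ np.asarray(hkl, dtype=float)  # reciprocal-lattice vector
    d = 1.0 / np.linalg.norm(r0)                       # resolution of the reflection
    return 2.0 * np.arcsin(wavelength / (2.0 * d))     # Bragg's law

# cubic cell with a = 100 Angstrom  =>  B = diag(1/100)
B = np.eye(3) / 100.0
print(np.degrees(predicted_two_theta(B, (1, 0, 0), wavelength=1.0)))  # ~0.57 deg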
- """ Use statistical weighting for x and y, and constant weighting for z """ @@ -134,7 +130,6 @@ def __init__( self._wavelength_weight = wavelength_weight def calculate_weights(self, reflections): - reflections = super().calculate_weights(reflections) wx, wy, wz = reflections["xyzobs.mm.weights"].parts() diff --git a/src/dials/algorithms/rs_mapper/__init__.py b/src/dials/algorithms/rs_mapper/__init__.py index 17efb2a29d..0f0dcd672a 100644 --- a/src/dials/algorithms/rs_mapper/__init__.py +++ b/src/dials/algorithms/rs_mapper/__init__.py @@ -8,4 +8,4 @@ ext = boost_adaptbx.boost.python.import_ext("recviewer_ext", optional=False) if ext is not None: - from recviewer_ext import * + from recviewer_ext import * # noqa: F403 diff --git a/src/dials/algorithms/scaling/Ih_table.py b/src/dials/algorithms/scaling/Ih_table.py index 6c9ff905bf..0d2ab939b0 100644 --- a/src/dials/algorithms/scaling/Ih_table.py +++ b/src/dials/algorithms/scaling/Ih_table.py @@ -153,9 +153,9 @@ def update_data_in_blocks( data_for_block = data[block.block_selections[dataset_id]] start = block.dataset_info[dataset_id]["start_index"] end = block.dataset_info[dataset_id]["end_index"] - block.Ih_table.loc[ - np.arange(start=start, stop=end), column - ] = data_for_block + block.Ih_table.loc[np.arange(start=start, stop=end), column] = ( + data_for_block + ) def get_block_selections_for_dataset(self, dataset: int) -> List[flex.size_t]: """Generate the block selection list for a given dataset.""" @@ -304,9 +304,9 @@ def _determine_required_block_structures( for i, index in enumerate(sorted_joint_asu_indices): if index == boundary: n_in_prev_group = i - idx_prev - self.properties_dict["n_reflections_in_each_block"][ - block_id - ] = n_in_prev_group + self.properties_dict["n_reflections_in_each_block"][block_id] = ( + n_in_prev_group + ) block_id += 1 boundary = self.properties_dict["miller_index_boundaries"][block_id] idx_prev = i @@ -474,7 +474,6 @@ def as_miller_array( class TargetAsuDictCache(object): - instances = {} def __new__(cls, target_Ih_table): @@ -545,17 +544,13 @@ def add_data( Add data to the Ih_table, write data to the h_index_matrix and add the loc indices to the block_selections list. 
""" - assert not self._setup_info[ - "setup_complete" - ], """ + assert not self._setup_info["setup_complete"], """ No further data can be added to the IhTableBlock as setup marked complete.""" assert ( self._setup_info["next_row"] + len(group_ids) <= self.h_index_matrix.n_rows ), """ Not enough space left to add this data, please check for correct block initialisation.""" - assert ( - dataset_id == self._setup_info["next_dataset"] - ), """ + assert dataset_id == self._setup_info["next_dataset"], """ Datasets must be added in correct order: expected: {}, this dataset: {}""".format( self._setup_info["next_dataset"], dataset_id, @@ -591,9 +586,7 @@ def add_data( def _complete_setup(self) -> None: """Finish the setup of the Ih_table once all data has been added.""" self.h_index_matrix.compact() - assert ( - self._setup_info["next_row"] == self.h_index_matrix.n_rows - ), """ + assert self._setup_info["next_row"] == self.h_index_matrix.n_rows, """ Not all rows of h_index_matrix appear to be filled in IhTableBlock setup.""" self.h_expand_matrix = self.h_index_matrix.transpose() data = np.full(self._csc_cols.size, 1.0) diff --git a/src/dials/algorithms/scaling/active_parameter_managers.py b/src/dials/algorithms/scaling/active_parameter_managers.py index 7afa505937..1c334a7c42 100644 --- a/src/dials/algorithms/scaling/active_parameter_managers.py +++ b/src/dials/algorithms/scaling/active_parameter_managers.py @@ -37,9 +37,7 @@ def __init__(self, components, selection_list): n_cumul_params = 0 for component, obj in components.items(): if component in selection_list: - assert hasattr( - obj, "parameters" - ), """component object must have the + assert hasattr(obj, "parameters"), """component object must have the attribute 'parameters' for access to the component parameters.""" self.x.extend(obj.free_parameters) n_params = len(obj.free_parameters) @@ -204,7 +202,6 @@ def calculate_model_state_uncertainties(self, var_cov): class shared_active_parameter_manager(multi_active_parameter_manager): - """Class to enforce sharing of model components. Intercept calls to a multi_apm, to override set_params calls and manage diff --git a/src/dials/algorithms/scaling/algorithm.py b/src/dials/algorithms/scaling/algorithm.py index bfe09e3a0e..0c760cdf14 100644 --- a/src/dials/algorithms/scaling/algorithm.py +++ b/src/dials/algorithms/scaling/algorithm.py @@ -272,7 +272,6 @@ def scale(self): self.params.scaling_options.only_target or self.params.scaling_options.reference ): - self.scaler = targeted_scaling_algorithm(self.scaler) return # Now pass to a multiscaler ready for next round of scaling. 
@@ -406,9 +405,11 @@ def run(self): if counter == 1: results.initial_expids_and_image_ranges = [ - (exp.identifier, exp.scan.get_image_range()) - if exp.scan - else None + ( + (exp.identifier, exp.scan.get_image_range()) + if exp.scan + else None + ) for exp in self.experiments ] @@ -584,7 +585,6 @@ def scaling_algorithm(scaler): scaler.params.reflection_selection.intensity_choice == "combine" or scaler.params.scaling_options.outlier_rejection ): - expand_and_do_outlier_rejection(scaler) do_intensity_combination(scaler, reselect=True) @@ -605,7 +605,6 @@ def scaling_algorithm(scaler): need_to_rescale = True if scaler.params.scaling_options.full_matrix: - scaler.perform_scaling( engine=scaler.params.scaling_refinery.full_matrix_engine, max_iterations=scaler.params.scaling_refinery.full_matrix_max_iterations, diff --git a/src/dials/algorithms/scaling/cross_validation/cross_validate.py b/src/dials/algorithms/scaling/cross_validation/cross_validate.py index 03b220eb6b..5436a3b67a 100644 --- a/src/dials/algorithms/scaling/cross_validation/cross_validate.py +++ b/src/dials/algorithms/scaling/cross_validation/cross_validate.py @@ -36,7 +36,6 @@ cross_validation_mode=multi parameter=model parameter_values="array physical" """ - from __future__ import annotations import itertools diff --git a/src/dials/algorithms/scaling/error_model/engine.py b/src/dials/algorithms/scaling/error_model/engine.py index 9cf816c1fc..2f54d1e526 100644 --- a/src/dials/algorithms/scaling/error_model/engine.py +++ b/src/dials/algorithms/scaling/error_model/engine.py @@ -1,6 +1,7 @@ """ Refinement engine and functions for error model refinement. """ + from __future__ import annotations import logging @@ -81,7 +82,6 @@ def error_model_refinery(model, active_parameters, error_model_scope, max_iterat class ErrorModelRegressionRefiner(SimpleLBFGS): - """Use LBFGS for convenience, actually is a linear regression. Therefore target.predict step is unnecessary.""" @@ -148,7 +148,6 @@ def compute_functional_gradients_and_curvatures(self): class ErrorModelRefinery: - """Refiner for the basic error model.""" def __init__(self, model, parameters_to_refine, *args, **kwargs): diff --git a/src/dials/algorithms/scaling/error_model/error_model.py b/src/dials/algorithms/scaling/error_model/error_model.py index b42a4edb3b..7a9b1fff6c 100644 --- a/src/dials/algorithms/scaling/error_model/error_model.py +++ b/src/dials/algorithms/scaling/error_model/error_model.py @@ -103,7 +103,6 @@ def calc_deltahl(Ih_table, n_h, sigmaprime) -> np.array: class ErrorModelRegressionAPM: - """Parameter manager for error model minimisation using the linear regression method. @@ -171,7 +170,6 @@ def resolve_model_parameters(self): class ErrorModelA_APM: - """Parameter manager for minimising A component with individual minimizer""" def __init__(self, model): @@ -192,7 +190,6 @@ def resolve_model_parameters(self): class ErrorModelB_APM: - """Parameter manager for minimising Bcomponent with individual minimizer""" def __init__(self, model): @@ -215,7 +212,6 @@ def resolve_model_parameters(self): class ErrorModelBinner: - """A binner for the error model data. 
Data are binned based on Ih, and methods are available for @@ -361,7 +357,6 @@ def calculate_bin_variances(self) -> np.array: class BComponent: - """The basic error model B parameter component""" def __init__(self, initial_value=0.02): @@ -370,7 +365,6 @@ def __init__(self, initial_value=0.02): class AComponent: - """The basic error model A parameter component""" def __init__(self, initial_value=1.00): @@ -379,7 +373,6 @@ def __init__(self, initial_value=1.00): class BasicErrorModel: - """Definition of a basic two-parameter error model.""" min_reflections_required = 250 @@ -387,7 +380,6 @@ class BasicErrorModel: id_ = "basic" def __init__(self, a=None, b=None, basic_params=None): - """ A basic two-parameter error model s'^2 = a^2(s^2 + (bI)^2) @@ -548,13 +540,13 @@ def __str__(self): " Type: basic", f" Parameters: a = {a:.5f}, b = {b:.5f}", " Error model formula: " - + "\u03C3" + + "\u03c3" + "'" + "\xb2" + " = a" + "\xb2" + "(" - + "\u03C3\xb2" + + "\u03c3\xb2" " + (bI)" + "\xb2" + ")", " estimated I/sigma asymptotic limit: %s" % ISa, "", diff --git a/src/dials/algorithms/scaling/error_model/error_model_target.py b/src/dials/algorithms/scaling/error_model/error_model_target.py index 13866cf9ee..64d2c1b13d 100644 --- a/src/dials/algorithms/scaling/error_model/error_model_target.py +++ b/src/dials/algorithms/scaling/error_model/error_model_target.py @@ -2,7 +2,6 @@ Definition of the target function for error model minimisation. """ - from __future__ import annotations import numpy as np @@ -26,7 +25,6 @@ def calculate_regression_x_y(Ih_table): class ErrorModelTarget: - """Error model target for finding slope of norm distribution. (i.e. the 'a' parameter of the basic error model)""" @@ -115,7 +113,6 @@ def calculate_gradients(self, apm): class ErrorModelTargetA(ErrorModelTarget): - """Target to minimise the 'a' component of the basic error model.""" def calculate_residuals(self, apm): @@ -137,7 +134,6 @@ def calculate_gradients(self, apm): class ErrorModelTargetB(ErrorModelTarget): - """Target to minimise the 'b' component of the basic error model. 
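The basic error model printed above rescales each variance as σ'² = a²(σ² + (bI)²), which drives I/σ' towards an asymptotic limit of 1/(a·b) at high intensity (the ISa quoted in the model summary). A direct transcription of that formula:

import math

def adjusted_sigma(sigma, intensity, a, b):
    # basic two-parameter error model: sigma'^2 = a^2 * (sigma^2 + (b * I)^2)
    return a * math.sqrt(sigma**2 + (b * intensity) ** 2)

a, b = 1.05, 0.03
print(adjusted_sigma(10.0, 1000.0, a, b))  # ~33.2
print(1.0 / (a * b))                       # ISa: asymptotic I/sigma' limit, ~31.7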
Uses the binner to calculate residuals, gradients""" diff --git a/src/dials/algorithms/scaling/model/components/analytical_component.py b/src/dials/algorithms/scaling/model/components/analytical_component.py index 1c8fa4f9f7..7edff0aff4 100644 --- a/src/dials/algorithms/scaling/model/components/analytical_component.py +++ b/src/dials/algorithms/scaling/model/components/analytical_component.py @@ -9,7 +9,6 @@ class AnalyticalComponent(ScaleComponentBase): - null_parameter_value = 1.0 def __init__(self, initial_values, parameter_esds=None): diff --git a/src/dials/algorithms/scaling/model/components/scale_components.py b/src/dials/algorithms/scaling/model/components/scale_components.py index cc43a2a413..552f746a5f 100644 --- a/src/dials/algorithms/scaling/model/components/scale_components.py +++ b/src/dials/algorithms/scaling/model/components/scale_components.py @@ -94,9 +94,7 @@ def parameters(self): @parameters.setter def parameters(self, new_parameters): - assert len(new_parameters) == len( - self._parameters - ), f""" + assert len(new_parameters) == len(self._parameters), f""" attempting to set a new set of parameters of different length than previous assignment: was {len(self._parameters)}, attempting {len(new_parameters)}""" self._parameters = new_parameters @@ -191,9 +189,7 @@ class SingleScaleFactor(ScaleComponentBase): def __init__(self, initial_values, parameter_esds=None): """Set the initial parameter values, parameter esds and n_params.""" - assert ( - len(initial_values) == 1 - ), """ + assert len(initial_values) == 1, """ This model component can only hold a single parameter.""" super().__init__(initial_values, parameter_esds) diff --git a/src/dials/algorithms/scaling/model/components/smooth_scale_components.py b/src/dials/algorithms/scaling/model/components/smooth_scale_components.py index d5c654a61b..4b405eae0a 100644 --- a/src/dials/algorithms/scaling/model/components/smooth_scale_components.py +++ b/src/dials/algorithms/scaling/model/components/smooth_scale_components.py @@ -1,4 +1,4 @@ -"""" +""" " Classes that each define a smoothly varying component of a scaling model. 
These classes use a gaussian smoother (1D, 2D or 3D) to calculate the @@ -126,9 +126,7 @@ def smoother(self): def nparam_to_val(n_params): """Convert the number of parameters to the required input value for the smoother.""" - assert ( - n_params >= 2 - ), """cannot initialise a smooth scale factor + assert n_params >= 2, """cannot initialise a smooth scale factor for <2 parameters.""" if n_params == 2 or n_params == 3: return n_params - 1 @@ -352,9 +350,7 @@ class SmoothScaleComponent2D(ScaleComponentBase, SmoothMixin): null_parameter_value = 1.0 def __init__(self, initial_values, shape, parameter_esds=None): - assert len(initial_values) == ( - shape[0] * shape[1] - ), """The shape + assert len(initial_values) == (shape[0] * shape[1]), """The shape information to initialise a 2D smoother is inconsistent with the length of the initial parameter list.""" super().__init__(initial_values, parameter_esds) @@ -490,9 +486,7 @@ class SmoothScaleComponent3D(ScaleComponentBase, SmoothMixin): null_parameter_value = 1.0 def __init__(self, initial_values, shape, parameter_esds=None): - assert len(initial_values) == ( - shape[0] * shape[1] * shape[2] - ), """The + assert len(initial_values) == (shape[0] * shape[1] * shape[2]), """The shape information to initialise a 3D smoother is inconsistent with the length of the initial parameter list.""" super().__init__(initial_values, parameter_esds) diff --git a/src/dials/algorithms/scaling/model/model.py b/src/dials/algorithms/scaling/model/model.py index ad536b41d1..ea2b3d1a49 100644 --- a/src/dials/algorithms/scaling/model/model.py +++ b/src/dials/algorithms/scaling/model/model.py @@ -10,6 +10,8 @@ import logging import math +import pkg_resources + from libtbx import Auto, phil from dials.algorithms.scaling.error_model.error_model import BasicErrorModel @@ -49,8 +51,6 @@ logger = logging.getLogger("dials") -import pkg_resources - base_model_phil_str = """\ correction.fix = None .type = strings @@ -457,7 +457,6 @@ def _add_absorption_component_to_physically_derived_model(model, reflection_tabl class DoseDecay(ScalingModelBase): - """A model similar to the physical model, where an exponential decay component is used plus a relative B-factor per sweep, with no absorption surface by default. 
Most suitable for multi-crystal datasets.""" diff --git a/src/dials/algorithms/scaling/observers.py b/src/dials/algorithms/scaling/observers.py index b4cfc77c56..2d2a8ad5df 100644 --- a/src/dials/algorithms/scaling/observers.py +++ b/src/dials/algorithms/scaling/observers.py @@ -100,7 +100,7 @@ def print_scaling_summary(script): valid_ranges = get_valid_image_ranges(script.experiments) image_ranges = get_image_ranges(script.experiments) msg = [] - for (img, valid, refl) in zip(image_ranges, valid_ranges, script.reflections): + for img, valid, refl in zip(image_ranges, valid_ranges, script.reflections): if valid: if len(valid) > 1 or valid[0][0] != img[0] or valid[-1][1] != img[1]: msg.append( @@ -164,9 +164,9 @@ def print_scaling_summary(script): if d_min and d_min - max_current_res > 0.005: logger.info( "Resolution limit suggested from CC" - + "\u00BD" + + "\u00bd" + " fit (limit CC" - + "\u00BD" + + "\u00bd" + "=0.3): %.2f", d_min, ) diff --git a/src/dials/algorithms/scaling/outlier_rejection.py b/src/dials/algorithms/scaling/outlier_rejection.py index 09262fa2fe..cc873469e7 100644 --- a/src/dials/algorithms/scaling/outlier_rejection.py +++ b/src/dials/algorithms/scaling/outlier_rejection.py @@ -177,9 +177,7 @@ class OutlierRejectionBase: def __init__(self, Ih_table, zmax): """Set up and run the outlier rejection algorithm.""" - assert ( - Ih_table.n_work_blocks == 1 - ), """ + assert Ih_table.n_work_blocks == 1, """ Outlier rejection algorithms require an Ih_table with nblocks = 1""" # Note: could be possible to code for nblocks > 1 self._Ih_table_block = Ih_table.blocked_data_list[0] @@ -232,9 +230,7 @@ class TargetedOutlierRejection(OutlierRejectionBase): def __init__(self, Ih_table, zmax, target): """Set a target Ih_table and run the outlier rejection.""" - assert ( - target.n_work_blocks == 1 - ), """ + assert target.n_work_blocks == 1, """ Targeted outlier rejection requires a target Ih_table with nblocks = 1""" self._target_Ih_table_block = target.blocked_data_list[0] self._target_Ih_table_block.calc_Ih() diff --git a/src/dials/algorithms/scaling/parameter_handler.py b/src/dials/algorithms/scaling/parameter_handler.py index 95897d6d4e..ae6224846e 100644 --- a/src/dials/algorithms/scaling/parameter_handler.py +++ b/src/dials/algorithms/scaling/parameter_handler.py @@ -44,7 +44,6 @@ def __init__(self, components, selection_list): class ScalingParameterManagerGenerator(ParameterManagerGenerator): - """Class to generate parameter manager for scaling.""" def __init__(self, data_managers, target, mode, shared=None): diff --git a/src/dials/algorithms/scaling/plots.py b/src/dials/algorithms/scaling/plots.py index 1c56ded35f..31c6015289 100644 --- a/src/dials/algorithms/scaling/plots.py +++ b/src/dials/algorithms/scaling/plots.py @@ -1059,7 +1059,6 @@ def plot_array_absorption_plot(array_model): def plot_array_decay_plot(array_model): - decay_comp = array_model.components["decay"] configdict = array_model.configdict diff --git a/src/dials/algorithms/scaling/reflection_selection.py b/src/dials/algorithms/scaling/reflection_selection.py index 31d8d99bb2..d39a964b14 100644 --- a/src/dials/algorithms/scaling/reflection_selection.py +++ b/src/dials/algorithms/scaling/reflection_selection.py @@ -83,7 +83,7 @@ def _build_class_matrix(class_index, class_matrix, offset=0): - for (i, val) in enumerate(class_index, start=offset): + for i, val in enumerate(class_index, start=offset): class_matrix[val, i] = 1.0 return class_matrix @@ -125,7 +125,6 @@ def _select_groups_on_Isigma_cutoff(Ih_table, 
cutoff=2.0): def _perform_quasi_random_selection( Ih_table, n_datasets, min_per_class, min_total, max_total ): - class_matrix = sparse.matrix(n_datasets, Ih_table.size) class_matrix = _build_class_matrix( flumpy.from_numpy(Ih_table.Ih_table["dataset_id"].to_numpy()), class_matrix diff --git a/src/dials/algorithms/scaling/scaling_helper.h b/src/dials/algorithms/scaling/scaling_helper.h index bf9a2d375f..4a204a6c0a 100644 --- a/src/dials/algorithms/scaling/scaling_helper.h +++ b/src/dials/algorithms/scaling/scaling_helper.h @@ -62,7 +62,7 @@ class GaussianSmootherFirstFixed : public dials::refinement::GaussianSmoother { const scitbx::af::const_ref values) { // Use sparse storage as only naverage (default 3) values per row are // non-zero - std::size_t npoints = x.size(); //# data + std::size_t npoints = x.size(); // # data DIALS_ASSERT(npoints > 1); matrix weight(npoints, nvalues - 1); diff --git a/src/dials/algorithms/scaling/scaling_library.py b/src/dials/algorithms/scaling/scaling_library.py index 193887297e..8196566f02 100644 --- a/src/dials/algorithms/scaling/scaling_library.py +++ b/src/dials/algorithms/scaling/scaling_library.py @@ -253,15 +253,11 @@ def create_Ih_table( Allow an unequal number of experiments and reflections, as only need to extract one space group value (can optionally check all same if many).""" if selections: - assert len(selections) == len( - reflections - ), """Must have an equal number of + assert len(selections) == len(reflections), """Must have an equal number of reflection tables and selections in the input lists.""" space_group_0 = experiments[0].crystal.get_space_group() for experiment in experiments: - assert ( - experiment.crystal.get_space_group() == space_group_0 - ), """The space + assert experiment.crystal.get_space_group() == space_group_0, """The space groups of all experiments must be equal.""" input_tables = [] indices_lists = [] @@ -385,7 +381,6 @@ class MergedHalfDatasets: class ExtendedDatasetStatistics(iotbx.merging_statistics.dataset_statistics): - """A class to extend iotbx merging statistics.""" def __init__(self, *args, additional_stats=False, seed=0, **kwargs): diff --git a/src/dials/algorithms/scaling/scaling_refiner.py b/src/dials/algorithms/scaling/scaling_refiner.py index 7c6a215996..584eafb013 100644 --- a/src/dials/algorithms/scaling/scaling_refiner.py +++ b/src/dials/algorithms/scaling/scaling_refiner.py @@ -1,9 +1,8 @@ -""" Classes for scaling refinement engines. +"""Classes for scaling refinement engines. Classes are inherited from the dials.refinement engine with a few methods overwritten to use them with scaling code.""" - from __future__ import annotations import logging @@ -135,7 +134,7 @@ def print_step_table(refinery): logger.info("\nRefinement steps:") header = ["Step", "Nref"] - for (name, units) in zip(refinery._target.rmsd_names, refinery._target.rmsd_units): + for name, units in zip(refinery._target.rmsd_names, refinery._target.rmsd_units): header.append(name + "\n(" + units + ")") rows = [] diff --git a/src/dials/algorithms/scaling/scaling_utilities.py b/src/dials/algorithms/scaling/scaling_utilities.py index bb6752ef17..434066b191 100644 --- a/src/dials/algorithms/scaling/scaling_utilities.py +++ b/src/dials/algorithms/scaling/scaling_utilities.py @@ -2,7 +2,6 @@ Module of utility functions for scaling. 
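_build_class_matrix above fills a datasets x reflections indicator matrix, with entry (dataset_id, i) = 1 when reflection i belongs to that dataset, so the quasi-random selection can count and weight reflections per dataset with sparse products. A scipy sketch of the same construction (the real code uses scitbx sparse):

import numpy as np
from scipy import sparse

dataset_ids = np.array([0, 0, 1, 2, 1])  # per-reflection dataset index
n_datasets, n_refl = 3, len(dataset_ids)
class_matrix = sparse.csr_matrix(
    (np.ones(n_refl), (dataset_ids, np.arange(n_refl))),
    shape=(n_datasets, n_refl),
)
# reflections per dataset: [2. 2. 1.]
print(np.asarray(class_matrix.sum(axis=1)).ravel())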
""" - from __future__ import annotations import logging diff --git a/src/dials/algorithms/shoebox/__init__.py b/src/dials/algorithms/shoebox/__init__.py index 0ad2075458..9a9c08883c 100644 --- a/src/dials/algorithms/shoebox/__init__.py +++ b/src/dials/algorithms/shoebox/__init__.py @@ -1,4 +1,4 @@ from __future__ import annotations -from dials.algorithms.shoebox.masker import * -from dials_algorithms_shoebox_ext import * +from dials.algorithms.shoebox.masker import * # noqa: F403 +from dials_algorithms_shoebox_ext import * # noqa: F403 diff --git a/src/dials/algorithms/simulation/generate_test_reflections.py b/src/dials/algorithms/simulation/generate_test_reflections.py index a0842706b1..ec58f736cb 100644 --- a/src/dials/algorithms/simulation/generate_test_reflections.py +++ b/src/dials/algorithms/simulation/generate_test_reflections.py @@ -199,7 +199,6 @@ def simple_gaussian_spots(params): p = ProgressBar(title="Generating shoeboxes") for i in range(len(rlist)): - p.update(i * 100.0 / params.nrefl) mask = shoebox[i].mask diff --git a/src/dials/algorithms/spot_finding/factory.py b/src/dials/algorithms/spot_finding/factory.py index becf5b8a10..e08e26912b 100644 --- a/src/dials/algorithms/spot_finding/factory.py +++ b/src/dials/algorithms/spot_finding/factory.py @@ -380,7 +380,7 @@ def run(self, flags, sequence=None, observations=None, **kwargs): # noqa: U100 cutoff = hist.slot_centers()[i - 1] - 0.5 * hist.slot_width() sel = np.column_stack(np.where(H > cutoff)) - for (ix, iy) in sel: + for ix, iy in sel: flags.set_selected( ( (obs_x > xedges[ix]) @@ -460,7 +460,6 @@ def from_parameters(params=None, experiments=None, is_stills=False): raise RuntimeError("All experiment scans must contain time_of_flight") if contains_tof_experiments: - # ToF spots from spallation sources typically have elongated tails if params.spotfinder.filter.max_separation < 6: # Based on ISISSXD data diff --git a/src/dials/algorithms/spot_finding/finder.py b/src/dials/algorithms/spot_finding/finder.py index 93ead73dd2..64f1b82406 100644 --- a/src/dials/algorithms/spot_finding/finder.py +++ b/src/dials/algorithms/spot_finding/finder.py @@ -698,7 +698,6 @@ def find_spots(self, experiments: ExperimentList) -> flex.reflection_table: reflections = flex.reflection_table() for j, imageset in enumerate(imagesets): - # Find the strong spots in the sequence logger.info( "-" * 80 + "\nFinding strong spots in imageset %d\n" + "-" * 80, j @@ -837,7 +836,6 @@ def _create_hot_mask(self, imageset, hot_pixels): """ # Write the hot mask if self.write_hot_mask: - # Create the hot pixel mask hot_mask = tuple( flex.bool(flex.grid(p.get_image_size()[::-1]), True) @@ -887,7 +885,6 @@ def __init__( max_spot_size=20, min_chunksize=50, ): - super().__init__( threshold_function=threshold_function, mask=mask, @@ -913,7 +910,6 @@ def __init__( self.experiments = experiments def _correct_centroid_tof(self, reflections): - """ Sets the centroid of the spot to the peak position along the time of flight, as this tends to more accurately represent the true @@ -927,7 +923,6 @@ def _correct_centroid_tof(self, reflections): return reflections def _post_process(self, reflections): - reflections = self._correct_centroid_tof(reflections) # Filter any reflections outside of the tof range @@ -961,7 +956,6 @@ def _post_process(self, reflections): unit_s0 = expt.beam.get_unit_s0() for i_panel in range(len(expt.detector)): - sel = sel_expt & (panel_numbers == i_panel) x, y, tof = reflections["xyzobs.mm.value"].select(sel).parts() px, py, frame = 
reflections["xyzobs.px.value"].select(sel).parts() diff --git a/src/dials/algorithms/spot_prediction/__init__.py b/src/dials/algorithms/spot_prediction/__init__.py index 8b4cb9cd92..5eba1eb476 100644 --- a/src/dials/algorithms/spot_prediction/__init__.py +++ b/src/dials/algorithms/spot_prediction/__init__.py @@ -161,7 +161,6 @@ def StillsReflectionPredictor(experiment, dmin=None, spherical_relp=False, **kwa def LaueReflectionPredictor(experiment, dmin: float): - return dials_algorithms_spot_prediction_ext.LaueReflectionPredictor( experiment.beam, experiment.detector, diff --git a/src/dials/algorithms/spot_prediction/reflection_predictor.py b/src/dials/algorithms/spot_prediction/reflection_predictor.py index 95058000ec..b88d836288 100644 --- a/src/dials/algorithms/spot_prediction/reflection_predictor.py +++ b/src/dials/algorithms/spot_prediction/reflection_predictor.py @@ -2,12 +2,12 @@ import logging -logger = logging.getLogger(__name__) - from libtbx.phil import parse from dials.util import Sorry +logger = logging.getLogger(__name__) + # The phil parameters phil_scope = parse( """ diff --git a/src/dials/algorithms/spot_prediction/rotation_angles.h b/src/dials/algorithms/spot_prediction/rotation_angles.h index fff059b4bc..d431cbddd8 100644 --- a/src/dials/algorithms/spot_prediction/rotation_angles.h +++ b/src/dials/algorithms/spot_prediction/rotation_angles.h @@ -98,7 +98,7 @@ namespace dials { namespace algorithms { */ vec2 operator()(cctbx::miller::index<> miller_index, mat3 ub_matrix) const { - return operator()(ub_matrix *miller_index); + return operator()(ub_matrix * miller_index); } private: diff --git a/src/dials/algorithms/statistics/cc_half_algorithm.py b/src/dials/algorithms/statistics/cc_half_algorithm.py index e5659559f0..8c5b314047 100644 --- a/src/dials/algorithms/statistics/cc_half_algorithm.py +++ b/src/dials/algorithms/statistics/cc_half_algorithm.py @@ -1,6 +1,5 @@ """ΔCC½ algorithm definitions""" - from __future__ import annotations import logging @@ -26,7 +25,6 @@ class CCHalfFromMTZ: - """ Run a cc-half algorithm using an MTZ file. """ @@ -153,7 +151,6 @@ def read_mtzfile(filename, batch_offset=None): class CCHalfFromDials: - """ Run a cc-half algorithm using dials datafiles. """ @@ -383,7 +380,7 @@ def remove_image_ranges_below_cutoff( if exp_id == exp.identifier: tested.extend(list(range(imgrange[0], imgrange[1] + 1))) for imgrange in exp.scan.get_valid_image_ranges(exp.identifier): - if all([j not in tested for j in range(imgrange[0], imgrange[1] + 1)]): + if all(j not in tested for j in range(imgrange[0], imgrange[1] + 1)): table_id = expid_to_tableid[exp.identifier] exclude_images.append([f"{table_id}:{imgrange[0]}:{imgrange[1]}"]) logger.info( @@ -465,7 +462,6 @@ def read_experiments(experiments, reflection_table): class DeltaCCHalf: - """ Implementation of a ΔCC½ algorithm. 
""" diff --git a/src/dials/algorithms/statistics/delta_cchalf.py b/src/dials/algorithms/statistics/delta_cchalf.py index 11309b8ed5..2a79c5adef 100644 --- a/src/dials/algorithms/statistics/delta_cchalf.py +++ b/src/dials/algorithms/statistics/delta_cchalf.py @@ -278,7 +278,6 @@ def _compute_cchalf_excluding_each_group(self): # Compute CC1/2 minus each dataset cchalf_i = {} for dataset in dataset_lookup: - # Find all observations from this dataset and create a lookup based on # miller index index_lookup = defaultdict(list) diff --git a/src/dials/algorithms/statistics/fast_mcd.py b/src/dials/algorithms/statistics/fast_mcd.py index 3d8bad9fc8..705d0ebb3e 100644 --- a/src/dials/algorithms/statistics/fast_mcd.py +++ b/src/dials/algorithms/statistics/fast_mcd.py @@ -306,7 +306,6 @@ def small_dataset_estimate(self): trials = [] for i in range(self._n_trials): - H1 = self.form_initial_subset(h=self._h, data=self._data) T1, S1 = self.means_and_covariance(H1) detS1 = S1.matrix_determinant_via_lu() @@ -314,7 +313,6 @@ def small_dataset_estimate(self): # perform concentration steps detScurr, Tcurr, Scurr = detS1, T1, S1 for j in range(self._k1): # take maximum of k1 steps - Hnew = self.concentration_step(self._h, self._data, Tcurr, Scurr) Tnew, Snew = self.means_and_covariance(Hnew) detSnew = Snew.matrix_determinant_via_lu() @@ -369,11 +367,9 @@ def large_dataset_estimate(self): trials = [] h_frac = self._h / self._n for group in groups: - h_sub = int(len(group[0]) * h_frac) gp_trials = [] for i in range(n_trials): - H1 = self.form_initial_subset(h=h_sub, data=group) T1, S1 = self.means_and_covariance(H1) detS1 = S1.matrix_determinant_via_lu() @@ -381,7 +377,6 @@ def large_dataset_estimate(self): # perform concentration steps detScurr, Tcurr, Scurr = detS1, T1, S1 for j in range(self._k1): # take k1 steps - Hnew = self.concentration_step(h_sub, group, Tcurr, Scurr) Tnew, Snew = self.means_and_covariance(Hnew) detSnew = Snew.matrix_determinant_via_lu() @@ -403,10 +398,8 @@ def large_dataset_estimate(self): mrgd_trials = [] h_mrgd = int(sample_size * h_frac) for trial in trials: - detScurr, Tcurr, Scurr = trial for j in range(self._k2): # take k2 steps - Hnew = self.concentration_step(h_mrgd, sampled, Tcurr, Scurr) Tnew, Snew = self.means_and_covariance(Hnew) detSnew = Snew.matrix_determinant_via_lu() diff --git a/src/dials/algorithms/symmetry/__init__.py b/src/dials/algorithms/symmetry/__init__.py index 3dac165ef9..57613186c9 100644 --- a/src/dials/algorithms/symmetry/__init__.py +++ b/src/dials/algorithms/symmetry/__init__.py @@ -6,9 +6,6 @@ from __future__ import annotations import logging - -logger = logging.getLogger(__name__) - from io import StringIO import libtbx @@ -21,6 +18,8 @@ from dials.util import resolution_analysis from dials.util.normalisation import quasi_normalisation +logger = logging.getLogger(__name__) + def median_unit_cell(experiments): uc_params = [flex.double() for i in range(6)] diff --git a/src/dials/algorithms/symmetry/absences/plots.py b/src/dials/algorithms/symmetry/absences/plots.py index 2d2d0026b6..76ff8c33f5 100644 --- a/src/dials/algorithms/symmetry/absences/plots.py +++ b/src/dials/algorithms/symmetry/absences/plots.py @@ -1,6 +1,5 @@ """Definitions of plots for systematic absences.""" - from __future__ import annotations @@ -23,8 +22,7 @@ def plot_screw_axes(screw_axes_data): for name, data in screw_axes_data.items(): d.update( { - "plot_" - + name: { + "plot_" + name: { "data": [ { "x": list(data["miller_axis_vals"]), @@ -68,8 +66,7 @@ def 
plot_screw_axes(screw_axes_data): xtickvals.extend([xloc, xloc2]) plot = { - "frequencies_plot_" - + name: { + "frequencies_plot_" + name: { "data": [ { "x": list( @@ -138,8 +135,7 @@ def plot_screw_axes(screw_axes_data): d.update( { - "intensities_plot_" - + name: { + "intensities_plot_" + name: { "data": [ { "x": list(data["miller_axis_vals"]), diff --git a/src/dials/algorithms/symmetry/absences/run_absences_checks.py b/src/dials/algorithms/symmetry/absences/run_absences_checks.py index 22eb0a654a..0ded949f66 100644 --- a/src/dials/algorithms/symmetry/absences/run_absences_checks.py +++ b/src/dials/algorithms/symmetry/absences/run_absences_checks.py @@ -1,4 +1,5 @@ """Definition of systematic absences check algorithm.""" + from __future__ import annotations import logging diff --git a/src/dials/algorithms/symmetry/cosym/__init__.py b/src/dials/algorithms/symmetry/cosym/__init__.py index 95346fdeb7..61eaf58870 100644 --- a/src/dials/algorithms/symmetry/cosym/__init__.py +++ b/src/dials/algorithms/symmetry/cosym/__init__.py @@ -519,7 +519,6 @@ def as_json(self, filename=None, indent=2): class SymmetryAnalysis: def __init__(self, coords, sym_ops, subgroups, cb_op_inp_min): - import scipy.spatial.distance as ssd self.subgroups = subgroups diff --git a/src/dials/algorithms/symmetry/cosym/engine.py b/src/dials/algorithms/symmetry/cosym/engine.py index 3a24603068..9eb1fb256b 100644 --- a/src/dials/algorithms/symmetry/cosym/engine.py +++ b/src/dials/algorithms/symmetry/cosym/engine.py @@ -92,7 +92,6 @@ def callback_after_step(self, minimizer): def minimize_scitbx_lbfgs( target, coords, use_curvatures=True, max_iterations=100, max_calls=None ): - termination_params = scitbx.lbfgs.termination_parameters( max_iterations=max_iterations, max_calls=max_calls, diff --git a/src/dials/algorithms/symmetry/cosym/plots.py b/src/dials/algorithms/symmetry/cosym/plots.py index e34fa0ecbf..764378fc8f 100644 --- a/src/dials/algorithms/symmetry/cosym/plots.py +++ b/src/dials/algorithms/symmetry/cosym/plots.py @@ -4,7 +4,6 @@ def plot_coords(coords, labels=None, key="cosym_coordinates"): - coord_x = coords[:, 0] coord_y = coords[:, 1] assert coord_x.size == coord_y.size, (coord_x.size, coord_y.size) diff --git a/src/dials/algorithms/symmetry/cosym/target.py b/src/dials/algorithms/symmetry/cosym/target.py index 8de32e43a4..30ad56b8bf 100644 --- a/src/dials/algorithms/symmetry/cosym/target.py +++ b/src/dials/algorithms/symmetry/cosym/target.py @@ -10,16 +10,16 @@ import numpy as np import pandas as pd from orderedset import OrderedSet +from scipy import sparse import cctbx.sgtbx.cosets from cctbx import miller, sgtbx from cctbx.array_family import flex -logger = logging.getLogger(__name__) -from scipy import sparse - from dials.algorithms.scaling.scaling_library import ExtendedDatasetStatistics +logger = logging.getLogger(__name__) + def _lattice_lower_upper_index(lattices, lattice_id): lower_index = int(lattices[lattice_id]) @@ -62,13 +62,11 @@ def _compute_rij_matrix_one_row_block( cb_ops = [sgtbx.change_of_basis_op(cb_op_k) for cb_op_k in sym_ops] for j in range(n_lattices): - j_lower, j_upper = _lattice_lower_upper_index(lattices, j) intensities_j = data.data()[j_lower:j_upper] sigmas_j = data.sigmas()[j_lower:j_upper] for k, cb_op_k in enumerate(cb_ops): - indices_i = indices[cb_op_k.as_xyz()][i_lower:i_upper] for kk, cb_op_kk in enumerate(cb_ops): diff --git a/src/dials/algorithms/symmetry/laue_group.py b/src/dials/algorithms/symmetry/laue_group.py index 0cbf117616..cd50d0bb76 100644 --- 
a/src/dials/algorithms/symmetry/laue_group.py +++ b/src/dials/algorithms/symmetry/laue_group.py @@ -167,7 +167,6 @@ def _estimate_cc_sig_fac(self): self.cc_sig_fac = 0 def _estimate_cc_true(self): - # A1.2. Estimation of E(CC; S). # (i) diff --git a/src/dials/algorithms/symmetry/origin.py b/src/dials/algorithms/symmetry/origin.py index a85cabee1f..0b8c599254 100644 --- a/src/dials/algorithms/symmetry/origin.py +++ b/src/dials/algorithms/symmetry/origin.py @@ -3,7 +3,6 @@ measured intensities. """ - from __future__ import annotations @@ -78,7 +77,6 @@ def get_hkl_offset_correlation_coefficients( grid_l=0, reference=None, ): - # N.B. deliberately ignoring d_min, d_max as these are inconsistent with # changing the miller indices diff --git a/src/dials/array_family/boost_python/flex_int6.cc b/src/dials/array_family/boost_python/flex_int6.cc index cec02d48ad..259caed924 100644 --- a/src/dials/array_family/boost_python/flex_int6.cc +++ b/src/dials/array_family/boost_python/flex_int6.cc @@ -136,4 +136,4 @@ namespace dials { namespace af { } } // namespace boost_python -}} // namespace dials::af +}} // namespace dials::af diff --git a/src/dials/array_family/flex_ext.py b/src/dials/array_family/flex_ext.py index 65d978922f..2176750edd 100644 --- a/src/dials/array_family/flex_ext.py +++ b/src/dials/array_family/flex_ext.py @@ -1145,7 +1145,6 @@ def compute_miller_indices_in_asu(self, experiments): """ self["miller_index_asu"] = cctbx.array_family.flex.miller_index(len(self)) for idx, experiment in enumerate(experiments): - # Create the crystal symmetry object uc = experiment.crystal.get_unit_cell() sg = experiment.crystal.get_space_group() @@ -1322,7 +1321,6 @@ def map_centroids_to_reciprocal_space( sel_expt = self["id"] == i for i_panel in range(len(expt.detector)): - sel = sel_expt & (panel_numbers == i_panel) if calculated: x, y, z = self["xyzcal.mm"].select(sel).parts() diff --git a/src/dials/command_line/align_crystal.py b/src/dials/command_line/align_crystal.py index e8eca2c788..e369c39467 100644 --- a/src/dials/command_line/align_crystal.py +++ b/src/dials/command_line/align_crystal.py @@ -79,7 +79,6 @@ def smallest_angle(angle): def describe(vector, space_group, reciprocal=True): - vector_names = {a.elems: "a", b.elems: "b", c.elems: "c"} v = vector.elems @@ -110,7 +109,6 @@ def axis_type(vector, space_group): class align_crystal: - vector_names = {a.elems: "a", b.elems: "b", c.elems: "c"} def __init__(self, experiment, vectors, frame="reciprocal", mode="main"): @@ -157,7 +155,7 @@ def __init__(self, experiment, vectors, frame="reciprocal", mode="main"): [-ex, ey, -ez], ) - for (v1_, v2_) in self.vectors: + for v1_, v2_ in self.vectors: result_dictionary = collections.defaultdict(list) results.append((v1_, v2_, result_dictionary)) space_group = self.experiment.crystal.get_space_group() diff --git a/src/dials/command_line/anvil_correction.py b/src/dials/command_line/anvil_correction.py index 755af9ab60..259d171bb9 100644 --- a/src/dials/command_line/anvil_correction.py +++ b/src/dials/command_line/anvil_correction.py @@ -20,7 +20,6 @@ dials.anvil_correction integrated.expt integrated.refl thickness=1.2 normal=1,0,0 """ - from __future__ import annotations import logging diff --git a/src/dials/command_line/cluster_unit_cell.py b/src/dials/command_line/cluster_unit_cell.py index f4ee9f13a7..647e55ebf2 100644 --- a/src/dials/command_line/cluster_unit_cell.py +++ b/src/dials/command_line/cluster_unit_cell.py @@ -188,7 +188,6 @@ def do_cluster_analysis(crystal_symmetries, params): 
         print(clustering)
 
     if params.plot.show or params.plot.name:
-
         if params.plot.log:
             ax.set_yscale("symlog", linthresh=1)
         else:
diff --git a/src/dials/command_line/combine_experiments.py b/src/dials/command_line/combine_experiments.py
index 3726e1d26c..231531aa2b 100644
--- a/src/dials/command_line/combine_experiments.py
+++ b/src/dials/command_line/combine_experiments.py
@@ -13,8 +13,8 @@
 import dials.util
 from dials.array_family import flex
 from dials.util import log
-from dials.util.combine_experiments import CombineWithReference  # noqa
 from dials.util.combine_experiments import (
+    CombineWithReference,  # noqa
     combine_experiments,
     combine_experiments_no_reflections,
     do_unit_cell_clustering,
@@ -202,7 +202,6 @@ def save_combined_experiments(
     experiments_filename="combined.expt",
     reflections_filename="combined.refl",
 ):
-
     output_experiments_list: List[ExperimentList] = []
     output_reflections_list: List[flex.reflection_table] = []
     expt_names_list: List[str] = []
diff --git a/src/dials/command_line/cosym.py b/src/dials/command_line/cosym.py
index 58831927fb..bb333f8499 100644
--- a/src/dials/command_line/cosym.py
+++ b/src/dials/command_line/cosym.py
@@ -300,7 +300,7 @@ def _apply_reindexing_operators(self, reindexing_ops, subgroup=None):
                 self.params.absolute_angle_tolerance,
                 subgroup,
             )
-            for (expt, refl) in zip(self._experiments, self._reflections):
+            for expt, refl in zip(self._experiments, self._reflections):
                 expt.crystal = expt.crystal.change_basis(cb_op)
                 refl["miller_index"] = cb_op.apply(refl["miller_index"])
         elif (
@@ -308,7 +308,7 @@ def _apply_reindexing_operators(self, reindexing_ops, subgroup=None):
             != sgtbx.change_of_basis_op("a,b,c").as_xyz()
         ):
             cb_op = subgroup["cb_op_inp_best"]
-            for (expt, refl) in zip(self._experiments, self._reflections):
+            for expt, refl in zip(self._experiments, self._reflections):
                 expt.crystal = expt.crystal.change_basis(cb_op)
                 refl["miller_index"] = cb_op.apply(refl["miller_index"])
         # if either of the above are not true, then we are already in the best cell.
diff --git a/src/dials/command_line/damage_analysis.py b/src/dials/command_line/damage_analysis.py
index 4dd083c968..aab645dd5f 100644
--- a/src/dials/command_line/damage_analysis.py
+++ b/src/dials/command_line/damage_analysis.py
@@ -116,7 +116,6 @@
 
 
 class PychefRunner:
-
     """Class to prepare input data and run the pychef algorithm."""
 
     def __init__(self, intensities, dose, params):
@@ -239,12 +238,16 @@ def from_dials_data_files(cls, params, experiments, reflection_table):
         )
         logger.info(
             "Interpreting data using:\n  starting_doses=%s\n  dose_per_image=%s",
-            ", ".join("%s" % i for i in start_doses)
-            if len(set(start_doses)) > 1
-            else f" all {start_doses[0]}",
-            ", ".join("%s" % i for i in doses_per_image)
-            if len(set(doses_per_image)) > 1
-            else f" all {doses_per_image[0]}",
+            (
+                ", ".join("%s" % i for i in start_doses)
+                if len(set(start_doses)) > 1
+                else f" all {start_doses[0]}"
+            ),
+            (
+                ", ".join("%s" % i for i in doses_per_image)
+                if len(set(doses_per_image)) > 1
+                else f" all {doses_per_image[0]}"
+            ),
         )
 
         for expt, starting_dose, dose_per_img in zip(
diff --git a/src/dials/command_line/dials_import.py b/src/dials/command_line/dials_import.py
index a6ef190913..d31cb272c2 100644
--- a/src/dials/command_line/dials_import.py
+++ b/src/dials/command_line/dials_import.py
@@ -210,7 +210,6 @@ def _extract_or_read_imagesets(params):
 
     # Check we have some filenames
     if len(experiments) == 0:
-
         # FIXME Should probably make this smarter since it requires editing here
         # and in dials.import phil scope
         try:
@@ -224,7 +223,6 @@ def _extract_or_read_imagesets(params):
     # Check if a template has been set and print help if not, otherwise try to
     # import the images based on the template input
     if len(params.input.template) > 0:
-
         experiments = ExperimentListFactory.from_templates(
             params.input.template,
             image_range=params.geometry.scan.image_range,
@@ -505,7 +503,6 @@ def __call__(self, imageset_list):
 
         # Loop through imagesets
         for imageset in imageset_list:
-
             # Set the external lookups
             imageset = self.update_lookup(imageset, lookup)
 
@@ -827,7 +824,6 @@ def assert_single_sequence(experiments, params):
     ]
 
     if len(sequences) > 1:
-
         # Print some info about multiple sequences
         diagnose_multiple_sequences(sequences, params)
 
@@ -964,7 +960,6 @@ def do_import(
 
     # Print out info for all experiments
     for experiment in experiments:
-
         # Print some experiment info - override the output of image range
         # if appropriate
         image_range = params.geometry.scan.image_range
diff --git a/src/dials/command_line/find_spots.py b/src/dials/command_line/find_spots.py
index 2e3b1be428..04e63cd64b 100644
--- a/src/dials/command_line/find_spots.py
+++ b/src/dials/command_line/find_spots.py
@@ -100,7 +100,6 @@ def do_spotfinding(
     experiments: ExperimentList,
     params: libtbx.phil.scope_extract,
 ) -> tuple[ExperimentList, flex.reflection_table]:
-
     # did input have identifier?
     had_identifiers = False
     if all(i != "" for i in experiments.identifiers()):
@@ -176,7 +175,6 @@ def do_spotfinding(
 
     # Save the experiments
     if params.output.experiments:
-
         logger.info(f"Saving experiments to {params.output.experiments}")
         experiments.as_file(params.output.experiments)
diff --git a/src/dials/command_line/frame_orientations.py b/src/dials/command_line/frame_orientations.py
index af42f6b9cf..0f589b3517 100644
--- a/src/dials/command_line/frame_orientations.py
+++ b/src/dials/command_line/frame_orientations.py
@@ -7,7 +7,6 @@
 Usage: dials.frame_orientations refined.expt
 """
 
-
 from __future__ import annotations
 
 import sys
diff --git a/src/dials/command_line/generate_mask.py b/src/dials/command_line/generate_mask.py
index 2de2b82b15..c90353b350 100644
--- a/src/dials/command_line/generate_mask.py
+++ b/src/dials/command_line/generate_mask.py
@@ -17,7 +17,6 @@
     dials.generate_mask models.expt d_max=2.00
 """
 
-
 from __future__ import annotations
 
 import logging
diff --git a/src/dials/command_line/geometry_viewer.py b/src/dials/command_line/geometry_viewer.py
index c4c0c8d149..af734fbe1e 100644
--- a/src/dials/command_line/geometry_viewer.py
+++ b/src/dials/command_line/geometry_viewer.py
@@ -335,7 +335,6 @@ def OnChar(self, event):
         self.GetParent().viewer.OnChar(event)
 
     def add_controls(self):
-
         ctrls = self.create_controls(setting="show_panel_axes", label="Show panel axes")
         self.panel_sizer.Add(ctrls[0], 0, wx.ALL, 5)
 
@@ -576,7 +575,6 @@ def initialize_modelview(self, eye_vector=None, angle=None):
 
 @show_mail_handle_errors()
 def run(args=None):
-
     import os
 
     from dials.util.options import ArgumentParser, flatten_experiments
diff --git a/src/dials/command_line/import_xds.py b/src/dials/command_line/import_xds.py
index 423cd87b6f..b5723a64a0 100644
--- a/src/dials/command_line/import_xds.py
+++ b/src/dials/command_line/import_xds.py
@@ -209,7 +209,6 @@ def __call__(self, params, options):
 
         # Loop through the data blocks
         for i, exp in enumerate(experiments):
-
             # Print some experiment info
             print("-" * 80)
             print("Experiment %d" % i)
diff --git a/src/dials/command_line/integrate.py b/src/dials/command_line/integrate.py
index a139cc4a9a..f93d1616fb 100644
--- a/src/dials/command_line/integrate.py
+++ b/src/dials/command_line/integrate.py
@@ -19,7 +19,6 @@
     dials.integrate models.expt refined.refl background.algorithm=glm
 """
 
-
 from __future__ import annotations
 
 import logging
@@ -268,7 +267,6 @@ def sample_predictions(experiments, predicted, params):
     working_isel = flex.size_t()
 
     for iexp, exp in enumerate(experiments):
-
         sel = predicted["id"] == iexp
         isel = sel.iselection()
         nrefs = sample_size = len(isel)
@@ -315,7 +313,6 @@ def split_for_scan_range(experiments, reference, scan_range):
 
     # Only do anything is the scan range is set
     if scan_range is not None and len(scan_range) > 0:
-
         # Ensure that all experiments have the same imageset and scan
         iset = [e.imageset for e in experiments]
         scan = [e.scan for e in experiments]
diff --git a/src/dials/command_line/merge_cbf.py b/src/dials/command_line/merge_cbf.py
index 5305237575..32a7b14a1f 100644
--- a/src/dials/command_line/merge_cbf.py
+++ b/src/dials/command_line/merge_cbf.py
@@ -88,7 +88,6 @@ def merge_cbf(imageset, n_images, out_prefix="sum_", get_raw_data_from_imageset=
         data_out = None
 
         for j in range(n_images):
-
             i_in = (i_out * n_images) + j
 
             if get_raw_data_from_imageset:
@@ -141,7 +140,6 @@ def merge_cbf(imageset, n_images, out_prefix="sum_", get_raw_data_from_imageset=
                     "Omega_increment",
                     "Chi_increment",
                 }:
-
                    if rsplit[1] == "Count_cutoff":  # needs to be an integer
                        new_header.append(
                            "%s\n"
                            %
@@ -194,9 +192,11 @@ def merge_cbf(imageset, n_images, out_prefix="sum_", get_raw_data_from_imageset=
                 # 'Strings with spaces, as they are seen as multiple columns, or with'
                 # _multiple _columns _defined _on _same _line _they _are _seen _as _one _column
                 new_line = [
-                    element
-                    if modifier is None
-                    else f"{float(element) * modifier:f}"
+                    (
+                        element
+                        if modifier is None
+                        else f"{float(element) * modifier:f}"
+                    )
                     for modifier, element in zip(modifiers, line.split())
                 ]
                 new_header[n] = f"{' '.join(new_line)}\r\n"
diff --git a/src/dials/command_line/model_background.py b/src/dials/command_line/model_background.py
index 427eb72d4c..da352f2442 100644
--- a/src/dials/command_line/model_background.py
+++ b/src/dials/command_line/model_background.py
@@ -8,7 +8,6 @@
     dials.integrate integrated.expt refined.refl background.algorithm=gmodel gmodel.robust.algorithm=True gmodel.model=background.pickle
 """
 
-
 from __future__ import annotations
 
 import logging
diff --git a/src/dials/command_line/modify_geometry.py b/src/dials/command_line/modify_geometry.py
index 1b09577c23..0142c85e14 100644
--- a/src/dials/command_line/modify_geometry.py
+++ b/src/dials/command_line/modify_geometry.py
@@ -27,7 +27,6 @@
 def update(
     experiments: ExperimentList, new_params: libtbx.phil.scope_extract
 ) -> ExperimentList:
-
     """
     Modify detector, beam, goniometer and scan in experiments with the values in new_params
     """
@@ -48,7 +47,6 @@ def update(
 
 @dials.util.show_mail_handle_errors()
 def run(args: List[str] = None, phil: libtbx.phil.scope = phil_scope) -> None:
-
     usage = "dials.modify_geometry [options] models.expt"
 
     parser = ArgumentParser(
diff --git a/src/dials/command_line/plot_Fo_vs_Fc.py b/src/dials/command_line/plot_Fo_vs_Fc.py
index 7e42190eab..b4c3cec28a 100644
--- a/src/dials/command_line/plot_Fo_vs_Fc.py
+++ b/src/dials/command_line/plot_Fo_vs_Fc.py
@@ -6,7 +6,6 @@
 Usage: dials.plot_Fo_vs_Fc hklin=refined.mtz
 """
 
-
 from __future__ import annotations
 
 import sys
@@ -22,8 +21,8 @@
 from dials.util.options import ArgumentParser
 
 matplotlib.use("pdf")
-import matplotlib.pyplot as plt
-from matplotlib.ticker import MultipleLocator
+import matplotlib.pyplot as plt  # noqa: E402
+from matplotlib.ticker import MultipleLocator  # noqa: E402
 
 
 class HyperbolaFit(normal_eqns.non_linear_ls, normal_eqns.non_linear_ls_mixin):
diff --git a/src/dials/command_line/plot_reflections.py b/src/dials/command_line/plot_reflections.py
index 089660e240..dced41de62 100644
--- a/src/dials/command_line/plot_reflections.py
+++ b/src/dials/command_line/plot_reflections.py
@@ -84,7 +84,6 @@ def run(args=None):
     predicted_xyz = flex.vec3_double()
 
     for reflection_list in reflections:
-
         if len(params.scan_range):
             sel = flex.bool(len(reflection_list), False)
diff --git a/src/dials/command_line/plot_scan_varying_model.py b/src/dials/command_line/plot_scan_varying_model.py
index ec1592b063..d463f4c25b 100644
--- a/src/dials/command_line/plot_scan_varying_model.py
+++ b/src/dials/command_line/plot_scan_varying_model.py
@@ -118,7 +118,6 @@ def run(self, args=None):
         # cell plot
         dat = []
         for iexp, exp in enumerate(experiments):
-
             crystal = exp.crystal
             scan = exp.scan
 
@@ -171,7 +170,6 @@ def run(self, args=None):
         # orientation plot
         dat = []
         for iexp, exp in enumerate(experiments):
-
             crystal = exp.crystal
             scan = exp.scan
 
@@ -212,7 +210,6 @@ def run(self, args=None):
         # beam centre plot
         dat = []
         for iexp, exp in enumerate(experiments):
-
             beam = exp.beam
             detector = exp.detector
             scan = exp.scan
diff --git a/src/dials/command_line/refine.py b/src/dials/command_line/refine.py
index cbf48ab795..0249593d1a 100644
--- a/src/dials/command_line/refine.py
+++ b/src/dials/command_line/refine.py
@@ -14,7 +14,6 @@
     dials.refine indexed.expt indexed.refl scan_varying=(False/True/Auto)
 """
 
-
 from __future__ import annotations
 
 import copy
@@ -257,7 +256,6 @@ def run_macrocycle(params, reflections, experiments):
 
 
 def _find_disjoint_sets(experiments):
-
     # Extract parameterisable models from the experiments
     models = []
     for experiment in experiments:
diff --git a/src/dials/command_line/refine_bravais_settings.py b/src/dials/command_line/refine_bravais_settings.py
index 7f613da21e..0a21dee8e4 100644
--- a/src/dials/command_line/refine_bravais_settings.py
+++ b/src/dials/command_line/refine_bravais_settings.py
@@ -29,7 +29,6 @@
     dials.refine_bravais_settings indexed.expt indexed.refl nproc=4
 """
 
-
 from __future__ import annotations
 
 import collections
diff --git a/src/dials/command_line/reflection_viewer.py b/src/dials/command_line/reflection_viewer.py
index 91f445caf7..1efc23a359 100644
--- a/src/dials/command_line/reflection_viewer.py
+++ b/src/dials/command_line/reflection_viewer.py
@@ -30,7 +30,6 @@ def __init__(self):
         )
 
     def run(self, args=None):
-
         from dials.util.options import flatten_reflections
         from dials.viewer.viewer_interface import extract_n_show
 
diff --git a/src/dials/command_line/reindex.py b/src/dials/command_line/reindex.py
index d46fc22210..0f358de78c 100644
--- a/src/dials/command_line/reindex.py
+++ b/src/dials/command_line/reindex.py
@@ -103,7 +103,6 @@
 
 @dials.util.show_mail_handle_errors()
 def run(args=None):
-
     usage = "dials.reindex [options] indexed.expt indexed.refl"
 
     parser = ArgumentParser(
@@ -209,7 +208,6 @@ def run(args=None):
             raise Sorry("No reflections remain after filtering the test dataset")
 
     elif params.reference.file:
-
         wavelength = np.mean([expt.beam.get_wavelength() for expt in experiments])
 
         reference_miller_set = intensities_from_reference_file(
diff --git a/src/dials/command_line/report.py b/src/dials/command_line/report.py
index 14da58744b..4395fb8a1a 100644
--- a/src/dials/command_line/report.py
+++ b/src/dials/command_line/report.py
@@ -7,9 +7,11 @@
 
 import numpy as np
 
+import libtbx.phil
 from cctbx import uctbx
 
 import dials.util.log
+from dials.algorithms.merging.merge import MergingStatisticsData
 from dials.algorithms.scaling.model.model import plot_scaling_models
 from dials.algorithms.scaling.observers import make_merging_stats_plots
 from dials.algorithms.scaling.scaling_library import (
@@ -44,8 +46,6 @@
     dials.report integrated.refl integrated.expt
 """
 
-import libtbx.phil
-
 # Create the phil parameters
 phil_scope = libtbx.phil.parse(
     """
@@ -172,7 +172,6 @@ def plot_cell(self, experiments):
         # cell plot
         dat = []
         for iexp, exp in enumerate(experiments):
-
             crystal = exp.crystal
             scan = exp.scan
 
@@ -329,7 +328,6 @@ def plot_orientation(self, experiments):
         # orientation plot
         dat = []
         for iexp, exp in enumerate(experiments):
-
             crystal = exp.crystal
             scan = exp.scan
 
@@ -1132,7 +1130,6 @@ def centroid_xy_xz_zy_residuals(self, rlist, threshold):
         d["residuals_xy"]["layout"]["title"] = "Centroid residuals in X and Y"
 
         if not is_stills:
-
             d["residuals_zy"] = {
                 "data": [
                     {
@@ -1212,7 +1209,6 @@ class IntensityAnalyser:
     """Analyse the intensities."""
 
     def __init__(self, grid_size=None, pixels_per_bin=10):
-
         self.grid_size = grid_size
         self.pixels_per_bin = pixels_per_bin
 
@@ -2081,9 +2077,6 @@ def reflection_corr_vs_ios(self, rlist, filename):
     }
 
 
-from dials.algorithms.merging.merge import MergingStatisticsData
-
-
 def merging_stats_data(reflections, experiments):
     reflections["intensity"] = reflections["intensity.scale.value"]
     reflections["variance"] = reflections["intensity.scale.variance"]
@@ -2207,7 +2200,6 @@ def __call__(self, rlist=None, experiments=None):
             json_data["image_range_tables"] = image_range_tables
 
         if self.params.output.html is not None:
-
             from jinja2 import ChoiceLoader, Environment, PackageLoader
 
             loader = ChoiceLoader(
diff --git a/src/dials/command_line/search_beam_position.py b/src/dials/command_line/search_beam_position.py
index 685afeb6ff..8cd5dcb491 100644
--- a/src/dials/command_line/search_beam_position.py
+++ b/src/dials/command_line/search_beam_position.py
@@ -241,9 +241,10 @@ def igrid(x):
             plt.savefig("search_scope.png")
 
     # changing value
-    trial_origin_offset = (idxs[idx_max % widegrid]) * beamr1 + (
-        idxs[idx_max // widegrid]
-    ) * beamr2
+    trial_origin_offset = (
+        (idxs[idx_max % widegrid]) * beamr1
+        + (idxs[idx_max // widegrid]) * beamr2
+    )
     return trial_origin_offset
 
     show_plot(widegrid=2 * grid + 1, excursi=scores)
diff --git a/src/dials/command_line/sequence_to_stills.py b/src/dials/command_line/sequence_to_stills.py
index bd41e8e792..3d643f4ff0 100644
--- a/src/dials/command_line/sequence_to_stills.py
+++ b/src/dials/command_line/sequence_to_stills.py
@@ -6,7 +6,6 @@
     dials.sequence_to_stills sequence.expt sequence.refl
 """
 
-
 from __future__ import annotations
 
 import logging
@@ -116,7 +115,6 @@ def sequence_to_stills(experiments, reflections, params):
             f"Converting experiment {expt_id} images {i_start} to {i_stop} to stills"
         )
         for i_array in range(i_start, i_stop):
-
             # Shift array position to scan-point index
             i_scan_point = i_array - i_start
diff --git a/src/dials/command_line/show.py b/src/dials/command_line/show.py
index 8883bb1638..98f91fbd29 100644
--- a/src/dials/command_line/show.py
+++ b/src/dials/command_line/show.py
@@ -89,7 +89,6 @@ def beam_centre_raw_image_px(detector, s0):
 
 
 def show_beam(detector, beam, experiment_type: ExperimentType | None = None):
-
     # standard static beam model string
     s = str(beam)
 
@@ -172,7 +171,6 @@ def show_beam(detector, beam, experiment_type: ExperimentType | None = None):
 
 
 def show_goniometer(goniometer):
-
     # standard static goniometer model string
     s = str(goniometer)
 
@@ -252,7 +250,6 @@ def run(args=None):
 
 
 def show_experiments(experiments, show_scan_varying=False):
-
     text = []
     for i_expt, expt in enumerate(experiments):
         text.append("Experiment %i:" % i_expt)
@@ -316,7 +313,6 @@ def show_experiments(experiments, show_scan_varying=False):
 
 
 def show_image_statistics(experiments, im_type):
-
     if im_type == "raw":
         raw = True
     elif im_type == "corrected":
@@ -439,7 +435,6 @@ def show_reflections(
     max_reflections=None,
     show_identifiers=False,
 ):
-
     text = []
 
     from orderedset import OrderedSet
@@ -659,7 +654,6 @@ def format_column(key, data, format_strings=None):
         column = flex.std_string()
         max_element_lengths = [c.max_element_length() for c in c_strings]
         for i in range(len(c_strings[0])):
-
             column.append(
                 f"%{len(key)}s"
                 % ", ".join(
diff --git a/src/dials/command_line/show_extensions.py b/src/dials/command_line/show_extensions.py
index de9f7de514..47bf2d1376 100644
--- a/src/dials/command_line/show_extensions.py
+++ b/src/dials/command_line/show_extensions.py
@@ -51,7 +51,6 @@ def run(self, args=None):
         # Either just show information about interfaces or show some about
         # extensions depending on user input
         if params.interfaces:
-
             # Print info about interface
             if options.verbose > 0:
                 print(f"  name = {iface.name}")
@@ -64,7 +63,6 @@ def run(self, args=None):
                 print(f"  phil:\n{phil}")
 
         else:
-
             # Loop through all the extensions
             for ext in iface.extensions():
                 print(f"  Extension: {ext.__name__}")
diff --git a/src/dials/command_line/simple_integrate.py b/src/dials/command_line/simple_integrate.py
index 3609c95aa7..df96ba7058 100644
--- a/src/dials/command_line/simple_integrate.py
+++ b/src/dials/command_line/simple_integrate.py
@@ -56,7 +56,6 @@
 
 
 def run():
-
     """
     Input setup
     """
@@ -94,7 +93,6 @@ def run():
 
 
 def run_simple_integrate(params, experiments, reflections):
-
     experiment = experiments[0]
 
     # Remove bad reflections (e.g. those not indexed)
diff --git a/src/dials/command_line/slice_sequence.py b/src/dials/command_line/slice_sequence.py
index 2ae210eaff..ffa9075356 100644
--- a/src/dials/command_line/slice_sequence.py
+++ b/src/dials/command_line/slice_sequence.py
@@ -3,6 +3,7 @@
 from os.path import basename, splitext
 
 from dxtbx.model.experiment_list import ExperimentList
+from libtbx.phil import parse
 
 import dials.util
 from dials.algorithms.refinement.refinement_helpers import calculate_frame_numbers
@@ -31,8 +32,6 @@
   "image_range=1 20" "image_range=5 30"
 """
 
-from libtbx.phil import parse
-
 phil_scope = parse(
     """
@@ -130,12 +129,10 @@ def concatenate_reflections(sliced_reflections, identifiers):
 
 
 def exclude_images_multiple(experiments, reflections, image_number):
-
     sliced_experiments = []
     sliced_reflections = []
 
     for iexp, experiment in enumerate(experiments):
-
         # Calculate the image range for each slice
         first_image, last_image = experiment.scan.get_image_range()
         first_exclude = ((first_image - 1) // image_number + 1) * image_number
diff --git a/src/dials/command_line/split_experiments.py b/src/dials/command_line/split_experiments.py
index 22768f9ed4..4737526e14 100644
--- a/src/dials/command_line/split_experiments.py
+++ b/src/dials/command_line/split_experiments.py
@@ -340,7 +340,6 @@ def save_chunk(chunk_id, expts, refls):
                 save_chunk(chunk_counter, chunk_expts, chunk_refls)
         else:
             for i, experiment in enumerate(experiments):
-
                 experiment_filename = experiments_template(index=i)
                 print("Saving experiment %d to %s" % (i, experiment_filename))
                 ExperimentList([experiment]).as_json(experiment_filename)
diff --git a/src/dials/command_line/ssx_integrate.py b/src/dials/command_line/ssx_integrate.py
index 46e514de82..b2a4253aff 100644
--- a/src/dials/command_line/ssx_integrate.py
+++ b/src/dials/command_line/ssx_integrate.py
@@ -297,7 +297,6 @@ def wrap_integrate_one(input_to_integrate: InputToIntegrate):
 
 
 def process_batch(sub_tables, sub_expts, configuration, batch_offset=0):
-
     # create iterable
     input_iterable: List[InputToIntegrate] = []
     from dxtbx.imageset import ImageSequence, ImageSet
@@ -417,9 +416,11 @@ def run_integration(reflections, experiments, params):
         integrated_experiments, integrated_reflections = process_batch(
             sub_tables, sub_expts, configuration, batch_offset=b
         )
-        yield integrated_experiments, integrated_reflections, configuration[
-            "aggregator"
-        ]
+        yield (
+            integrated_experiments,
+            integrated_reflections,
+            configuration["aggregator"],
+        )
 
 
 @show_mail_handle_errors()
diff --git a/src/dials/command_line/stills_process.py b/src/dials/command_line/stills_process.py
index 3c9d46a8ef..41da40bae7 100644
--- a/src/dials/command_line/stills_process.py
+++ b/src/dials/command_line/stills_process.py
@@ -561,7 +561,6 @@ def run(self, args=None):
                 log.config(verbosity=options.verbose, logfile=logfile)
 
         else:
-
             # Configure logging
             log.config(verbosity=options.verbose, logfile="dials.process.log")
 
@@ -1017,7 +1016,6 @@ def debug_write(self, string, state=None):
         debug_file_handle.close()
 
     def process_experiments(self, tag, experiments):
-
         if not self.params.output.composite_output:
             self.setup_filenames(tag)
         self.tag = tag
@@ -1378,7 +1376,6 @@ def refine(self, experiments, centroids):
         else:
             # Dump experiments to disk
             if self.params.output.refined_experiments_filename:
-
                 experiments.as_json(self.params.output.refined_experiments_filename)
 
             if self.params.output.indexed_filename:
@@ -1543,7 +1540,6 @@ def integrate(self, experiments, indexed):
         else:
             # Dump experiments to disk
             if self.params.output.integrated_experiments_filename:
-
                 experiments.as_json(self.params.output.integrated_experiments_filename)
 
             if self.params.output.integrated_filename:
@@ -1810,19 +1806,11 @@ def extend_with_bookkeeping(
                     dest=destrank,
                 )
 
-        self.all_imported_experiments = (
-            self.all_strong_reflections
-        ) = (
+        self.all_imported_experiments = self.all_strong_reflections = (
             self.all_indexed_experiments
-        ) = (
-            self.all_indexed_reflections
-        ) = (
+        ) = self.all_indexed_reflections = (
             self.all_integrated_experiments
-        ) = (
-            self.all_integrated_reflections
-        ) = (
-            self.all_coset_experiments
-        ) = (
+        ) = self.all_integrated_reflections = self.all_coset_experiments = (
             self.all_coset_reflections
         ) = self.all_int_pickles = self.all_integrated_reflections = []
 
@@ -1831,7 +1819,6 @@ def extend_with_bookkeeping(
             len(self.all_imported_experiments) > 0
             and self.params.output.experiments_filename
         ):
-
            self.all_imported_experiments.as_json(
                 self.params.output.experiments_filename
             )
@@ -1848,7 +1835,6 @@ def extend_with_bookkeeping(
             len(self.all_indexed_experiments) > 0
             and self.params.output.refined_experiments_filename
         ):
-
             self.all_indexed_experiments.as_json(
                 self.params.output.refined_experiments_filename
             )
@@ -1865,7 +1851,6 @@ def extend_with_bookkeeping(
             len(self.all_integrated_experiments) > 0
             and self.params.output.integrated_experiments_filename
         ):
-
             self.all_integrated_experiments.as_json(
                 self.params.output.integrated_experiments_filename
             )
@@ -1884,7 +1869,6 @@ def extend_with_bookkeeping(
             len(self.all_coset_experiments) > 0
             and self.params.output.coset_experiments_filename
         ):
-
             self.all_coset_experiments.as_json(
                 self.params.output.coset_experiments_filename
             )
diff --git a/src/dials/extensions/dispersion_spotfinder_threshold_ext.py b/src/dials/extensions/dispersion_spotfinder_threshold_ext.py
index dc980b181b..9bd3cc6e11 100644
--- a/src/dials/extensions/dispersion_spotfinder_threshold_ext.py
+++ b/src/dials/extensions/dispersion_spotfinder_threshold_ext.py
@@ -111,7 +111,6 @@ def compute_threshold(self, image, mask, **kwargs):
 
 
 def estimate_global_threshold(image, mask=None, plot=False):
-
     from scitbx import matrix
     from scitbx.array_family import flex
diff --git a/src/dials/model/data/__init__.py b/src/dials/model/data/__init__.py
index 4891585d8f..0d21244ab1 100644
--- a/src/dials/model/data/__init__.py
+++ b/src/dials/model/data/__init__.py
@@ -4,7 +4,7 @@
 
 ext = boost_adaptbx.boost.python.import_ext("dials_model_data_ext")
 
-from dials_model_data_ext import *  # noqa: F403; lgtm
+from dials_model_data_ext import *  # noqa: F403, E402
 
 __all__ = (  # noqa: F405
     "AdjacencyList",
diff --git a/src/dials/nexus/__init__.py b/src/dials/nexus/__init__.py
index d8fcd4084b..27a8ad57f8 100644
--- a/src/dials/nexus/__init__.py
+++ b/src/dials/nexus/__init__.py
@@ -3,4 +3,4 @@
 from boost_adaptbx.boost import optional
 
 from dials.array_family import flex
-from dials_nexus_ext import *
+from dials_nexus_ext import *  # noqa: F403
diff --git a/src/dials/pychef/__init__.py b/src/dials/pychef/__init__.py
index dd8716aade..46b43a0af0 100644
--- a/src/dials/pychef/__init__.py
+++ b/src/dials/pychef/__init__.py
@@ -110,7 +110,6 @@ class Statistics:
     def __init__(
         self, intensities, dose, n_bins=8, range_min=None, range_max=None, range_width=1
     ):
-
         if isinstance(dose, flex.double):
             sorted_dose = flex.sorted(dose)
             dd = sorted_dose[1:] - sorted_dose[:-1]
@@ -195,7 +194,6 @@ def __init__(
         self.rd = chef_stats.rd()
 
     def completeness_vs_dose_str(self):
-
         anomalous = self.intensities.anomalous_flag()
 
         title = "Completeness vs. dose:"
diff --git a/src/dials/pychef/damage_series.py b/src/dials/pychef/damage_series.py
index de66670b2d..90437f99b3 100644
--- a/src/dials/pychef/damage_series.py
+++ b/src/dials/pychef/damage_series.py
@@ -227,7 +227,6 @@ def _write_mtz(sel_intensities, sel_doses, fname):
 
 
 def generate_damage_series_mtz(params, doses, intensities):
-
     plots = DamageSeriesPlots(d_max=params.d_max, d_min=params.d_min)
     group_size = params.damage_series.dose_group_size
     assert group_size > 0.0
@@ -271,7 +270,6 @@ def generate_damage_series_mtz(params, doses, intensities):
 
 
 def generate_damage_series(params, experiments, reflection_table):
-
     # first set up plotting stuff
     plots = DamageSeriesPlots(d_max=params.d_max, d_min=params.d_min)
diff --git a/src/dials/report/analysis.py b/src/dials/report/analysis.py
index 3dde1e53a0..d508fb98e9 100644
--- a/src/dials/report/analysis.py
+++ b/src/dials/report/analysis.py
@@ -365,9 +365,7 @@ def generate_stats(d, r, s, e=None):
             anomalous_statistics.overall.delta_i_mean_over_sig_delta_i_mean
         ]
     if selected_anomalous_statistics:
-        anom_probability_plot = (
-            selected_anomalous_statistics.overall.anom_probability_plot_expected_delta
-        )
+        anom_probability_plot = selected_anomalous_statistics.overall.anom_probability_plot_expected_delta
         if anom_probability_plot is not None:
             stats["Anomalous slope"] = [anom_probability_plot.slope]
             stats["dF/F"] = [selected_anomalous_statistics.overall.anom_signal]
diff --git a/src/dials/report/html_report.py b/src/dials/report/html_report.py
index ad3cf22656..76e27532f5 100644
--- a/src/dials/report/html_report.py
+++ b/src/dials/report/html_report.py
@@ -7,11 +7,9 @@ def __init__(self, external_dependencies="remote"):
         self.external_dependencies = external_dependencies
 
     def header(self):
-
         assert self.external_dependencies in ("remote", "local", "embed")
 
         if self.external_dependencies == "remote":
-
             plotly_js = (
                 ''
             )
@@ -150,7 +148,6 @@ def header(self):
         return html_header
 
     def css(self):
-
         return """
 body {
   /*font-family: Helmet, Freesans, Helvetica, Arial, sans-serif;*/
@@ -183,9 +180,7 @@ def body(self):
       document.body);
 
 
-""" % (
-            "\n".join(content.html() for content in self._content)
-        )
+""" % ("\n".join(content.html() for content in self._content))
         return html_body
 
 
@@ -216,9 +211,7 @@ def html(self):
 %s
-""" % (
-            "\n".join(p.html() for p in self._panels)
-        )
+""" % ("\n".join(p.html() for p in self._panels))
         return html
 
@@ -294,7 +287,6 @@ def javascript(self):
         return javascript
 
     def html(self):
-
         return "\n".join(
             (
                 '
 '
@@ -316,9 +308,7 @@ def html(self):
 %s
-""" % "\n".join(
-            content.html() for content in self._content
-        )
+""" % "\n".join(content.html() for content in self._content)
         return html
@@ -334,9 +324,7 @@ def html(self):
 %s
-""" % "\n".join(
-            content.html() for content in self._content
-        )
+""" % "\n".join(content.html() for content in self._content)
         return html
diff --git a/src/dials/report/plots.py b/src/dials/report/plots.py
index a40600cd0f..5418d19783 100644
--- a/src/dials/report/plots.py
+++ b/src/dials/report/plots.py
@@ -109,7 +109,6 @@ def scale_rmerge_vs_batch_plot(batch_manager, rmerge_vs_b, scales_vs_b=None):
 
 
 def i_over_sig_i_vs_batch_plot(batch_manager, i_sig_i_vs_batch):
-
     reduced_batches = batch_manager.reduced_batches
     shapes, annotations, text = batch_manager.batch_plot_shapes_and_annotations()
     if len(annotations) > 30:
@@ -229,7 +228,6 @@ def flush(self):
 
 
 def xtriage_output(xanalysis):
-
     with StringIO() as xs:
         xout = _xtriage_output(xs)
         try:
@@ -524,7 +522,6 @@ def l_test_plot(self):
         }
 
     def second_moments_plot(self):
-
         acentric = self.merged_intensities.select_acentric()
         centric = self.merged_intensities.select_centric()
         if acentric.size():
@@ -654,7 +651,6 @@ def additional_stats_plot(self):
             d_star_sq_bins, nticks=5
         )
         if self.dataset_statistics.r_split:
-
             d.update(
                 {
                     "r_split": {
@@ -688,15 +684,19 @@ def cc_one_half_plot(self, method=None):
 
         if method == "sigma_tau":
             cc_one_half_bins = [
-                bin_stats.cc_one_half_sigma_tau
-                if bin_stats.cc_one_half_sigma_tau
-                else 0.0
+                (
+                    bin_stats.cc_one_half_sigma_tau
+                    if bin_stats.cc_one_half_sigma_tau
+                    else 0.0
+                )
                 for bin_stats in self.dataset_statistics.bins
             ]
             cc_one_half_critical_value_bins = [
-                bin_stats.cc_one_half_sigma_tau_critical_value
-                if bin_stats.cc_one_half_sigma_tau_critical_value
-                else 0.0
+                (
+                    bin_stats.cc_one_half_sigma_tau_critical_value
+                    if bin_stats.cc_one_half_sigma_tau_critical_value
+                    else 0.0
+                )
                 for bin_stats in self.dataset_statistics.bins
             ]
         else:
@@ -705,9 +705,11 @@ def cc_one_half_plot(self, method=None):
                 for bin_stats in self.dataset_statistics.bins
             ]
             cc_one_half_critical_value_bins = [
-                bin_stats.cc_one_half_critical_value
-                if bin_stats.cc_one_half_critical_value
-                else 0.0
+                (
+                    bin_stats.cc_one_half_critical_value
+                    if bin_stats.cc_one_half_critical_value
+                    else 0.0
+                )
                 for bin_stats in self.dataset_statistics.bins
            ]
         cc_anom_bins = [
@@ -715,9 +717,11 @@
            for bin_stats in self.dataset_statistics.bins
         ]
         cc_anom_critical_value_bins = [
-            bin_stats.cc_anom_critical_value
-            if bin_stats.cc_anom_critical_value
-            else 0.0
+            (
+                bin_stats.cc_anom_critical_value
+                if bin_stats.cc_anom_critical_value
+                else 0.0
+            )
             for bin_stats in self.dataset_statistics.bins
         ]
@@ -727,9 +731,9 @@
             cc_half=cc_one_half_bins,
             cc_anom=cc_anom_bins if not self.is_centric else None,
             cc_half_critical_values=cc_one_half_critical_value_bins,
-            cc_anom_critical_values=cc_anom_critical_value_bins
-            if not self.is_centric
-            else None,
+            cc_anom_critical_values=(
+                cc_anom_critical_value_bins if not self.is_centric else None
+            ),
             cc_half_fit=None,
             d_min=None,
         )
@@ -876,7 +880,6 @@ def multiplicity_vs_resolution_plot(self):
         }
 
     def merging_statistics_table(self, cc_half_method=None):
-
         headers = [
             "Resolution (Å)",
             "N(obs)",
@@ -951,7 +954,6 @@ def safe_format(format_str, item):
         return merging_stats_table
 
     def overall_statistics_table(self, cc_half_method=None):
-
         headers = ["", "Overall", "Low resolution", "High resolution"]
 
         stats = (
@@ -1010,12 +1012,12 @@ def overall_statistics_summary_data(self):
         if self.anomalous_dataset_statistics:
             o_anom = self.anomalous_dataset_statistics.overall
             h_anom = self.anomalous_dataset_statistics.bins[-1]
-            data[
completeness (%)" - ] = f"{(o_anom.anom_completeness * 100):.2f} ({(h_anom.anom_completeness * 100):.2f})" - data[ - "Anomalous multiplicity" - ] = f"{o_anom.mean_redundancy:.2f} ({h_anom.mean_redundancy:.2f})" + data["Anomalous completeness (%)"] = ( + f"{(o_anom.anom_completeness * 100):.2f} ({(h_anom.anom_completeness * 100):.2f})" + ) + data["Anomalous multiplicity"] = ( + f"{o_anom.mean_redundancy:.2f} ({h_anom.mean_redundancy:.2f})" + ) else: data["Anomalous completeness (%)"] = "-" data["Anomalous multiplicity"] = "-" @@ -1062,7 +1064,6 @@ def make_plots(self): return d def del_anom_correlation_ratio(self, unmerged_intensities): - acentric = unmerged_intensities.select_acentric() centric = unmerged_intensities.select_centric() correl_ratios_acentric, correl_ratios_centric = ([], []) diff --git a/src/dials/util/ascii_art.py b/src/dials/util/ascii_art.py index 10dc5f6bb8..f16f151000 100644 --- a/src/dials/util/ascii_art.py +++ b/src/dials/util/ascii_art.py @@ -53,9 +53,9 @@ def flex_histogram(z, char="*", width=60, height=10): max_count = flex.max(counts) total_counts = flex.sum(counts) - assert total_counts == len( - z - ), "Only found %d out of %d reflections for histogram" % (total_counts, len(z)) + assert total_counts == len(z), ( + "Only found %d out of %d reflections for histogram" % (total_counts, len(z)) + ) counts *= height / max_count counts = counts.iround() diff --git a/src/dials/util/batch_handling.py b/src/dials/util/batch_handling.py index 017dfe51c0..fab364d9f6 100644 --- a/src/dials/util/batch_handling.py +++ b/src/dials/util/batch_handling.py @@ -119,9 +119,11 @@ def get_image_ranges(experiments): # Note, if set to 1,1,for scanless experiments then first batch offset in # _calculate_batch_offsets is zero below, bad! return [ - e.scan.get_image_range() - if (e.scan and e.scan.get_oscillation()[1] != 0.0) - else (0, 0) + ( + e.scan.get_image_range() + if (e.scan and e.scan.get_oscillation()[1] != 0.0) + else (0, 0) + ) for e in experiments ] diff --git a/src/dials/util/boost_python/streambuf_test_ext.cpp b/src/dials/util/boost_python/streambuf_test_ext.cpp index 0583322c87..a35561e51c 100644 --- a/src/dials/util/boost_python/streambuf_test_ext.cpp +++ b/src/dials/util/boost_python/streambuf_test_ext.cpp @@ -114,7 +114,7 @@ namespace dials { namespace util { namespace { def("write_and_seek", write_and_seek_ostream); } -}}} // namespace dials::util:: +}}} // namespace dials::util BOOST_PYTHON_MODULE(dials_util_streambuf_test_ext) { dials::util::wrap_all(); diff --git a/src/dials/util/cluster_map.py b/src/dials/util/cluster_map.py index 950311d703..aab03c4620 100644 --- a/src/dials/util/cluster_map.py +++ b/src/dials/util/cluster_map.py @@ -64,7 +64,6 @@ def cluster_map(func, iterable, callback=None, nslots=1, njobs=1, job_category=" # Start the drmaa session with drmaa.Session() as s: - # Create the job template jt = s.createJobTemplate() jt.remoteCommand = "cluster.dials.exec" @@ -90,7 +89,6 @@ def cluster_map(func, iterable, callback=None, nslots=1, njobs=1, job_category=" N = len(list(iterable)) try: - # Submit the array job joblist = s.runBulkJobs(jt, 1, N, 1) @@ -115,7 +113,6 @@ def cluster_map(func, iterable, callback=None, nslots=1, njobs=1, job_category=" s.deleteJobTemplate(jt) except KeyboardInterrupt: - # Delete the jobs s.control( drmaa.Session.JOB_IDS_SESSION_ALL, drmaa.JobControlAction.TERMINATE diff --git a/src/dials/util/combine_experiments.py b/src/dials/util/combine_experiments.py index 8d37fab954..b81fe68df3 100644 --- 
a/src/dials/util/combine_experiments.py +++ b/src/dials/util/combine_experiments.py @@ -57,7 +57,6 @@ def __init__( detector=None, params=None, ): - self.ref_beam = beam self.ref_goniometer = goniometer self.ref_scan = scan diff --git a/src/dials/util/command_line.py b/src/dials/util/command_line.py index e1e108fb17..cf89bb3180 100644 --- a/src/dials/util/command_line.py +++ b/src/dials/util/command_line.py @@ -33,7 +33,6 @@ def update(self, percent): if percent > 100: percent = 100 if diff_time >= self._update_period or percent >= 100: - # Check the difference in percentage and calculate # number of seconds remaining diff_perc = percent - self._last_perc @@ -152,7 +151,6 @@ def finished(self, string=None): string = string + time_string else: - # Truncate the string max_length = self._length - self._indent string = string[:max_length] @@ -207,7 +205,6 @@ def end(cls, string): # Check if we want to print the time or not if cls.print_time: - # Get the time string time_string = f"{time.time() - cls._start_time:.2f}s" @@ -222,7 +219,6 @@ def end(cls, string): string = string + time_string else: - # Truncate the string max_length = cls.max_length - cls.indent string = string[:max_length] diff --git a/src/dials/util/export_mtz.py b/src/dials/util/export_mtz.py index f94f96f61c..be6beec300 100644 --- a/src/dials/util/export_mtz.py +++ b/src/dials/util/export_mtz.py @@ -231,7 +231,6 @@ def add_batch_list( and experiment.crystal.num_scan_points > 0 and experiment.goniometer ): - # Get the index of the image in the sequence e.g. first => 0, second => 1 image_index = i + i0 - experiment.scan.get_image_range()[0] @@ -667,7 +666,6 @@ def export_mtz( logger.debug("Keeping existing batches") image_ranges = get_image_ranges(experiment_list) if len(unique_offsets) != len(batch_offsets): - raise ValueError( "Duplicate batch offsets detected: %s" % ", ".join( @@ -932,7 +930,6 @@ def convert_to_cambridge(experiments): def rotate_crystal(crystal, Rmat, axis, angle): - Amats = [] if crystal.num_scan_points > 0: scan_pts = list(range(crystal.num_scan_points)) diff --git a/src/dials/util/export_pets.py b/src/dials/util/export_pets.py index ab40e4a633..a5c82c7596 100644 --- a/src/dials/util/export_pets.py +++ b/src/dials/util/export_pets.py @@ -20,7 +20,6 @@ def rotate_crystal(crystal, Rmat, axis, angle): - Amats = [] if crystal.num_scan_points > 0: scan_pts = list(range(crystal.num_scan_points)) @@ -313,7 +312,6 @@ def _set_virtual_frames(self): ) def write_dyn_cif_pets(self): - self._set_virtual_frames() cif_filename = self.filename_prefix + ".cif_pets" diff --git a/src/dials/util/export_sadabs.py b/src/dials/util/export_sadabs.py index 899bbee158..f3fbb86b95 100644 --- a/src/dials/util/export_sadabs.py +++ b/src/dials/util/export_sadabs.py @@ -161,9 +161,7 @@ def export_sadabs(integrated_data, experiment_list, params): static = False with open(params.sadabs.hklout, "w") as fout: - for j in range(nref): - h, k, l = miller_index[j] if params.sadabs.predict: diff --git a/src/dials/util/export_shelx.py b/src/dials/util/export_shelx.py index 5748c606cf..a7611f980c 100644 --- a/src/dials/util/export_shelx.py +++ b/src/dials/util/export_shelx.py @@ -112,7 +112,7 @@ def _write_ins(experiment_list, best_unit_cell, composition, ins_file): # Check for single wavelength for exp in experiment_list: wl = exp.beam.get_wavelength() - if not any([isclose(wl, w, abs_tol=1e-4) for w in wavelengths]): + if not any(isclose(wl, w, abs_tol=1e-4) for w in wavelengths): wavelengths.append(wl) if len(wavelengths) > 1: raise 
ValueError("Experiments have more than one wavelength") diff --git a/src/dials/util/filter_reflections.py b/src/dials/util/filter_reflections.py index 87a24a06d5..cffd43bd59 100644 --- a/src/dials/util/filter_reflections.py +++ b/src/dials/util/filter_reflections.py @@ -279,7 +279,6 @@ def checkdataremains(func): """Decorate a filtering method, to raise a ValueError if all data filtered.""" def wrapper(*args, **kwargs): - reflections = func(*args, **kwargs) if not reflections: diff --git a/src/dials/util/image_grouping.py b/src/dials/util/image_grouping.py index 1c53eb838d..1833a4d763 100644 --- a/src/dials/util/image_grouping.py +++ b/src/dials/util/image_grouping.py @@ -81,6 +81,7 @@ EPS = 1e-9 + ## Define classes for defining the metadata type and values/location. # class to wrap some metadata @dataclass @@ -389,7 +390,7 @@ def groupings(self) -> Dict[str, ParsedGrouping]: return self._groupings def _parse_metadata(self, metadata: dict): - for (name, metadict) in metadata.items(): + for name, metadict in metadata.items(): # name is e.g. timepoint, metadict is image : file self.metadata_items[name] = ImgToMetadataDict() for image, meta in metadict.items(): @@ -399,9 +400,9 @@ def _parse_metadata(self, metadata: dict): raise ValueError( f"Image {image} not listed in 'images:' in input yaml" ) - if type(meta) is float or type(meta) is int: + if isinstance(meta, float) or isinstance(meta, int): self.metadata_items[name][imgfile] = ConstantMetadataForFile(meta) - elif type(meta) is str: + elif isinstance(meta, str): if meta.startswith("repeat="): try: n = int(meta.split("=")[1]) @@ -483,9 +484,7 @@ def _parse_grouping_structure(self, structure): self._groupings[groupby].add_metadata_for_image( imagefile, metaforname ) - self._groupings[groupby].add_tolerances( - {n: t for n, t in zip(values, tolerances)} - ) + self._groupings[groupby].add_tolerances(dict(zip(values, tolerances))) self._groupings[groupby].check_consistent() @@ -676,7 +675,6 @@ def save_subset(input_: SplittingIterable) -> Optional[Tuple[str, FilePair]]: class GroupingImageTemplates(object): - """Class that takes a parsed group and determines the groupings and mappings required to split input data into groups. @@ -698,7 +696,6 @@ def _files_to_groups( metadata: Dict[ImageFile, Dict[str, MetadataForFile]], groups: List[_MetaDataGroup], ) -> dict[ImageFile, _GroupInfo]: - # Ok now we have the groupings of the metadata. Now find which groups each # file contains. 
# Purpose here is to create an object that will allow easy allocation from @@ -813,9 +810,9 @@ def _get_expt_file_to_groupsdata(self, data_file_pairs: List[FilePair]): def write_groupids_into_files(self, data_file_pairs: List[FilePair]) -> None: "Write a group_id column into the reflection table" - expt_file_to_groupsdata: Dict[ - Path, GroupsForExpt - ] = self._get_expt_file_to_groupsdata(data_file_pairs) + expt_file_to_groupsdata: Dict[Path, GroupsForExpt] = ( + self._get_expt_file_to_groupsdata(data_file_pairs) + ) def set_group_id_column( filepair: FilePair, @@ -856,10 +853,9 @@ def split_files_to_groups( params: Any = None, prefix: str = "", ): - - expt_file_to_groupsdata: Dict[ - Path, GroupsForExpt - ] = self._get_expt_file_to_groupsdata(data_file_pairs) + expt_file_to_groupsdata: Dict[Path, GroupsForExpt] = ( + self._get_expt_file_to_groupsdata(data_file_pairs) + ) template = "{name}group_{index:0{maxindexlength:d}d}" name_template = functools.partial( template.format, @@ -900,7 +896,6 @@ def split_files_to_groups( class GroupingImageFiles(GroupingImageTemplates): - """This class provides specific implementations for when the images are h5 files. The main difference from templates is getting the image index. """ @@ -910,7 +905,6 @@ def _files_to_groups( metadata: Dict[ImageFile, Dict[str, MetadataForFile]], groups: List[_MetaDataGroup], ) -> dict[ImageFile, _GroupInfo]: - # Ok now we have the groupings of the metadata. Now find which groups each # file contains. # Purpose here is to create an object that will allow easy allocation from diff --git a/src/dials/util/image_viewer/mask_frame.py b/src/dials/util/image_viewer/mask_frame.py index 2be91a16ec..f058e5f558 100644 --- a/src/dials/util/image_viewer/mask_frame.py +++ b/src/dials/util/image_viewer/mask_frame.py @@ -16,7 +16,6 @@ class FloatCtrl(_FloatCtrl): - # override OnFocusLostMethod since calling event.Skip() causes bad things to # happen (for reasons I don't understand) def OnFocusLost(self, event): @@ -571,7 +570,6 @@ def OnUpdate(self, event): ) if self._resolution_range_d_min > 0 and self._resolution_range_d_max > 0: - self.params.masking.resolution_range.append( (self._resolution_range_d_min, self._resolution_range_d_max) ) @@ -798,7 +796,6 @@ def DrawCircle(self, xc, yc, xedge, yedge): ) def DrawPolygon(self, vertices): - if self._mode_polygon_layer: self._pyslip.DeleteLayer(self._mode_polygon_layer) self._mode_polygon_layer = None @@ -916,7 +913,6 @@ def AddUntrustedRectangle(self, x0, y0, x1, y1): self.params.masking.untrusted.append(region) def AddUntrustedCircle(self, xc, yc, xedge, yedge): - points = [(xc, yc), (xedge, yedge)] points = [self._pyslip.ConvertView2Geo(p) for p in points] diff --git a/src/dials/util/image_viewer/rstbx_frame.py b/src/dials/util/image_viewer/rstbx_frame.py index 19c1fc4f76..a96806c7ed 100644 --- a/src/dials/util/image_viewer/rstbx_frame.py +++ b/src/dials/util/image_viewer/rstbx_frame.py @@ -172,7 +172,7 @@ def load_image(self, file_name_or_data): """ key = self.get_key(file_name_or_data) - if type(file_name_or_data) is dict: + if isinstance(file_name_or_data, dict): self._img = rstbx.viewer.image(file_name_or_data) else: try: @@ -230,7 +230,7 @@ def add_file_name_or_data(self, file_name_or_data): if self.image_chooser.GetCount() >= self.CHOOSER_SIZE: self.image_chooser.Delete(0) i = self.image_chooser.GetCount() - if type(file_name_or_data) is dict: + if isinstance(file_name_or_data, dict): self.image_chooser.Insert(key, i, None) else: self.image_chooser.Insert(os.path.basename(key), i, 
diff --git a/src/dials/util/image_viewer/slip_viewer/calibration_frame.py b/src/dials/util/image_viewer/slip_viewer/calibration_frame.py
index d8d34dc0db..039ed0c3fc 100644
--- a/src/dials/util/image_viewer/slip_viewer/calibration_frame.py
+++ b/src/dials/util/image_viewer/slip_viewer/calibration_frame.py
@@ -48,7 +48,7 @@ def __init__(self, *args, **kwds):
             name_quadrant = ["Q0", "Q1", "Q2", "Q3"][serial]

             box = wx.BoxSizer(wx.HORIZONTAL)
-            for (name_direction, value) in [("fast", fast), ("slow", slow)]:
+            for name_direction, value in [("fast", fast), ("slow", slow)]:
                 name_ctrl = name_quadrant + "_" + name_direction + "_ctrl"

                 spinner = FloatSpin(
diff --git a/src/dials/util/image_viewer/slip_viewer/flex_image.py b/src/dials/util/image_viewer/slip_viewer/flex_image.py
index cdaaef2549..b224f1a125 100644
--- a/src/dials/util/image_viewer/slip_viewer/flex_image.py
+++ b/src/dials/util/image_viewer/slip_viewer/flex_image.py
@@ -117,7 +117,6 @@ def get_flex_image_multipanel(
     # be assigned to the panel defined first.  XXX Use a Z-buffer
     # instead?
     for i, panel in enumerate(detector):
-
         # Determine the pixel size for the panel (in meters), as pixel
         # sizes need not be identical.
         data = image_data[i]
diff --git a/src/dials/util/image_viewer/slip_viewer/frame.py b/src/dials/util/image_viewer/slip_viewer/frame.py
index 6a2cbefbf9..d905eff064 100644
--- a/src/dials/util/image_viewer/slip_viewer/frame.py
+++ b/src/dials/util/image_viewer/slip_viewer/frame.py
@@ -339,7 +339,7 @@ def add_file_name_or_data(self, file_name_or_data):
         if self.image_chooser.GetCount() >= self.CHOOSER_SIZE:
             self.image_chooser.Delete(0)
         i = self.image_chooser.GetCount()
-        if type(file_name_or_data) is dict:
+        if isinstance(file_name_or_data, dict):
             self.image_chooser.Insert(key, i, None)
         elif isinstance(file_name_or_data, chooser_wrapper):
             self.image_chooser.Insert(key, i, file_name_or_data)
diff --git a/src/dials/util/image_viewer/slip_viewer/pyslip.py b/src/dials/util/image_viewer/slip_viewer/pyslip.py
index ed73de77f7..531ab0f3b0 100644
--- a/src/dials/util/image_viewer/slip_viewer/pyslip.py
+++ b/src/dials/util/image_viewer/slip_viewer/pyslip.py
@@ -93,7 +93,7 @@ def point_inside_polygon(x, y, poly):

     (p1x, p1y) = new_poly[0]

-    for (p2x, p2y) in new_poly:
+    for p2x, p2y in new_poly:
         if y > min(p1y, p2y):
             if y <= max(p1y, p2y):
                 if x <= max(p1x, p2x):
@@ -1107,7 +1107,6 @@ def DrawLightweightEllipticalSpline(self, dc, data, map_rel):
             y_off,
             pdata,
         ) in data:
-
             # Gather ellipse center, major and minor axes in view
             # coordinates.
             (ellipse_center, semimajor_axis, semiminor_axis) = [
@@ -1639,7 +1638,7 @@ def LightweightDrawPointLayer2(self, dc, data, map_rel):
         rectangles = []
         if radius:
             diameter = 2 * radius
-        for (lon, lat, place, radius, colour, x_off, y_off, pdata) in data:
+        for lon, lat, place, radius, colour, x_off, y_off, pdata in data:
             pt = self.ConvertGeo2ViewMasked((lon, lat))
             if pt:
                 (x, y) = pt
@@ -1681,7 +1680,7 @@ def LightweightDrawPointLayer(self, dc, data, map_rel):
         # dc = wx.GCDC(dc)  # allow transparent colours
         dc.SetPen(wx.Pen(colour))
         dc.SetBrush(wx.Brush(colour))
-        for (lon, lat, place, radius, colour, x_off, y_off, pdata) in data:
+        for lon, lat, place, radius, colour, x_off, y_off, pdata in data:
             pt = self.ConvertGeo2ViewMasked((lon, lat))
             if pt:
                 (x, y) = pt
@@ -1700,7 +1699,7 @@ def DrawPointLayer(self, dc, data, map_rel):
         # draw points on map/view
         if map_rel:
             dc = wx.GCDC(dc)  # allow transparent colours
-            for (lon, lat, place, radius, colour, x_off, y_off, pdata) in data:
+            for lon, lat, place, radius, colour, x_off, y_off, pdata in data:
                 pt = self.ConvertGeo2ViewMasked((lon, lat))
                 if pt:
                     dc.SetPen(wx.Pen(colour))
@@ -1710,12 +1709,12 @@ def DrawPointLayer(self, dc, data, map_rel):
                     dc.DrawCircle(int(x + x_off), int(y + y_off), radius)
         else:
             (dc_w, dc_h) = dc.GetSize()
-            dc_w2 = dc_w / 2  # noqa; lgtm; self-modifying code
-            dc_h2 = dc_h / 2  # noqa; lgtm; self-modifying code
+            dc_w2 = dc_w / 2  # noqa
+            dc_h2 = dc_h / 2  # noqa
             dc_h -= 1
             dc_w -= 1
             dc = wx.GCDC(dc)  # allow transparent colours
-            for (x, y, place, radius, colour, x_off, y_off, pdata) in data:
+            for x, y, place, radius, colour, x_off, y_off, pdata in data:
                 dc.SetPen(wx.Pen(colour))
                 dc.SetBrush(wx.Brush(colour))
                 exec(self.point_view_placement[place])
@@ -1778,8 +1777,8 @@ def DrawPolygonLayer(self, dc, data, map_rel):
                     lines.append([int(x) for x in p_lonlat])
         else:
             (dc_w, dc_h) = dc.GetSize()
-            dc_w2 = dc_w / 2  # noqa; lgtm; self-modifying code
-            dc_h2 = dc_h / 2  # noqa; lgtm; self-modifying code
+            dc_w2 = dc_w / 2  # noqa
+            dc_h2 = dc_h / 2  # noqa
             dc_w -= 1
             dc_h -= 1
             dc = wx.GCDC(dc)  # allow transparent colours
@@ -1798,7 +1797,7 @@ def DrawPolygonLayer(self, dc, data, map_rel):
                 # fetch the exec code, don't refetch for each point in polygon
                 place_exec = self.poly_view_placement[place]
                 pp = []
-                for (x, y) in p:
+                for x, y in p:
                     exec(place_exec)
                     if closed:
                         pp.append((x, y))
@@ -1831,9 +1830,9 @@ def DrawImageLayer(self, dc, images, map_rel):
         # draw images on map/view
         if map_rel:
-            for (lon, lat, bmap, w, h, place, x_off, y_off, idata) in images:
-                w2 = w / 2  # noqa; lgtm; self-modifying code
-                h2 = h / 2  # noqa; lgtm; self-modifying code
+            for lon, lat, bmap, w, h, place, x_off, y_off, idata in images:
+                w2 = w / 2  # noqa
+                h2 = h / 2  # noqa
                 pt = self.ConvertGeo2ViewMasked((lon, lat))
                 if pt:
                     (x, y) = pt
@@ -1841,11 +1840,11 @@ def DrawImageLayer(self, dc, images, map_rel):
                     dc.DrawBitmap(bmap, x, y, False)
         else:
             (dc_w, dc_h) = dc.GetSize()
-            dc_w2 = dc_w / 2  # noqa; lgtm; self-modifying code
-            dc_h2 = dc_h / 2  # noqa; lgtm; self-modifying code
-            for (x, y, bmap, w, h, place, x_off, y_off, idata) in images:
-                w2 = w / 2  # noqa; lgtm; self-modifying code
-                h2 = h / 2  # noqa; lgtm; self-modifying code
+            dc_w2 = dc_w / 2  # noqa
+            dc_h2 = dc_h / 2  # noqa
+            for x, y, bmap, w, h, place, x_off, y_off, idata in images:
+                w2 = w / 2  # noqa
+                h2 = h / 2  # noqa
                 exec(self.image_view_placement[place])
                 dc.DrawBitmap(bmap, x, y, False)

@@ -1901,16 +1900,16 @@ def DrawTextLayer(self, dc, text, map_rel):
                 # place the text relative to hotpoint
                 (w, h, _, _) = dc.GetFullTextExtent(tdata)
-                w2 = w / 2  # noqa; lgtm; self-modifying code
-                h2 = h / 2  # noqa; lgtm; self-modifying code
+                w2 = w / 2  # noqa
+                h2 = h / 2  # noqa
                 exec(self.text_map_placement[place])
                 dc.SetTextForeground(textcolour)
                 dc.DrawText(tdata, int(x), int(y))
         else:
             # we need the size of the DC
             (dc_w, dc_h) = dc.GetSize()
-            dc_w2 = dc_w / 2  # noqa; lgtm; self-modifying code
-            dc_h2 = dc_h / 2  # noqa; lgtm; self-modifying code
+            dc_w2 = dc_w / 2  # noqa
+            dc_h2 = dc_h / 2  # noqa
             dc_w -= 1
             dc_h -= 1
             dc = wx.GCDC(dc)  # allow transparent colours
@@ -1950,8 +1949,8 @@ def DrawTextLayer(self, dc, text, map_rel):

                 # place the text relative to hotpoint
                 (w, h, _, _) = dc.GetFullTextExtent(tdata)  # size of text
-                w2 = w / 2  # noqa; lgtm; self-modifying code
-                h2 = h / 2  # noqa; lgtm; self-modifying code
+                w2 = w / 2  # noqa
+                h2 = h / 2  # noqa
                 exec(self.text_view_placement[place])
                 dc.SetTextForeground(textcolour)
                 dc.DrawText(tdata, int(x), int(y))
@@ -2582,8 +2581,8 @@ def GetNearestPointInLayer(self, layer, pt):
             for p in layer.data:
                 dc = wx.BufferedPaintDC(self, self.buffer)
                 (dc_w, dc_h) = dc.GetSize()
-                dc_w2 = dc_w / 2  # noqa; lgtm; self-modifying code
-                dc_h2 = dc_h / 2  # noqa; lgtm; self-modifying code
+                dc_w2 = dc_w / 2  # noqa
+                dc_h2 = dc_h / 2  # noqa
                 dc_h -= 1
                 dc_w -= 1
                 (x, y, place, _, _, x_off, y_off, pdata) = p
@@ -2633,8 +2632,8 @@ def GetBoxSelPointsInLayer(self, layer, p1, p2):
             for p in layer.data:
                 dc = wx.BufferedPaintDC(self, self.buffer)
                 (dc_w, dc_h) = dc.GetSize()
-                dc_w2 = dc_w / 2  # noqa; lgtm; self-modifying code
-                dc_h2 = dc_h / 2  # noqa; lgtm; self-modifying code
+                dc_w2 = dc_w / 2  # noqa
+                dc_h2 = dc_h / 2  # noqa
                 dc_h -= 1
                 dc_w -= 1
                 (x, y, place, _, _, x_off, y_off, pdata) = p
diff --git a/src/dials/util/image_viewer/slip_viewer/ring_frame.py b/src/dials/util/image_viewer/slip_viewer/ring_frame.py
index b2b83d519a..92c36e77a0 100644
--- a/src/dials/util/image_viewer/slip_viewer/ring_frame.py
+++ b/src/dials/util/image_viewer/slip_viewer/ring_frame.py
@@ -282,12 +282,11 @@ def _draw_ring_layer(self, dc, data, map_rel):
             pass
         dc.SetPen(wx.Pen(colour))
         dc.SetBrush(wx.Brush(colour, wx.TRANSPARENT))
-        for (lon, lat, place, radius, colour, x_off, y_off, pdata) in data:
+        for lon, lat, place, radius, colour, x_off, y_off, pdata in data:
             (x, y) = self._pyslip.ConvertGeo2View((lon, lat))
             dc.DrawCircle(int(x), int(y), int(radius * scale))

     def DrawRing(self):
-
         xrayframe = self.GetParent().GetParent()
         panel_id, beam_pixel_fast, beam_pixel_slow = xrayframe.get_beam_center_px()

diff --git a/src/dials/util/image_viewer/slip_viewer/score_frame.py b/src/dials/util/image_viewer/slip_viewer/score_frame.py
index 136bc27f60..9984ca37b8 100644
--- a/src/dials/util/image_viewer/slip_viewer/score_frame.py
+++ b/src/dials/util/image_viewer/slip_viewer/score_frame.py
@@ -141,7 +141,7 @@ def OnSave(self, event):
             path = dialog.GetPath()
             if path != "":
                 stream = open(path, "w")
-                for (key, score) in _scores.items():
+                for key, score in _scores.items():
                     if score is None:
                         print(f"{key} None", file=stream)
                     else:
diff --git a/src/dials/util/image_viewer/slip_viewer/tile_generation.py b/src/dials/util/image_viewer/slip_viewer/tile_generation.py
index a7c3a3bd98..f101b18fc7 100644
--- a/src/dials/util/image_viewer/slip_viewer/tile_generation.py
+++ b/src/dials/util/image_viewer/slip_viewer/tile_generation.py
@@ -319,7 +319,6 @@ def vec_picture_fast_slow_to_map_relative(self, vector):
         return value

     def get_spotfinder_data(self, params):
-
         pointdata = []
         test_pattern = False
         if (
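The pyslip.py hunks above shorten the old "# noqa; lgtm; self-modifying code" comments to "# noqa", but the suppressions themselves stay: names like w2, h2, dc_w2 and dc_h2 look unused to a linter, yet the placement strings run through exec(...) read them to position points, images and text. A reduced sketch of the pattern; the placement table here is hypothetical, and because exec cannot rebind function locals in Python 3 this sketch evaluates the string in an explicit namespace instead of relying on the enclosing frame:

# Placement strings in the style of pyslip's point_view_placement table
PLACEMENT = {"cc": "x = x - w2 + x_off; y = y - h2 + y_off"}

def place(x, y, w, h, x_off, y_off, where="cc"):
    # w2/h2 look unused, but the exec'd string reads them - hence the noqa
    ns = {"x": x, "y": y, "w2": w / 2, "h2": h / 2, "x_off": x_off, "y_off": y_off}
    exec(PLACEMENT[where], ns)
    return ns["x"], ns["y"]

print(place(100, 100, 10, 10, 2, 2))  # (97.0, 97.0)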
diff --git a/src/dials/util/image_viewer/slip_viewer/uc_frame.py b/src/dials/util/image_viewer/slip_viewer/uc_frame.py
index c6aabc99cb..cebd6f7447 100644
--- a/src/dials/util/image_viewer/slip_viewer/uc_frame.py
+++ b/src/dials/util/image_viewer/slip_viewer/uc_frame.py
@@ -362,7 +362,7 @@ def _draw_rings_layer(self, dc, data, map_rel):
             pass
         dc.SetPen(wx.Pen(colour))
         dc.SetBrush(wx.Brush(colour, wx.TRANSPARENT))
-        for (lon, lat, place, radius, colour, x_off, y_off, pdata) in data:
+        for lon, lat, place, radius, colour, x_off, y_off, pdata in data:
             (x, y) = self._pyslip.ConvertGeo2View((lon, lat))
             dc.DrawCircle(int(x), int(y), int(radius * scale))

diff --git a/src/dials/util/image_viewer/spotfinder_frame.py b/src/dials/util/image_viewer/spotfinder_frame.py
index 56e7304e15..a68b0dd409 100644
--- a/src/dials/util/image_viewer/spotfinder_frame.py
+++ b/src/dials/util/image_viewer/spotfinder_frame.py
@@ -90,7 +90,6 @@ class RadialProfileThresholdDebug:
     # DispersionThresholdDebug object for those, while overriding the final_mask
    # method. This wrapper class handles that.
     def __init__(self, imageset, n_iqr, blur, n_bins):
-
         self.imageset = imageset
         params = find_spots_phil_scope.extract()
         params.spotfinder.threshold.radial_profile.blur = blur
@@ -120,7 +119,6 @@ def calculate_isoresolution_lines(
     n_rays=720,
     binning=1,
 ):
-
     # Calculate 2θ angles
     wavelength = beam.get_wavelength()
     twotheta = uctbx.d_star_sq_as_two_theta(uctbx.d_as_d_star_sq(spacings), wavelength)
@@ -133,7 +131,6 @@ def calculate_isoresolution_lines(
     ring_data = []
     resolution_text_data = []
     for tt, d in zip(twotheta, spacings):
-
         # Generate rays at 2θ
         cone_base_centre = beamvec * math.cos(tt)
         cone_base_radius = (beamvec * math.sin(tt)).length()
@@ -586,7 +583,6 @@ def boxSelect(self, event):
         return True

     def drawUntrustedPolygons(self):
-
         # remove any previous selection
         if self.sel_image_polygon_layer:
             self.pyslip.DeleteLayer(self.sel_image_polygon_layer)
@@ -613,7 +609,6 @@ def drawUntrustedPolygons(self):
             circle = region.circle

             if polygon is not None:
-
                 assert len(polygon) % 2 == 0, "Polygon must contain 2D coords"
                 vertices = []
                 for i in range(int(len(polygon) / 2)):
@@ -823,7 +818,7 @@ def _draw_resolution_polygons(
             "placement": "cc",
             "colour": "red",
         }
-        for (txt_x, txt_y, txt_str) in res_labels:
+        for txt_x, txt_y, txt_str in res_labels:
             x, y = self.pyslip.tiles.picture_fast_slow_to_map_relative(txt_x, txt_y)
             resolution_text_data.append((x, y, txt_str, metadata))

@@ -1201,7 +1196,6 @@ def get_image_data(self, image):
         return image_data

     def _calculate_dispersion_debug(self, image):
-
         # hash current settings
         dispersion_debug_list_hash = hash(
             (
@@ -1674,7 +1668,6 @@ def _rotation_axis_overlay_data(self):
         return result

     def _reflection_overlay_data(self, i_frame):
-
         fg_code = MaskCode.Valid | MaskCode.Foreground
         strong_code = MaskCode.Valid | MaskCode.Strong
         shoebox_dict = {"width": 2, "color": "#0000FFA0", "closed": False}
@@ -1749,7 +1742,7 @@ def _reflection_overlay_data(self, i_frame):
             ):
                 shoebox = reflection["shoebox"]
                 iz = i_frame - z0 if not self.viewing_stills else 0
-                if not reflection["id"] in all_pix_data:
+                if reflection["id"] not in all_pix_data:
                     all_pix_data[reflection["id"]] = []

                     all_foreground_circles[reflection["id"]] = []
@@ -1962,7 +1955,6 @@ def _reflection_overlay_data(self, i_frame):
         }

     def get_spotfinder_data(self):
-
         self.prediction_colours = [
             "#e41a1c",
             "#377eb8",
@@ -2703,7 +2695,6 @@ def OnUpdateThresholdParameters(self, event):
             self.OnUpdateImage(event)

     def OnDispersionThresholdDebug(self, event):
-
         button = event.GetEventObject()
         selected = button.GetLabelText()
diff --git a/src/dials/util/image_viewer/viewer_tools.py b/src/dials/util/image_viewer/viewer_tools.py
index b2bd43b29c..d032dee94b 100644
--- a/src/dials/util/image_viewer/viewer_tools.py
+++ b/src/dials/util/image_viewer/viewer_tools.py
@@ -2,7 +2,6 @@
 Various tools/controls used by the image viewer
 """
-
 from __future__ import annotations

 import wx
diff --git a/src/dials/util/installer.py b/src/dials/util/installer.py
index 90d53c9902..c2be91e4b6 100644
--- a/src/dials/util/installer.py
+++ b/src/dials/util/installer.py
@@ -5,7 +5,6 @@
 must be moved to the proper location to work.
 """
-
 from __future__ import annotations

 import os.path
diff --git a/src/dials/util/log.py b/src/dials/util/log.py
index 67b2c5fbd3..a3c3c9097c 100644
--- a/src/dials/util/log.py
+++ b/src/dials/util/log.py
@@ -11,6 +11,7 @@
 except ImportError:
     ColoredFormatter = None

+
 # https://stackoverflow.com/questions/25194864/python-logging-time-since-start-of-program/25196134#25196134
 class DialsLogfileFormatter:
     """A formatter for log files that prepends messages with the elapsed time
diff --git a/src/dials/util/masking.h b/src/dials/util/masking.h
index 5a1ab27cab..012adfe125 100644
--- a/src/dials/util/masking.h
+++ b/src/dials/util/masking.h
@@ -36,7 +36,7 @@ namespace dials { namespace util {
      */
     ResolutionMaskGenerator(const BeamBase &beam, const Panel &panel)
         : resolution_(
-              af::c_grid<2>(panel.get_image_size()[1], panel.get_image_size()[0])) {
+            af::c_grid<2>(panel.get_image_size()[1], panel.get_image_size()[0])) {
       vec3<double> s0 = beam.get_s0();
       double wavenumber = 1.0 / beam.get_wavelength();
       for (std::size_t j = 0; j < resolution_.accessor()[0]; ++j) {
diff --git a/src/dials/util/masking.py b/src/dials/util/masking.py
index 6c54cbf0e4..7ac516f87e 100644
--- a/src/dials/util/masking.py
+++ b/src/dials/util/masking.py
@@ -160,7 +160,6 @@ def generate_ice_ring_resolution_ranges(beam, panel, params):
     Generate a set of resolution ranges from the ice ring parameters
     """
     if params.filter is True:
-
         # Get the crystal symmetry
         crystal_symmetry = crystal.symmetry(
             unit_cell=params.unit_cell, space_group=params.space_group.group()
         )
@@ -226,7 +225,6 @@ def generate_mask(
     # Create the mask for each panel
     masks = []
     for index, panel in enumerate(detector):
-
         mask = flex.bool(flex.grid(reversed(panel.get_image_size())), True)

         # Add a border around the image
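The multi_dataset_handling.py hunk that follows moves "import iotbx.phil" out of the module body (it previously sat below the logger assignment) and up into the import block. Keeping all imports together, grouped stdlib / third-party / local, is what isort and flake8's E402 enforce. A minimal before/after sketch of the shape of the change, using the same module names as the hunk for illustration:

# before: an import buried below module-level code (flagged as E402)
import logging
logger = logging.getLogger("dials")
import iotbx.phil

# after: imports first, module-level assignments after
import logging

import iotbx.phil

logger = logging.getLogger("dials")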
""" - from __future__ import annotations import copy @@ -11,14 +10,12 @@ from orderedset import OrderedSet +import iotbx.phil from dxtbx.util import ersatz_uuid4 from dials.array_family import flex logger = logging.getLogger("dials") - -import iotbx.phil - phil_scope = iotbx.phil.parse( """ dataset_selection { diff --git a/src/dials/util/napari_rlv/viewer.py b/src/dials/util/napari_rlv/viewer.py index fe5c5dce8e..1e0ceb16fa 100644 --- a/src/dials/util/napari_rlv/viewer.py +++ b/src/dials/util/napari_rlv/viewer.py @@ -98,7 +98,6 @@ def __init__(self, parent, id, title, size, settings, napari_viewer, *args, **kw @magicgui(auto_call=True) def rlv_geometry(self, invert_rotation_axis: bool, crystal_frame: bool): - # Set values self.settings.reverse_phi = invert_rotation_axis self.settings.crystal_frame = crystal_frame @@ -394,7 +393,6 @@ def show_point_info(layer, event): def load_models(self, experiments, reflections): Render3d.load_models(self, experiments, reflections) if self.settings.beam_centre is not None: - pass if self.settings.marker_size is Auto: max_radius = max(self.reflections["rlp"].norms()) diff --git a/src/dials/util/nexus/nx_mx.py b/src/dials/util/nexus/nx_mx.py index c4b0ca1627..0637c92be5 100644 --- a/src/dials/util/nexus/nx_mx.py +++ b/src/dials/util/nexus/nx_mx.py @@ -248,9 +248,9 @@ def dump_detector(entry, detector, beam, imageset, scan): # Make up some fake stuff if scan is not None: - nx_detector[ - "timestamp" - ] = scan.get_epochs().as_numpy_array() # FIXME non-standard + nx_detector["timestamp"] = ( + scan.get_epochs().as_numpy_array() + ) # FIXME non-standard nx_detector["frame_time"] = scan.get_exposure_times().as_numpy_array() nx_detector["frame_time"].attrs["units"] = "s" # Optional so don't try @@ -278,7 +278,6 @@ def dump_detector(entry, detector, beam, imageset, scan): # Loop through all the panels for i, panel in enumerate(detector): - # Get some panel attributes pixel_size = panel.get_pixel_size() image_size = panel.get_image_size() @@ -733,7 +732,6 @@ def dump(entry, experiments, params): # Get the experiment for index, experiment in enumerate(experiments): - # Create the entry assert ("experiment_%d" % index) not in entry nxmx = entry.create_group("experiment_%d" % index) @@ -856,7 +854,6 @@ def load(entry, exp_index): index = [] rotations = [] for name in exp_index: - # Get the entry nxmx = entry.file[name] diff --git a/src/dials/util/pprint.py b/src/dials/util/pprint.py index c1751369eb..d8ff4034f2 100644 --- a/src/dials/util/pprint.py +++ b/src/dials/util/pprint.py @@ -52,7 +52,6 @@ def profile3d(p, vmin=None, vmax=None): if __name__ == "__main__": - from dials.array_family import flex a1 = flex.double( diff --git a/src/dials/util/pycbf_extra.py b/src/dials/util/pycbf_extra.py index 507e25146c..5f493c0d55 100644 --- a/src/dials/util/pycbf_extra.py +++ b/src/dials/util/pycbf_extra.py @@ -43,7 +43,6 @@ def get_image(cbf_handle, category="array_data", column="data", row=0, element=0 # Check the type of the element to ensure it's a binary # otherwise raise an exception if "bnry" in cbf_handle.get_typeofvalue(): - # Read the image data into an array image_string = cbf_handle.get_integerarray_as_string() image = np.fromstring(image_string, np.int32) diff --git a/src/dials/util/reindex.py b/src/dials/util/reindex.py index 8342cb7bb4..0b32fdbb56 100644 --- a/src/dials/util/reindex.py +++ b/src/dials/util/reindex.py @@ -18,7 +18,6 @@ def derive_change_of_basis_op(from_hkl, to_hkl): - # exclude those reflections that we couldn't index sel = (to_hkl != (0, 0, 0)) 
     sel = (to_hkl != (0, 0, 0)) & (from_hkl != (0, 0, 0))
     assert sel.count(True) >= 3  # need minimum of 3 equations ?
diff --git a/src/dials/util/resolution_analysis.py b/src/dials/util/resolution_analysis.py
index d24e39b193..7347609427 100644
--- a/src/dials/util/resolution_analysis.py
+++ b/src/dials/util/resolution_analysis.py
@@ -240,9 +240,11 @@ def _get_cc_half_critical_values(merging_stats, cc_half_method):
         ).reversed()
     elif merging_stats.overall.cc_one_half_critical_value is not None:
         critical = [
-            b.cc_one_half_critical_value
-            if b.cc_one_half_critical_value is not None
-            else 0.0
+            (
+                b.cc_one_half_critical_value
+                if b.cc_one_half_critical_value is not None
+                else 0.0
+            )
             for b in merging_stats.bins
         ]
         return flex.double(critical).reversed()
@@ -513,7 +515,6 @@ class Resolutionizer:
     """A class to calculate things from merging reflections."""

     def __init__(self, i_obs, params, batches=None, reference=None):
-
         self._params = params
         self._reference = reference

@@ -633,9 +634,9 @@ def resolution(self, metric, limit=None):
                 self._merging_statistics,
                 limit,
                 cc_half_method=self._params.cc_half_method,
-                model=tanh_fit
-                if self._params.cc_half_fit == "tanh"
-                else polynomial_fit,
+                model=(
+                    tanh_fit if self._params.cc_half_fit == "tanh" else polynomial_fit
+                ),
             )
         elif metric == metrics.CC_REF:
             return self._resolution_cc_ref(limit=self._params.cc_ref)
diff --git a/src/dials/util/show_version.py b/src/dials/util/show_version.py
index 5a2dc1830a..e6e5ae7676 100644
--- a/src/dials/util/show_version.py
+++ b/src/dials/util/show_version.py
@@ -6,7 +6,6 @@
 Echo the current DIALS version and build number.
 """
-
 from __future__ import annotations

 import datetime
diff --git a/src/dials/util/slice.py b/src/dials/util/slice.py
index 4e9687ab0f..24698e2ccc 100644
--- a/src/dials/util/slice.py
+++ b/src/dials/util/slice.py
@@ -57,7 +57,6 @@ def slice_reflections(reflections, image_ranges):
     to_keep = flex.size_t()

     for iexp, sr in enumerate(image_ranges):
-
         if sr is None:
             continue
         isel = (reflections["id"] == iexp).iselection()
diff --git a/src/dials/viewer/bitmap_from_array.py b/src/dials/viewer/bitmap_from_array.py
index b3a3b81037..744597a329 100644
--- a/src/dials/viewer/bitmap_from_array.py
+++ b/src/dials/viewer/bitmap_from_array.py
@@ -52,7 +52,6 @@ def __init__(

     def bmp_lst_scaled(self, scale=1.0):
         if self._ini_wx_bmp_lst is None:
-
             NewW = 350

             wx_image = wx.Image(NewW, NewW)
@@ -82,7 +81,6 @@ def bmp_lst_scaled(self, scale=1.0):
         return wx_bmp_lst

     def _wx_img_w_cpp(self, np_2d_tmp, show_nums, palette, np_2d_mask=None):
-
         xmax = np_2d_tmp.shape[1]
         ymax = np_2d_tmp.shape[0]

diff --git a/src/dials/viewer/flex_3d_array_viewer_tests.py b/src/dials/viewer/flex_3d_array_viewer_tests.py
index fa668812ad..7d21e13c25 100644
--- a/src/dials/viewer/flex_3d_array_viewer_tests.py
+++ b/src/dials/viewer/flex_3d_array_viewer_tests.py
@@ -11,7 +11,6 @@
 lst_flex_norm = []
 for size_xyz in range(8, 6, -1):
-
     size_x = size_xyz * 2
     data_xyz_flex = flex.double(flex.grid(size_xyz, size_xyz, size_x), 15)

diff --git a/src/dials/viewer/from_flex_to_wxbitmap.py b/src/dials/viewer/from_flex_to_wxbitmap.py
index 7295b304f7..d1228ac165 100644
--- a/src/dials/viewer/from_flex_to_wxbitmap.py
+++ b/src/dials/viewer/from_flex_to_wxbitmap.py
@@ -13,9 +13,7 @@ class wxbitmap_convert:
     """

     def __init__(self, data_in_n1, data_in_n2=None):
-
         if data_in_n1 is None and data_in_n2 is None:
-
             self.lst_3d_data = None
             self.lst_3d_mask = None
@@ -70,7 +68,6 @@ def __init__(self, data_in_n1, data_in_n2=None):
                 or img2_np.shape[1] == 0
                 or img2_np.shape[2] == 0
             ):
-
                 self.lst_3d_data = None
                 self.lst_3d_mask = None
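The resolution_analysis.py hunk above shows black's newer style for multi-line conditional expressions: instead of letting "x if cond else y" dangle across lines inside a list comprehension, the whole expression is wrapped in explicit parentheses so it reads as one unit. The same shape on toy data:

values = [0.92, None, 0.85]
critical = [
    (v if v is not None else 0.0)
    for v in values
]
assert critical == [0.92, 0.0, 0.85]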
diff --git a/src/dials/viewer/mask_bmp_2D.h b/src/dials/viewer/mask_bmp_2D.h
index 198851be70..afbbab5f08 100644
--- a/src/dials/viewer/mask_bmp_2D.h
+++ b/src/dials/viewer/mask_bmp_2D.h
@@ -12,7 +12,7 @@
 #ifndef DIALS_MASK_LOW_LEVEL_H
 #define DIALS_MASK_LOW_LEVEL_H
 #define PX_SCALE 85
-//#define DST_BTW_LIN 14
+// #define DST_BTW_LIN 14
 #define DST_BTW_LIN 43
 #include
 #include
diff --git a/src/dials/viewer/slice_viewer.py b/src/dials/viewer/slice_viewer.py
index 769cfecac3..d7e30abf2f 100644
--- a/src/dials/viewer/slice_viewer.py
+++ b/src/dials/viewer/slice_viewer.py
@@ -89,7 +89,6 @@ def in_lst(self, flex_lst_one, flex_lst_two=None):

 class show_reflections:
     def __init__(self, table, two_windows=False):
-
         # two_windows = True
         print("two_windows =", two_windows)

@@ -103,7 +102,6 @@ def __init__(self, table, two_windows=False):

 class show_tabl_2fr_wx_app(wx.App):
     def OnInit(self):
-
         self.ImgFrame = flex_3d_frame(None, "DIALS reflections viewer IMG")
         self.flex_panel = flex_arr_img_panel(self.ImgFrame)
         self.ImgFrame.frame_ini_img(self.flex_panel)
@@ -141,7 +139,6 @@ def OnInit(self):
         return True

     def in_tabl(self, table, two_windows):
-
         # if not two_windows:
         self.upper_panel.ini_n_intro(table)
         self.data_grid.ini_n_intro(table)
diff --git a/src/dials/viewer/viewer_low_level_util.py b/src/dials/viewer/viewer_low_level_util.py
index 3bddda2d47..99e0dc54aa 100644
--- a/src/dials/viewer/viewer_low_level_util.py
+++ b/src/dials/viewer/viewer_low_level_util.py
@@ -105,7 +105,6 @@ def SetValue(self, row, col, value):

 class MyGrid(gridlib.Grid):
     def __init__(self, parent_frame):
-
         self.parent_fr = parent_frame
         super().__init__(parent_frame)

@@ -395,7 +394,6 @@ def set_scroll_content(self):
         self.SetScrollRate(1, 1)

     def OnMouseMotion(self, event):
-
         self.Mouse_Pos_x, self.Mouse_Pos_y = event.GetPosition()

     def OnMouseWheel(self, event):
@@ -413,13 +411,11 @@ def OnMouseWheel(self, event):
         self.y_uni = float(View_start_y + self.Mouse_Pos_y) / float(v_size_y)

     def img_refresh(self, i_bmp_new):
-
         self.lst_2d_bmp = i_bmp_new
         self.set_scroll_content()

     def OnIdle(self, event):
         if self.scroll_rot != 0:
-
             self.SetScrollRate(1, 1)
             self.parent_panel.to_re_zoom(self.scroll_rot)

@@ -515,7 +511,6 @@ def OnButtUpdate(self):
         self.parent_panel.to_change_palette("hot descend")

     def OnItsCheckbox(self, event):
-
         if event.IsChecked():
             self.parent_panel.to_show_nums()
         else:
diff --git a/tests/algorithms/image/connected_components/test_connected_components.py b/tests/algorithms/image/connected_components/test_connected_components.py
index 5c52061731..d0decedae5 100644
--- a/tests/algorithms/image/connected_components/test_connected_components.py
+++ b/tests/algorithms/image/connected_components/test_connected_components.py
@@ -76,7 +76,6 @@ def test_labels_are_valid(self):
         for j in range(self.size[0]):
             for i in range(self.size[1]):
                 if self.mask_list[k][j, i]:
-
                     l1 = self.labels[vi]
                     if k > 0 and self.mask_list[k - 1][j, i]:
                         l2 = label_map[k - 1, j, i]
@@ -162,7 +161,6 @@ def test_labels_are_valid(self):
         for j in range(self.size[0]):
             for i in range(self.size[1]):
                 if self.mask_list[k][j, i]:
-
                     l1 = self.labels[vi]
                     if k > 0 and self.mask_list[k - 1][j, i]:
                         l2 = label_map[k - 1, j, i]
diff --git a/tests/algorithms/image/threshold/test_local.py b/tests/algorithms/image/threshold/test_local.py
index 53a45e8b71..0544e5e7f6 100644
--- a/tests/algorithms/image/threshold/test_local.py
+++ b/tests/algorithms/image/threshold/test_local.py
@@ -231,7 +231,6 @@ def test_dispersion_extended_threshold(self):
         "algorithm", [DispersionThreshold, DispersionExtendedThreshold]
     )
     def test_dispersion_algorithm_symmetry(self, algorithm):
-
         nsig_b = 3
         nsig_s = 3

@@ -266,7 +265,6 @@ def transpose_a_flex_bool(arr):
         "algorithm", [DispersionThresholdDebug, DispersionExtendedThresholdDebug]
     )
     def test_dispersion_debug_algorithm_symmetry(self, algorithm):
-
         nsig_b = 3
         nsig_s = 3

diff --git a/tests/algorithms/indexing/basis_vector_search/test_combinations.py b/tests/algorithms/indexing/basis_vector_search/test_combinations.py
index 9bf0fda3c5..6aafce33d1 100644
--- a/tests/algorithms/indexing/basis_vector_search/test_combinations.py
+++ b/tests/algorithms/indexing/basis_vector_search/test_combinations.py
@@ -36,7 +36,6 @@ def test_combinations(setup_rlp):
         target_symmetry_sg_only,
         target_symmetry_ref,
     ):
-
         crystal_models = combinations.candidate_orientation_matrices(
             basis_vectors, max_combinations=50
         )
diff --git a/tests/algorithms/indexing/test_assign_indices.py b/tests/algorithms/indexing/test_assign_indices.py
index 84a8436f4b..db15116b7f 100644
--- a/tests/algorithms/indexing/test_assign_indices.py
+++ b/tests/algorithms/indexing/test_assign_indices.py
@@ -176,7 +176,6 @@ def test_assign_indices(space_group_symbol, experiment, crystal_factory):

 class CompareGlobalLocal:
     def __init__(self, experiment, reflections, expected_miller_indices):
-
         index_reflections_global = AssignIndicesGlobal()
         index_reflections_local = AssignIndicesLocal()

diff --git a/tests/algorithms/indexing/test_non_primitive_basis.py b/tests/algorithms/indexing/test_non_primitive_basis.py
index 6e124cebe3..c8276bb950 100644
--- a/tests/algorithms/indexing/test_non_primitive_basis.py
+++ b/tests/algorithms/indexing/test_non_primitive_basis.py
@@ -27,7 +27,6 @@ def test_detect(space_group_symbol):

 @pytest.mark.parametrize("space_group_symbol", bravais_types.acentric)
 def test_correct(space_group_symbol):
-
     sgi = sgtbx.space_group_info(space_group_symbol)
     cs = sgi.any_compatible_crystal_symmetry(volume=1000)
     ms = cs.build_miller_set(anomalous_flag=True, d_min=1).expand_to_p1()
diff --git a/tests/algorithms/indexing/test_symmetry.py b/tests/algorithms/indexing/test_symmetry.py
index d6ae6c2a4f..d39e141a5d 100644
--- a/tests/algorithms/indexing/test_symmetry.py
+++ b/tests/algorithms/indexing/test_symmetry.py
@@ -12,7 +12,6 @@

 @pytest.mark.parametrize("space_group_symbol", bravais_types.acentric)
 def test_SymmetryHandler(space_group_symbol):
-
     sgi = sgtbx.space_group_info(symbol=space_group_symbol)
     sg = sgi.group()
     cs = sgi.any_compatible_crystal_symmetry(volume=10000)
@@ -180,7 +179,6 @@ def test_symmetry_handler_c2_i2(crystal_symmetry):

 @pytest.mark.parametrize("crystal_symmetry", crystal_symmetries)
 def test_find_matching_symmetry(crystal_symmetry):
-
     cs = crystal_symmetry
     cs.show_summary()

@@ -193,7 +191,6 @@ def test_find_matching_symmetry(crystal_symmetry):
         (cs.unit_cell(), cs.space_group()),
         (None, cs.space_group()),
     ][:]:
-
         best_subgroup = symmetry.find_matching_symmetry(
             uc_inp, target_space_group=ref_sg
         )
diff --git a/tests/algorithms/integration/profile/test_profile_fitting.py b/tests/algorithms/integration/profile/test_profile_fitting.py
index 87688fd82f..5855df4d7e 100644
--- a/tests/algorithms/integration/profile/test_profile_fitting.py
+++ b/tests/algorithms/integration/profile/test_profile_fitting.py
@@ -332,7 +332,6 @@ def test_deconvolve_3_with_no_background():
     Ical = [[], [], []]

     for it in range(1):
-
         # Copy profile
         c = flex.double(flex.grid(40, 9, 9))
         for i in range(p.all()[0]):
@@ -365,7 +364,6 @@ def test_deconvolve_3_with_no_background():

 def test_deconvolve_3_with_flat_background():
-
     np.random.seed(0)

     I0 = [1000, 2000, 3000]
@@ -375,7 +373,6 @@ def test_deconvolve_3_with_flat_background():
     Ical = [[], [], []]

     for it in range(1):
-
         # Copy profile
         c = flex.double(flex.grid(40, 9, 9))
         for i in range(p.all()[0]):
@@ -419,7 +416,6 @@ def test_deconvolve_7_with_no_background():
     Ical = [[], [], [], [], [], [], []]

     for it in range(1):
-
         # Copy profile
         c = flex.double(flex.grid(40, 40, 40))
         for i in range(p.all()[0]):
@@ -469,7 +465,6 @@ def test_deconvolve_7_with_flat_background():
     Ical = [[], [], [], [], [], [], []]

     for it in range(1):
-
         # Copy profile
         c = flex.double(flex.grid(40, 40, 40))
         for i in range(p.all()[0]):
diff --git a/tests/algorithms/integration/test_filter_overlaps.py b/tests/algorithms/integration/test_filter_overlaps.py
index af90a34b4d..b4cd6a0c25 100644
--- a/tests/algorithms/integration/test_filter_overlaps.py
+++ b/tests/algorithms/integration/test_filter_overlaps.py
@@ -81,9 +81,10 @@ def is_overlap(code):
             except IndexError:
                 continue
             for i, this_code in enumerate(mask_array):
-                assert not is_overlap(
-                    this_code
-                ), "Overlapping foreground and background found at (%d, %d)" % (
-                    i % shoebox.xsize(),
-                    i // shoebox.xsize(),
+                assert not is_overlap(this_code), (
+                    "Overlapping foreground and background found at (%d, %d)"
+                    % (
+                        i % shoebox.xsize(),
+                        i // shoebox.xsize(),
+                    )
                 )
diff --git a/tests/algorithms/integration/test_parallel_integrator.py b/tests/algorithms/integration/test_parallel_integrator.py
index e065860bcc..b68fc4a6d7 100644
--- a/tests/algorithms/integration/test_parallel_integrator.py
+++ b/tests/algorithms/integration/test_parallel_integrator.py
@@ -96,7 +96,6 @@ def create(data, detector_space=False, deconvolution=False):
         data_spec = GaussianRSMultiCrystalReferenceProfileData()
         for e in experiments:
-
             sampler = CircleSampler(
                 e.detector[0].get_image_size(),
                 e.scan.get_array_range(),
@@ -176,7 +175,6 @@ def test_gaussianrs_detector_space_intensity_calculator(data):

 def test_gaussianrs_detector_space_with_deconvolution_intensity_calculator(data):
-
     algorithm = IntensityCalculatorFactory.create(
         data, detector_space=True, deconvolution=True
     )
@@ -312,7 +310,6 @@ def test_gaussianrs_profile_data_pickling(data):
     data_spec = GaussianRSMultiCrystalReferenceProfileData()
     for e in experiments:
-
         sampler = CircleSampler(
             e.detector[0].get_image_size(), e.scan.get_array_range(), num_scan_points
         )
diff --git a/tests/algorithms/polygon/clip/test_clipping.py b/tests/algorithms/polygon/clip/test_clipping.py
index 8d1eae2a64..314db30a22 100644
--- a/tests/algorithms/polygon/clip/test_clipping.py
+++ b/tests/algorithms/polygon/clip/test_clipping.py
@@ -62,7 +62,6 @@ def generate_polygon(nvert, box):

 def test_SimpleWithConvex_intersecting():
     for i in range(10000):
-
         # Generate intersecting polygons
         subject, target = generate_intersecting()

@@ -78,7 +77,6 @@ def test_SimpleWithConvex_non_intersecting():
     for i in range(10000):
-
         # Generate nonintersecting polygons
         subject, target = generate_non_intersecting()

@@ -93,7 +91,6 @@ def test_SimpleWithRect_intersecting():
     for i in range(10000):
-
         # Generate intersecting polygons
         subject, target = generate_intersecting(target_size=2)
         rect = ((0, 0), (10, 10))
@@ -108,7 +105,6 @@ def test_TriangleWithTriangle_intersecting():
     for i in range(10000):
-
         # Generate intersecting polygons
         subject, target = generate_intersecting(3, 3)

@@ -122,7 +118,6 @@ def test_TriangleWithTriangle_non_intersecting():
     for i in range(10000):
-
         # Generate nonintersecting polygons
         subject, target = generate_non_intersecting(3, 3)

@@ -135,7 +130,6 @@ def test_TriangleWithConvexQuad_intersecting():
     for i in range(10000):
-
         # Generate intersecting polygons
         subject, target = generate_intersecting(3, 4)

@@ -149,7 +143,6 @@ def test_TriangleWithConvexQuad_non_intersecting():
     for i in range(10000):
-
         # Generate nonintersecting polygons
         subject, target = generate_non_intersecting(3, 4)

@@ -162,7 +155,6 @@ def test_QuadWithTriangle_intersecting():
     for i in range(10000):
-
         # Generate intersecting polygons
         subject, target = generate_intersecting(4, 3)

@@ -176,7 +168,6 @@ def test_QuadWithTriangle_non_intersecting():
     for i in range(10000):
-
         # Generate nonintersecting polygons
         subject, target = generate_non_intersecting(4, 3)

@@ -189,7 +180,6 @@ def test_QuadWithConvexQuad_intersecting():
     for i in range(10000):
-
         # Generate intersecting polygons
         subject, target = generate_intersecting(4, 4)

@@ -203,7 +193,6 @@ def test_QuadWithConvexQuad_non_intersecting():
     for i in range(10000):
-
         # Generate nonintersecting polygons
         subject, target = generate_non_intersecting(4, 4)

diff --git a/tests/algorithms/profile_model/ellipsoid/test_derivatives.py b/tests/algorithms/profile_model/ellipsoid/test_derivatives.py
index 47e51bc42a..9e721a2f3d 100644
--- a/tests/algorithms/profile_model/ellipsoid/test_derivatives.py
+++ b/tests/algorithms/profile_model/ellipsoid/test_derivatives.py
@@ -33,7 +33,6 @@ def second_derivative(func, x, y=None, h=None):

 def generate_data():
-
     from random import seed

     seed(0)
@@ -78,7 +77,6 @@ def compute_s2(b1, b2, b3):

 @pytest.mark.parametrize("sigma,s0,b1,b2,b3", generate_testdata())
 def test_derivative_of_epsilon(sigma, s0, b1, b2, b3):
-
     ds2 = ds2_db(b1, b2, b3)

     def compute_epsilon(b1, b2, b3):
@@ -117,7 +115,6 @@ def compute_dep(s2, ds2):

 @pytest.mark.parametrize("sigma,s0,b1,b2,b3", generate_testdata())
 def test_derivative_of_mubar(sigma, s0, b1, b2, b3):
-
     ds2 = ds2_db(b1, b2, b3)

     def compute_mubar(b1, b2, b3):
@@ -164,7 +161,6 @@ def compute_dmubar(s2, ds2):

 @pytest.mark.parametrize("sigma,s0,b1,b2,b3", generate_testdata())
 def test_derivative_of_e1(sigma, s0, b1, b2, b3):
-
     ds2 = ds2_db(b1, b2, b3)

     def compute_e1(b1, b2, b3):
@@ -207,7 +203,6 @@ def compute_de1(s2, ds2):

 @pytest.mark.parametrize("sigma,s0,b1,b2,b3", generate_testdata())
 def test_derivative_of_e2(sigma, s0, b1, b2, b3):
-
     ds2 = ds2_db(b1, b2, b3)

     def compute_e2(b1, b2, b3):
@@ -252,7 +247,6 @@ def compute_de2(s2, ds2):

 @pytest.mark.parametrize("sigma,s0,b1,b2,b3", generate_testdata())
 def test_derivative_of_e3(sigma, s0, b1, b2, b3):
-
     ds2 = ds2_db(b1, b2, b3)

     def compute_e3(b1, b2, b3):
@@ -295,7 +289,6 @@ def compute_de3(s2, ds2):

 @pytest.mark.parametrize("sigma,s0,b1,b2,b3", generate_testdata())
 def test_derivative_of_s1(sigma, s0, b1, b2, b3):
-
     ds2 = ds2_db(b1, b2, b3)

     def compute_s1(b1, b2, b3):
@@ -397,7 +390,6 @@ def compute_ds1(s2, ds2):

 @pytest.mark.parametrize("sigma,s0,b1,b2,b3", generate_testdata())
 def test_derivative_of_f(sigma, s0, b1, b2, b3):
-
     ds2 = ds2_db(b1, b2, b3)
     def compute_f(b1, b2, b3):
diff --git a/tests/algorithms/profile_model/ellipsoid/test_model.py b/tests/algorithms/profile_model/ellipsoid/test_model.py
index 8857756b6f..c8a8ed5f14 100644
--- a/tests/algorithms/profile_model/ellipsoid/test_model.py
+++ b/tests/algorithms/profile_model/ellipsoid/test_model.py
@@ -27,7 +27,6 @@

 @pytest.fixture
 def simple1_profile_model():
-
     params = flex.double([4e-4])

     model = Simple1ProfileModel(params)
@@ -43,7 +42,6 @@ def simple6_profile_model():

 @pytest.fixture
 def simple1_model_state(test_experiment):
-
     state = ModelState(test_experiment, Simple1MosaicityParameterisation())
     return state

@@ -51,7 +49,6 @@ def simple1_model_state(test_experiment):

 @pytest.fixture
 def simple6_model_state(test_experiment):
-
     state = ModelState(test_experiment, Simple6MosaicityParameterisation())
     return state

@@ -72,7 +69,6 @@ def check_simple1_sigma(sigma, params):

 def check_simple6_sigma(sigma, params):
-
     b1, b2, b3, b4, b5, b6 = params
     L = np.array([[b1, 0, 0], [b2, b3, 0], [b4, b5, b6]])
     M = np.matmul(L, L.T)
@@ -98,7 +94,6 @@ def test_Simple1ProfileModel_predict_reflections(
     simple1_profile_model,
     test_experiment,
 ):
-
     # Create the index generator
     index_generator = IndexGenerator(
         test_experiment.crystal.get_unit_cell(),
@@ -124,7 +119,6 @@ def test_Simple1ProfileModel_predict_reflections(

 def test_Simple1ProfileModel_compute_bbox(simple1_profile_model, test_experiment):
-
     experiments = [test_experiment]

     # Create the index generator
@@ -297,7 +291,6 @@ def test_Simple6ProfileModel_parameterisation(simple6_profile_model):

 def test_compute_change_of_basis_operation():
-
     r = np.array((0, 0.5, -(1 - sqrt(0.75))))
     s0 = np.array((0, 0, 1))
     s2 = s0 + r
diff --git a/tests/algorithms/profile_model/ellipsoid/test_parameterisation.py b/tests/algorithms/profile_model/ellipsoid/test_parameterisation.py
index bb568d09e3..c25df0d66d 100644
--- a/tests/algorithms/profile_model/ellipsoid/test_parameterisation.py
+++ b/tests/algorithms/profile_model/ellipsoid/test_parameterisation.py
@@ -30,7 +30,6 @@ def first_derivative(func, x, h):

 def test_Simple1MosaicityParameterisation():
-
     p = Simple1MosaicityParameterisation(params=np.array([1e-3]))

     assert p.is_angular() is False
@@ -47,7 +46,6 @@ def test_Simple1MosaicityParameterisation():

 def test_Simple6MosaicityParameterisation():
-
     params = np.array([1e-3, 2e-3, 3e-3, 4e-3, 5e-3, 6e-3])
     S6 = Simple6MosaicityParameterisation(params=params)

@@ -109,7 +107,6 @@ def f(x):

 def test_WavelengthSpreadParameterisation():
-
     params = np.array([1e-3])
     p = WavelengthSpreadParameterisation(params=params)

@@ -131,7 +128,6 @@ def check_model_state_with_fixed(
     fix_unit_cell=False,
     fix_orientation=False,
 ):
-
     state = ModelState(
         experiment,
         mosaicity_parameterisation,
@@ -201,7 +197,6 @@ def check_model_state_with_fixed(

 def test_ModelState(test_experiment):
-
     experiments = [test_experiment]

     S1 = Simple1MosaicityParameterisation(np.array([0.1]))
@@ -244,7 +239,6 @@ def check_reflection_model_state_with_fixed(
     fix_unit_cell=False,
     fix_orientation=False,
 ):
-
     state = ModelState(
         experiment,
         mosaicity_parameterisation,
@@ -381,7 +375,6 @@ def f(x):

 def test_ReflectionModelState(test_experiment):
-
     experiments = [test_experiment]

     S1 = Simple1MosaicityParameterisation(np.array([0.01]))
@@ -414,7 +407,6 @@ def test_ReflectionModelState(test_experiment):

 def generate_data(experiments, reflections):
-
     from random import seed

     seed(0)
@@ -459,7 +451,6 @@ def generate_data(experiments, reflections):

 @pytest.fixture
 def testdata(test_experiment):
-
     TestData = namedtuple(
         "TestData", ["experiment", "models", "s0", "h", "ctot", "mobs", "Sobs"]
     )
diff --git a/tests/algorithms/profile_model/ellipsoid/test_refiner.py b/tests/algorithms/profile_model/ellipsoid/test_refiner.py
index 941d0fb3c9..580764ff92 100644
--- a/tests/algorithms/profile_model/ellipsoid/test_refiner.py
+++ b/tests/algorithms/profile_model/ellipsoid/test_refiner.py
@@ -46,7 +46,6 @@ def first_derivative(func, x, h):

 def generate_data(experiments, reflections):
-
     from random import seed

     seed(0)
@@ -98,7 +97,6 @@ def generate_data(experiments, reflections):

 @pytest.fixture
 def testdata(test_experiment):
-
     TestData = namedtuple(
         "TestData",
         [
@@ -134,7 +132,6 @@ def testdata(test_experiment):

 @pytest.fixture
 def refinerdata_testdata(testdata):
-
     experiment = testdata.experiment
     reflections = testdata.reflections

@@ -169,9 +166,7 @@ def refinerdata_testdata(testdata):
     for j in range(11):
         for i in range(11):
             shoebox_data[0, j, i] = (
-                100
-                * exp(-0.5 * (j - 5) ** 2 / 1**2)
-                * exp(-0.5 * (i - 5) ** 2 / 1**2)
+                100 * exp(-0.5 * (j - 5) ** 2 / 1**2) * exp(-0.5 * (i - 5) ** 2 / 1**2)
             )
             shoebox_mask[0, j, i] = 5
     for sbox in reflections["shoebox"]:
@@ -330,7 +325,6 @@ def f(x):

 def test_rotate_vec3_double():
-
     vectors = np.array([[1, 1, 1]], dtype=np.float64).reshape(3, 1) / norm(
         np.array([1, 1, 1], dtype=np.float64)
     )
@@ -344,7 +338,6 @@ def test_rotate_vec3_double():

 def test_rotate_mat3_double():
-
     A = np.eye(3, dtype=np.float64)
     v1 = np.array([0, 0, 1.0], dtype=np.float64).reshape(3, 1)
     v2 = np.array([1, 1, 1.0], dtype=np.float64).reshape(3, 1)
@@ -471,7 +464,6 @@ def f(x):

 def test_Refiner(testdata, refinerdata_testdata):
-
     experiment = testdata.experiment
     data = refinerdata_testdata

@@ -486,7 +478,6 @@ def check(
         fix_unit_cell=True,
         fix_wavelength_spread=True,
     ):
-
         state = ModelState(
             experiment,
             parameterisation,
@@ -515,7 +506,6 @@ def check(

 def test_RefinerData(testdata):
-
     experiment = testdata.experiment
     reflections = testdata.reflections

@@ -550,9 +540,7 @@ def test_RefinerData(testdata):
     for j in range(11):
         for i in range(11):
             shoebox_data[0, j, i] = (
-                100
-                * exp(-0.5 * (j - 5) ** 2 / 1**2)
-                * exp(-0.5 * (i - 5) ** 2 / 1**2)
+                100 * exp(-0.5 * (j - 5) ** 2 / 1**2) * exp(-0.5 * (i - 5) ** 2 / 1**2)
             )
             shoebox_mask[0, j, i] = 5
     for sbox in reflections["shoebox"]:
diff --git a/tests/algorithms/refinement/setup_geometry.py b/tests/algorithms/refinement/setup_geometry.py
index 1399bdc10d..3d9845978e 100644
--- a/tests/algorithms/refinement/setup_geometry.py
+++ b/tests/algorithms/refinement/setup_geometry.py
@@ -1,6 +1,5 @@
 """Setup experimental geometry for refinement test cases"""
-
 from __future__ import annotations

 import random
@@ -25,7 +24,6 @@ class Extract:
     def __init__(
         self, master_phil, local_overrides="", cmdline_args=None, verbose=False
     ):
-
         self._verbose = verbose

         arg_interpreter = command_line.argument_interpreter(master_phil=master_phil)
@@ -56,7 +54,6 @@ def __init__(
         self.phil = master_phil.format(python_object=temp)

     def set_seed(self):
-
         if self._params.random_seed is not None:
             random.seed(self._params.random_seed)
             # set the flex random seed too
@@ -68,11 +65,9 @@ def set_seed(self):
                 print(msg % self._params.random_seed)

     def build_goniometer(self):
-
         self.goniometer = GoniometerFactory.known_axis(self._params.goniometer.axis)

     def build_beam(self):
-
         if self._params.beam.wavelength.random:
             wavelength = random.uniform(*self._params.beam.wavelength.range)
         else:
@@ -85,7 +80,6 @@ def build_beam(self):
         ]

         if self._params.beam.direction.method == "inclination":
-
             if self._params.beam.direction.inclination.random:
                 inclination = random.gauss(
                     0.0, self._params.beam.direction.inclination.angle
                 )
@@ -98,24 +92,20 @@ def build_beam(self):
             )

         elif self._params.beam.direction.method == "close_to":
-
             temp = self._params.beam.direction.close_to.direction
             beam_dir = random_vector_close_to(
                 temp, sd=self._params.beam.direction.close_to.sd
             )

         elif self._params.beam.direction.method == "exactly":
-
             beam_dir = matrix.col(self._params.beam.direction.exactly)

         self.beam = BeamFactory.make_beam(unit_s0=beam_dir, wavelength=wavelength)

     def build_detector(self):
-
         assert self._params.detector.directions.method in ["close_to", "exactly"]

         if self._params.detector.directions.method == "close_to":
-
             temp = self._params.detector.directions.close_to.dir1
             dir1 = random_vector_close_to(
                 temp, sd=self._params.detector.directions.close_to.sd
             )
@@ -127,7 +117,6 @@ def build_detector(self):
             )

         elif self._params.detector.directions.method == "exactly":
-
             temp = self._params.detector.directions.exactly.dir1
             dir1 = matrix.col(temp)

@@ -138,14 +127,12 @@ def build_detector(self):
         assert self._params.detector.centre.method in ["close_to", "exactly"]

         if self._params.detector.centre.method == "close_to":
-
             centre = random_vector_close_to(
                 self._params.detector.centre.close_to.value,
                 sd=self._params.detector.centre.close_to.sd,
             )

         elif self._params.detector.centre.method == "exactly":
-
             temp = self._params.detector.centre.exactly.value
             centre = matrix.col(temp)

@@ -168,7 +155,6 @@ def build_detector(self):

     @staticmethod
     def _build_cell_vec(vec):
-
         if vec.length.random:
             length = random.uniform(*vec.length.range)
         else:
@@ -177,19 +163,16 @@ def _build_cell_vec(vec):
         assert vec.direction.method in ["close_to", "exactly"]

         if vec.direction.method == "close_to":
-
             x = random_vector_close_to(
                 vec.direction.close_to.direction, sd=vec.direction.close_to.sd
             )

         elif vec.direction.method == "exactly":
-
             x = matrix.col(vec.direction.exactly.direction)

         return length * x

     def build_crystal(self):
-
         vecs = [
             self._build_cell_vec(axis)
             for axis in (
diff --git a/tests/algorithms/refinement/setup_minimiser.py b/tests/algorithms/refinement/setup_minimiser.py
index e2b2fb2fab..e8d91b5b4b 100644
--- a/tests/algorithms/refinement/setup_minimiser.py
+++ b/tests/algorithms/refinement/setup_minimiser.py
@@ -1,6 +1,5 @@
 """Setup experimental geometry for refinement test cases"""
-
 from __future__ import annotations

 from libtbx.phil import command_line, parse
@@ -24,7 +23,6 @@ def __init__(
         cmdline_args=None,
         verbose=True,
     ):
-
         self._target = target
         self._prediction_parameterisation = prediction_parameterisation
         self._verbose = verbose
@@ -44,7 +42,6 @@ def __init__(
         self.refiner = self.build_minimiser()

     def build_minimiser(self):
-
         assert self._params.engine in ["SimpleLBFGS", "LBFGScurvs", "GaussNewton"]

         if self._params.engine == "SimpleLBFGS":
@@ -64,7 +61,6 @@ def build_minimiser(self):
             return refiner

         if self._params.engine == "GaussNewton":
-
             refiner = GaussNewtonIterations(
                 target=self._target,
                 prediction_parameterisation=self._prediction_parameterisation,
diff --git a/tests/algorithms/refinement/sim_images.py b/tests/algorithms/refinement/sim_images.py
index c958854782..9328b4e57b 100644
--- a/tests/algorithms/refinement/sim_images.py
+++ b/tests/algorithms/refinement/sim_images.py
@@ -1,7 +1,6 @@
 """Simulate a rotation dataset with a smoothly-varying beam position for
 refinement testing.
 Script based on tst_nanoBragg_basic.py"""
-
 from __future__ import annotations

 import math
@@ -31,7 +30,6 @@ class Simulation:
     def __init__(self, override_fdp=None):
-
         # Set up detector
         distance = 100
         pixel_size = 0.1
diff --git a/tests/algorithms/refinement/test_angle_derivatives_wrt_vector_elts.py b/tests/algorithms/refinement/test_angle_derivatives_wrt_vector_elts.py
index 61ebf138db..d2833545bf 100644
--- a/tests/algorithms/refinement/test_angle_derivatives_wrt_vector_elts.py
+++ b/tests/algorithms/refinement/test_angle_derivatives_wrt_vector_elts.py
@@ -1,7 +1,6 @@
 """Test analytical expression for the partial derivatives of an angle
 between two vectors with respect to each element of the vectors"""
-
 from __future__ import annotations

 import math
diff --git a/tests/algorithms/refinement/test_beam_parameters.py b/tests/algorithms/refinement/test_beam_parameters.py
index 083c916927..abe16e19ea 100644
--- a/tests/algorithms/refinement/test_beam_parameters.py
+++ b/tests/algorithms/refinement/test_beam_parameters.py
@@ -34,7 +34,6 @@ def test_beam_parameters():
     # random initial orientations and wavelengths with a random parameter shifts
     attempts = 1000
     for i in range(attempts):
-
         # make a random beam vector and parameterise it
         sample_to_source = matrix.col.random(3, 0.5, 1.5).normalize()
         beam = bf.make_beam(sample_to_source, wavelength=random.uniform(0.8, 1.5))
diff --git a/tests/algorithms/refinement/test_centroid_outlier.py b/tests/algorithms/refinement/test_centroid_outlier.py
index c4343f442f..bb1149b496 100644
--- a/tests/algorithms/refinement/test_centroid_outlier.py
+++ b/tests/algorithms/refinement/test_centroid_outlier.py
@@ -19,7 +19,6 @@
     ],
 )
 def test_centroid_outlier(dials_data, method, colnames, expected_nout):
-
     flex.set_random_seed(42)
     data_dir = dials_data("refinement_test_data", pathlib=True)
     residuals = flex.reflection_table.from_file(
diff --git a/tests/algorithms/refinement/test_crystal_parameters.py b/tests/algorithms/refinement/test_crystal_parameters.py
index 2c33999d8a..fe570d759f 100644
--- a/tests/algorithms/refinement/test_crystal_parameters.py
+++ b/tests/algorithms/refinement/test_crystal_parameters.py
@@ -55,7 +55,6 @@ def random_direction_close_to(vector):
     # random initial orientations with a random parameter shift at each
     attempts = 100
     for i in range(attempts):
-
         # make a random P1 crystal and parameterise it
         a = random.uniform(10, 50) * random_direction_close_to(matrix.col((1, 0, 0)))
         b = random.uniform(10, 50) * random_direction_close_to(matrix.col((0, 1, 0)))
diff --git a/tests/algorithms/refinement/test_detector_parameters.py b/tests/algorithms/refinement/test_detector_parameters.py
index 4dd3f3f88d..e9f762f29f 100644
--- a/tests/algorithms/refinement/test_detector_parameters.py
+++ b/tests/algorithms/refinement/test_detector_parameters.py
@@ -74,7 +74,6 @@ def make_multi_panel(single_panel_detector):

     # apply small random shifts & rotations to each panel
     for p in multi_panel_detector:
-
         # perturb origin vector
         o_multiplier = random.gauss(1.0, 0.01)
         new_origin = random_vector_close_to(p.get_origin(), sd=0.1)
@@ -217,7 +216,6 @@ def test():
     # random initial orientations with a random parameter shift at each
     attempts = 100
     for i in range(attempts):
-
         # create random initial position
         det = Detector(random_panel())
         dp = DetectorParameterisationSinglePanel(det)
@@ -308,7 +306,6 @@ def test():

     attempts = 5
     for i in range(attempts):
-
         multi_panel_detector = make_multi_panel(det)

         # parameterise this detector
@@ -327,7 +324,6 @@
         # analytical gradients
         for j in range(9):
-
             an_ds_dp = dp.get_ds_dp(multi_state_elt=j)
             fd_ds_dp = get_fd_gradients(dp, [1.0e-7] * dp.num_free(), multi_state_elt=j)
diff --git a/tests/algorithms/refinement/test_finite_diffs.py b/tests/algorithms/refinement/test_finite_diffs.py
index f47ff26441..6199372f3e 100644
--- a/tests/algorithms/refinement/test_finite_diffs.py
+++ b/tests/algorithms/refinement/test_finite_diffs.py
@@ -67,6 +67,7 @@
 """Test analytical calculation of gradients of the target function versus
 finite difference calculations"""

+
 # function for calculating finite difference gradients of the target function
 def get_fd_gradients(target, pred_param, deltas):
     """Calculate centered finite difference gradients for each of the
@@ -114,7 +115,6 @@ def get_fd_gradients(target, pred_param, deltas):

 def test(args=[]):
-
     # Local functions
     def random_direction_close_to(vector, sd=0.5):
         return vector.rotate_around_origin(
diff --git a/tests/algorithms/refinement/test_multi_experiment_refinement.py b/tests/algorithms/refinement/test_multi_experiment_refinement.py
index c4823948ca..d118f6be78 100644
--- a/tests/algorithms/refinement/test_multi_experiment_refinement.py
+++ b/tests/algorithms/refinement/test_multi_experiment_refinement.py
@@ -37,7 +37,6 @@

 def test(args=[]):
-
     #############################
     # Setup experimental models #
     #############################
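The test_finite_diffs.py section above checks analytical target-function gradients against centred finite differences (get_fd_gradients). Stripped of the DIALS machinery, the recipe is: shift one parameter by plus and minus half a step, difference the function values, divide by the step, and compare with the analytical derivative. A self-contained sketch of that check:

def fd_gradient(f, x, delta=1.0e-7):
    # Centred finite-difference estimate of df/dx at x
    return (f(x + delta / 2) - f(x - delta / 2)) / delta

# the analytical derivative of f(x) = x**3 at x = 2.0 is 3 * 2.0**2 = 12.0;
# the finite-difference estimate should agree to well within 1e-5
assert abs(fd_gradient(lambda x: x**3, 2.0) - 12.0) < 1e-5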
""" - from __future__ import annotations from collections import namedtuple @@ -89,7 +88,6 @@ def make_panel_in_array(array_elt, reference_panel): @pytest.fixture(scope="session") def init_test(): - models = setup_geometry.Extract(master_phil) single_panel_detector = models.detector @@ -216,7 +214,6 @@ def init_test(): def test(init_test): - single_panel_detector = init_test.experiments_single_panel.detectors()[0] multi_panel_detector = init_test.experiments_multi_panel.detectors()[0] beam = init_test.experiments_single_panel.beams()[0] @@ -362,7 +359,6 @@ def test(init_test): def test_equivalence_of_python_and_cpp_multipanel_algorithms(init_test): - multi_panel_detector = init_test.experiments_multi_panel.detectors()[0] beam = init_test.experiments_single_panel.beams()[0] diff --git a/tests/algorithms/refinement/test_orientation_refinement.py b/tests/algorithms/refinement/test_orientation_refinement.py index 361c553205..022e7aca21 100644 --- a/tests/algorithms/refinement/test_orientation_refinement.py +++ b/tests/algorithms/refinement/test_orientation_refinement.py @@ -9,7 +9,6 @@ "random_seed=3; engine=LBFGScurvs" """ - from __future__ import annotations import sys @@ -83,7 +82,6 @@ def test(args=[]): - ############################# # Setup experimental models # ############################# diff --git a/tests/algorithms/refinement/test_parameter_auto_reduction.py b/tests/algorithms/refinement/test_parameter_auto_reduction.py index eba3291698..79651f2b2d 100644 --- a/tests/algorithms/refinement/test_parameter_auto_reduction.py +++ b/tests/algorithms/refinement/test_parameter_auto_reduction.py @@ -54,7 +54,6 @@ def tc(): def test_check_and_fail(tc): - # There are 823 reflections assert len(tc.refman.get_matches()) == 823 @@ -76,7 +75,6 @@ def test_check_and_fail(tc): def test_check_and_fix(tc): - n_det = tc.det_param.num_free() n_beam = tc.s0_param.num_free() n_xlo = tc.xlo_param.num_free() @@ -117,7 +115,6 @@ def test_check_and_fix(tc): def test_check_and_remove(): - test = _Test() # Override the single panel model and parameterisation. 
     # Override the single panel model and parameterisation. This test function
@@ -199,7 +196,6 @@ def test_check_and_remove():

 def test_ignore(tc):
-
     n_det = tc.det_param.num_free()
     n_beam = tc.s0_param.num_free()
     n_xlo = tc.xlo_param.num_free()
diff --git a/tests/algorithms/refinement/test_prediction_parameters.py b/tests/algorithms/refinement/test_prediction_parameters.py
index e319c7efcc..d397e85688 100644
--- a/tests/algorithms/refinement/test_prediction_parameters.py
+++ b/tests/algorithms/refinement/test_prediction_parameters.py
@@ -47,7 +47,6 @@

 def test():
-
     overrides = """geometry.parameters.crystal.a.length.range = 10 50
 geometry.parameters.crystal.b.length.range = 10 50
 geometry.parameters.crystal.c.length.range = 10 50"""
diff --git a/tests/algorithms/refinement/test_ref_passage_categorisation.py b/tests/algorithms/refinement/test_ref_passage_categorisation.py
index 0adf2b0b23..a85d201347 100644
--- a/tests/algorithms/refinement/test_ref_passage_categorisation.py
+++ b/tests/algorithms/refinement/test_ref_passage_categorisation.py
@@ -1,7 +1,6 @@
 """Trivial check for whether classification of reflections as exiting or
 entering the Ewald sphere is done the right way round"""
-
 from __future__ import annotations

 import math
diff --git a/tests/algorithms/refinement/test_refine_multi_wedges.py b/tests/algorithms/refinement/test_refine_multi_wedges.py
index ae11a47b22..7b86347e1e 100644
--- a/tests/algorithms/refinement/test_refine_multi_wedges.py
+++ b/tests/algorithms/refinement/test_refine_multi_wedges.py
@@ -2,7 +2,6 @@
 Test refinement of multiple narrow sequences.
 """
-
 from __future__ import annotations

 import shutil
diff --git a/tests/algorithms/refinement/test_refinement_regression.py b/tests/algorithms/refinement/test_refinement_regression.py
index 56c9e6e350..d2c21a2b5a 100644
--- a/tests/algorithms/refinement/test_refinement_regression.py
+++ b/tests/algorithms/refinement/test_refinement_regression.py
@@ -3,7 +3,6 @@
 parameters using generated reflection positions from ideal geometry.
 """
-
 from __future__ import annotations

diff --git a/tests/algorithms/refinement/test_refiner_config.py b/tests/algorithms/refinement/test_refiner_config.py
index bda408bff7..be595765e8 100644
--- a/tests/algorithms/refinement/test_refiner_config.py
+++ b/tests/algorithms/refinement/test_refiner_config.py
@@ -1,6 +1,5 @@
 """Test Refiners can be constructed with various configurations"""
-
 from __future__ import annotations

 from copy import deepcopy
@@ -21,7 +20,6 @@
     ["automatic", "single", "multiple", "hierarchical"],
 )
 def test_multi_panel_parameterisations(dials_data, detector_parameterisation_choice):
-
     data_dir = dials_data("iterative_cspad_refinement", pathlib=True)
     exp_file = data_dir / "cspad_refined_experiments_step6_level2_300.json"
     ref_file = data_dir / "cspad_reflections_step7_300.pickle"
@@ -51,7 +49,6 @@ def test_multi_panel_parameterisations(dials_data, detector_parameterisation_cho

 def test_trim_scans_to_observations(dials_data):
-
     # Use 4 scan data for this test
     data_dir = dials_data("l_cysteine_dials_output", pathlib=True)
     experiments = ExperimentListFactory.from_json_file(
diff --git a/tests/algorithms/refinement/test_refiner_units.py b/tests/algorithms/refinement/test_refiner_units.py
index 5c66e04f41..9adfcec408 100644
--- a/tests/algorithms/refinement/test_refiner_units.py
+++ b/tests/algorithms/refinement/test_refiner_units.py
@@ -3,7 +3,6 @@
 alone.
""" - from __future__ import annotations from unittest.mock import Mock, patch diff --git a/tests/algorithms/refinement/test_reflection_manager.py b/tests/algorithms/refinement/test_reflection_manager.py index 770519a78b..f8bd2529f4 100644 --- a/tests/algorithms/refinement/test_reflection_manager.py +++ b/tests/algorithms/refinement/test_reflection_manager.py @@ -11,7 +11,6 @@ def test_scan_margin(dials_data): - # Use 4 scan data for this test data_dir = dials_data("l_cysteine_dials_output", pathlib=True) experiments = ExperimentListFactory.from_json_file( diff --git a/tests/algorithms/refinement/test_restraints_gradients.py b/tests/algorithms/refinement/test_restraints_gradients.py index 7fb2995b08..55cdef788c 100644 --- a/tests/algorithms/refinement/test_restraints_gradients.py +++ b/tests/algorithms/refinement/test_restraints_gradients.py @@ -3,7 +3,6 @@ restraints """ - from __future__ import annotations import math @@ -145,7 +144,6 @@ def dangle(u, v): # look at each parameter for i, dO in enumerate(dO_dp): - # print # print "***** PARAMETER {0} *****".format(i) diff --git a/tests/algorithms/refinement/test_restraints_parameterisation.py b/tests/algorithms/refinement/test_restraints_parameterisation.py index 3bd259b268..f224f9df3a 100644 --- a/tests/algorithms/refinement/test_restraints_parameterisation.py +++ b/tests/algorithms/refinement/test_restraints_parameterisation.py @@ -393,7 +393,6 @@ def test_10_crystals_with_stills_parameterisation(dials_data): def test_group_restraint_with_multiple_crystals_and_a_stills_refiner(dials_data): - # The phil scope from dials.algorithms.refinement.refiner import phil_scope diff --git a/tests/algorithms/refinement/test_rotation_decomposition.py b/tests/algorithms/refinement/test_rotation_decomposition.py index 785227f2a7..82332b4b6e 100644 --- a/tests/algorithms/refinement/test_rotation_decomposition.py +++ b/tests/algorithms/refinement/test_rotation_decomposition.py @@ -1,6 +1,5 @@ """Test decomposition of rotation matrices around arbitrary axes""" - from __future__ import annotations import math @@ -15,7 +14,6 @@ def _test_rotation_matrices(phi1, phi2, phi3): - # compose rotation matrix R1 = matrix.col((1, 0, 0)).axis_and_angle_as_r3_rotation_matrix(phi1, deg=False) R2 = matrix.col((0, 1, 0)).axis_and_angle_as_r3_rotation_matrix(phi2, deg=False) @@ -70,7 +68,6 @@ def _test_rotation_matrices(phi1, phi2, phi3): def _test_vs_euler_angles_xyz_angles(phi1, phi2, phi3): - from scitbx.math import euler_angles_xyz_angles # compose rotation matrix @@ -144,7 +141,6 @@ def test(): # tests using principal axes, and angles in the range +/-30 deg for i in range(n_tests): - phi1 = random.uniform(-math.pi / 6, math.pi / 6) phi2 = random.uniform(-math.pi / 6, math.pi / 6) phi3 = random.uniform(-math.pi / 6, math.pi / 6) diff --git a/tests/algorithms/refinement/test_scan_varying_block_calculation.py b/tests/algorithms/refinement/test_scan_varying_block_calculation.py index 4637fbcfb2..b2c031be55 100644 --- a/tests/algorithms/refinement/test_scan_varying_block_calculation.py +++ b/tests/algorithms/refinement/test_scan_varying_block_calculation.py @@ -2,7 +2,6 @@ for scan-varying refinement. 
This exercises the issue originally flagged in https://github.com/dials/dials/issues/511""" - from __future__ import annotations from math import pi @@ -18,7 +17,6 @@ def create_experiments(image_start=1): - # Create models from libtbx.phil import parse @@ -68,7 +66,6 @@ def create_experiments(image_start=1): def generate_reflections(experiments): - from cctbx.sgtbx import space_group, space_group_symbols from dials.algorithms.refinement.prediction.managed_predictors import ( @@ -112,7 +109,6 @@ def generate_reflections(experiments): def test_per_width_and_per_image_are_equivalent(): - # Scan starting at image 1 experiments = create_experiments(1) reflections = generate_reflections(experiments) diff --git a/tests/algorithms/refinement/test_scan_varying_model_parameters.py b/tests/algorithms/refinement/test_scan_varying_model_parameters.py index 96732590dd..dc9deeacd3 100644 --- a/tests/algorithms/refinement/test_scan_varying_model_parameters.py +++ b/tests/algorithms/refinement/test_scan_varying_model_parameters.py @@ -290,7 +290,6 @@ def test_ScanVaryingCrystalOrientationParameterisation_intervals( phi3_data = [] step_size = (vmp.image_range[1] - vmp.image_range[0]) / num_points for t in [vmp.image_range[0] + e * step_size for e in range(num_points + 1)]: - # collect data for plot smooth_at.append(t) phi1_data.append(xl_op._smoother.value_weight(t, xl_op._param[0])[0]) diff --git a/tests/algorithms/refinement/test_scan_varying_prediction_parameters.py b/tests/algorithms/refinement/test_scan_varying_prediction_parameters.py index 80fca50ad4..c050a8c4c9 100644 --- a/tests/algorithms/refinement/test_scan_varying_prediction_parameters.py +++ b/tests/algorithms/refinement/test_scan_varying_prediction_parameters.py @@ -214,11 +214,11 @@ def test(cmdline_overrides=[]): phi_grads /= delta try: - for (a, b) in zip(x_grads, an_grads[i]["dX_dp"]): + for a, b in zip(x_grads, an_grads[i]["dX_dp"]): assert a == pytest.approx(b, abs=1e-5) - for (a, b) in zip(y_grads, an_grads[i]["dY_dp"]): + for a, b in zip(y_grads, an_grads[i]["dY_dp"]): assert a == pytest.approx(b, abs=1e-5) - for (a, b) in zip(phi_grads, an_grads[i]["dphi_dp"]): + for a, b in zip(phi_grads, an_grads[i]["dphi_dp"]): assert a == pytest.approx(b, abs=1e-5) except AssertionError: print(f"Failure for {p_names[i]}") @@ -230,7 +230,6 @@ def test(cmdline_overrides=[]): def test_SparseFlex_scalars(): - size = 100 # Make a dense double array with 50% explicit zeroes @@ -269,7 +268,6 @@ def test_SparseFlex_scalars(): def test_SparseFlex_matrix_and_vector_arithmetic(): - size = 100 # Make a dense vec3 array with 50% explicit zeroes @@ -350,7 +348,6 @@ def test_SparseFlex_matrix_and_vector_arithmetic(): def test_SparseFlex_vec3_only_methods(): - size = 100 # Make a dense vec3 array with 50% explicit zeroes @@ -402,7 +399,6 @@ def test_SparseFlex_vec3_only_methods(): def test_SparseFlex_select(): - size = 100 # Make a dense double array with 50% explicit zeroes @@ -514,10 +510,8 @@ def test_SparseFlex_select_intersection(random_order): def test_intersection_i_seqs_speed(): - exec_times = [] for i in range(100): - size = 10000 sel1 = flex.random_selection(size, int(size / 2)) @@ -568,7 +562,6 @@ def test_intersection_i_seqs_speed(): @pytest.mark.parametrize("element_type", ["vec3", "mat3"]) def test_ReconstituteDerivatives(element_type): - if element_type == "vec3": n = 3 build = build_reconstitute_derivatives_vec3 diff --git a/tests/algorithms/refinement/test_stills_prediction_parameters.py 
b/tests/algorithms/refinement/test_stills_prediction_parameters.py index 7ccee5c6ab..3dc862d328 100644 --- a/tests/algorithms/refinement/test_stills_prediction_parameters.py +++ b/tests/algorithms/refinement/test_stills_prediction_parameters.py @@ -121,7 +121,6 @@ def generate_reflections(self): self.reflections["xyzobs.mm.variance"] += (1e-3, 1e-3, 1e-6) def get_fd_gradients(self, pred_param, ref_predictor): - # get finite difference gradients p_vals = pred_param.get_param_vals() deltas = [1.0e-7] * len(p_vals) @@ -129,7 +128,6 @@ def get_fd_gradients(self, pred_param, ref_predictor): fd_grads = [] p_names = pred_param.get_param_names() for i in range(len(deltas)): - # save parameter value val = p_vals[i] @@ -208,7 +206,6 @@ def test_stills_pred_param(tc): fd_grads = tc.get_fd_gradients(pred_param, ref_predictor) for i, (an_grad, fd_grad) in enumerate(zip(an_grads, fd_grads)): - # compare FD with analytical calculations print(f"\nParameter {i}: {fd_grad['name']}") diff --git a/tests/algorithms/refinement/test_stills_refinement.py b/tests/algorithms/refinement/test_stills_refinement.py index 5a090e987a..08f371d151 100644 --- a/tests/algorithms/refinement/test_stills_refinement.py +++ b/tests/algorithms/refinement/test_stills_refinement.py @@ -3,7 +3,6 @@ Only the crystal is perturbed while the beam and detector are known. """ - from __future__ import annotations diff --git a/tests/algorithms/refinement/test_stills_spherical_relp_derivatives.py b/tests/algorithms/refinement/test_stills_spherical_relp_derivatives.py index d10f0baabd..cb4e2fc37f 100644 --- a/tests/algorithms/refinement/test_stills_spherical_relp_derivatives.py +++ b/tests/algorithms/refinement/test_stills_spherical_relp_derivatives.py @@ -2,7 +2,6 @@ Test derivatives typed up in dials_regression/doc/notes/prediction/stills_prediction_nave3.pdf """ - from __future__ import annotations import pytest @@ -35,7 +34,6 @@ class Predictor: def __init__(self, experiments): - self._experiment = experiments[0] self.update() @@ -70,7 +68,6 @@ def __init__( xl_orientation_parameterisation, xl_unit_cell_parameterisation, ): - # References to the underlying models from the first experiment self.experiment = experiments[0] self.beam = self.experiment.beam @@ -92,7 +89,6 @@ def __init__( self.us0 = self.s0.normalize() def get_beam_gradients(self, reflections): - ds0_dbeam_p = self.beam_parameterisation.get_ds_dp() p_names = self.beam_parameterisation.get_param_names() @@ -124,7 +120,6 @@ def get_beam_gradients(self, reflections): # loop through the parameters for name, der in zip(p_names, ds0_dbeam_p): - # term1 term1 = self.us0.dot(der) * q_s0 + self.s0len * (der) term1 = term1 * inv_s @@ -139,7 +134,6 @@ def get_beam_gradients(self, reflections): return ds1_dp def get_crystal_orientation_gradients(self, reflections): - # get derivatives of the U matrix wrt the parameters dU_dxlo_p = self.xl_orientation_parameterisation.get_ds_dp() p_names = self.xl_orientation_parameterisation.get_param_names() @@ -166,7 +160,6 @@ def get_crystal_orientation_gradients(self, reflections): # loop through the parameters for name, der in zip(p_names, dU_dxlo_p): - # calculate the derivative of q for this parameter dq = flex.mat3_double(n, der.elems) * B * h @@ -184,7 +177,6 @@ def get_crystal_orientation_gradients(self, reflections): return ds1_dp def get_crystal_unit_cell_gradients(self, reflections): - # get derivatives of the B matrix wrt the parameters dB_dxluc_p = self.xl_unit_cell_parameterisation.get_ds_dp() p_names = 
self.xl_unit_cell_parameterisation.get_param_names() @@ -211,7 +203,6 @@ def get_crystal_unit_cell_gradients(self, reflections): # loop through the parameters for name, der in zip(p_names, dB_dxluc_p): - # calculate the derivative of q for this parameter dq = U * flex.mat3_double(n, der.elems) * h @@ -338,7 +329,6 @@ def test(): fd_grads = [] p_names = pred_param.get_param_names() for i, delta in enumerate(deltas): - # save parameter value val = p_vals[i] @@ -379,7 +369,6 @@ def test(): pred_param.set_param_vals(p_vals) for i, fd_grad in enumerate(fd_grads): - ## compare FD with analytical calculations print(f"\n\nParameter {i}: {fd_grad['name']}") diff --git a/tests/algorithms/refinement/test_two_theta_refinement.py b/tests/algorithms/refinement/test_two_theta_refinement.py index ea62faee6e..29a9c05fa1 100644 --- a/tests/algorithms/refinement/test_two_theta_refinement.py +++ b/tests/algorithms/refinement/test_two_theta_refinement.py @@ -2,7 +2,6 @@ Test refinement of a crystal unit cell using a two theta target. """ - from __future__ import annotations from copy import deepcopy @@ -20,7 +19,6 @@ def generate_reflections(experiments): - from cctbx.sgtbx import space_group, space_group_symbols from scitbx.array_family import flex @@ -158,7 +156,6 @@ def test_fd_derivatives(): deltas = [1.0e-7] * len(p_vals) for i in range(len(deltas)): - val = p_vals[i] p_vals[i] -= deltas[i] / 2.0 diff --git a/tests/algorithms/reflection_basis/test_coordinate_system.py b/tests/algorithms/reflection_basis/test_coordinate_system.py index 1df3234fc6..151fb726d0 100644 --- a/tests/algorithms/reflection_basis/test_coordinate_system.py +++ b/tests/algorithms/reflection_basis/test_coordinate_system.py @@ -138,9 +138,7 @@ def test_beamvector_limit(beamvector): c1, c2 = beamvector["cs"].from_beam_vector(s_dash) # Check the point is equal to the limit in rs - assert math.sqrt(c1**2 + c2**2) == pytest.approx( - abs(beamvector["cs"].limits()[0]) - ) + assert math.sqrt(c1**2 + c2**2) == pytest.approx(abs(beamvector["cs"].limits()[0])) ### Test the TestFromRotationAngle class @@ -271,7 +269,6 @@ def random_shift(): # Loop a number of times num = 1000 for i in range(num): - # Create a rotation angle phi_dash = rotationangle["phi"] + random_shift() diff --git a/tests/algorithms/reflection_basis/test_map_frames.py b/tests/algorithms/reflection_basis/test_map_frames.py index b7f79b62b8..5ead835ec0 100644 --- a/tests/algorithms/reflection_basis/test_map_frames.py +++ b/tests/algorithms/reflection_basis/test_map_frames.py @@ -59,7 +59,6 @@ def test_map_frames_forward(dials_data): s0_length = matrix.col(beam.get_s0()).length() for i in range(100): - # Get random x, y, z x = random.uniform(0, 2000) y = random.uniform(0, 2000) @@ -148,7 +147,6 @@ def test_map_frames_reverse(dials_data): s0_length = matrix.col(beam.get_s0()).length() for i in range(100): - # Get random x, y, z x = random.uniform(0, 2000) y = random.uniform(0, 2000) @@ -254,7 +252,6 @@ def test_map_forward_reverse(dials_data): s0_length = matrix.col(beam.get_s0()).length() for i in range(100): - # Get random x, y, z x = random.uniform(0, 2000) y = random.uniform(0, 2000) diff --git a/tests/algorithms/reflection_basis/test_transform.py b/tests/algorithms/reflection_basis/test_transform.py index c83f73ff0b..480aba1c45 100644 --- a/tests/algorithms/reflection_basis/test_transform.py +++ b/tests/algorithms/reflection_basis/test_transform.py @@ -84,7 +84,6 @@ def test_forward(dials_data): s1_map = transform.beam_vector_map(detector[0], beam, True) for i in range(100): 
- # Get random x, y, z x = random.uniform(300, 1800) y = random.uniform(300, 1800) @@ -160,7 +159,6 @@ def test_forward(dials_data): s1_map = transform.beam_vector_map(detector[0], beam, True) for i in range(100): - # Get random x, y, z x = random.uniform(300, 1800) y = random.uniform(300, 1800) @@ -281,7 +279,6 @@ def test_forward_no_model(dials_data): s1_map = transform.beam_vector_map(detector[0], beam, True) for i in range(100): - # Get random x, y, z x = random.uniform(300, 1800) y = random.uniform(300, 1800) diff --git a/tests/algorithms/scaling/test_outlier_rejection.py b/tests/algorithms/scaling/test_outlier_rejection.py index 8661f51ed9..40436da84e 100644 --- a/tests/algorithms/scaling/test_outlier_rejection.py +++ b/tests/algorithms/scaling/test_outlier_rejection.py @@ -204,7 +204,6 @@ def generate_outlier_table_2(): def test_outlier_rejection_with_small_outliers(): - rt = flex.reflection_table() rt["intensity"] = flex.double( [3560.84231, 3433.66407, 3830.64235, 0.20552, 3786.59537] @@ -228,7 +227,6 @@ def test_outlier_rejection_with_small_outliers(): def test_limit_outlier_weights(): - rt = flex.reflection_table() rt["intensity"] = flex.double([100.0, 101.0, 109.0, 105.0, 1.0]) rt["variance"] = flex.double([100.0, 101.0, 109.0, 105.0, 1.0]) diff --git a/tests/algorithms/scaling/test_plots.py b/tests/algorithms/scaling/test_plots.py index 41f7a86f19..dbbe9deec2 100644 --- a/tests/algorithms/scaling/test_plots.py +++ b/tests/algorithms/scaling/test_plots.py @@ -117,7 +117,6 @@ def test_plot_array_modulation_plot(): def test_plot_scaling_models(): - physical_dict = { "__id__": "physical", "is_scaled": True, diff --git a/tests/algorithms/scaling/test_scale_and_filter.py b/tests/algorithms/scaling/test_scale_and_filter.py index 8fec5a583b..ce634d8a66 100644 --- a/tests/algorithms/scaling/test_scale_and_filter.py +++ b/tests/algorithms/scaling/test_scale_and_filter.py @@ -1,4 +1,5 @@ """Test that compute_delta_cchalf returns required values""" + from __future__ import annotations import json diff --git a/tests/algorithms/scaling/test_scaling_restraints.py b/tests/algorithms/scaling/test_scaling_restraints.py index 8118a7a59f..7a097bdef7 100644 --- a/tests/algorithms/scaling/test_scaling_restraints.py +++ b/tests/algorithms/scaling/test_scaling_restraints.py @@ -210,7 +210,8 @@ def test_MultiScalingRestraints( for i in range(mock_restrained_component.n_params): for j in range(mock_restrained_component.n_params): assert jacobian_restraints[1][i, j] == abs_restraints[1][i, j] - assert jacobian_restraints[1][i + n_abs_params, j + n_total_params] == ( - abs_restraints[1][i, j] + assert ( + jacobian_restraints[1][i + n_abs_params, j + n_total_params] + == (abs_restraints[1][i, j]) ) assert abs_restraints[1].non_zeroes * 2 == jacobian_restraints[1].non_zeroes diff --git a/tests/algorithms/scaling/test_target_function.py b/tests/algorithms/scaling/test_target_function.py index 118a107593..88a82b9c07 100644 --- a/tests/algorithms/scaling/test_target_function.py +++ b/tests/algorithms/scaling/test_target_function.py @@ -351,9 +351,7 @@ def test_target_gradient_calculation_finite_difference( grad = target.calculate_gradients(scaler.Ih_table.blocked_data_list[0]) res, _ = target.compute_residuals(scaler.Ih_table.blocked_data_list[0]) - assert ( - res > 1e-8 - ), """residual should not be zero, or the gradient test + assert res > 1e-8, """residual should not be zero, or the gradient test below will not really be working!""" # Now compare to finite difference diff --git 
a/tests/algorithms/spot_prediction/test_ray_predictor.py b/tests/algorithms/spot_prediction/test_ray_predictor.py index 9cedf55910..6f13e8134d 100644 --- a/tests/algorithms/spot_prediction/test_ray_predictor.py +++ b/tests/algorithms/spot_prediction/test_ray_predictor.py @@ -131,7 +131,6 @@ def test_rotation_angles(raypredictor): for hkl, xyz in zip( raypredictor.integrate_handle.hkl, raypredictor.integrate_handle.xyzcal ): - # Calculate the XDS phi value xds_phi = ( raypredictor.scan.get_oscillation(deg=False)[0] diff --git a/tests/algorithms/spot_prediction/test_reeke_index_generator.py b/tests/algorithms/spot_prediction/test_reeke_index_generator.py index 138e515a97..7368104988 100644 --- a/tests/algorithms/spot_prediction/test_reeke_index_generator.py +++ b/tests/algorithms/spot_prediction/test_reeke_index_generator.py @@ -14,7 +14,6 @@ class Test: def setup_method(self): - # cubic, 50A cell, 1A radiation, 1 deg osciillation, everything ideal a = 50.0 self.ub = matrix.sqr((1.0 / a, 0.0, 0.0, 0.0, 1.0 / a, 0.0, 0.0, 0.0, 1.0 / a)) @@ -26,7 +25,6 @@ def setup_method(self): self.margin = 1 def test_varying_s0(self): - space_group_type = space_group_info("P 1").group().type() ub_beg, ub_end = self._get_ub(0) @@ -73,7 +71,6 @@ def test_varying_s0(self): assert len(common) >= 0.98 * min_set_len def _get_ub(self, frame): - angle_beg = frame * 1 angle_end = (frame + 1) * 1 diff --git a/tests/algorithms/spot_prediction/test_rotation_angles.py b/tests/algorithms/spot_prediction/test_rotation_angles.py index d9ccc5c5fa..105a4cbdac 100644 --- a/tests/algorithms/spot_prediction/test_rotation_angles.py +++ b/tests/algorithms/spot_prediction/test_rotation_angles.py @@ -71,7 +71,6 @@ def test(dials_regression: Path, tmp_path): # Create a dict of lists of xy for each hkl gen_phi = {} for h in integrate_handle.hkl: - # Calculate the angles angles = ra(h, ub) gen_phi[h] = angles @@ -85,7 +84,6 @@ def test(dials_regression: Path, tmp_path): # For each hkl in the xds file for hkl, xyz in zip(integrate_handle.hkl, integrate_handle.xyzcal): - # Calculate the XDS phi value xds_phi = ( scan.get_oscillation(deg=False)[0] diff --git a/tests/algorithms/spot_prediction/test_scan_varying_predictor.py b/tests/algorithms/spot_prediction/test_scan_varying_predictor.py index c9407d1ab6..2b27279dd9 100644 --- a/tests/algorithms/spot_prediction/test_scan_varying_predictor.py +++ b/tests/algorithms/spot_prediction/test_scan_varying_predictor.py @@ -3,7 +3,6 @@ predictor. 
""" - from __future__ import annotations import math @@ -143,7 +142,7 @@ def test(): assert len(refs1_sorted) == len(refs2_sorted) - for (r1, r2) in zip(refs1_sorted, refs2_sorted): + for r1, r2 in zip(refs1_sorted, refs2_sorted): assert r1["miller_index"] == r2["miller_index"] dz = r1["xyzcal.px"][2] - r2["xyzcal.px"][2] assert abs(dz) < 0.01 diff --git a/tests/algorithms/spot_prediction/test_spot_prediction.py b/tests/algorithms/spot_prediction/test_spot_prediction.py index 2707c9d031..8103b3054d 100644 --- a/tests/algorithms/spot_prediction/test_spot_prediction.py +++ b/tests/algorithms/spot_prediction/test_spot_prediction.py @@ -131,7 +131,6 @@ def test_rotation_angles(spotpredictor): for hkl, xyz in zip( spotpredictor.integrate_handle.hkl, spotpredictor.integrate_handle.xyzcal ): - xds_phi = ( spotpredictor.scan.get_oscillation(deg=False)[0] + xyz[2] * spotpredictor.scan.get_oscillation(deg=False)[1] @@ -181,7 +180,6 @@ def test_image_coordinates(spotpredictor): for hkl, xyz in zip( spotpredictor.integrate_handle.hkl, spotpredictor.integrate_handle.xyzcal ): - xds_xy = (xyz[0] - 0.5, xyz[1] - 0.5) # Select the nearest xy to use if there are 2 diff --git a/tests/algorithms/statistics/test_binned_statistics.py b/tests/algorithms/statistics/test_binned_statistics.py index 238a413fb2..efa82a5a82 100644 --- a/tests/algorithms/statistics/test_binned_statistics.py +++ b/tests/algorithms/statistics/test_binned_statistics.py @@ -8,7 +8,6 @@ def test_partitions(): - vals = flex.double((1, 2, 3, 1, 2, 3, 1, 2, 3)) bins = flex.size_t((0, 1, 3, 0, 1, 3, 0, 1, 3)) binned_statistics = BinnedStatistics(vals, bins, 4) @@ -26,7 +25,6 @@ def test_partitions(): def test_median_and_iqr(): - # Even number of values, one bin vals1 = flex.double((1, 2, 3, 4)) bins = flex.size_t([0] * len(vals1)) diff --git a/tests/algorithms/statistics/test_fast_mcd.py b/tests/algorithms/statistics/test_fast_mcd.py index e53698a92b..8a77f096ae 100644 --- a/tests/algorithms/statistics/test_fast_mcd.py +++ b/tests/algorithms/statistics/test_fast_mcd.py @@ -1,12 +1,10 @@ """Testing functions for multivariate outlier rejection by the FAST-MCD algorithm""" - from __future__ import annotations def test_maha(): - # Want implementation of Mahalanobis distance to match this R session: # > x1 <- round(rnorm(10,3), 3) diff --git a/tests/array_family/test_identifiers_handling.py b/tests/array_family/test_identifiers_handling.py index 5c69d32c9c..73b98b1636 100644 --- a/tests/array_family/test_identifiers_handling.py +++ b/tests/array_family/test_identifiers_handling.py @@ -1,4 +1,5 @@ """Test for new experiment identifier features""" + from __future__ import annotations from dxtbx.model import Experiment, ExperimentList diff --git a/tests/command_line/test_anvil_correction.py b/tests/command_line/test_anvil_correction.py index e75f6a5fa6..10051be311 100644 --- a/tests/command_line/test_anvil_correction.py +++ b/tests/command_line/test_anvil_correction.py @@ -2,7 +2,6 @@ Tests for dials.command_line.anvil_correction. """ - from __future__ import annotations import copy diff --git a/tests/command_line/test_combine_experiments.py b/tests/command_line/test_combine_experiments.py index 494f28f490..9f53154799 100644 --- a/tests/command_line/test_combine_experiments.py +++ b/tests/command_line/test_combine_experiments.py @@ -2,7 +2,6 @@ Test combination of multiple experiments and reflections files. 
""" - from __future__ import annotations import copy @@ -411,7 +410,6 @@ def test_combine_nsubset( def test_failed_tolerance_error(dials_data, monkeypatch): - """Test that we get a sensible error message on tolerance failures""" # Select some experiments to use for combining data_dir = dials_data("polyhedra_narrow_wedges", pathlib=True) diff --git a/tests/command_line/test_export_bitmaps.py b/tests/command_line/test_export_bitmaps.py index ec8a24ce52..6fe92e9344 100644 --- a/tests/command_line/test_export_bitmaps.py +++ b/tests/command_line/test_export_bitmaps.py @@ -105,7 +105,6 @@ def test_export_multiple_bitmaps_with_specified_output_filename_fails( @pytest.mark.parametrize("set_imageset_index", [True, False]) def test_export_single_cbf(dials_data, tmp_path, set_imageset_index): - image = str(dials_data("centroid_test_data", pathlib=True) / "centroid_0002.cbf") cmd = [image, f"output.directory={tmp_path}"] if set_imageset_index: diff --git a/tests/command_line/test_export_mosflm.py b/tests/command_line/test_export_mosflm.py index a7628a999f..17256b0967 100644 --- a/tests/command_line/test_export_mosflm.py +++ b/tests/command_line/test_export_mosflm.py @@ -36,9 +36,7 @@ def test_export_mosflm(dials_regression: Path, tmp_path): 0.83360283 -0.53598726 -0.13350648 42.2717 42.2720 39.6704 90.0001 89.9993 89.9998 0.000 0.000 0.000 -""".strip( - "\n" - ) +""".strip("\n") ) assert (tmp_path / "mosflm" / "mosflm.in").is_file() lines = (tmp_path / "mosflm" / "mosflm.in").read_text() @@ -51,8 +49,6 @@ def test_export_mosflm(dials_regression: Path, tmp_path): BEAM 220.002 212.478 DISTANCE 190.1800 MATRIX index.mat -""".strip( - "\n" - ) +""".strip("\n") % (dials_regression, os.path.sep) ) diff --git a/tests/command_line/test_export_xds.py b/tests/command_line/test_export_xds.py index 623e1bc81f..88ee14655a 100644 --- a/tests/command_line/test_export_xds.py +++ b/tests/command_line/test_export_xds.py @@ -63,9 +63,7 @@ def test_spots_xds(tmp_path): 1317.52 1171.59 19.28 120.00 6 -4 6 1260.25 1300.55 13.67 116.00 -4 2 6 1090.27 1199.47 41.49 114.00 -2 3 -13 -""".split( - "\n" - ) +""".split("\n") ] diff --git a/tests/command_line/test_import.py b/tests/command_line/test_import.py index e1f106d2c4..1db6f6b26e 100644 --- a/tests/command_line/test_import.py +++ b/tests/command_line/test_import.py @@ -823,9 +823,7 @@ def test_convert_stills_to_sequences(dials_data, tmp_path): # also add in something that is sequences, for completess centroid_image_files = sorted( dials_data("centroid_test_data", pathlib=True).glob("centroid*.cbf") - )[ - :3 - ] # just three images + )[:3] # just three images result = subprocess.run( [ shutil.which("dials.import"), diff --git a/tests/command_line/test_merge.py b/tests/command_line/test_merge.py index db5d1769af..1425a5c633 100644 --- a/tests/command_line/test_merge.py +++ b/tests/command_line/test_merge.py @@ -1,6 +1,5 @@ """Tests for dials.merge command line program.""" - from __future__ import annotations import json diff --git a/tests/command_line/test_powder_calibrate.py b/tests/command_line/test_powder_calibrate.py index 72151330d4..8bbac75e13 100644 --- a/tests/command_line/test_powder_calibrate.py +++ b/tests/command_line/test_powder_calibrate.py @@ -4,10 +4,11 @@ import pytest -pytest.importorskip("pyFAI") - from dxtbx.serialize import load +# ruff: noqa: E402 +pytest.importorskip("pyFAI") + from dials.command_line import powder_calibrate from dials.command_line.powder_calibrate import Geometry, Point, PowderCalibrator @@ -16,7 +17,6 @@ "eyeball, 
starting_geometry", [(True, "imported.expt"), (False, "eyeballed.expt")] ) def test_calibrate_coarse(dials_data, tmp_path, eyeball, starting_geometry): - aluminium_powder = dials_data("aluminium_standard", pathlib=True) starting_geom_exptlist = load.experiment_list(aluminium_powder / starting_geometry) @@ -55,7 +55,6 @@ def mocked_eyeball(self): def test_save_geom_to_expt(dials_data, tmp_path): - aluminium_powder = dials_data("aluminium_standard", pathlib=True) imported_exptlist = load.experiment_list(aluminium_powder / "imported.expt") diff --git a/tests/command_line/test_refine.py b/tests/command_line/test_refine.py index 5f2310fbc1..c737f384e8 100644 --- a/tests/command_line/test_refine.py +++ b/tests/command_line/test_refine.py @@ -7,7 +7,6 @@ have not changed format and so on. """ - from __future__ import annotations import math diff --git a/tests/command_line/test_report.py b/tests/command_line/test_report.py index 506740241f..0624319a2a 100644 --- a/tests/command_line/test_report.py +++ b/tests/command_line/test_report.py @@ -1,4 +1,5 @@ """Tests for dials.report""" + from __future__ import annotations import json diff --git a/tests/command_line/test_ssx_integrate.py b/tests/command_line/test_ssx_integrate.py index 5b5f86b38b..2e88a089c4 100644 --- a/tests/command_line/test_ssx_integrate.py +++ b/tests/command_line/test_ssx_integrate.py @@ -1,5 +1,6 @@ from __future__ import annotations +import json import pathlib import shutil import subprocess @@ -48,8 +49,6 @@ def test_ssx_integrate_fullprocess(dials_data, tmp_path): assert tmp_path.joinpath(f"nuggets/nugget_integrated_{i}.json").is_file() -import json - expected_simple1 = { "likelihood": 171374.174649, "mosaicity": [0.0003630], diff --git a/tests/command_line/test_symmetry.py b/tests/command_line/test_symmetry.py index d69824a402..4c4d6c43ef 100644 --- a/tests/command_line/test_symmetry.py +++ b/tests/command_line/test_symmetry.py @@ -8,7 +8,7 @@ import pytest import scitbx.matrix -from cctbx import sgtbx, uctbx +from cctbx import crystal, sgtbx, uctbx from dxtbx.model import Crystal, Experiment, ExperimentList, Scan from dxtbx.serialize import load @@ -416,9 +416,6 @@ def test_change_of_basis_ops_to_minimum_cell_mpro(): ) -from cctbx import crystal - - def test_change_of_basis_ops_to_minimum_cell_with_outlier(): symmetries = [ crystal.symmetry(unit_cell=uc, space_group="P1") diff --git a/tests/command_line/test_two_theta_refine.py b/tests/command_line/test_two_theta_refine.py index 34fde1a2e0..67bd562217 100644 --- a/tests/command_line/test_two_theta_refine.py +++ b/tests/command_line/test_two_theta_refine.py @@ -3,7 +3,6 @@ data and comparing with expected output. 
""" - from __future__ import annotations import shutil diff --git a/tests/model/data/test_pixel_list.py b/tests/model/data/test_pixel_list.py index 51a6dff3a7..fd547a0d83 100644 --- a/tests/model/data/test_pixel_list.py +++ b/tests/model/data/test_pixel_list.py @@ -86,7 +86,6 @@ def test_labels_3d(): for j in range(size[0]): for i in range(size[1]): if mask_list[k][j, i]: - l1 = labels[vi] if k > 0 and mask_list[k - 1][j, i]: l2 = label_map[k - 1, j, i] @@ -135,7 +134,6 @@ def test_labels_2d(): for j in range(size[0]): for i in range(size[1]): if mask_list[k][j, i]: - l1 = labels[vi] if k > 0 and mask_list[k - 1][j, i]: l2 = label_map[k - 1, j, i] diff --git a/tests/report/test_plots.py b/tests/report/test_plots.py index cf8dd17561..cc617547c3 100644 --- a/tests/report/test_plots.py +++ b/tests/report/test_plots.py @@ -44,7 +44,6 @@ def iobs(): def test_AnomalousPlotter(): - "Make a larger array to allow all plots to be made" cs = crystal.symmetry(space_group_symbol="P1", unit_cell=(6, 6, 6, 90, 90, 90)) ms = miller.build_set(cs, anomalous_flag=True, d_min=1.0) diff --git a/tests/util/__init__.py b/tests/util/__init__.py index 80e4ce04c7..14dbdd70e7 100644 --- a/tests/util/__init__.py +++ b/tests/util/__init__.py @@ -1,4 +1,5 @@ """Shared functions for tests.""" + from __future__ import annotations from unittest.mock import Mock diff --git a/tests/util/test_export_mtz.py b/tests/util/test_export_mtz.py index 9c01017cf6..8aba4bcb86 100644 --- a/tests/util/test_export_mtz.py +++ b/tests/util/test_export_mtz.py @@ -2,7 +2,6 @@ Unit testing for the export_mtz.py routines """ - from __future__ import annotations import itertools diff --git a/tests/util/test_image_grouping.py b/tests/util/test_image_grouping.py index 7fde65cbaf..ae1af42449 100644 --- a/tests/util/test_image_grouping.py +++ b/tests/util/test_image_grouping.py @@ -246,7 +246,6 @@ def test_invalid_yml(tmp_path): reason="Failures due to translated paths; see https://github.com/cctbx/dxtbx/issues/613", ) def test_real_h5_example(tmp_path, dials_data): - """This test tests a few use cases on processed data derived from h5 format.""" fpath1 = ( "/dls/mx/data/nt30330/nt30330-15/VMXi-AB1698/well_42/images/image_58766.nxs" @@ -395,7 +394,6 @@ def test_real_h5_example(tmp_path, dials_data): reason="Failures due to translated paths; see https://github.com/cctbx/dxtbx/issues/613", ) def test_real_cbf_example(tmp_path, dials_data): - """This test tests a few use cases on real cbf data, using the template metadata definition. diff --git a/tests/util/test_log.py b/tests/util/test_log.py index 52b2c2456e..bce510acdf 100644 --- a/tests/util/test_log.py +++ b/tests/util/test_log.py @@ -10,7 +10,6 @@ def test_LoggingContext(): - # configure logging dials.util.log.config(verbosity=2) @@ -68,7 +67,6 @@ def log_something(_: Any) -> List[logging.LogRecord]: raises=AttributeError, ) def test_cached_log_records(caplog): - # Generate some cached log messages in easy_mp child processes. 
    results = multi_node_parallel_map(
        log_something,
diff --git a/tests/util/test_render_3d.py b/tests/util/test_render_3d.py
index 7ac99ec288..1b9c593a36 100644
--- a/tests/util/test_render_3d.py
+++ b/tests/util/test_render_3d.py
@@ -66,12 +66,12 @@ def test_Render3d(mocker, multi_sequence_data):
         render.load_models(experiments, reflections)
         assert render.set_beam_centre.call_count == 1
 
-    for (outlier_display, expected_count) in (("outliers", 0), ("inliers", 1255)):
+    for outlier_display, expected_count in (("outliers", 0), ("inliers", 1255)):
         render.settings.outlier_display = outlier_display
         render.load_models(experiments, reflections)
         assert render.viewer.set_points.call_args[0][0].size() == expected_count
 
-    for (display, expected_count) in (
+    for display, expected_count in (
         ("indexed", 1255),
         ("unindexed", 0),
         ("integrated", 0),

From 1052d967bd07da2cf3f77eb4222470c6dad989ee Mon Sep 17 00:00:00 2001
From: Graeme Winter
Date: Mon, 19 Aug 2024 12:10:49 +0100
Subject: [PATCH 26/40] Remove as_grid_scan from dials.import (#2454)

It is not useful, and is misleading, as it makes the data into stills
rather than an image sequence.

If we want to reinstate it, this can be reverted and fixed up
appropriately.

Fixes #2452

Mentioned in #2447
---
 newsfragments/2454.removal             |  2 ++
 src/dials/command_line/dials_import.py | 25 +------------------------
 2 files changed, 3 insertions(+), 24 deletions(-)
 create mode 100644 newsfragments/2454.removal

diff --git a/newsfragments/2454.removal b/newsfragments/2454.removal
new file mode 100644
index 0000000000..11f42f4b09
--- /dev/null
+++ b/newsfragments/2454.removal
@@ -0,0 +1,2 @@
+`dials.import`: remove useless as_grid_scan option as misleading. To import as stills set scan.oscillation=0,0
+
diff --git a/src/dials/command_line/dials_import.py b/src/dials/command_line/dials_import.py
index d31cb272c2..ada3380d3d 100644
--- a/src/dials/command_line/dials_import.py
+++ b/src/dials/command_line/dials_import.py
@@ -11,7 +11,7 @@
 import dxtbx.model.compare as compare
 import libtbx.phil
-from dxtbx.imageset import ImageGrid, ImageSequence
+from dxtbx.imageset import ImageSequence
 from dxtbx.model.experiment_list import (
     Experiment,
     ExperimentList,
@@ -149,13 +149,6 @@ def _pickle_load(fh):
       .type = bool
      .help = "If False, raise an error if multiple sequences are found"
 
-    as_grid_scan = False
-      .type = bool
-      .help = "Import as grid scan"
-
-    grid_size = None
-      .type = ints(size=2)
-      .help = "If importing as a grid scan set the size"
   }
 
   include scope dials.util.options.format_phil_scope
@@ -494,9 +487,6 @@ def __call__(self, imageset_list):
         """
         # Import the lookup data
         lookup = self.import_lookup_data(self.params)
-        # Convert all to ImageGrid
-        if self.params.input.as_grid_scan:
-            imageset_list = self.convert_to_grid_scan(imageset_list, self.params)
 
         # Create the experiments
         experiments = ExperimentList()
@@ -768,19 +758,6 @@ def import_lookup_data(self, params):
             dy=Item(data=dy, filename=dy_filename),
         )
 
-    def convert_to_grid_scan(self, imageset_list, params):
-        """
-        Convert the imagesets to grid scans
-        """
-        if params.input.grid_size is None:
-            raise Sorry("The input.grid_size parameter is required")
-        result = []
-        for imageset in imageset_list:
-            result.append(
-                ImageGrid.from_imageset(imageset.as_imageset(), params.input.grid_size)
-            )
-        return result
-
 
 def print_sequence_diff(sequence1, sequence2, params):
     """

From 93cf1bf330f080693d372dbe83c629a3ddbb4093 Mon Sep 17 00:00:00 2001
From: Nicholas Devenish
Date: Mon, 19 Aug 2024 20:54:56 +0100
Subject: [PATCH 27/40] MNT:
Don't run newsfragment check on pre-commit PRs --- .github/workflows/newsfragments.yml | 4 ++-- .pre-commit-config.yaml | 2 ++ 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/.github/workflows/newsfragments.yml b/.github/workflows/newsfragments.yml index 7b64806fd3..fcaabf3f40 100644 --- a/.github/workflows/newsfragments.yml +++ b/.github/workflows/newsfragments.yml @@ -10,10 +10,10 @@ jobs: rename-news: name: Newsfragment runs-on: ubuntu-latest - if: github.event.pull_request.draft == false + if: (github.event.pull_request.draft == false) && (! startswith(github.event.pull_request.title, '[pre-commit.ci]')) steps: - name: Check out the repository - uses: actions/checkout@v2 + uses: actions/checkout@v4 with: ref: ${{ github.event.pull_request.head.sha }} token: ${{ secrets.USER_TOKEN }} diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index a8a2bd7eab..6a0027c976 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,3 +1,5 @@ +ci: + skip: [no-images] repos: # Syntax validation and some basic sanity checks - repo: https://github.com/pre-commit/pre-commit-hooks From 8c53add03cecb3e61c81a456b29a2e12931f395c Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 20 Aug 2024 08:50:51 +0100 Subject: [PATCH 28/40] MNT: pre-commit autoupdate (#2722) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - https://github.com/charliermarsh/ruff-pre-commit → https://github.com/astral-sh/ruff-pre-commit Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Nicholas Devenish --- .pre-commit-config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 6a0027c976..d2a0cd2e46 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: no-commit-to-branch name: "Don't commit to 'main' directly" -- repo: https://github.com/charliermarsh/ruff-pre-commit +- repo: https://github.com/astral-sh/ruff-pre-commit rev: v0.6.1 hooks: - id: ruff From 5bcde7d6630ca46a7b6602249ce0ad526a13050e Mon Sep 17 00:00:00 2001 From: Nicholas Devenish Date: Tue, 20 Aug 2024 08:54:32 +0100 Subject: [PATCH 29/40] MNT: Set CMake policy version, to suppress warnings --- CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 7429bb099e..4915c29314 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -1,4 +1,4 @@ -cmake_minimum_required(VERSION 3.20 FATAL_ERROR) +cmake_minimum_required(VERSION 3.20...3.30 FATAL_ERROR) project(dials) From fe8a5f14ffd7a1b083e51ed491f55bd3fc3c3912 Mon Sep 17 00:00:00 2001 From: Nicholas Devenish Date: Tue, 20 Aug 2024 09:24:34 +0100 Subject: [PATCH 30/40] Protect gemmi tests against missing gemmi executable (#2723) If this couldn't be found, this threw a rather obtuse error from inside subprocess.py. --- newsfragments/2723.misc | 1 + tests/command_line/test_export.py | 3 +++ tests/command_line/test_ssx_reduction.py | 5 +++++ 3 files changed, 9 insertions(+) create mode 100644 newsfragments/2723.misc diff --git a/newsfragments/2723.misc b/newsfragments/2723.misc new file mode 100644 index 0000000000..60775d7dbc --- /dev/null +++ b/newsfragments/2723.misc @@ -0,0 +1 @@ +Add explicit tests that executable ``gemmi`` is present, before using it. 
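The guard added in the tests below is the standard pytest conditional skip.
As a minimal self-contained sketch of the pattern (the requires_gemmi alias
and the test name and body are illustrative only, not part of the patch):

import shutil

import pytest

# Skip, rather than fail, when no gemmi executable is on PATH.
requires_gemmi = pytest.mark.skipif(
    shutil.which("gemmi") is None, reason="Could not find gemmi executable"
)


@requires_gemmi
def test_needs_gemmi():
    # Runs only where gemmi is installed; elsewhere pytest reports a
    # clean skip instead of an obtuse error from inside subprocess.py.
    assert shutil.which("gemmi") is not None

shutil.which returns None when no matching executable is found, so the
skip condition is evaluated before the test body ever shells out.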
diff --git a/tests/command_line/test_export.py b/tests/command_line/test_export.py index 2e18096a0d..6560455031 100644 --- a/tests/command_line/test_export.py +++ b/tests/command_line/test_export.py @@ -294,6 +294,9 @@ def test_mmcif(compress, hklout, dials_data, tmp_path): assert (tmp_path / hklin).is_file() +@pytest.mark.skipif( + shutil.which("gemmi") is None, reason="Could not find gemmi executable" +) @pytest.mark.parametrize("pdb_version", ["v5", "v5_next"]) def test_mmcif_on_scaled_data(dials_data, tmp_path, pdb_version): """Call dials.export format=mmcif after scaling""" diff --git a/tests/command_line/test_ssx_reduction.py b/tests/command_line/test_ssx_reduction.py index 3c9a83ad50..35688ad36d 100644 --- a/tests/command_line/test_ssx_reduction.py +++ b/tests/command_line/test_ssx_reduction.py @@ -4,7 +4,12 @@ import shutil import subprocess +import pytest + +@pytest.mark.skipif( + shutil.which("gemmi") is None, reason="Could not find gemmi executable" +) def test_ssx_reduction(dials_data, tmp_path): """ Check that dials.cosym, dials.scale, dials.export and dials.merge run From 9288bebdb15741468332e6828623d74036ca1f17 Mon Sep 17 00:00:00 2001 From: DiamondLightSource-build-server Date: Tue, 20 Aug 2024 09:25:36 +0100 Subject: [PATCH 31/40] DIALS 3.21.0 Changelog towncrier --name=DIALS --version='3.21.0' --- CHANGELOG.rst | 45 ++++++++++++++++++++++++++++++++++++++ newsfragments/2454.removal | 2 -- newsfragments/2662.feature | 1 - newsfragments/2675.misc | 1 - newsfragments/2680.bugfix | 1 - newsfragments/2681.bugfix | 1 - newsfragments/2683.bugfix | 3 --- newsfragments/2687.bugfix | 2 -- newsfragments/2688.bugfix | 1 - newsfragments/2689.bugfix | 1 - newsfragments/2692.bugfix | 1 - newsfragments/2696.feature | 1 - newsfragments/2697.bugfix | 1 - newsfragments/2699.bugfix | 1 - newsfragments/2700.bugfix | 2 -- newsfragments/2702.misc | 1 - newsfragments/2707.bugfix | 1 - newsfragments/2709.feature | 1 - newsfragments/2714.misc | 2 -- newsfragments/2715.bugfix | 1 - newsfragments/2718.bugfix | 3 --- newsfragments/2723.misc | 1 - 22 files changed, 45 insertions(+), 29 deletions(-) delete mode 100644 newsfragments/2454.removal delete mode 100644 newsfragments/2662.feature delete mode 100644 newsfragments/2675.misc delete mode 100644 newsfragments/2680.bugfix delete mode 100644 newsfragments/2681.bugfix delete mode 100644 newsfragments/2683.bugfix delete mode 100644 newsfragments/2687.bugfix delete mode 100644 newsfragments/2688.bugfix delete mode 100644 newsfragments/2689.bugfix delete mode 100644 newsfragments/2692.bugfix delete mode 100644 newsfragments/2696.feature delete mode 100644 newsfragments/2697.bugfix delete mode 100644 newsfragments/2699.bugfix delete mode 100644 newsfragments/2700.bugfix delete mode 100644 newsfragments/2702.misc delete mode 100644 newsfragments/2707.bugfix delete mode 100644 newsfragments/2709.feature delete mode 100644 newsfragments/2714.misc delete mode 100644 newsfragments/2715.bugfix delete mode 100644 newsfragments/2718.bugfix delete mode 100644 newsfragments/2723.misc diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 24d6876586..a5ec1d6bff 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -1,3 +1,48 @@ +DIALS 3.21.0 (2024-08-20) +========================= + +Features +-------- + +- Add classes to support time-of-flight and Laue indexing and refinement. (`#2662 `_) +- ``dials.symmetry``: Allow free selection of ``significance_level`` in the range [0,1]. 
(`#2696 `_) +- ``dials.export``: Add support for exporting still data in mmcif format that GEMMI can read. (`#2709 `_) + + +Bugfixes +-------- + +- ``dials.index``: Fix a ``pink_indexer`` error that caused failures for images with electron diffraction geometry. (`#2680 `_) +- ``dials.correlation_matrix``: Correctly select datasets for output json after filtering, when used by multiplex. (`#2681 `_) +- ``dials.index``: Avoid mm to px conversion when the ``refinement_protocol`` is set to do no refinement, as the required data are not available. (`#2687 `_) +- Avoid deprecated ``matplotlib.cm.get_cmap`` calls (`#2688 `_) +- Change Docker base image to rockylinux:8 as centos:7 is EOL (`#2689 `_) +- ``dials.image_viewer``: Increase the maximum resolution for the ring tool radius. This was too small for new detectors. (`#2697 `_) +- ``dials.index``: Fix potential crash in max_cell estimation when all spots are at the resolution of ice rings. (`#2699 `_) +- ``dials.merge``: Use GEMMI to output merged MTZs, for consistency with ``dials.export`` (`#2700 `_) +- ``dials.reciprocal_lattice_viewer``: Fix middle mouse drag to translate function. (`#2707 `_) +- Performance improvement for selections from large reflection tables. For a table containing 165k experiment identifiers the speedup is 1000x (12 minutes per call). (`#2718 `_) + + +Improved Documentation +---------------------- + +- Docstrings and type hints are added to the ``reindex_experiments`` and ``reindex_reflections`` functions to make it easier to use these outside the ``dials.reindex`` program. (`#2683 `_) +- Improvements to the small molecule tutorial. (`#2692 `_) + + +Deprecations and Removals +------------------------- + +- `dials.import`: remove useless as_grid_scan option as misleading. To import as stills set scan.oscillation=0,0 (`#2454 `_) + + +Misc +---- + +- `#2675 `_, `#2702 `_, `#2714 `_, `#2715 `_, `#2723 `_ + + DIALS 3.20.0 (2024-06-19) ========================= diff --git a/newsfragments/2454.removal b/newsfragments/2454.removal deleted file mode 100644 index 11f42f4b09..0000000000 --- a/newsfragments/2454.removal +++ /dev/null @@ -1,2 +0,0 @@ -`dials.import`: remove useless as_grid_scan option as misleading. To import as stills set scan.oscillation=0,0 - diff --git a/newsfragments/2662.feature b/newsfragments/2662.feature deleted file mode 100644 index 5ae13cb41f..0000000000 --- a/newsfragments/2662.feature +++ /dev/null @@ -1 +0,0 @@ -Add classes to support time-of-flight and Laue indexing and refinement. diff --git a/newsfragments/2675.misc b/newsfragments/2675.misc deleted file mode 100644 index ec988a5462..0000000000 --- a/newsfragments/2675.misc +++ /dev/null @@ -1 +0,0 @@ -Migrate pre-commit tooling to ruff. diff --git a/newsfragments/2680.bugfix b/newsfragments/2680.bugfix deleted file mode 100644 index 2d97359538..0000000000 --- a/newsfragments/2680.bugfix +++ /dev/null @@ -1 +0,0 @@ -``dials.index``: Fix a bug in the ``pink_indexer`` method that caused failures for images with electron diffraction geometry. diff --git a/newsfragments/2681.bugfix b/newsfragments/2681.bugfix deleted file mode 100644 index 4b113a304a..0000000000 --- a/newsfragments/2681.bugfix +++ /dev/null @@ -1 +0,0 @@ -``dials.correlation_matrix``: Correctly select datasets for output json after filtering when used by multiplex. 
diff --git a/newsfragments/2683.bugfix b/newsfragments/2683.bugfix deleted file mode 100644 index c9ffb58523..0000000000 --- a/newsfragments/2683.bugfix +++ /dev/null @@ -1,3 +0,0 @@ -Docstrings and type hints are added to the ``reindex_experiments`` and -``reindex_reflections`` functions to make it easier to use these -outside the ``dials.reindex`` program. diff --git a/newsfragments/2687.bugfix b/newsfragments/2687.bugfix deleted file mode 100644 index bdd4866ab6..0000000000 --- a/newsfragments/2687.bugfix +++ /dev/null @@ -1,2 +0,0 @@ -``dials.index``: Avoid mm to px conversion when the ``refinement_protocol`` -is set to do no refinement, as the required data are not available. diff --git a/newsfragments/2688.bugfix b/newsfragments/2688.bugfix deleted file mode 100644 index d8eaa47e9b..0000000000 --- a/newsfragments/2688.bugfix +++ /dev/null @@ -1 +0,0 @@ -Avoid deprecated ``matplotlib.cm.get_cmap`` calls diff --git a/newsfragments/2689.bugfix b/newsfragments/2689.bugfix deleted file mode 100644 index dc0f17fd05..0000000000 --- a/newsfragments/2689.bugfix +++ /dev/null @@ -1 +0,0 @@ -Change Docker base image to rockylinux:8 as centos:7 is EOL \ No newline at end of file diff --git a/newsfragments/2692.bugfix b/newsfragments/2692.bugfix deleted file mode 100644 index d392a22846..0000000000 --- a/newsfragments/2692.bugfix +++ /dev/null @@ -1 +0,0 @@ -Improvements to the small molecule tutorial. diff --git a/newsfragments/2696.feature b/newsfragments/2696.feature deleted file mode 100644 index 7d85003dd8..0000000000 --- a/newsfragments/2696.feature +++ /dev/null @@ -1 +0,0 @@ -``dials.symmetry``: allow free selection of ``significance_level`` in the range [0,1] diff --git a/newsfragments/2697.bugfix b/newsfragments/2697.bugfix deleted file mode 100644 index 878a91204f..0000000000 --- a/newsfragments/2697.bugfix +++ /dev/null @@ -1 +0,0 @@ -`dials.image_viewer`: Increase the maximum resolution for the ring tool radius. This was too small for new detectors. diff --git a/newsfragments/2699.bugfix b/newsfragments/2699.bugfix deleted file mode 100644 index 0bb10f4497..0000000000 --- a/newsfragments/2699.bugfix +++ /dev/null @@ -1 +0,0 @@ -``dials.index``: Fix potential crash in max_cell estimation when all ice ring spots diff --git a/newsfragments/2700.bugfix b/newsfragments/2700.bugfix deleted file mode 100644 index ca2ba74688..0000000000 --- a/newsfragments/2700.bugfix +++ /dev/null @@ -1,2 +0,0 @@ -``dials.merge``: Use gemmi to output merged MTZs for consistency with ``dials.export`` - diff --git a/newsfragments/2702.misc b/newsfragments/2702.misc deleted file mode 100644 index a415426835..0000000000 --- a/newsfragments/2702.misc +++ /dev/null @@ -1 +0,0 @@ -Add additional reflection properties to save after indexing. diff --git a/newsfragments/2707.bugfix b/newsfragments/2707.bugfix deleted file mode 100644 index 03028e0ab6..0000000000 --- a/newsfragments/2707.bugfix +++ /dev/null @@ -1 +0,0 @@ -``dials.reciprocal_lattice_viewer``: fix middle mouse drag to translate function. 
diff --git a/newsfragments/2709.feature b/newsfragments/2709.feature
deleted file mode 100644
index 6af2c983c7..0000000000
--- a/newsfragments/2709.feature
+++ /dev/null
@@ -1 +0,0 @@
-``dials.export``: Add support for exporting still data in mmcif format, that can be understood with gemmi
diff --git a/newsfragments/2714.misc b/newsfragments/2714.misc
deleted file mode 100644
index eb586aaa39..0000000000
--- a/newsfragments/2714.misc
+++ /dev/null
@@ -1,2 +0,0 @@
-Tidy output of ``dials.index`` around the defaults for ``joint_index``
-
diff --git a/newsfragments/2715.bugfix b/newsfragments/2715.bugfix
deleted file mode 100644
index deda002998..0000000000
--- a/newsfragments/2715.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Modified branching to Laue refinement methods to check for ExperimentType first.
diff --git a/newsfragments/2718.bugfix b/newsfragments/2718.bugfix
deleted file mode 100644
index 06ae9674d6..0000000000
--- a/newsfragments/2718.bugfix
+++ /dev/null
@@ -1,3 +0,0 @@
-Performance improvement for selections from large reflection tables. For a
-table containing 165k experiment identifiers the speedup is 1000x (12 minutes
-per call).
diff --git a/newsfragments/2723.misc b/newsfragments/2723.misc
deleted file mode 100644
index 60775d7dbc..0000000000
--- a/newsfragments/2723.misc
+++ /dev/null
@@ -1 +0,0 @@
-Add explicit tests that executable ``gemmi`` is present, before using it.

From cfa71c3b3bfc2d54816d5f553844df775e9bdfff Mon Sep 17 00:00:00 2001
From: Nicholas Devenish
Date: Tue, 20 Aug 2024 13:57:02 +0100
Subject: [PATCH 32/40] Bump development version

---
 setup.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/setup.py b/setup.py
index db32d461de..02d2558481 100644
--- a/setup.py
+++ b/setup.py
@@ -6,7 +6,7 @@
 
 from build import build
 
-__version_tag__ = "3.20.dev"
+__version_tag__ = "3.22.dev"
 
 setup_kwargs = {
     "name": "dials",

From c09969af0297b620eb412014bc77c4de8d8bd67d Mon Sep 17 00:00:00 2001
From: "Aaron S. Brewster"
Date: Thu, 22 Aug 2024 08:21:18 -0700
Subject: [PATCH 33/40] Allow combining masks using dials.generate_mask (#2711)

e.g. dials.generate_mask imported.expt backstop.mask border=1

or

dials.generate_mask backstop.mask shadow.mask
---
 newsfragments/2711.feature               |  1 +
 src/dials/command_line/generate_mask.py  | 76 ++++++++++++++++++++++--
 tests/command_line/test_generate_mask.py | 20 ++++++-
 3 files changed, 91 insertions(+), 6 deletions(-)
 create mode 100644 newsfragments/2711.feature

diff --git a/newsfragments/2711.feature b/newsfragments/2711.feature
new file mode 100644
index 0000000000..9338821e8e
--- /dev/null
+++ b/newsfragments/2711.feature
@@ -0,0 +1 @@
+Allow combining masks using dials.generate_mask
diff --git a/src/dials/command_line/generate_mask.py b/src/dials/command_line/generate_mask.py
index c90353b350..271bc1b19d 100644
--- a/src/dials/command_line/generate_mask.py
+++ b/src/dials/command_line/generate_mask.py
@@ -6,6 +6,8 @@
 options to create simple masks using the detector trusted range, or from
 simple shapes or by setting different resolution ranges.
 
+Masks can also be combined by including them as arguments.
+ Examples:: dials.generate_mask models.expt border=5 @@ -15,6 +17,10 @@ untrusted.circle=200,200,100 dials.generate_mask models.expt d_max=2.00 + + dials.generate_mask models.expt d_max=2.00 existing.mask + + dials.generate_mask backstop.mask shadow.mask """ from __future__ import annotations @@ -22,7 +28,7 @@ import logging import os.path import pickle -from typing import List, Optional, Tuple +from typing import List, Optional, Tuple, Union import libtbx.phil as phil from dxtbx.format.image import ImageBool @@ -63,6 +69,7 @@ def generate_mask( experiments: ExperimentList, params: phil.scope_extract, + existing_masks: Union[None, Masks] = None, ) -> Tuple[Masks, Optional[ExperimentList]]: """ Generate a pixel mask for each imageset in an experiment list. @@ -82,6 +89,8 @@ def generate_mask( experiments: An experiment list containing only one imageset. params: Masking parameters, having the structure defined in :data:`phil_scope`. + existing_masks: list of masks to combine with the mask being + generated Returns: A list of masks, one for each imageset. @@ -89,6 +98,22 @@ def generate_mask( A copy of :param:`experiments` with the masks applied (optional, only returned if :attr:`params.output.experiments` is set). """ + + if existing_masks: + existing_mask = list(existing_masks[0]) + for mask in existing_masks[1:]: + for panel_idx in range(len(existing_mask)): + existing_mask[panel_idx] &= mask[panel_idx] + existing_mask = tuple(existing_mask) + + # Check if only combining masks + if not experiments and existing_masks: + # Save the mask to file + log.info("Writing mask to %s", params.output.mask) + with open(params.output.mask, "wb") as fh: + pickle.dump(existing_mask, fh) + return + imagesets = experiments.imagesets() masks = [] @@ -107,6 +132,11 @@ def generate_mask( for imageset, filename in zip(imagesets, filenames): mask = dials.util.masking.generate_mask(imageset, params) + if existing_masks: + mask = list(mask) + for panel_idx in range(len(existing_mask)): + mask[panel_idx] &= existing_mask[panel_idx] + mask = tuple(mask) masks.append(mask) # Save the mask to file @@ -142,7 +172,7 @@ def run(args: List[str] = None, phil: phil.scope = phil_scope) -> None: args: Arguments to parse. If None, :data:`sys.argv[1:]` will be used. 
""" # Create the parser - usage = "usage: dials.generate_mask [options] models.expt" + usage = "usage: dials.generate_mask [options] [models.expt] [mask, mask, ...]" parser = ArgumentParser( usage=usage, phil=phil, @@ -152,19 +182,55 @@ def run(args: List[str] = None, phil: phil.scope = phil_scope) -> None: ) # Parse the command line arguments - params, options = parser.parse_args(args=args, show_diff_phil=True) + params, options, unhandled = parser.parse_args( + args=args, show_diff_phil=True, return_unhandled=True + ) experiments = flatten_experiments(params.input.experiments) # Configure logging dials.util.log.config(verbosity=options.verbose, logfile=params.output.log) + # Read in any starting masks + remains_unhandled = [] + existing_masks = [] + for arg in unhandled: + if os.path.isfile(arg): + try: + with open(arg, "rb") as fh: + mask = pickle.load(fh, encoding="bytes") + except Exception: + remains_unhandled.append(arg) + else: + if ( + isinstance(mask, tuple) + and mask + and all(type(m) is flex.bool for m in mask) + ): + existing_masks.append(mask) + else: + print("Invalid mask file:", arg) + remains_unhandled.append(arg) + if remains_unhandled: + print( + "Couldn't recognize the following arguments:", ", ".join(remains_unhandled) + ) + parser.print_help() + return + # Check number of args - if len(experiments) == 0: + if len(experiments) == 0 and len(existing_masks) == 0: + parser.print_help() + return + + if not all( + m[0].focus() == p.focus() for m in zip(*existing_masks) for p in list(m) + ): + print("Not all input masks are of the same shape") parser.print_help() return # Run the script - generate_mask(experiments, params) + generate_mask(experiments, params, existing_masks=existing_masks) if __name__ == "__main__": diff --git a/tests/command_line/test_generate_mask.py b/tests/command_line/test_generate_mask.py index 57c3aa2eec..c536d70e84 100644 --- a/tests/command_line/test_generate_mask.py +++ b/tests/command_line/test_generate_mask.py @@ -9,7 +9,7 @@ from dials.command_line.dials_import import do_import from dials.command_line.dials_import import phil_scope as import_phil_scope -from dials.command_line.generate_mask import generate_mask, phil_scope +from dials.command_line.generate_mask import generate_mask, phil_scope, run @pytest.fixture( @@ -252,3 +252,21 @@ def test_generate_whole_panel_mask(experiments_masks, tmp_path, monkeypatch): with (tmp_path / masks[0]).open("rb") as fh: mask = pickle.load(fh) assert mask[0].count(False) == len(mask[0]) + + +def test_combine_masks(dials_data, run_in_tmp_path): + path = dials_data("centroid_test_data", pathlib=True) + experiments_path = path / "imported_experiments.json" + mask_path = path / "mask.pickle" + experiments = ExperimentList.from_file(experiments_path) + with (mask_path).open("rb") as fh: + masks = [pickle.loads(fh.read(), encoding="bytes")] + params = phil_scope.fetch().extract() + + # Combine with existing mask + generate_mask(experiments, params, existing_masks=masks) + assert run_in_tmp_path.joinpath("pixels.mask").is_file() + + # Combine only existing masks + run(args=[str(mask_path), str(mask_path), "output.mask=pixels2.mask"]) + assert run_in_tmp_path.joinpath("pixels2.mask").is_file() From 6840bf1df37dafae27478a3e195da8b7ca178db0 Mon Sep 17 00:00:00 2001 From: DiamondLightSource-build-server Date: Fri, 23 Aug 2024 07:50:44 +0100 Subject: [PATCH 34/40] DIALS 3.21.1 Changelog --- CHANGELOG.rst | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index a5ec1d6bff..7f1efc2e0a 
100644
--- a/CHANGELOG.rst
+++ b/CHANGELOG.rst
@@ -1,3 +1,12 @@
+DIALS 3.21.1 (2024-08-23)
+=========================
+
+Bugfixes
+--------
+
+- CMake build can now be used for release branches. (`#2727 <https://github.com/dials/dials/issues/2727>`_)
+
+
 DIALS 3.21.0 (2024-08-20)
 =========================
 

From 6dc2173094262add4fd02eaf1da35fbdf61b5bbd Mon Sep 17 00:00:00 2001
From: Nicholas Devenish
Date: Fri, 23 Aug 2024 13:49:05 +0100
Subject: [PATCH 35/40] Allow bootstrap of release branches with CMake (#2727)

Run micromamba twice, if required. In cases where the requirements file
is explicit, AND we want to add extra dependencies, we need to do a
second pass, because conda will silently ignore the extra packages. We
want to add packages if we are running with prebuilt cctbx, and we have
explicit requirements on release branches.

Also, remove option for non-cmake prebuilt CCTBX. This was only ever
supported in CI, and we now use the CMake build for that purpose. To do
this, we have to allow release dependencies to be updated upon install,
for cmake. At the moment this isn't a directly supported release
combination, but we want to allow it for image building, so let's
accept the changes so that we are compatible with prebuilt CCTBX, and
take this into account next time.
---
 .azure-pipelines/unix-build-cmake.yml |   2 +-
 .azure-pipelines/windows-build.yml    |   2 +-
 installer/bootstrap.py                | 388 ++++++--------------------
 newsfragments/2727.misc               |   1 +
 newsfragments/2727.removal            |   1 +
 5 files changed, 94 insertions(+), 300 deletions(-)
 create mode 100644 newsfragments/2727.misc
 create mode 100644 newsfragments/2727.removal

diff --git a/.azure-pipelines/unix-build-cmake.yml b/.azure-pipelines/unix-build-cmake.yml
index 881828987b..c555580b62 100644
--- a/.azure-pipelines/unix-build-cmake.yml
+++ b/.azure-pipelines/unix-build-cmake.yml
@@ -28,7 +28,7 @@ steps:
       dials-data
       pytest-cov
       pytest-timeout" >> modules/dials/${{ parameters.conda_environment }}
-      python modules/dials/installer/bootstrap.py base --clean --python $(PYTHON_VERSION) --prebuilt-cctbx
+      python modules/dials/installer/bootstrap.py base --clean --python $(PYTHON_VERSION) --cmake
     displayName: Create python $(PYTHON_VERSION) environment
     workingDirectory: $(Pipeline.Workspace)

diff --git a/.azure-pipelines/windows-build.yml b/.azure-pipelines/windows-build.yml
index 4c2c752ada..1d5cae2278 100644
--- a/.azure-pipelines/windows-build.yml
+++ b/.azure-pipelines/windows-build.yml
@@ -29,7 +29,7 @@ steps:
       mv ci-conda-env.txt modules/dials/.conda-envs/windows.txt

-      python3 modules/dials/installer/bootstrap.py base --clean --python $(PYTHON_VERSION) --prebuilt-cctbx
+      python3 modules/dials/installer/bootstrap.py base --clean --python $(PYTHON_VERSION) --cmake
     displayName: Create python $(PYTHON_VERSION) environment
     workingDirectory: $(Pipeline.Workspace)

diff --git a/installer/bootstrap.py b/installer/bootstrap.py
index 3b30935dda..fe9c77690b 100755
--- a/installer/bootstrap.py
+++ b/installer/bootstrap.py
@@ -11,7 +11,6 @@
 from __future__ import absolute_import, division, print_function

 import argparse
-import json
 import multiprocessing.pool
 import os
 import platform
@@ -24,7 +23,6 @@
 import tarfile
 import threading
 import time
-import warnings
 import zipfile

 try:  # Python 3
@@ -61,7 +59,36 @@ def make_executable(filepath):
     os.chmod(filepath, mode)


-def install_micromamba(python, include_cctbx, cmake):
+def _run_conda_retry(command_list):
+    for retry in range(5):
+        retry += 1
+        try:
+            run_command(
+                command=command_list,
+                workdir=".",
+            )
+        except Exception:
+            print(
+                """
+******************************************************************************* +There was a failure in constructing the conda environment. +Attempt {retry} of 5 will start {retry} minute(s) from {t}. +******************************************************************************* +""".format(retry=retry, t=time.asctime()) + ) + time.sleep(retry * 60) + else: + break + else: + sys.exit( + """ +The conda environment could not be constructed. Please check that there is a +working network connection for downloading conda packages. +""" + ) + + +def install_micromamba(python, cmake): """Download and install Micromamba""" if sys.platform.startswith("linux"): conda_platform = "linux" @@ -145,248 +172,52 @@ def install_micromamba(python, include_cctbx, cmake): "mamba", python_requirement, ] - if include_cctbx or cmake: - command_list.append("cctbx-base=" + _prebuilt_cctbx_base) + extra_deps = [] if cmake: - command_list.extend(["pycbf", "cmake"]) - if os.name == "nt": - # Installing pre-commit via precommittbx does not work on windows - command_list.append("pre-commit") + extra_deps = [ + "cctbx-base=" + _prebuilt_cctbx_base, + "pycbf", + "cmake", + "pre-commit", + ] + if _prebuilt_cctbx_base == "2024.7": + # Known minor incompatibility causes this to otherwise be removed + extra_deps.append("libboost-python-devel") + # If we're running from an explicit requirements file, then we + # need to install extra dependencies in a separate stage + with open(filename) as dep_file: + is_explicit_reqs = "@EXPLICIT" in dep_file.read() + + if not is_explicit_reqs: + command_list.extend(extra_deps) + extra_deps = [] print( "{text} dials environment from {filename} with Python {python}".format( text=text_messages[0], filename=filename, python=python ) ) - for retry in range(5): - retry += 1 - try: - run_command( - command=command_list, - workdir=".", - ) - except Exception: - print( - """ -******************************************************************************* -There was a failure in constructing the conda environment. -Attempt {retry} of 5 will start {retry} minute(s) from {t}. -******************************************************************************* -""".format(retry=retry, t=time.asctime()) - ) - time.sleep(retry * 60) - else: - break - else: - sys.exit( - """ -The conda environment could not be constructed. Please check that there is a -working network connection for downloading conda packages. 
-""" - ) - print("Completed {text}:\n {prefix}".format(text=text_messages[1], prefix=prefix)) - with open(os.path.join(prefix, ".condarc"), "w") as fh: - fh.write( - """ -changeps1: False -channels: - - conda-forge -""".lstrip() - ) - - -def install_miniconda(location): - """Download and install Miniconda3""" - if sys.platform.startswith("linux"): - filename = "Miniconda3-latest-Linux-x86_64.sh" - elif sys.platform == "darwin": - filename = "Miniconda3-latest-MacOSX-x86_64.sh" - elif os.name == "nt": - filename = "Miniconda3-latest-Windows-x86_64.exe" - else: - raise NotImplementedError( - "Unsupported platform %s / %s" % (os.name, sys.platform) - ) - url = "https://repo.anaconda.com/miniconda/" + filename - filename = os.path.join(location, filename) - - print("Downloading {url}:".format(url=url), end=" ") - result = download_to_file(url, filename) - if result in (0, -1): - sys.exit("Miniconda download failed") - - # run the installer - if os.name == "nt": - command = [ - filename, - "/InstallationType=JustMe", - "/RegisterPython=0", - "/AddToPath=0", - "/S", - "/D=" + location, - ] - else: - command = ["/bin/sh", filename, "-b", "-u", "-p", location] - - print("Installing Miniconda") - run_command(command=command, workdir=".") - - -def install_conda(python, include_cctbx, cmake): - # Find relevant conda base installation - conda_base = os.path.realpath("miniconda") - if os.name == "nt": - conda_exe = os.path.join(conda_base, "Scripts", "conda.exe") - else: - conda_exe = os.path.join(conda_base, "bin", "conda") - - # default environment file for users - environment_file = os.path.join( - os.path.expanduser("~"), ".conda", "environments.txt" - ) - - def get_environments(): - """Return a set of existing conda environment paths""" - try: - with open(environment_file) as f: - paths = f.readlines() - except IOError: - paths = [] - environments = set( # noqa: C401 # Python 2.7 compatibility - os.path.normpath(env.strip()) for env in paths if os.path.isdir(env.strip()) - ) - env_dirs = ( - os.path.join(conda_base, "envs"), - os.path.join(os.path.expanduser("~"), ".conda", "envs"), - ) - for env_dir in env_dirs: - if os.path.isdir(env_dir): - for d in os.listdir(env_dir): - d = os.path.join(env_dir, d) - if os.path.isdir(d): - environments.add(d) - - return environments - - if os.path.isdir(conda_base) and os.path.isfile(conda_exe): - print("Using miniconda installation from", conda_base) - else: - print("Installing miniconda into", conda_base) - install_miniconda(conda_base) - - # verify consistency and check conda version - if not os.path.isfile(conda_exe): - sys.exit("Conda executable not found at " + conda_exe) - environments = get_environments() - - conda_info = subprocess.check_output([conda_exe, "info", "--json"], env=clean_env) - if sys.version_info.major > 2: - conda_info = conda_info.decode("latin-1") - conda_info = json.loads(conda_info) - for env in environments: - if env not in conda_info["envs"]: - print("Consistency check:", env, "not in environments:") - print(conda_info["envs"]) - warnings.warn( - """ -There is a mismatch between the conda settings in your home directory -and what "conda info" is reporting. This is not a fatal error, but if -an error is encountered, please check that your conda installation and -environments exist and are working. 
-""", - RuntimeWarning, - ) - - # identify packages required for environment - if os.name == "nt": - conda_platform = "windows" - elif sys.platform == "darwin": - conda_platform = "macos" - else: - conda_platform = "linux" - filename = os.path.join( - "modules", - "dials", - ".conda-envs", - "{platform}.txt".format(platform=conda_platform), - ) - if not os.path.isfile(filename): - raise RuntimeError( - "The file {filename} is not available".format(filename=filename) - ) - - python_requirement = "conda-forge::python=%s.*" % python - - # make a new environment directory - prefix = os.path.realpath("conda_base") - - # install a new environment or update and existing one - if prefix in environments: - command = "install" - text_messages = ["Updating", "update of"] - else: - command = "create" - text_messages = ["Installing", "installation into"] - command_list = [ - conda_exe, - command, - "--prefix", - prefix, - "--file", - filename, - "--yes", - "--channel", - "conda-forge", - "--override-channels", - python_requirement, - ] - if include_cctbx or cmake: - command_list.append("cctbx-nightly::cctbx-base=" + _prebuilt_cctbx_base) - if cmake: - command_list.extend(["pycbf", "cmake", "pre-commit"]) - if os.name == "nt": + _run_conda_retry(command_list) + # If we wanted extra dependencies, and couldn't install them directly, run again + if extra_deps: command_list = [ - "cmd.exe", - "/C", - " ".join( - [os.path.join(conda_base, "Scripts", "activate"), "base", "&&"] - + command_list - ) - .replace("<", "^<") - .replace(">", "^>"), - ] - print( - "{text} dials environment from {filename} with Python {python}".format( - text=text_messages[0], filename=filename, python=python - ) - ) - for retry in range(5): - retry += 1 - try: - run_command( - command=command_list, - workdir=".", - ) - except Exception: - print( - """ -******************************************************************************* -There was a failure in constructing the conda environment. -Attempt {retry} of 5 will start {retry} minute(s) from {t}. -******************************************************************************* -""".format(retry=retry, t=time.asctime()) - ) - time.sleep(retry * 60) - else: - break - else: - sys.exit( - """ -The conda environment could not be constructed. Please check that there is a -working network connection for downloading conda packages. 
-""" - ) + mamba, + "--no-env", + "--no-rc", + "--prefix", + prefix, + "--root-prefix", + mamba_prefix, + "install", + "--yes", + "--channel", + "conda-forge", + "--override-channels", + "mamba", + ] + extra_deps + _run_conda_retry(command_list) + print("Completed {text}:\n {prefix}".format(text=text_messages[1], prefix=prefix)) with open(os.path.join(prefix, ".condarc"), "w") as fh: fh.write( @@ -1021,17 +852,12 @@ def update_sources(options): ("xia2/xia2", "main"), ) } - if options.prebuilt_cctbx: - repositories["cctbx_project"]["branch-local"] = ( - "releases/" + _prebuilt_cctbx_base - ) - else: - repositories["cctbx_project"] = { - "base-repository": "cctbx/cctbx_project", - "effective-repository": "dials/cctbx", - "branch-remote": "master", - "branch-local": "stable", - } + repositories["cctbx_project"] = { + "base-repository": "cctbx/cctbx_project", + "effective-repository": "dials/cctbx", + "branch-remote": "master", + "branch-local": "stable", + } else: # Only what we need for CMake repositories = { @@ -1131,10 +957,10 @@ def run_tests(): ) -def refresh_build(prebuilt_cctbx): +def refresh_build(): print("Running libtbx.refresh") run_indirect_command( - "libtbx.refresh" if prebuilt_cctbx else os.path.join("bin", "libtbx.refresh"), + os.path.join("bin", "libtbx.refresh"), args=[], ) @@ -1280,7 +1106,7 @@ def configure_build_cmake(): ) -def configure_build(config_flags, prebuilt_cctbx): +def configure_build(config_flags): conda_python = _get_base_python() # write a new-style environment setup script @@ -1298,12 +1124,6 @@ def configure_build(config_flags, prebuilt_cctbx): config_flags.append("--cxxstd=c++14") print("Setting up build directory") - if prebuilt_cctbx: - run_indirect_command( - command="libtbx.configure", - args=["cbflib", "dials", "dxtbx", "prime", "xia2"], - ) - return configcmd = [ os.path.join("..", "modules", "cctbx_project", "libtbx", "configure.py"), @@ -1331,20 +1151,16 @@ def configure_build(config_flags, prebuilt_cctbx): ) -def make_build(prebuilt_cctbx): +def make_build(): try: nproc = len(os.sched_getaffinity(0)) except AttributeError: nproc = multiprocessing.cpu_count() - if prebuilt_cctbx: - command = "libtbx.scons" - else: - command = os.path.join("bin", "libtbx.scons") + command = os.path.join("bin", "libtbx.scons") run_indirect_command(command, args=["-j", str(nproc)]) - if not prebuilt_cctbx: - # run build again to make sure everything is built - run_indirect_command(command, args=["-j", str(nproc)]) + # run build again to make sure everything is built + run_indirect_command(command, args=["-j", str(nproc)]) def make_build_cmake(): @@ -1440,29 +1256,15 @@ def run(): "Specify as repository@branch, eg. 'dials@dials-next'" ), ) - parser.add_argument( - "--conda", - help="Use miniconda instead of micromamba for the base installation step", - default=False, - action="store_true", - ) parser.add_argument( "--clean", help="Remove temporary conda environments and package caches after installation", default=False, action="store_true", ) - parser.add_argument( - # Use the conda-forge cctbx package instead of compiling cctbx from scratch - # This is not currently supported outside of CI builds - "--prebuilt-cctbx", - help=argparse.SUPPRESS, - default=False, - action="store_true", - ) parser.add_argument( "--cmake", - help="Use the CMake build system. Implies --prebuilt-cctbx.", + help="Use the CMake build system. 
Implies use of a prebuilt cctbx.", action="store_true", ) @@ -1476,22 +1278,12 @@ def run(): # Build base packages if "base" in options.actions: - if options.conda: - install_conda( - options.python, - include_cctbx=options.prebuilt_cctbx, - cmake=options.cmake, - ) - if options.clean: - shutil.rmtree(os.path.realpath("miniconda")) - else: - install_micromamba( - options.python, - include_cctbx=options.prebuilt_cctbx, - cmake=options.cmake, - ) - if options.clean: - shutil.rmtree(os.path.realpath("micromamba")) + install_micromamba( + options.python, + cmake=options.cmake, + ) + if options.clean: + shutil.rmtree(os.path.realpath("micromamba")) # Configure, make, get revision numbers if "build" in options.actions: @@ -1500,9 +1292,9 @@ def run(): configure_build_cmake() make_build_cmake() else: - configure_build(options.config_flags, prebuilt_cctbx=options.prebuilt_cctbx) - make_build(prebuilt_cctbx=options.prebuilt_cctbx) - refresh_build(prebuilt_cctbx=options.prebuilt_cctbx) + configure_build(options.config_flags) + make_build() + refresh_build() install_precommit(options.cmake) # Tests, tests diff --git a/newsfragments/2727.misc b/newsfragments/2727.misc new file mode 100644 index 0000000000..649aab571b --- /dev/null +++ b/newsfragments/2727.misc @@ -0,0 +1 @@ +Release builds can now be built with CMake. diff --git a/newsfragments/2727.removal b/newsfragments/2727.removal new file mode 100644 index 0000000000..674e72a544 --- /dev/null +++ b/newsfragments/2727.removal @@ -0,0 +1 @@ +``bootstrap.py`` no longer accepts ``--prebuilt-cctbx``. This was only ever supported in CI. If you want to build a distribution with prebuilt cctbx, please use the ``--cmake`` flag and build instead. From 0fecd20dae982d8f290320a080a9b19c2b0287e1 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 27 Aug 2024 09:48:56 +0100 Subject: [PATCH 36/40] [pre-commit.ci] pre-commit autoupdate (#2728) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/astral-sh/ruff-pre-commit: v0.6.1 → v0.6.2](https://github.com/astral-sh/ruff-pre-commit/compare/v0.6.1...v0.6.2) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index d2a0cd2e46..6f8a29bd9b 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -17,7 +17,7 @@ repos: name: "Don't commit to 'main' directly" - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.6.1 + rev: v0.6.2 hooks: - id: ruff args: [--fix, --exit-non-zero-on-fix, --show-fixes] From ee1b758dc044e74a57a980d43fe83d6ef93fcd61 Mon Sep 17 00:00:00 2001 From: David McDonagh <60879630+toastisme@users.noreply.github.com> Date: Tue, 27 Aug 2024 12:57:58 +0100 Subject: [PATCH 37/40] Add scaling corrections for time-of-flight data (#2704) * Added time-of-flight Lorentz correction. Added time-of-flight spherical absorption correction. Added time-of-flight normalisation w.r.t incident and empty runs. 
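
For reference, the corrections below all act per pixel on shoebox data, and
everything builds on two quantities: the de Broglie wavelength recovered from
the pixel's time of flight and total flight path, and the time-of-flight
Lorentz factor. A minimal Python sketch of just those two formulas
(illustrative only, not part of the patch; CODATA values stand in for
scitbx::constants):

    import math

    PLANCK = 6.62607015e-34  # Planck constant (J s)
    NEUTRON_MASS = 1.67492749804e-27  # neutron rest mass (kg)

    def tof_wavelength(tof_s, total_path_m):
        # lambda = h * t / (m_n * (L1 + L2)), converted from metres to
        # Angstrom, mirroring the wl calculation in tof_scaling.h below
        return (PLANCK * tof_s) / (NEUTRON_MASS * total_path_m) * 1e10

    def tof_lorentz_factor(two_theta_rad, wavelength_angstrom):
        # sin^2(theta) / lambda^4: the factor each shoebox pixel is
        # multiplied by when apply_lorentz_correction is set
        return math.sin(two_theta_rad * 0.5) ** 2 / wavelength_angstrom**4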
---
 CMakeLists.txt                                |   1 +
 newsfragments/2704.feature                    |   1 +
 src/dials/algorithms/scaling/CMakeLists.txt   |   8 +-
 src/dials/algorithms/scaling/SConscript       |   6 +
 .../scaling/tof/boost_python/tof_scaling.cc   |  48 ++
 .../algorithms/scaling/tof/tof_scaling.h      | 763 ++++++++++++++++++
 tests/algorithms/scaling/test_tof_scaling.py  | 166 ++++
 7 files changed, 992 insertions(+), 1 deletion(-)
 create mode 100644 newsfragments/2704.feature
 create mode 100644 src/dials/algorithms/scaling/tof/boost_python/tof_scaling.cc
 create mode 100644 src/dials/algorithms/scaling/tof/tof_scaling.h
 create mode 100644 tests/algorithms/scaling/test_tof_scaling.py

diff --git a/CMakeLists.txt b/CMakeLists.txt
index 4915c29314..20f5386d5e 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -91,6 +91,7 @@ install(
         dials_pychef_ext
         dials_refinement_helpers_ext
         dials_scaling_ext
+        dials_tof_scaling_ext
         dials_util_ext
         dials_util_streambuf_test_ext
         dials_viewer_ext

diff --git a/newsfragments/2704.feature b/newsfragments/2704.feature
new file mode 100644
index 0000000000..b522583803
--- /dev/null
+++ b/newsfragments/2704.feature
@@ -0,0 +1 @@
+Added time-of-flight Lorentz, spherical absorption, and normalisation w.r.t empty and incident run scaling corrections.

diff --git a/src/dials/algorithms/scaling/CMakeLists.txt b/src/dials/algorithms/scaling/CMakeLists.txt
index 943c755f11..834a1c6aaa 100644
--- a/src/dials/algorithms/scaling/CMakeLists.txt
+++ b/src/dials/algorithms/scaling/CMakeLists.txt
@@ -4,4 +4,10 @@ Python_add_library(
     boost_python/scaling_helper.cc
     boost_python/scaling_ext.cc
 )
-target_link_libraries( dials_scaling_ext PUBLIC CCTBX::cctbx Boost::python )
\ No newline at end of file
+Python_add_library(
+    dials_tof_scaling_ext
+    MODULE
+    tof/boost_python/tof_scaling.cc
+)
+target_link_libraries( dials_scaling_ext PUBLIC CCTBX::cctbx Boost::python )
+target_link_libraries( dials_tof_scaling_ext PUBLIC CCTBX::cctbx Boost::python )

diff --git a/src/dials/algorithms/scaling/SConscript b/src/dials/algorithms/scaling/SConscript
index 4d82cfa71a..da2769b009 100644
--- a/src/dials/algorithms/scaling/SConscript
+++ b/src/dials/algorithms/scaling/SConscript
@@ -5,3 +5,9 @@ env.SharedLibrary(
     source=["boost_python/scaling_helper.cc", "boost_python/scaling_ext.cc"],
     LIBS=env["LIBS"],
 )
+
+env.SharedLibrary(
+    target="#/lib/dials_tof_scaling_ext",
+    source=["tof/boost_python/tof_scaling.cc"],
+    LIBS=env["LIBS"],
+)

diff --git a/src/dials/algorithms/scaling/tof/boost_python/tof_scaling.cc b/src/dials/algorithms/scaling/tof/boost_python/tof_scaling.cc
new file mode 100644
index 0000000000..c192ccbd91
--- /dev/null
+++ b/src/dials/algorithms/scaling/tof/boost_python/tof_scaling.cc
@@ -0,0 +1,48 @@
+#include <boost/python.hpp>
+#include <boost/python/def.hpp>
+#include <dials/algorithms/scaling/tof/tof_scaling.h>
+
+namespace dials_scaling { namespace boost_python {
+
+  using namespace boost::python;
+  BOOST_PYTHON_MODULE(dials_tof_scaling_ext) {
+    class_<TOFCorrectionsData>("TOFCorrectionsData", no_init)
+      .def(init<double,
+                double,
+                double,
+                double,
+                double,
+                double,
+                double,
+                double,
+                double,
+                double,
+                double>());
+
+    void (*extract_shoeboxes1)(dials::af::reflection_table &,
+                               dxtbx::model::Experiment &,
+                               dxtbx::ImageSequence &,
+                               bool) = &tof_extract_shoeboxes_to_reflection_table;
+    void (*extract_shoeboxes2)(dials::af::reflection_table &,
+                               dxtbx::model::Experiment &,
+                               dxtbx::ImageSequence &,
+                               dxtbx::ImageSequence &,
+                               dxtbx::ImageSequence &,
+                               TOFCorrectionsData &,
+                               bool) = &tof_extract_shoeboxes_to_reflection_table;
+    void (*extract_shoeboxes3)(dials::af::reflection_table &,
+                               dxtbx::model::Experiment &,
+                               dxtbx::ImageSequence &,
+                               dxtbx::ImageSequence &,
+                               dxtbx::ImageSequence &,
+                               double,
+                               double,
+                               double,
+                               bool) = &tof_extract_shoeboxes_to_reflection_table;
+
+    def("tof_extract_shoeboxes_to_reflection_table", extract_shoeboxes1);
+    def("tof_extract_shoeboxes_to_reflection_table", extract_shoeboxes2);
+    def("tof_extract_shoeboxes_to_reflection_table", extract_shoeboxes3);
+  }
+
+}}  // namespace dials_scaling::boost_python

diff --git a/src/dials/algorithms/scaling/tof/tof_scaling.h b/src/dials/algorithms/scaling/tof/tof_scaling.h
new file mode 100644
index 0000000000..4ad0399065
--- /dev/null
+++ b/src/dials/algorithms/scaling/tof/tof_scaling.h
@@ -0,0 +1,763 @@
+
+#ifndef DIALS_ALGORITHMS_SCALING_TOF_SCALING_H
+#define DIALS_ALGORITHMS_SCALING_TOF_SCALING_H
+
+#include <dials/algorithms/integration/processor.h>
+#include <dials/array_family/reflection_table.h>
+#include <dials/model/data/image.h>
+#include <dials/model/data/shoebox.h>
+#include <dxtbx/array_family/flex_table.h>
+#include <dxtbx/array_family/flex_table_suite.h>
+#include <dxtbx/format/image.h>
+#include <dxtbx/imageset.h>
+#include <dxtbx/model/beam.h>
+#include <dxtbx/model/detector.h>
+#include <dxtbx/model/experiment.h>
+#include <dxtbx/model/scan.h>
+#include <scitbx/constants.h>
+
+namespace dials_scaling {
+
+using dials::algorithms::Shoebox;
+using dials::algorithms::ShoeboxProcessor;
+using dxtbx::ImageSequence;
+using dxtbx::af::flex_table;
+using dxtbx::model::Detector;
+using dxtbx::model::Experiment;
+using dxtbx::model::Goniometer;
+using dxtbx::model::PolychromaticBeam;
+using dxtbx::model::Scan;
+using dxtbx::model::scan_property_types;
+using scitbx::deg_as_rad;
+using scitbx::mat3;
+using scitbx::vec2;
+using scitbx::vec3;
+using scitbx::af::int6;
+using scitbx::constants::m_n;
+using scitbx::constants::pi;
+using scitbx::constants::Planck;
+
+// Taken from
+// https://github.com/mantidproject/mantid/blob/30e97650e69dec5f6edbc456aa81f2d8f1715fa3/Framework/Crystal/inc/MantidCrystal/AnvredCorrection.h
+// Based on the method outlined in
+// C.W Dwiggins Jnr, Rapid calculation of X-ray absorption correction factors for
+// spheres to an accuracy of 0.05%, Acta Cryst. 1975, A31, 395-396
+// https://doi.org/10.1107/S0567739475000873
+const double pc[8][19] = {{-6.4910e-07,
+                           -6.8938e-07,
+                           -7.8149e-07,
+                           8.1682e-08,
+                           1.8008e-06,
+                           3.3916e-06,
+                           4.5095e-06,
+                           4.7970e-06,
+                           4.4934e-06,
+                           3.6700e-06,
+                           2.5881e-06,
+                           1.5007e-06,
+                           3.7669e-07,
+                           -7.9487e-07,
+                           -1.7935e-06,
+                           -2.5563e-06,
+                           -3.1113e-06,
+                           -3.3993e-06,
+                           -3.5091e-06},
+                          {1.0839e-05,
+                           1.1582e-05,
+                           1.1004e-05,
+                           -2.2848e-05,
+                           -8.1974e-05,
+                           -1.3268e-04,
+                           -1.6486e-04,
+                           -1.6839e-04,
+                           -1.5242e-04,
+                           -1.1949e-04,
+                           -7.8682e-05,
+                           -3.7973e-05,
+                           2.9117e-06,
+                           4.4823e-05,
+                           8.0464e-05,
+                           1.0769e-04,
+                           1.2753e-04,
+                           1.3800e-04,
+                           1.4190e-04},
+                          {8.7140e-05,
+                           9.0870e-05,
+                           1.6706e-04,
+                           6.9008e-04,
+                           1.4781e-03,
+                           2.0818e-03,
+                           2.3973e-03,
+                           2.3209e-03,
+                           1.9935e-03,
+                           1.4508e-03,
+                           8.1903e-04,
+                           1.9608e-04,
+                           -4.1128e-04,
+                           -1.0205e-03,
+                           -1.5374e-03,
+                           -1.9329e-03,
+                           -2.2212e-03,
+                           -2.3760e-03,
+                           -2.4324e-03},
+                          {-2.9549e-03,
+                           -3.1360e-03,
+                           -4.2431e-03,
+                           -8.1103e-03,
+                           -1.2989e-02,
+                           -1.6012e-02,
+                           -1.6815e-02,
+                           -1.4962e-02,
+                           -1.1563e-02,
+                           -6.8581e-03,
+                           -1.7302e-03,
+                           3.2400e-03,
+                           7.9409e-03,
+                           1.2528e-02,
+                           1.6414e-02,
+                           1.9394e-02,
+                           2.1568e-02,
+                           2.2758e-02,
+                           2.3182e-02},
+                          {1.7934e-02,
+                           1.9304e-02,
+                           2.4706e-02,
+                           3.6759e-02,
+                           4.8351e-02,
+                           5.1049e-02,
+                           4.5368e-02,
+                           3.0864e-02,
+                           1.2086e-02,
+                           -1.0254e-02,
+                           -3.2992e-02,
+                           -5.4495e-02,
+                           -7.4205e-02,
+                           -9.2818e-02,
+                           -1.0855e-01,
+                           -1.2068e-01,
+                           -1.2954e-01,
+                           -1.3451e-01,
+                           -1.3623e-01},
+                          {6.2799e-02,
+                           6.3892e-02,
+                           6.4943e-02,
+                           6.4881e-02,
+                           7.2169e-02,
+                           9.5669e-02,
+                           1.3082e-01,
+                           1.7694e-01,
+                           2.2559e-01,
+                           2.7655e-01,
+                           3.2483e-01,
+                           3.6888e-01,
+                           4.0783e-01,
+                           4.4330e-01,
+                           4.7317e-01,
+                           4.9631e-01,
+                           5.1334e-01,
+                           5.2318e-01,
+                           5.2651e-01},
+                          {-1.4949e+00,
+                           -1.4952e+00,
+                           -1.4925e+00,
+                           -1.4889e+00,
+                           -1.4867e+00,
+                           -1.4897e+00,
+                           -1.4948e+00,
+                           -1.5025e+00,
+                           -1.5084e+00,
+                           -1.5142e+00,
+                           -1.5176e+00,
+                           -1.5191e+00,
+                           -1.5187e+00,
+                           -1.5180e+00,
+                           -1.5169e+00,
+                           -1.5153e+00,
+                           -1.5138e+00,
+                           -1.5125e+00,
+                           -1.5120e+00},
+                          {0.0000e+00,
+                           0.0000e+00,
+                           0.0000e+00,
+                           0.0000e+00,
+                           0.0000e+00,
+                           0.0000e+00,
+                           0.0000e+00,
+                           0.0000e+00,
+                           0.0000e+00,
+                           0.0000e+00,
+                           0.0000e+00,
+                           0.0000e+00,
+                           0.0000e+00,
+                           0.0000e+00,
+                           0.0000e+00,
+                           0.0000e+00,
+                           0.0000e+00,
+                           0.0000e+00,
+                           0.0000e+00}};
+
+double tof_pixel_spherical_absorption_correction(double pixel_data,
+                                                 double muR,
+                                                 double two_theta,
+                                                 int two_theta_idx) {
+  double ln_t1 = 0;
+  double ln_t2 = 0;
+  for (std::size_t k = 0; k < 8; ++k) {
+    ln_t1 = ln_t1 * muR + pc[k][two_theta_idx];
+    ln_t2 = ln_t2 * muR + pc[k][two_theta_idx + 1];
+  }
+  const double t1 = exp(ln_t1);
+  const double t2 = exp(ln_t2);
+  const double sin_theta_1 = pow(sin(deg_as_rad(two_theta_idx * 5.0)), 2);
+  const double sin_theta_2 = pow(sin(deg_as_rad((two_theta_idx + 1) * 5.0)), 2);
+  const double l1 = (t1 - t2) / (sin_theta_1 - sin_theta_2);
+  const double l0 = t1 - l1 * sin_theta_1;
+  const double correction = 1 / (l0 + l1 * pow(sin(two_theta * .5), 2));
+  return correction;
+}
+
+/*
+ * Holds constants required for correcting time-of-flight data
+ */
+struct TOFCorrectionsData {
+  double sample_proton_charge;
+  double incident_proton_charge;
+  double empty_proton_charge;
+  double sample_radius;
+  double sample_scattering_x_section;
+  double sample_absorption_x_section;
+  double sample_number_density;
+  double incident_radius;
+  double incident_scattering_x_section;
+  double incident_absorption_x_section;
+  double incident_number_density;
+  double sample_linear_scattering_c;
+  double incident_linear_scattering_c;
+  double sample_linear_absorption_c;
+  double incident_linear_absorption_c;
+
+  TOFCorrectionsData(double sample_proton_charge,
+                     double incident_proton_charge,
+                     double empty_proton_charge,
+                     double sample_radius,
+                     double sample_scattering_x_section,
+                     double sample_absorption_x_section,
+                     double sample_number_density,
+                     double incident_radius,
+                     double incident_scattering_x_section,
+                     double incident_absorption_x_section,
+                     double incident_number_density)
+      : sample_proton_charge(sample_proton_charge),
+        incident_proton_charge(incident_proton_charge),
+        empty_proton_charge(empty_proton_charge),
+        sample_radius(sample_radius * .1),  // Given in mm but calculated in cm
+        sample_scattering_x_section(sample_scattering_x_section),
+        sample_absorption_x_section(sample_absorption_x_section),
+        sample_number_density(sample_number_density),
+        incident_radius(incident_radius * .1),  // Given in mm but calculated in cm
+        incident_scattering_x_section(incident_scattering_x_section),
+        incident_absorption_x_section(incident_absorption_x_section),
+        incident_number_density(incident_number_density) {
+    sample_linear_scattering_c = sample_number_density * sample_scattering_x_section;
+    incident_linear_scattering_c =
+      incident_number_density * incident_scattering_x_section;
+    sample_linear_absorption_c = sample_number_density * sample_absorption_x_section;
+    incident_linear_absorption_c =
+      incident_number_density * incident_absorption_x_section;
+  }
+};
+
+/*
+ * Extracts shoeboxes to reflection_table with each pixel corrected
+ * optionally for the Lorentz correction
+ */
+void tof_extract_shoeboxes_to_reflection_table(
+  dials::af::reflection_table &reflection_table,
+  Experiment &experiment,
+  ImageSequence &data,
+  bool apply_lorentz_correction) {
+  Detector detector = *experiment.get_detector();
+  Scan scan = *experiment.get_scan();
+
+  // Required beam params
+  std::shared_ptr<dxtbx::model::BeamBase> beam_ptr = experiment.get_beam();
+  std::shared_ptr<PolychromaticBeam> beam =
+    std::dynamic_pointer_cast<PolychromaticBeam>(beam_ptr);
+  DIALS_ASSERT(beam != nullptr);
+  vec3<double> unit_s0 = beam->get_unit_s0();
+  double sample_to_source_distance = beam->get_sample_to_source_distance();
+
+  // Required scan params
+  scitbx::af::shared<double> img_tof = scan.get_property<double>("time_of_flight");
+
+  // Required detector params
+  int n_panels = detector.size();
+  int num_images = data.size();
+  vec2<std::size_t> image_size = detector[0].get_image_size();
+  DIALS_ASSERT(num_images == img_tof.size());
+
+  // Processor to get the image data into shoeboxes
+  ShoeboxProcessor shoebox_processor(reflection_table, n_panels, 0, num_images, false);
+
+  // Get shoeboxes for image data
+  for (std::size_t img_num = 0; img_num < num_images; ++img_num) {
+    dxtbx::format::Image<double> img = data.get_corrected_data(img_num);
+    dxtbx::format::Image<bool> mask = data.get_mask(img_num);
+
+    dials::af::shared<scitbx::af::versa<double, scitbx::af::c_grid<2> > > output_data(
+      n_panels);
+    dials::af::shared<scitbx::af::versa<bool, scitbx::af::c_grid<2> > > output_mask(
+      n_panels);
+
+    for (std::size_t i = 0; i < output_data.size(); ++i) {
+      output_data[i] = img.tile(i).data();
+      output_mask[i] = mask.tile(i).data();
+    }
+    shoebox_processor.next_data_only(
+      dials::model::Image<double>(output_data.const_ref(), output_mask.const_ref()));
+  }
+
+  // Now correct each pixel for each shoebox
+  dials::af::shared<Shoebox<> > shoeboxes = reflection_table["shoebox"];
+  dials::af::const_ref<int6> bboxes = reflection_table["bbox"];
+
+  for (std::size_t i = 0; i < reflection_table.size(); ++i) {
+    Shoebox<> shoebox = shoeboxes[i];
+    int6 bbox = bboxes[i];
+    int panel = shoebox.panel;
+
+    // Shoebox data are ordered (z, y, x)
+    for (std::size_t z = 0; z < shoebox.zsize(); ++z) {
+      int frame_z = bbox[4] + z;
+      double tof = img_tof[frame_z] * std::pow(10, -6);  // (s)
+
+      for (std::size_t y = 0; y < shoebox.ysize(); ++y) {
+        int panel_y = bbox[2] + y;
+        if (panel_y > image_size[1] || panel_y < 0) {
+          continue;
+        }
+        for (std::size_t x = 0; x < shoebox.xsize(); ++x) {
+          int panel_x = bbox[0] + x;
+          if (panel_x > image_size[0] || panel_x < 0) {
+            continue;
+          }
+
+          double pixel_data = shoebox.data(z, y, x);
+
+          scitbx::vec3<double> s1 = detector[panel].get_pixel_lab_coord(
+            scitbx::vec2<double>(panel_x, panel_y));
+          double distance = s1.length() + sample_to_source_distance;
+          distance *= std::pow(10, -3);  // (m)
+          double wl = ((Planck * tof) / (m_n * (distance))) * std::pow(10, 10);
+
+          // Lorentz correction
+          if (apply_lorentz_correction) {
+            double two_theta = detector[panel].get_two_theta_at_pixel(
+              unit_s0, scitbx::vec2<double>(panel_x, panel_y));
+            double two_theta_deg = two_theta * (180 / pi);
+            double sin_two_theta_sq = std::pow(sin(two_theta * .5), 2);
+            double lorentz_correction = sin_two_theta_sq / std::pow(wl, 4);
+            pixel_data *= lorentz_correction;
+          }
+
+          shoebox.data(z, y, x) = double(pixel_data);
+        }
+      }
+    }
+  }
+}
+
+/*
+ * Extracts shoeboxes to reflection_table with each pixel corrected w.r.t
+ * an incident run, an empty run, and optionally the Lorentz correction
+ */
+void tof_extract_shoeboxes_to_reflection_table(
+  dials::af::reflection_table &reflection_table,
+  Experiment &experiment,
+  ImageSequence &data,
+  ImageSequence &incident_data,
+  ImageSequence &empty_data,
+  double sample_proton_charge,
+  double incident_proton_charge,
+  double empty_proton_charge,
+  bool apply_lorentz_correction) {
+  Detector detector = *experiment.get_detector();
+  Scan scan = *experiment.get_scan();
+
+  // Required beam params
+  std::shared_ptr<dxtbx::model::BeamBase> beam_ptr = experiment.get_beam();
+  std::shared_ptr<PolychromaticBeam> beam =
+    std::dynamic_pointer_cast<PolychromaticBeam>(beam_ptr);
+  DIALS_ASSERT(beam != nullptr);
+  vec3<double> unit_s0 = beam->get_unit_s0();
+  double sample_to_source_distance = beam->get_sample_to_source_distance();
+
+  // Required scan params
+  scitbx::af::shared<double> img_tof = scan.get_property<double>("time_of_flight");
+
+  // Required detector params
+  int n_panels = detector.size();
+  int num_images = data.size();
+  vec2<std::size_t> image_size = detector[0].get_image_size();
+  DIALS_ASSERT(num_images == img_tof.size());
+
+  // Copy reflections for incident and empty runs
+  boost::python::dict d;
+  dials::af::reflection_table i_reflection_table =
+    dxtbx::af::flex_table_suite::deepcopy(reflection_table, d);
+  dials::af::reflection_table e_reflection_table =
+    dxtbx::af::flex_table_suite::deepcopy(reflection_table, d);
+
+  // Processors to get the image data into shoeboxes
+  ShoeboxProcessor shoebox_processor(reflection_table, n_panels, 0, num_images, false);
+
+  ShoeboxProcessor incident_shoebox_processor(
+    i_reflection_table, n_panels, 0, num_images, false);
+
+  ShoeboxProcessor empty_shoebox_processor(
+    e_reflection_table, n_panels, 0, num_images, false);
+
+  // Get shoeboxes for image data
+  for (std::size_t img_num = 0; img_num < num_images; ++img_num) {
+    dxtbx::format::Image<double> img = data.get_corrected_data(img_num);
+    dxtbx::format::Image<bool> mask = data.get_mask(img_num);
+
+    dials::af::shared<scitbx::af::versa<double, scitbx::af::c_grid<2> > > output_data(
+      n_panels);
+    dials::af::shared<scitbx::af::versa<bool, scitbx::af::c_grid<2> > > output_mask(
+      n_panels);
+
+    for (std::size_t i = 0; i < output_data.size(); ++i) {
+      output_data[i] = img.tile(i).data();
+      output_mask[i] = mask.tile(i).data();
+    }
+    shoebox_processor.next_data_only(
+      dials::model::Image<double>(output_data.const_ref(), output_mask.const_ref()));
+  }
+
+  // Get shoeboxes for incident data
+  for (std::size_t img_num = 0; img_num < num_images; ++img_num) {
+    dxtbx::format::Image<double> img = incident_data.get_corrected_data(img_num);
+    dxtbx::format::Image<bool> mask = incident_data.get_mask(img_num);
+
+    dials::af::shared<scitbx::af::versa<double, scitbx::af::c_grid<2> > > output_data(
+      n_panels);
+    dials::af::shared<scitbx::af::versa<bool, scitbx::af::c_grid<2> > > output_mask(
+      n_panels);
+
+    for (std::size_t i = 0; i < output_data.size(); ++i) {
+      output_data[i] = img.tile(i).data();
+      output_mask[i] = mask.tile(i).data();
+    }
+    incident_shoebox_processor.next_data_only(
+      dials::model::Image<double>(output_data.const_ref(), output_mask.const_ref()));
+  }
+
+  // Get shoeboxes for empty data
+  for (std::size_t img_num = 0; img_num < num_images; ++img_num) {
+    dxtbx::format::Image<double> img = empty_data.get_corrected_data(img_num);
+    dxtbx::format::Image<bool> mask = empty_data.get_mask(img_num);
+
+    dials::af::shared<scitbx::af::versa<double, scitbx::af::c_grid<2> > > output_data(
+      n_panels);
+    dials::af::shared<scitbx::af::versa<bool, scitbx::af::c_grid<2> > > output_mask(
+      n_panels);
+
+    for (std::size_t i = 0; i < output_data.size(); ++i) {
+      output_data[i] = img.tile(i).data();
+      output_mask[i] = mask.tile(i).data();
+    }
+    empty_shoebox_processor.next_data_only(
+      dials::model::Image<double>(output_data.const_ref(), output_mask.const_ref()));
+  }
+
+  // Now correct each pixel for each shoebox
+  dials::af::shared<Shoebox<> > shoeboxes = reflection_table["shoebox"];
+  dials::af::shared<Shoebox<> > i_shoeboxes = i_reflection_table["shoebox"];
+  dials::af::shared<Shoebox<> > e_shoeboxes = e_reflection_table["shoebox"];
+  dials::af::const_ref<int6> bboxes = reflection_table["bbox"];
+
+  for (std::size_t i = 0; i < reflection_table.size(); ++i) {
+    Shoebox<> shoebox = shoeboxes[i];
+    Shoebox<> i_shoebox = i_shoeboxes[i];
+    Shoebox<> e_shoebox = e_shoeboxes[i];
+    int6 bbox = bboxes[i];
+    int panel = shoebox.panel;
+
+    // Shoebox data are ordered (z, y, x)
+    for (std::size_t z = 0; z < shoebox.zsize(); ++z) {
+      int frame_z = bbox[4] + z;
+      double tof = img_tof[frame_z] * std::pow(10, -6);  // (s)
+
+      for (std::size_t y = 0; y < shoebox.ysize(); ++y) {
+        int panel_y = bbox[2] + y;
+        if (panel_y > image_size[1] || panel_y < 0) {
+          continue;
+        }
+        for (std::size_t x = 0; x < shoebox.xsize(); ++x) {
+          int panel_x = bbox[0] + x;
+          if (panel_x > image_size[0] || panel_x < 0) {
+            continue;
+          }
+
+          double incident_pixel_data = i_shoebox.data(z, y, x);
+          double empty_pixel_data = e_shoebox.data(z, y, x);
+          double pixel_data = shoebox.data(z, y, x);
+
+          // Normalise w.r.t proton charge
+          pixel_data /= sample_proton_charge;
+          incident_pixel_data /= incident_proton_charge;
+          empty_pixel_data /= empty_proton_charge;
+
+          // Subtract empty from incident and sample
+          pixel_data -= empty_pixel_data;
+          incident_pixel_data -= empty_pixel_data;
+
+          scitbx::vec3<double> s1 = detector[panel].get_pixel_lab_coord(
+            scitbx::vec2<double>(panel_x, panel_y));
+          double distance = s1.length() + sample_to_source_distance;
+          distance *= std::pow(10, -3);  // (m)
+          double wl = ((Planck * tof) / (m_n * (distance))) * std::pow(10, 10);
+
+          // Pixel data will be divided by incident run
+          // Infinities are set to zero
+          if (incident_pixel_data < 1e-5) {
+            shoebox.data(z, y, x) = 0;
+            continue;
+          }
+
+          pixel_data /= incident_pixel_data;
+
+          // Lorentz correction
+          if (apply_lorentz_correction) {
+            double two_theta = detector[panel].get_two_theta_at_pixel(
+              unit_s0, scitbx::vec2<double>(panel_x, panel_y));
+            double two_theta_deg = two_theta * (180 / pi);
+            double sin_two_theta_sq = std::pow(sin(two_theta * .5), 2);
+            double lorentz_correction = sin_two_theta_sq / std::pow(wl, 4);
+            pixel_data *= lorentz_correction;
+          }
+
+          shoebox.data(z, y, x) = double(pixel_data);
+        }
+      }
+    }
+  }
+}
+
+/*
+ * Extracts shoeboxes to reflection_table with each pixel corrected w.r.t
+ * an incident run, an empty run, a spherical absorption correction,
+ * and optionally the Lorentz correction
+ */
+void tof_extract_shoeboxes_to_reflection_table(
+  dials::af::reflection_table &reflection_table,
+  Experiment &experiment,
+  ImageSequence &data,
+  ImageSequence &incident_data,
+  ImageSequence &empty_data,
+  TOFCorrectionsData &corrections_data,
+  bool apply_lorentz_correction) {
+  Detector detector = *experiment.get_detector();
+  Scan scan = *experiment.get_scan();
+
+  // Required beam params
+  std::shared_ptr<dxtbx::model::BeamBase> beam_ptr = experiment.get_beam();
+  std::shared_ptr<PolychromaticBeam> beam =
+    std::dynamic_pointer_cast<PolychromaticBeam>(beam_ptr);
+  DIALS_ASSERT(beam != nullptr);
+  vec3<double> unit_s0 = beam->get_unit_s0();
+  double sample_to_source_distance = beam->get_sample_to_source_distance();
+
+  // Required scan params
+  scitbx::af::shared<double> img_tof = scan.get_property<double>("time_of_flight");
+
+  // Required detector params
+  int n_panels = detector.size();
+  int num_images = data.size();
+  vec2<std::size_t> image_size = detector[0].get_image_size();
+  DIALS_ASSERT(num_images == img_tof.size());
+
+  // Copy reflections for incident and empty runs
+  boost::python::dict d;
+  dials::af::reflection_table i_reflection_table =
+    dxtbx::af::flex_table_suite::deepcopy(reflection_table, d);
+  dials::af::reflection_table e_reflection_table =
+    dxtbx::af::flex_table_suite::deepcopy(reflection_table, d);
+
+  // Processors to get the image data into shoeboxes
+  ShoeboxProcessor shoebox_processor(reflection_table, n_panels, 0, num_images, false);
+
+  ShoeboxProcessor incident_shoebox_processor(
+    i_reflection_table, n_panels, 0, num_images, false);
+
+  ShoeboxProcessor empty_shoebox_processor(
+    e_reflection_table, n_panels, 0, num_images, false);
+
+  // Get shoeboxes for image data
+  for (std::size_t img_num = 0; img_num < num_images; ++img_num) {
+    dxtbx::format::Image<double> img = data.get_corrected_data(img_num);
+    dxtbx::format::Image<bool> mask = data.get_mask(img_num);
+
+    dials::af::shared<scitbx::af::versa<double, scitbx::af::c_grid<2> > > output_data(
+      n_panels);
+    dials::af::shared<scitbx::af::versa<bool, scitbx::af::c_grid<2> > > output_mask(
+      n_panels);
+
+    for (std::size_t i = 0; i < output_data.size(); ++i) {
+      output_data[i] = img.tile(i).data();
+      output_mask[i] = mask.tile(i).data();
+    }
+    shoebox_processor.next_data_only(
+      dials::model::Image<double>(output_data.const_ref(), output_mask.const_ref()));
+  }
+
+  // Get shoeboxes for incident data
+  for (std::size_t img_num = 0; img_num < num_images; ++img_num) {
+    dxtbx::format::Image<double> img = incident_data.get_corrected_data(img_num);
+    dxtbx::format::Image<bool> mask = incident_data.get_mask(img_num);
+
+    dials::af::shared<scitbx::af::versa<double, scitbx::af::c_grid<2> > > output_data(
+      n_panels);
+    dials::af::shared<scitbx::af::versa<bool, scitbx::af::c_grid<2> > > output_mask(
+      n_panels);
+
+    for (std::size_t i = 0; i < output_data.size(); ++i) {
+      output_data[i] = img.tile(i).data();
+      output_mask[i] = mask.tile(i).data();
+    }
+    incident_shoebox_processor.next_data_only(
+      dials::model::Image<double>(output_data.const_ref(), output_mask.const_ref()));
+  }
+
+  // Get shoeboxes for empty data
+  for (std::size_t img_num = 0; img_num < num_images; ++img_num) {
+    dxtbx::format::Image<double> img = empty_data.get_corrected_data(img_num);
+    dxtbx::format::Image<bool> mask = empty_data.get_mask(img_num);
+
+    dials::af::shared<scitbx::af::versa<double, scitbx::af::c_grid<2> > > output_data(
+      n_panels);
+    dials::af::shared<scitbx::af::versa<bool, scitbx::af::c_grid<2> > > output_mask(
+      n_panels);
+
+    for (std::size_t i = 0; i < output_data.size(); ++i) {
+      output_data[i] = img.tile(i).data();
+      output_mask[i] = mask.tile(i).data();
+    }
+    empty_shoebox_processor.next_data_only(
+      dials::model::Image<double>(output_data.const_ref(), output_mask.const_ref()));
+  }
+
+  // Now correct each pixel for each shoebox
+  dials::af::shared<Shoebox<> > shoeboxes = reflection_table["shoebox"];
+  dials::af::shared<Shoebox<> > i_shoeboxes = i_reflection_table["shoebox"];
+  dials::af::shared<Shoebox<> > e_shoeboxes = e_reflection_table["shoebox"];
+  dials::af::const_ref<int6> bboxes = reflection_table["bbox"];
+
+  for (std::size_t i = 0; i < reflection_table.size(); ++i) {
+    Shoebox<> shoebox = shoeboxes[i];
+    Shoebox<> i_shoebox = i_shoeboxes[i];
+    Shoebox<> e_shoebox = e_shoeboxes[i];
+    int6 bbox = bboxes[i];
+    int panel = shoebox.panel;
+
+    // Shoebox data are ordered (z, y, x)
+    for (std::size_t z = 0; z < shoebox.zsize(); ++z) {
+      int frame_z = bbox[4] + z;
+      double tof = img_tof[frame_z] * std::pow(10, -6);  // (s)
+
+      for (std::size_t y = 0; y < shoebox.ysize(); ++y) {
+        int panel_y = bbox[2] + y;
+        if (panel_y > image_size[1] || panel_y < 0) {
+          continue;
+        }
+        for (std::size_t x = 0; x < shoebox.xsize(); ++x) {
+          int panel_x = bbox[0] + x;
+          if (panel_x > image_size[0] || panel_x < 0) {
+            continue;
+          }
+
+          double incident_pixel_data = i_shoebox.data(z, y, x);
+          double empty_pixel_data = e_shoebox.data(z, y, x);
+          double pixel_data = shoebox.data(z, y, x);
+
+          // Normalise w.r.t proton charge
+          pixel_data /= corrections_data.sample_proton_charge;
+          incident_pixel_data /= corrections_data.incident_proton_charge;
+          empty_pixel_data /= corrections_data.empty_proton_charge;
+
+          // Subtract empty from incident and sample
+          pixel_data -= empty_pixel_data;
+          incident_pixel_data -= empty_pixel_data;
+
+          double two_theta = detector[panel].get_two_theta_at_pixel(
+            unit_s0, scitbx::vec2<double>(panel_x, panel_y));
+          double two_theta_deg = two_theta * (180 / pi);
+          int two_theta_idx = static_cast<int>(two_theta_deg / 10);
+
+          scitbx::vec3<double> s1 = detector[panel].get_pixel_lab_coord(
+            scitbx::vec2<double>(panel_x, panel_y));
+          double distance = s1.length() + sample_to_source_distance;
+          distance *= std::pow(10, -3);  // (m)
+          double wl = ((Planck * tof) / (m_n * (distance))) * std::pow(10, 10);
+
+          // Spherical absorption correction
+          // for image data and incident data
+          double sample_muR =
+            (corrections_data.sample_linear_scattering_c
+             + (corrections_data.sample_linear_absorption_c / 1.8) * wl)
+            * corrections_data.sample_radius;
+          double sample_absorption_correction =
+            tof_pixel_spherical_absorption_correction(
+              pixel_data, sample_muR, two_theta, two_theta_idx);
+
+          // Pixel data will be divided by absorption correction
+          // Infinities are set to zero
+          if (sample_absorption_correction < 1e-5) {
+            shoebox.data(z, y, x) = 0;
+            continue;
+          }
+
+          double incident_muR =
+            (corrections_data.incident_linear_scattering_c
+             + (corrections_data.incident_linear_absorption_c / 1.8) * wl)
+            * corrections_data.incident_radius;
+          double incident_absorption_correction =
+            tof_pixel_spherical_absorption_correction(
+              pixel_data, incident_muR, two_theta, two_theta_idx);
+
+          // Pixel data will be divided by absorption correction
+          // Infinities are set to zero
+          if (incident_absorption_correction < 1e-5) {
+            shoebox.data(z, y, x) = 0;
+            continue;
+          }
+
+          // Pixel data will be divided by incident run
+          // Infinities are set to zero
+          incident_pixel_data /= incident_absorption_correction;
+          if (incident_pixel_data < 1e-5) {
+            shoebox.data(z, y, x) = 0;
+            continue;
+          }
+
+          pixel_data /= incident_pixel_data;
+          pixel_data /= sample_absorption_correction;
+
+          // Lorentz correction
+          if (apply_lorentz_correction) {
+            double sin_two_theta_sq = std::pow(sin(two_theta * .5), 2);
+            double lorentz_correction = sin_two_theta_sq / std::pow(wl, 4);
+            pixel_data *= lorentz_correction;
+          }
+
+          shoebox.data(z, y, x) = double(pixel_data);
+        }
+      }
+    }
+  }
+}
+
+}  // namespace dials_scaling
+
+#endif /* DIALS_ALGORITHMS_SCALING_TOF_SCALING_H */

diff --git a/tests/algorithms/scaling/test_tof_scaling.py b/tests/algorithms/scaling/test_tof_scaling.py
new file mode 100644
index 0000000000..11271f8067
--- /dev/null
+++ b/tests/algorithms/scaling/test_tof_scaling.py
@@ -0,0 +1,166 @@
+from __future__ import annotations
+
+from os.path import join
+
+from dxtbx.model.experiment_list import ExperimentListFactory
+
+from dials.array_family import flex
+from dials_tof_scaling_ext import (
+    TOFCorrectionsData,
+    tof_extract_shoeboxes_to_reflection_table,
+)
+
+
+def test_tof_extract_shoeboxes(dials_data):
+    image_file = join(
+        dials_data("isis_sxd_example_data", pathlib=True), "sxd_nacl_run.nxs"
+    )
+    experiments = ExperimentListFactory.from_filenames([image_file])
+    reflections = flex.reflection_table.from_msgpack_file(
+        join(dials_data("isis_sxd_nacl_processed", pathlib=True), "strong.refl")
+    )
+
+    reflections["shoebox"] = flex.shoebox(
+        reflections["panel"],
+        reflections["bbox"],
+        allocate=False,
+        flatten=False,
+    )
+
+    expt_data = experiments[0].imageset
+
+    ## Shoeboxes with no corrections
+
+    tof_extract_shoeboxes_to_reflection_table(
+        reflections, experiments[0], expt_data, False
+    )
+
+    ## Shoeboxes with Lorentz correction
+
+    reflections["shoebox"] = flex.shoebox(
+        reflections["panel"],
reflections["bbox"], + allocate=False, + flatten=False, + ) + experiment_cls = experiments[0].imageset.get_format_class() + incident_run_file = join( + dials_data("isis_sxd_example_data", pathlib=True), "sxd_vanadium_run.nxs" + ) + empty_run_file = join( + dials_data("isis_sxd_example_data", pathlib=True), "sxd_empty_run.nxs" + ) + incident_fmt_class = experiment_cls.get_instance(incident_run_file) + empty_fmt_class = experiment_cls.get_instance(empty_run_file) + + incident_data = experiment_cls(incident_run_file).get_imageset(incident_run_file) + empty_data = experiment_cls(empty_run_file).get_imageset(empty_run_file) + incident_proton_charge = incident_fmt_class.get_proton_charge() + empty_proton_charge = empty_fmt_class.get_proton_charge() + expt_proton_charge = experiment_cls.get_instance( + experiments[0].imageset.paths()[0], + **experiments[0].imageset.data().get_params(), + ).get_proton_charge() + + tof_extract_shoeboxes_to_reflection_table( + reflections, + experiments[0], + expt_data, + incident_data, + empty_data, + expt_proton_charge, + incident_proton_charge, + empty_proton_charge, + False, + ) + + ## Shoeboxes with incident/empty run normalisation + ## and Lorentz correction + reflections["shoebox"] = flex.shoebox( + reflections["panel"], + reflections["bbox"], + allocate=False, + flatten=False, + ) + tof_extract_shoeboxes_to_reflection_table( + reflections, + experiments[0], + expt_data, + incident_data, + empty_data, + expt_proton_charge, + incident_proton_charge, + empty_proton_charge, + True, + ) + + ## Shoeboxes with incident/empty run normalisation + ## and spherical absorption correction + reflections["shoebox"] = flex.shoebox( + reflections["panel"], + reflections["bbox"], + allocate=False, + flatten=False, + ) + target_spectrum_sample_number_density = 0.0223 + target_spectrum_sample_radius = 0.3 + target_spectrum_scattering_x_section = 10.040 + target_spectrum_absorption_x_section = 17.015 + incident_spectrum_sample_number_density = 0.0722 + incident_spectrum_sample_radius = 0.3 + incident_spectrum_scattering_x_section = 5.158 + incident_spectrum_absorption_x_section = 4.4883 + + corrections_data = TOFCorrectionsData( + expt_proton_charge, + incident_proton_charge, + empty_proton_charge, + target_spectrum_sample_radius, + target_spectrum_scattering_x_section, + target_spectrum_absorption_x_section, + target_spectrum_sample_number_density, + incident_spectrum_sample_radius, + incident_spectrum_scattering_x_section, + incident_spectrum_absorption_x_section, + incident_spectrum_sample_number_density, + ) + + tof_extract_shoeboxes_to_reflection_table( + reflections, + experiments[0], + expt_data, + incident_data, + empty_data, + corrections_data, + False, + ) + + ## Shoeboxes with incident/empty run normalisation, + ## spherical absorption correction and Lorentz correction + reflections["shoebox"] = flex.shoebox( + reflections["panel"], + reflections["bbox"], + allocate=False, + flatten=False, + ) + tof_extract_shoeboxes_to_reflection_table( + reflections, + experiments[0], + expt_data, + incident_data, + empty_data, + corrections_data, + True, + ) From 8ab52a0d80d6e2815e7d4f4029dfc647308abcc6 Mon Sep 17 00:00:00 2001 From: David McDonagh <60879630+toastisme@users.noreply.github.com> Date: Wed, 28 Aug 2024 09:16:26 +0100 Subject: [PATCH 38/40] Enable radial_profile spot finding algorithm for polychromatic data (#2729) Enable radial_profile spot finding algorithm for polychromatic data by removing call to beam s0. 
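
This one-line fix works because panel.get_two_theta_array only needs the
incident beam direction: for a monochromatic beam s0 = unit_s0 / wavelength,
so the 2-theta computed from either vector is the same, while a
PolychromaticBeam defines no single s0 at all. A rough sketch of the angle
calculation (illustrative only, assuming numpy; not the dxtbx implementation):

    import numpy as np

    def two_theta(unit_s0, s1):
        # Angle between the incident beam direction and the scattered
        # vector; any overall scaling of either vector cancels in the
        # normalisation, which is why only the direction matters.
        cos_tt = np.dot(unit_s0, s1) / (
            np.linalg.norm(unit_s0) * np.linalg.norm(s1)
        )
        return float(np.arccos(np.clip(cos_tt, -1.0, 1.0)))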
--- newsfragments/2729.bugfix | 1 + src/dials/extensions/radial_profile_spotfinder_threshold_ext.py | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 newsfragments/2729.bugfix diff --git a/newsfragments/2729.bugfix b/newsfragments/2729.bugfix new file mode 100644 index 0000000000..3dde18530d --- /dev/null +++ b/newsfragments/2729.bugfix @@ -0,0 +1 @@ +Enable radial_profile spot finding algorithm for polychromatic data by removing call to beam s0. diff --git a/src/dials/extensions/radial_profile_spotfinder_threshold_ext.py b/src/dials/extensions/radial_profile_spotfinder_threshold_ext.py index 3007d82297..5d67765350 100644 --- a/src/dials/extensions/radial_profile_spotfinder_threshold_ext.py +++ b/src/dials/extensions/radial_profile_spotfinder_threshold_ext.py @@ -105,7 +105,7 @@ def compute_threshold( beam = imageset.get_beam() # Get 2θ array for the panel or ROI - two_theta_array = panel.get_two_theta_array(beam.get_s0()) + two_theta_array = panel.get_two_theta_array(beam.get_unit_s0()) if region_of_interest: x0, x1, y0, y1 = region_of_interest two_theta_array = two_theta_array[y0:y1, x0:x1] From e637db319e7a9c9c08cb3d1032388a6bf6b4c458 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Fri, 6 Sep 2024 13:33:51 +0100 Subject: [PATCH 39/40] [pre-commit.ci] pre-commit autoupdate (#2733) Also, make future updates quarterly to reduce spam. --- .pre-commit-config.yaml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 6f8a29bd9b..6ca4d753dd 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,5 +1,7 @@ ci: skip: [no-images] + autoupdate_schedule: quarterly + repos: # Syntax validation and some basic sanity checks - repo: https://github.com/pre-commit/pre-commit-hooks @@ -17,7 +19,7 @@ repos: name: "Don't commit to 'main' directly" - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.6.2 + rev: v0.6.3 hooks: - id: ruff args: [--fix, --exit-non-zero-on-fix, --show-fixes] From 9d30a86d93fc0cba5b6a24aaff3805e2d55554fa Mon Sep 17 00:00:00 2001 From: biochem_fan Date: Fri, 6 Sep 2024 21:41:35 +0900 Subject: [PATCH 40/40] Image viewer masking UI fixes (#2731) - Fix glitches in dials.image_viewer masking tool (#1824, #1964) - Allow panning and zoom change while polygons are drawn (#1824) - Make sure polygon vertices are within a single panel since polygons over multiple panels are not (yet) supported (#2712) - Clear the current rectangle and exit the rectangle mode when failed - Make masking errors visible as a dialog box (#2260) --- newsfragments/2731.bugfix | 1 + src/dials/util/image_viewer/mask_frame.py | 49 +++++++++++++++++------ 2 files changed, 37 insertions(+), 13 deletions(-) create mode 100644 newsfragments/2731.bugfix diff --git a/newsfragments/2731.bugfix b/newsfragments/2731.bugfix new file mode 100644 index 0000000000..3d3e47a620 --- /dev/null +++ b/newsfragments/2731.bugfix @@ -0,0 +1 @@ +``dials.image_viewer``: Fix UI glitches in masking tools. 
diff --git a/src/dials/util/image_viewer/mask_frame.py b/src/dials/util/image_viewer/mask_frame.py index f058e5f558..deb386b54b 100644 --- a/src/dials/util/image_viewer/mask_frame.py +++ b/src/dials/util/image_viewer/mask_frame.py @@ -685,8 +685,31 @@ def OnLeftDown(self, event): self._circle_radius = None return elif self._mode_polygon: - self._mode_polygon_points.append(click_posn) + xgeo, ygeo = self._pyslip.ConvertView2Geo(click_posn) + xc, yc = self._pyslip.tiles.map_relative_to_picture_fast_slow( + xgeo, ygeo + ) + p1, p0, p_id = self._pyslip.tiles.flex_image.picture_to_readout(yc, xc) + + if p_id < 0: + return + + # polygon must be within a single panel + if len(self._mode_polygon_points) > 0: + xgeo0, ygeo0 = self._mode_polygon_points[0] + xc0, yc0 = self._pyslip.tiles.map_relative_to_picture_fast_slow( + xgeo0, ygeo0 + ) + _, _, p_id0 = self._pyslip.tiles.flex_image.picture_to_readout( + yc0, xc0 + ) + + if p_id0 != p_id: + return + + self._mode_polygon_points.append((xgeo, ygeo)) self.DrawPolygon(self._mode_polygon_points) + event.Skip() def OnLeftUp(self, event): @@ -697,12 +720,16 @@ def OnLeftUp(self, event): self._rectangle_x1y1 = click_posn x0, y0 = self._rectangle_x0y0 x1, y1 = self._rectangle_x1y1 - self.AddUntrustedRectangle(x0, y0, x1, y1) - self._pyslip.DeleteLayer(self._mode_rectangle_layer) - self._mode_rectangle_layer = None - self.mode_rectangle_button.SetValue(False) - self.OnUpdate(event) - return + try: + self.AddUntrustedRectangle(x0, y0, x1, y1) + except Exception as e: + wx.MessageBox(str(e)) + finally: + self._pyslip.DeleteLayer(self._mode_rectangle_layer) + self._mode_rectangle_layer = None + self.mode_rectangle_button.SetValue(False) + self.OnUpdate(event) + return elif self._mode_circle and self._circle_xy is not None: xc, yc = self._circle_xy @@ -711,7 +738,7 @@ def OnLeftUp(self, event): try: self.AddUntrustedCircle(xc, yc, xedge, yedge) except Exception as e: - print(e) + wx.MessageBox(str(e)) finally: self._pyslip.DeleteLayer(self._mode_circle_layer) self._mode_circle_layer = None @@ -806,10 +833,7 @@ def DrawPolygon(self, vertices): for i in range(len(vertices) - 1): polygon_data.append( ( - ( - self._pyslip.ConvertView2Geo(vertices[i]), - self._pyslip.ConvertView2Geo(vertices[i + 1]), - ), + (vertices[i], vertices[i + 1]), d, ) ) @@ -828,7 +852,6 @@ def AddUntrustedPolygon(self, vertices): if len(vertices) < 4: return vertices.append(vertices[0]) - vertices = [self._pyslip.ConvertView2Geo(v) for v in vertices] vertices = [ self._pyslip.tiles.map_relative_to_picture_fast_slow(*v) for v in vertices ]
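
The panel check added to OnLeftDown enforces the invariant that a mask
polygon may not straddle panels. Conceptually it amounts to the following
(a hypothetical helper, not code from this patch; picture_to_readout
returns slow/fast readout coordinates plus a panel id, negative when the
point lies outside every panel):

    def vertices_on_one_panel(flex_image, picture_coords):
        """True if every (fast, slow) vertex maps to the same panel."""
        panel_ids = set()
        for xc, yc in picture_coords:
            _, _, p_id = flex_image.picture_to_readout(yc, xc)
            if p_id < 0:  # click landed outside all panels
                return False
            panel_ids.add(p_id)
        return len(panel_ids) == 1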