From 5894dcb73160e140705839304a861a17d4f20f51 Mon Sep 17 00:00:00 2001
From: Matthias Probst
Date: Fri, 15 Mar 2024 10:12:22 +0100
Subject: [PATCH] make particle ppp generation more robust

---
 synpivimage/core.py      |  7 ++++
 synpivimage/io.py        |  8 +++-
 synpivimage/laser.py     |  8 +++-
 synpivimage/particles.py | 45 +++++++++++++-------
 synpivimage/utils.py     | 76 +++++++++++++++++++++++++++------
 tests/test_core.py       | 91 +++++++++++++++++++++++++++++++++++++++-
 tests/test_io.py         |  8 ++--
 7 files changed, 205 insertions(+), 38 deletions(-)

diff --git a/synpivimage/core.py b/synpivimage/core.py
index 05f323b..5f2231c 100644
--- a/synpivimage/core.py
+++ b/synpivimage/core.py
@@ -48,6 +48,13 @@ def take_image(laser: Laser,
         The peak count of the particles
     kwargs : dict
         Additional parameters
+
+    Returns
+    -------
+    img : np.ndarray
+        The created image
+    particles : Particles
+        The updated Particles object
     """
     logger = kwargs.get('logger', LOGGER)
     # compute the particle intensity factor in order to reach particle_peak_count
diff --git a/synpivimage/io.py b/synpivimage/io.py
index 0683288..6e7ce3e 100644
--- a/synpivimage/io.py
+++ b/synpivimage/io.py
@@ -366,12 +366,16 @@ def __exit__(self, exc_type, exc_val, exc_tb):
                 img_idx_ds.make_scale()
                 ds.dims[0].attach_scale(img_idx_ds)
 
-                nx_ds = ds.parent.create_dataset('nx', data=np.arange(self.camera.nx))
+                nx_ds = ds.parent.create_dataset('ix', data=np.arange(self.camera.nx))
+                nx_ds.attrs['long_name'] = 'x pixel coordinate'
+                nx_ds.attrs['standard_name'] = 'x_pixel_coordinate'  # https://matthiasprobst.github.io/pivmeta/#x_pixel_coordinate
                 nx_ds.make_scale()
                 ds.dims[2].attach_scale(nx_ds)
 
-                ny_ds = ds.parent.create_dataset('ny', data=np.arange(self.camera.ny))
+                ny_ds = ds.parent.create_dataset('iy', data=np.arange(self.camera.ny))
                 ny_ds.make_scale()
+                ny_ds.attrs['long_name'] = 'y pixel coordinate'
+                ny_ds.attrs['standard_name'] = 'y_pixel_coordinate'  # https://matthiasprobst.github.io/pivmeta/#y_pixel_coordinate
                 ds.dims[1].attach_scale(ny_ds)
 
                 for cmp_name, cmp, onto_model in zip(('laser', 'camera'),
diff --git a/synpivimage/laser.py b/synpivimage/laser.py
index 6177458..bd3a282 100644
--- a/synpivimage/laser.py
+++ b/synpivimage/laser.py
@@ -77,7 +77,7 @@ def illuminate(self,
         Returns
         -------
         Particles
-            The illuminated particles (new object!)
+            The illuminated particles (same object!)
""" logger = kwargs.get('logger', LOGGER) @@ -92,6 +92,8 @@ def illuminate(self, laser_intensity = tophat(self.width) else: laser_intensity = real(dz0, s) + + particles.reset() particles.source_intensity = laser_intensity(particles.z) inside_laser = particles.source_intensity > np.exp(-2) @@ -113,7 +115,9 @@ def illuminate(self, plt.ylabel('Normalized particle intensity in beam / -') plt.grid() plt.show() - return Particles(**particles.dict()) + + return particles + # return Particles(**particles.dict()) def model_dump_jsonld(self) -> str: """Return JSON-LD string representation""" diff --git a/synpivimage/particles.py b/synpivimage/particles.py index 12d4690..a324b63 100644 --- a/synpivimage/particles.py +++ b/synpivimage/particles.py @@ -116,6 +116,9 @@ def _parse_array(_arr, _n: int, dtype=None): self._image_electrons = _parse_array(image_electrons, N) self._image_quantized_electrons = _parse_array(image_quantized_electrons, N) self._flag = _parse_array(flag, N, dtype=int) + self._xlim = None + self._ylim = None + self._zlim = None @property def x(self): @@ -212,32 +215,44 @@ def reset(self): self._flag = np.zeros_like(self.x, dtype=int) @classmethod - def generate(cls, ppp: float, dx_max, dy_max, dz_max, camera, laser) -> "Particles": + def generate(cls, ppp: float, dx_max, dy_max, dz_max, size, camera, laser) -> "Particles": """Generate particles based on a certain ppp (particles per pixel). With dx, dy, dz the maximum displacement of the particles can be set. The camera and laser are used to determine the sensor size and the laser width.""" from .utils import generate_particles - return generate_particles(ppp=ppp, dx_max=dx_max, dy_max=dy_max, dz_max=dz_max, camera=camera, laser=laser) + return generate_particles(ppp=ppp, dx_max=dx_max, dy_max=dy_max, dz_max=dz_max, size=size, camera=camera, laser=laser) def get_ppp(self, camera_size: int) -> float: """Return the particles per pixel""" return self.active.sum() / camera_size def regenerate(self) -> "Particles": - """Regenerate the particles. Does return a new object and does not touch the current object! + """Regenerate the particles of this object. + The locations (x, y, z) of the particles will change and the intensities + will be reset - The number of particles and hence the ppp will remain constant (within a statistical deviation) - but the position and size of the particles will change.""" - new_part = self.copy() + .. note:: + + Does NOT create a new object! 
+            The returned object is the same
+        """
+        self.reset()
         N = len(self.x)
-        x = np.random.uniform(min(self.x), max(self.x), N)
-        y = np.random.uniform(min(self.y), max(self.y), N)
-        z = np.random.uniform(min(self.z), max(self.z), N)
-        new_part._x = x
-        new_part._y = y
-        new_part._z = z
-        new_part.reset()
-        return new_part
+        if self._xlim is None:
+            self._x = np.random.uniform(min(self.x), max(self.x), N)
+        else:
+            self._x = np.random.uniform(*self._xlim, N)
+
+        if self._ylim is None:
+            self._y = np.random.uniform(min(self.y), max(self.y), N)
+        else:
+            self._y = np.random.uniform(*self._ylim, N)
+
+        if self._zlim is None:
+            self._z = np.random.uniform(min(self.z), max(self.z), N)
+        else:
+            self._z = np.random.uniform(*self._zlim, N)
+        return self
 
     def __len__(self):
         return self.x.size
@@ -508,7 +523,7 @@ def compute_intensity_distribution(
     xp : float
         x-coordinate of the particles on the sensor in pixels
     yp : float
-        y-coordinate of the particles on thesensor in pixels
+        y-coordinate of the particles on the sensor in pixels
     dp : float
         particle image diameter (in pixels)
     sigmax : float
diff --git a/synpivimage/utils.py b/synpivimage/utils.py
index a00b4ca..cb63716 100644
--- a/synpivimage/utils.py
+++ b/synpivimage/utils.py
@@ -1,4 +1,5 @@
 import logging
+import warnings
 from typing import Tuple
 
 import numpy as np
@@ -16,9 +17,10 @@ def generate_particles(ppp: float,
                        dx_max: Tuple[float, float],
                        dy_max: Tuple[float, float],
                        dz_max: Tuple[float, float],
+                       size: float,
                        camera: Camera,
                        laser: Laser,
-                       iter_max: int = 40):
+                       **kwargs) -> Particles:
     """Generates a particle class based on the current setup and given max displacements
 
     Parameters
@@ -31,7 +33,27 @@ def generate_particles(ppp: float,
         Maximal displacement in y-direction [lower, upper]
     dz_max: Tuple[float, float]
         Maximal displacement in z-direction [lower, upper]
+    size: float
+        Particle size
+    camera: Camera
+        The camera used to take the image
+    laser: Laser
+        The laser used to illuminate the particles
+    kwargs:
+        iter_max: int=40
+            Max. iteration for particle number determination algorithm
+        N_max: int=10**7
+            Max. number of particles
+
+    Returns
+    -------
+    particles: Particles
+        The generated particles
     """
+
+    iter_max = kwargs.get('iter_max', 40)
+    N_max = kwargs.get('N_max', 10 ** 7)
+
     logger.debug(f'Generating particles with a ppp of {ppp}')
     assert 0 < ppp < 1, f"Expected ppp to be between 0 and 1, got {ppp}"
 
@@ -43,36 +65,50 @@ def generate_particles(ppp: float,
     zmax = laser.width
     area = (camera.nx + (dx_max[1] - dx_max[0])) * (camera.ny + (dy_max[1] - dy_max[0]))
     N = int(area * ppp)
+    if N < 2:
+        raise ValueError(
+            'Number of particles is too low. For generation of only one particle,'
+            ' please use discrete generation by providing the coordinates'
+        )
+
     logger.debug(f'Initial particle count: {N}')
 
     # Ntarget = ppp * camera.size
     curr_ppp = 0
     # rel_dev = abs((curr_ppp - ppp) / ppp)
+    _xlim = min(-dx_max[1], 0), max(camera.nx, camera.nx - dx_max[0])
+    _ylim = min(-dy_max[1], 0), max(camera.ny, camera.ny - dy_max[0])
+    _zlim = min(zmin, zmin - dz_max[1]), max(zmax, zmax + dz_max[0])
+
+    x_extent = _xlim[1] - _xlim[0]
+    y_extent = _ylim[1] - _ylim[0]
+
+    n_too_much_noise = 0
     i = 0
     while abs((curr_ppp - ppp) / ppp) > 0.01:
         i += 1
         if i == 1:
             # generate a initial random distribution. From this. particles are either added or removed
             logger.debug(f'Generate initial random distribution of {N} particles')
-            xe = np.random.uniform(min(-dx_max[1], 0), max(camera.nx, camera.nx - dx_max[0]), N)
-            ye = np.random.uniform(min(-dy_max[1], 0), max(camera.ny, camera.ny - dy_max[0]), N)
-            ze = np.random.uniform(min(zmin, zmin - dz_max[1]), max(zmax, zmax + dz_max[0]), N)
+            xe = np.random.uniform(*_xlim, N)
+            ye = np.random.uniform(*_ylim, N)
+            ze = np.random.uniform(*_zlim, N)
 
         particles = Particles(
             x=xe,
             y=ye,
             z=ze,
-            size=np.ones_like(xe) * 2
+            size=size * np.ones_like(xe)
         )
 
        _img, _part = take_image(particles=particles, camera=camera, laser=laser, particle_peak_count=1000)
-        curr_ppp = _part.active.sum() / camera.size
-        # print(f'curr ppp: {curr_ppp:.5f}')
+        curr_ppp = _part.get_ppp(camera.size)  # _part.active.sum() / camera.size
+
         diff_ppp = ppp - curr_ppp
-        # print(f'diff ppp: {diff_ppp:.5f}')
-        Nadd = int(diff_ppp * camera.size)  # dampen the addition by 10%
+
+        Nadd = int(diff_ppp * x_extent * y_extent)
 
         if Nadd == 0:
             logger.debug('Stopping early because no new particles to be added.')
@@ -80,12 +116,11 @@
 
         logger.debug(f'Generate particles. Iteration {i}/{iter_max}: curr ppp: {curr_ppp:.5f}. '
                      f'diff ppp: {diff_ppp:.5f}. Adding {Nadd} particles')
-
         if Nadd > 0:
             # generate new particles within the given range
-            xe_new = np.random.uniform(min(-dx_max[1], 0), max(camera.nx, camera.nx - dx_max[0]), Nadd)
-            ye_new = np.random.uniform(min(-dy_max[1], 0), max(camera.ny, camera.ny - dy_max[0]), Nadd)
-            ze_new = np.random.uniform(min(zmin, zmin - dz_max[1]), max(zmax, zmax + dz_max[0]), Nadd)
+            xe_new = np.random.uniform(*_xlim, Nadd)
+            ye_new = np.random.uniform(*_ylim, Nadd)
+            ze_new = np.random.uniform(*_zlim, Nadd)
             # add them to the existing particles
             _n_new = len(xe) + Nadd
             xe = np.concatenate([xe, xe_new])
@@ -101,6 +136,18 @@
 
         assert len(xe) == N + Nadd
 
         N += Nadd
+
+        if N > N_max:
+            raise ValueError(f'Number of particles exceeded maximum of {N_max}. '
+                             f'Consider increasing the number of iterations or the particle size')
+
+        if curr_ppp == 0 and _part.in_fov.sum() > 0:
+            n_too_much_noise += 1
+            warnings.warn('No particles in the field of view are illuminated beyond the laser noise. Consider using a '
+                          'smaller noise level or a larger particle size.')
+            if n_too_much_noise > 10:
+                raise ValueError('Too many iterations without any particles in the field of view. Stopping.')
+
         err = abs((curr_ppp - ppp) / ppp)
         if err < 0.01:
@@ -110,4 +157,7 @@
             logger.debug(f'Reached max iteration of {iter_max}')
             break
 
+    _part._xlim = _xlim
+    _part._ylim = _ylim
+    _part._zlim = _zlim
     return _part
diff --git a/tests/test_core.py b/tests/test_core.py
index 5410dfd..4cd8bc7 100644
--- a/tests/test_core.py
+++ b/tests/test_core.py
@@ -66,6 +66,25 @@ def test_effective_laser_width(self):
 
 
 class TestParticles(unittest.TestCase):
+    def test_regenerate(self):
+        particles = Particles(
+            x=4, y=4, z=0, size=2
+        )
+        particles.regenerate()
+        self.assertEqual(particles.x, 4)
+        self.assertEqual(particles.y, 4)
+
+        particles = Particles(
+            x=[4, 10], y=[4, 10], z=[0, 0], size=[2, 2]
+        )
+        self.assertEqual(particles.x[0], 4)
+        self.assertEqual(particles.x[1], 10)
+        new_part = particles.regenerate()
+        self.assertNotEqual(particles.x[0], 4)
+        self.assertNotEqual(particles.x[1], 10)
+
+        self.assertIs(new_part, particles)
+
     def test_compute_ppp(self):
         laser = Laser(
             width=0.25,
@@ -88,6 +107,8 @@ def test_compute_ppp(self):
             x=4, y=4, z=0, size=2
         )
         imgOne, partOne = take_image(laser, cam, particles, particle_peak_count=1000)
+        self.assertTrue(particles is partOne)
+
         self.assertEqual(partOne.get_ppp(camera_size=cam.size), 1 / cam.size)
 
         particles = Particles(
@@ -136,7 +157,9 @@ def test_generate_certain_ppp(self):
         partA = Particles.generate(ppp=target_ppp,
                                    dx_max=[10, 100],
                                    dy_max=[-100, 100],
-                                   dz_max=[1, 1], camera=cam,
+                                   dz_max=[1, 1],
+                                   size=2,
+                                   camera=cam,
                                    laser=laser)
         print(partA.get_ppp(cam.size))
         # check if crit is really reached (err < 0.01):
@@ -144,6 +167,71 @@ def test_generate_certain_ppp(self):
         err = abs((realized_ppp - target_ppp) / target_ppp)
         self.assertTrue(err <= 0.01)
 
+    def test_generate_certain_ppp_with_noise(self):
+        laser = Laser(shape_factor=10 ** 3, width=10)
+
+        cam = Camera(
+            nx=128,
+            ny=128,
+            bit_depth=16,
+            qe=1,
+            sensitivity=1,
+            baseline_noise=0,
+            dark_noise=0,
+            shot_noise=False,
+            fill_ratio_x=1.0,
+            fill_ratio_y=1.0,
+            particle_image_diameter=4,
+            seed=10
+        )
+
+        true_dx = 0.6
+        true_dy = 0.3
+        ppp_list = np.linspace(0.001, 0.1, 11, dtype='float32')
+        dark_noise_list = np.linspace(0, 100, 6, dtype='float32')
+        for ppp in ppp_list[:]:
+            for dark_noise in dark_noise_list:
+                cam.dark_noise = dark_noise
+                particles = Particles.generate(
+                    ppp=ppp,
+                    dx_max=[0, true_dx],
+                    dy_max=[0, true_dy],
+                    dz_max=[0, 0],
+                    size=2,
+                    camera=cam,
+                    laser=laser)
+
+                realized_ppp = particles.get_ppp(cam.size)
+                err = abs((realized_ppp - ppp) / ppp)
+                self.assertTrue(err <= 0.05, msg=f'ppp: {ppp}, realized_ppp: {realized_ppp}, err: {err}')
+
+    def test_too_much_noise(self):
+        laser = Laser(shape_factor=10 ** 3, width=10)
+
+        cam = Camera(
+            nx=128,
+            ny=128,
+            bit_depth=16,
+            qe=1,
+            sensitivity=1,
+            baseline_noise=0,
+            dark_noise=1000,  # too much dark noise!
+            shot_noise=False,
+            fill_ratio_x=1.0,
+            fill_ratio_y=1.0,
+            particle_image_diameter=2,
+            seed=10
+        )
+        target_ppp = 0.01
+        with self.assertRaises(ValueError):
+            partA = Particles.generate(ppp=target_ppp,
+                                       dx_max=[10, 100],
+                                       dy_max=[-100, 100],
+                                       dz_max=[1, 1],
+                                       size=2,
+                                       camera=cam,
+                                       laser=laser)
+
     def test_single_particle(self):
         one_particle = Particles(
             x=1,
@@ -300,7 +388,6 @@ def test_take_image(self):
 
         pathlib.Path(pathlib.Path('img01a.hdf')).unlink(missing_ok=True)
         pathlib.Path(pathlib.Path('img01a.json')).unlink(missing_ok=True)
-
 #
 #
 # class TestCore(unittest.TestCase):
diff --git a/tests/test_io.py b/tests/test_io.py
index 2934744..aa9d971 100644
--- a/tests/test_io.py
+++ b/tests/test_io.py
@@ -127,9 +127,9 @@ def test_write_hdf(self):
             self.assertEqual(h5['images/img_A'].shape, (2, 16, 16))
             self.assertEqual(h5['images/img_A'].dtype, np.uint16)
             self.assertEqual(h5['images/image_index'].shape, (2,))
-            self.assertEqual(h5['images/nx'].shape, (cam.nx,))
-            self.assertEqual(h5['images/ny'].shape, (cam.ny,))
+            self.assertEqual(h5['images/ix'].shape, (cam.nx,))
+            self.assertEqual(h5['images/iy'].shape, (cam.ny,))
             self.assertEqual(h5['images/image_index'][0], 0)
             self.assertEqual(h5['images/image_index'][1], 1)
-            np.testing.assert_array_equal(h5['images/nx'][()], np.arange(cam.nx))
-            np.testing.assert_array_equal(h5['images/ny'][()], np.arange(cam.ny))
+            np.testing.assert_array_equal(h5['images/ix'][()], np.arange(cam.nx))
+            np.testing.assert_array_equal(h5['images/iy'][()], np.arange(cam.ny))
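
Usage sketch (reviewer note, not part of the patch): the snippet below exercises the updated Particles.generate signature, which now takes the particle size explicitly. The Camera and Laser settings mirror the values used in tests/test_core.py; the import paths are assumptions based on the module layout (core.py, laser.py, particles.py) and may need adjusting to the actual package exports.

    # Assumed import paths; adjust if the package re-exports these elsewhere.
    from synpivimage.camera import Camera
    from synpivimage.core import take_image
    from synpivimage.laser import Laser
    from synpivimage.particles import Particles

    laser = Laser(shape_factor=10 ** 3, width=10)
    cam = Camera(nx=128, ny=128, bit_depth=16, qe=1, sensitivity=1,
                 baseline_noise=0, dark_noise=0, shot_noise=False,
                 fill_ratio_x=1.0, fill_ratio_y=1.0,
                 particle_image_diameter=4, seed=10)

    # Seed particles for a target density of 0.01 particles per pixel (ppp).
    particles = Particles.generate(ppp=0.01,
                                   dx_max=[0, 0.6], dy_max=[0, 0.3], dz_max=[0, 0],
                                   size=2, camera=cam, laser=laser)

    img, particles = take_image(laser, cam, particles, particle_peak_count=1000)
    print(particles.get_ppp(cam.size))  # converges to within 1 % of the requested ppp

The lower-level synpivimage.utils.generate_particles takes the same arguments and additionally accepts the iter_max (default 40) and N_max (default 10**7) keyword arguments introduced by this patch.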
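
Design note on the particle-count correction: particles are now seeded over the sensor area extended by the maximum displacements (_xlim / _ylim), so the per-iteration correction Nadd is scaled by that seeded in-plane extent (x_extent * y_extent) rather than by camera.size. A minimal standalone sketch of that step follows; the helper name is hypothetical and only illustrates the arithmetic used in generate_particles.

    def count_correction(ppp_target, ppp_current, xlim, ylim):
        # ppp deficit that still has to be realized on the sensor
        diff_ppp = ppp_target - ppp_current
        # scale by the seeded in-plane area (sensor plus displacement margins),
        # because new particles are drawn uniformly over that whole extent
        x_extent = xlim[1] - xlim[0]
        y_extent = ylim[1] - ylim[0]
        return int(diff_ppp * x_extent * y_extent)

    # e.g. a 128 x 128 px sensor with dx_max=[0, 0.6] and dy_max=[0, 0.3],
    # i.e. _xlim = (-0.6, 128) and _ylim = (-0.3, 128):
    print(count_correction(0.010, 0.008, (-0.6, 128.0), (-0.3, 128.0)))  # -> 32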
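
Behavioural note: Laser.illuminate and Particles.regenerate now modify and return the same Particles object (no copy), and regenerate re-draws positions from the seeding limits stored by generate_particles (_xlim / _ylim / _zlim), so the realized ppp stays constant within statistical scatter. A hedged continuation of the sketch above, showing one way to produce a second, statistically independent snapshot:

    img_a, particles = take_image(laser, cam, particles, particle_peak_count=1000)
    particles.regenerate()   # same object is returned; positions re-drawn, intensities reset
    img_b, particles = take_image(laser, cam, particles, particle_peak_count=1000)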