diff --git a/.gitignore b/.gitignore
index 4bb4b6697..6177d8d7e 100644
--- a/.gitignore
+++ b/.gitignore
@@ -11,6 +11,7 @@ doc/rst/data_management.rst
 doc/_img/*.png
 tutorial/*.png
 ghostdriver*
+doc/version.py
 ptypy/version.py
 .idea/
 .cache/
diff --git a/.travis.yml b/.travis.yml
index fac0049c0..f24e860eb 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -2,9 +2,9 @@ cache: apt
 sudo: true
 language: python
 python:
-  - 2.7
+  - 3.7
 before_install:
-  - wget https://repo.continuum.io/miniconda/Miniconda2-latest-Linux-x86_64.sh -O miniconda.sh; # grab miniconda
+  - wget https://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh -O miniconda.sh; # grab miniconda
   - bash miniconda.sh -b -p $HOME/miniconda # install miniconda
   - export PATH="$HOME/miniconda/bin:$PATH" # add it to the path
   - conda config --set always_yes yes --set changeps1 no # we want it to always do yes, and start a new console
@@ -19,8 +19,8 @@ env:
 install: True
 
 script:
-  - conda env create --file ${TEST_ENV_NAME}.yml #set up the environment thats in the file
-  - source activate ${TEST_ENV_NAME} # activate it
+  - conda env create --file ${TEST_ENV_NAME}.yml;
+  - source activate ${TEST_ENV_NAME}; # activate it
   - conda install pytest # additional dependencies for the tests
   - pip install pytest-cov
   - pip install coveralls
@@ -32,5 +32,4 @@ script:
 
 after_script:
 - coveralls
-allow_failures:
-- python: "3.3"
+
diff --git a/benchmark/class_benchmarks.py b/benchmark/class_benchmarks.py
index 09daa980c..855de7047 100644
--- a/benchmark/class_benchmarks.py
+++ b/benchmark/class_benchmarks.py
@@ -35,15 +35,15 @@ def add_views(nviews):
 for k in range(int(steps)):
     add_views(nviews)
     gc.collect()
-    print k
-    print C1._recs.values()[0].nbytes / 1e6
+    print(k)
+    print(list(C1._recs.values())[0].nbytes / 1e6)
 
-print C1.formatted_report()
+print(C1.formatted_report())
 u.pause(1)
 gc.collect()
 u.pause(4)
 u.pause(4)
-print C1.formatted_report()
+print(C1.formatted_report())
 
 add_views(nviews)
 u.pause(4)
diff --git a/core_dependencies.yml b/core_dependencies.yml
index 957f3c8b0..2a449e8bf 100644
--- a/core_dependencies.yml
+++ b/core_dependencies.yml
@@ -2,7 +2,7 @@ name: core_dependencies
 channels:
   - conda-forge
 dependencies:
-  - python=2.7
+  - python=3.7
   - numpy
   - scipy
   - h5py
diff --git a/doc/conf.py b/doc/conf.py
index 5a7ec654e..3f345e5ee 100644
--- a/doc/conf.py
+++ b/doc/conf.py
@@ -23,9 +23,9 @@
 
 # generate paramters.rst and other rst
 import subprocess
 subprocess.check_call(['python', 'script2rst.py']) # We need this to have a clean sys.argv
-subprocess.check_call(['python','parameters2rst.py'])
-subprocess.check_call(['python','tmp2rst.py'])
-execfile('version.py')
+subprocess.check_call(['python', 'parameters2rst.py'])
+subprocess.check_call(['python', 'tmp2rst.py'])
+exec(open('version.py').read())
 
 # -- General configuration ------------------------------------------------
@@ -64,8 +64,8 @@ def get_refs(dct, pd, depth=2, indent=''):
     if depth < 0:
         return
 
-    for k, value in dct.iteritems():
-        ref = ', see :py:data:`~%s`' % pd.children[k].entry_point if pd.children.has_key(k) else ''
+    for k, value in dct.items():
+        ref = ', see :py:data:`~%s`' % pd.children[k].entry_point if k in pd.children else ''
         if hasattr(value, 'items'):
             v = str(value.__class__.__name__)
         elif str(value) == value:
diff --git a/doc/parameters2rst.py b/doc/parameters2rst.py
index 37f93563a..be8928777 100644
--- a/doc/parameters2rst.py
+++ b/doc/parameters2rst.py
@@ -40,7 +40,8 @@
     if is_wildcard:
         prst.write(' *Wildcard*: multiple entries with arbitrary names are
accepted.\n\n') - prst.write(' '+desc.help+'\n\n') + # prst.write(' '+desc.help+'\n\n') + prst.write(' ' + desc.help.replace('', '\n').replace('\n', '\n ') + '\n\n') prst.write(' '+desc.doc.replace('','\n').replace('\n', '\n ')+'\n\n') if desc.is_symlink: diff --git a/doc/script2rst.py b/doc/script2rst.py index 0a19c45ac..206a3f07d 100644 --- a/doc/script2rst.py +++ b/doc/script2rst.py @@ -1,5 +1,5 @@ import sys -import StringIO +import io import contextlib import os @@ -12,7 +12,6 @@ if len(sys.argv) == 1: import pkg_resources - import subprocess for script in scripts: scr = pkg_resources.resource_filename('ptypy', tutorial_dir+script) @@ -25,11 +24,11 @@ indent_keys = ['for', 'if', 'with', 'def', 'class'] -sout = StringIO.StringIO() @contextlib.contextmanager def stdoutIO(stdout=None): + sout = io.StringIO() old = sys.stdout if stdout is None: stdout = sout @@ -38,12 +37,7 @@ def stdoutIO(stdout=None): yield stdout finally: sys.stdout = old - -def exec2str(statement): - with stdoutIO() as s: - exec(statement) - print(s.getvalue()) script_name = sys.argv[1] fpy = open(script_name, 'r') @@ -64,37 +58,10 @@ def exec2str(statement): """ % {'fname': os.path.split(script_name)[-1], 'this': sys.argv[0]}) was_comment = True -def debug(x): - print(x) - -def check_for_fig(wline): - if 'savefig' in wline: - print('found fig') - from matplotlib import pyplot as plt - fig = plt.gcf() - fig_name = name + '_%02d' % fig.number - fname = fig_path + fig_name + '.png' - plt.tight_layout() - fig.savefig(fname, dpi=300) - - frst.write('.. figure:: '+'..'+os.sep+fname+'\n') - frst.write(' :width: 70 %\n') - frst.write(' :figclass: highlights\n') - other = [s.strip() for s in wline.split(';')] - print(other) - if len(other) > 1: - frst.write(' :name: '+other[1]+'\n\n') - if len(other) > 2: - frst.write(' '+other[2]+'\n\n') - frst.write('\n') - wline == '' - return True - else: - return False while True: line = fpy.readline() - if line == '': + if not line: break print(line) if 'savefig' in line: @@ -156,22 +123,22 @@ def check_for_fig(wline): func += line2 frst.write(' >>> '+line2) pt = fpy.tell() - exec func+'\n' + exec(func+'\n') continue wline = line.strip() - if wline == '': + if not wline: frst.write('\n') continue with stdoutIO() as sout: - exec wline + exec(wline) out = sout.getvalue() sout.buf = '' + if len(wline) > 0: if line.startswith('# '): wline = line[2:] - #isfig = check_for_fig(wline) was_comment = True frst.write(wline) else: @@ -183,17 +150,11 @@ def check_for_fig(wline): frst.write(wline+'\n') #print out - if out.strip() != '': - #frst.write('\n') + if out.strip(): + print(out) for l in out.split('\n'): frst.write(' '*3+l+'\n') out = '' - """ - frst.write(wline+'\n') - if out.strip()!='': - frst.write('\n:Out:\n ::\n\n') - for l in out.split('\n'): - frst.write(' '*6+l+'\n') - """ + diff --git a/doc/version.py b/doc/version.py index e36958bf6..d6c8607a4 100644 --- a/doc/version.py +++ b/doc/version.py @@ -12,5 +12,5 @@ except: pass else: - version += git_commit.strip() + version += git_commit.strip().decode() diff --git a/extra/ipynb/plotclient.ipynb b/extra/ipynb/plotclient.ipynb index 1d543be4a..646d57fd4 100644 --- a/extra/ipynb/plotclient.ipynb +++ b/extra/ipynb/plotclient.ipynb @@ -70,8 +70,8 @@ "outputs": [], "source": [ "prdict,obdict,metadict = newdata()\n", - "pr = prdict[prdict.keys()[0]]['data']\n", - "ob = obdict[prdict.keys()[0]]['data']\n", + "pr = prdict[list(prdict.keys())[0]]['data']\n", + "ob = obdict[list(prdict.keys())[0]]['data']\n", "fig, axes = plt.subplots(ncols=3, 
figsize=(12,3), dpi=100)\n", "\n", "# Plotting the object\n", diff --git a/full_dependencies.yml b/full_dependencies.yml index 6bc0d9a67..d6bd742a7 100644 --- a/full_dependencies.yml +++ b/full_dependencies.yml @@ -1,9 +1,8 @@ name: full_dependencies channels: - conda-forge - - anaconda dependencies: - - python=2.7 + - python=3.7 - numpy - scipy - matplotlib @@ -11,7 +10,7 @@ dependencies: - pyzmq - pep8 - mpi4py - - pil + - pillow - pyfftw - pip - pip: diff --git a/ptypy/__init__.py b/ptypy/__init__.py index d6cee956f..2f1633878 100644 --- a/ptypy/__init__.py +++ b/ptypy/__init__.py @@ -51,17 +51,17 @@ if not __has_zmq__: __zmq_msg = 'ZeroMQ not found.\nInteraction server & client disabled.\n\ Install python-zmq via the package repositories or with `pip install --user pyzmq`' - verbose.logger.warn(__zmq_msg) + verbose.logger.warning(__zmq_msg) if not __has_mpi4py__: __mpi_msg = 'Message Passaging for Python (mpi4py) not found.\n\ CPU-parallelization disabled.\n\ Install python-mpi4py via the package repositories or with `pip install --user mpi4py`' - verbose.logger.warn(__mpi_msg) + verbose.logger.warning(__mpi_msg) if not __has_matplotlib__: __mpl_msg = 'Plotting for Python (matplotlib) not found.\n\ Plotting disabled.\n\ Install python-matplotlib via the package repositories or with `pip install --user matplotlib`' - verbose.logger.warn(__mpl_msg) + verbose.logger.warning(__mpl_msg) # Start a parameter tree from .utils.descriptor import EvalDescriptor diff --git a/ptypy/core/classes.py b/ptypy/core/classes.py index 3e574fdd0..cd01753de 100644 --- a/ptypy/core/classes.py +++ b/ptypy/core/classes.py @@ -90,7 +90,7 @@ class Base(object): _PREFIX = BASE_PREFIX __slots__ = ['ID','numID','owner','_pool','_recs','_record'] - _fields = [('ID',' 0) - has_weights = (len(weights) > 0) and len(weights.values()[0]) > 0 + has_weights = (len(weights) > 0) and len(list(weights.values())[0]) > 0 if has_data: - dsh = np.array(data.values()[0].shape[-2:]) + dsh = np.array(list(data.values())[0].shape[-2:]) else: dsh = np.array([0, 0]) @@ -822,7 +822,7 @@ def get_data_chunk(self, chunksize, start=None): # Adapt geometric info self.meta.center = cen / float(self.rebin) - self.meta.shape = u.expect2(sh) / self.rebin + self.meta.shape = u.expect2(sh) // self.rebin if self.info.psize is not None: self.meta.psize = u.expect2(self.info.psize) * self.rebin @@ -840,7 +840,7 @@ def get_data_chunk(self, chunksize, start=None): chunk.weights = weights elif has_data: chunk.weights = {} - self.weight2d = weights.values()[0] + self.weight2d = list(weights.values())[0] # Slice positions from common if they are empty too if positions is None or len(positions) == 0: @@ -1115,7 +1115,7 @@ def _mpi_autocenter(self, data, weights): node. """ cen = {} - for k, d in data.iteritems(): + for k, d in data.items(): cen[k] = u.mass_center(d * (weights[k] > 0)) # For some nodes, cen may still be empty. 
@@ -1125,7 +1125,7 @@ def _mpi_autocenter(self, data, weights): # Now master possesses all calculated centers if parallel.master: - cen = np.array(cen.values()).mean(0) + cen = np.array(list(cen.values())).mean(0) cen = parallel.bcast(cen) return cen @@ -1178,7 +1178,7 @@ def _mpi_save_chunk(self, kind='link', chunk=None): for k in ['data', 'weights']: if k in c.keys(): - if hasattr(c[k], 'iteritems'): + if hasattr(c[k], 'items'): v = c[k] else: v = dict(zip(ind, np.asarray(c[k]))) @@ -1364,7 +1364,7 @@ def check(self, frames=None, start=None): with h5py.File(self.source, 'r') as f: d = {} ch_items = [] - for k, v in f['chunks'].iteritems(): + for k, v in f['chunks'].items(): if v is not None: ch_items.append((int(k), v)) @@ -1424,7 +1424,7 @@ def load(self, indices): # Get our data from the ptyd file out = {} with h5py.File(self.source, 'r') as f: - for array, call in calls.iteritems(): + for array, call in calls.items(): out[array] = [np.squeeze(f[path][slce]) for path, slce in call] f.close() @@ -1434,7 +1434,7 @@ def load(self, indices): # indices = out.get('indices', indices) # Wrap in a dict - for k, v in out.iteritems(): + for k, v in out.items(): out[k] = dict(zip(indices, v)) return (out.get(key, {}) for key in ['data', 'positions', 'weights']) diff --git a/ptypy/core/geometry.py b/ptypy/core/geometry.py index 4c0ade50b..872ccbc28 100644 --- a/ptypy/core/geometry.py +++ b/ptypy/core/geometry.py @@ -165,7 +165,7 @@ def __init__(self, owner=None, ID=None, pars=None, **kwargs): for k, v in p.items(): if k in _old2new.keys(): p[_old2new[k]] = v - for k, v in kwargs.iteritems(): + for k, v in kwargs.items(): if k in p: p[k] = v @@ -372,7 +372,7 @@ def propagator(self): return self._propagator def __str__(self): - keys = self.p.keys() + keys = list(self.p.keys()) keys.sort() start = "" for key in keys: @@ -527,7 +527,7 @@ def update(self, geo_pars=None, **kwargs): p = self.p if geo_pars is not None: p.update(geo_pars) - for k, v in kwargs.iteritems(): + for k, v in kwargs.items(): if k in p: p[k] = v @@ -697,7 +697,7 @@ def update(self, geo_pars=None, **kwargs): p = self.p if geo_pars is not None: p.update(geo_pars) - for k, v in kwargs.iteritems(): + for k, v in kwargs.items(): if k in p: p[k] = v diff --git a/ptypy/core/geometry_bragg.py b/ptypy/core/geometry_bragg.py index 55f8c8982..262c04dea 100644 --- a/ptypy/core/geometry_bragg.py +++ b/ptypy/core/geometry_bragg.py @@ -4,9 +4,9 @@ from .. 
import utils as u from ..utils.verbose import logger -from geometry import Geo as _Geo +from .geometry import Geo as _Geo from ..utils.descriptor import EvalDescriptor -from classes import Container, Storage, View +from .classes import Container, Storage, View import numpy as np from scipy.ndimage.interpolation import map_coordinates @@ -377,7 +377,7 @@ def coordinate_shift(self, input_storage, input_space='real', # optionally crop the new array if keep_dims: - d = d[:, pad / 2:shape[1] + pad / 2, :] + d = d[:, pad // 2:shape[1] + pad // 2, :] # construct a new Storage if input_system == 'cartesian': new_psize = S.psize * np.array([1 / self.costheta, 1, 1]) @@ -413,7 +413,7 @@ def coordinate_shift(self, input_storage, input_space='real', d[:, i, :] = np.roll(d[:, i, :], shift, axis=0) # optionally crop the new array if keep_dims: - d = d[pad / 2:shape[0] + pad / 2, :, :] + d = d[pad // 2:shape[0] + pad // 2, :, :] # construct a new Storage if input_system == 'cartesian': new_psize = S.psize * np.array([1, 1 / self.costheta, 1]) @@ -467,8 +467,8 @@ def prepare_3d_probe(self, S_2d, auto_center=False, system='cartesian', layer=0) # center both storages (meaning that the central pixel is the # physical origin) - S_3d.center = np.array(S_3d.shape[1:]) / 2 - S_2d.center = np.array(S_2d.shape[1:]) / 2 + S_3d.center = np.array(S_3d.shape[1:]) // 2 + S_2d.center = np.array(S_2d.shape[1:]) // 2 # find the physical coordinates (zi, yi) of each point in the 3d probe if system == 'natural': diff --git a/ptypy/core/illumination.py b/ptypy/core/illumination.py index 85d2973e2..ce4430400 100644 --- a/ptypy/core/illumination.py +++ b/ptypy/core/illumination.py @@ -310,7 +310,7 @@ def init_storage(storage, pars, energy=None, **kwargs): p = DEFAULT.copy(depth=3) model = None - if hasattr(pars, 'items') or hasattr(pars, 'iteritems'): + if hasattr(pars, 'items') or hasattr(pars, 'items'): # This is a dict p.update(pars, in_place_depth=3) diff --git a/ptypy/core/manager.py b/ptypy/core/manager.py index 553f29a4e..b3b403b1f 100644 --- a/ptypy/core/manager.py +++ b/ptypy/core/manager.py @@ -586,7 +586,7 @@ def _initialize_probe(self, probe_ids): logger.info('\n'+headerline('Probe initialization', 'l')) # pick storage from container, there's only one probe - pid = probe_ids.keys()[0] + pid = list(probe_ids.keys())[0] s = self.ptycho.probe.S.get(pid) logger.info('Initializing probe storage %s' % pid) @@ -605,7 +605,7 @@ def _initialize_object(self, object_ids): logger.info('\n'+headerline('Object initialization', 'l')) # pick storage from container, there's only one object - oid = object_ids.keys()[0] + oid = list(object_ids.keys())[0] s = self.ptycho.obj.S.get(oid) logger.info('Initializing probe storage %s' % oid) @@ -708,8 +708,8 @@ def _create_pods(self): label = self.label # Get a list of probe and object that already exist - existing_probes = self.ptycho.probe.storages.keys() - existing_objects = self.ptycho.obj.storages.keys() + existing_probes = list(self.ptycho.probe.storages.keys()) + existing_objects = list(self.ptycho.obj.storages.keys()) logger.info('Found these probes : ' + ', '.join(existing_probes)) logger.info('Found these objects: ' + ', '.join(existing_objects)) @@ -880,7 +880,7 @@ def _initialize_probe(self, probe_ids): logger.info('Found no photon count for probe in parameters.\nUsing photon count %.2e from photon report' % phot_max) illu_pars['photons'] = phot_max elif np.abs(np.log10(phot)-np.log10(phot_max)) > 1: - logger.warn('Photon count from input parameters (%.2e) differs from 
statistics (%.2e) by more than a magnitude' % (phot, phot_max)) + logger.warning('Photon count from input parameters (%.2e) differs from statistics (%.2e) by more than a magnitude' % (phot, phot_max)) if (self.p.coherence.num_probe_modes>1) and (type(illu_pars) is not np.ndarray): @@ -944,7 +944,7 @@ def _initialize_object(self, object_ids): Full.DEFAULT = defaults_tree['scan.Full'].make_default(99) -import geometry_bragg +from . import geometry_bragg defaults_tree['scan'].add_child(EvalDescriptor('Bragg3dModel')) defaults_tree['scan.Bragg3dModel'].add_child(illumination.illumination_desc, copy=True) defaults_tree['scan.Bragg3dModel.illumination'].prune_child('diversity') @@ -1046,7 +1046,7 @@ def _mpi_redistribute_raw_frames(self, dp): # now we can work out which node should own a certain position def __node(pos): - return int((pos - lims[0]) / domain_width) + return (pos - lims[0]) // domain_width # work out which node should have each of my buffered frames N = parallel.size @@ -1066,7 +1066,7 @@ def __node(pos): if receiver == parallel.rank: continue lst = [] - for idx, rec in senditems.iteritems(): + for idx, rec in senditems.items(): if rec == receiver: lst.append(dp['iterable'][idx]) parallel.send(lst, dest=receiver) @@ -1120,7 +1120,7 @@ def _make_3d_data_package(self): complete 3d positions. """ dp_new = {'iterable': []} - for idx, dct in self.buffered_frames.iteritems(): + for idx, dct in self.buffered_frames.items(): if len(dct['angles']) == self.geometries[0].shape[0]: # this one is ready to go logger.debug('3d diffraction data for position %d ready, will create POD' % idx) @@ -1223,7 +1223,7 @@ def _initialize_probe(self, probe_ids): logger.info('\n'+headerline('Probe initialization', 'l')) # pick storage from container, there's only one probe - pid = probe_ids.keys()[0] + pid = list(probe_ids.keys())[0] s = self.ptycho.probe.S.get(pid) logger.info('Initializing probe storage %s' % pid) @@ -1272,7 +1272,7 @@ def __init__(self, ptycho, pars): # Create scan model objects self.scans = OrderedDict() - for label, scan_pars in pars.iteritems(): + for label, scan_pars in pars.items(): # find out which scan model class to instantiate if scan_pars.name in u.all_subclasses(ScanModel, names=True): cls = eval(scan_pars.name) @@ -1294,7 +1294,7 @@ def _from_dict(cls, dct): @property def data_available(self): - return any(s.data_available for s in self.scans.values()) + return any(s.data_available for s in list(self.scans.values())) def new_data(self): """ @@ -1310,5 +1310,5 @@ def new_data(self): logger.info('Processing new data.') # Attempt to get new data - for label, scan in self.scans.iteritems(): + for label, scan in self.scans.items(): new_data = scan.new_data() diff --git a/ptypy/core/paths.py b/ptypy/core/paths.py index 943bf3596..caf86d292 100644 --- a/ptypy/core/paths.py +++ b/ptypy/core/paths.py @@ -113,6 +113,6 @@ def get_path(self, path, runtime): if __name__ == "__main__": pa = Paths() - print pa.auto_file() - print pa.plot_file() - print pa.recon_file() + print(pa.auto_file()) + print(pa.plot_file()) + print(pa.recon_file()) diff --git a/ptypy/core/ptycho.py b/ptypy/core/ptycho.py index 303cb5651..bae668db2 100644 --- a/ptypy/core/ptycho.py +++ b/ptypy/core/ptycho.py @@ -9,7 +9,7 @@ """ import numpy as np import time -import paths +from . import paths from collections import OrderedDict from .. import utils as u @@ -425,7 +425,7 @@ def init_communication(self): port = self.interactor.activate() if port is None: - logger.warn('Interaction server initialization failed. 
' + logger.warning('Interaction server initialization failed. ' 'Continuing without server.') self.interactor = None self.plotter = None @@ -700,7 +700,7 @@ def finalize(self): citation_info = '\n'.join([headerline('This reconstruction relied on the following work', 'l', '='), str(self.citations), headerline('', 'l', '=')]) - logger.warn(citation_info) + logger.warning(citation_info) @classmethod def _from_dict(cls, dct): @@ -727,7 +727,7 @@ def load_run(cls, runfile, load_data=True): P : Ptycho Ptycho instance with ``level == 2`` """ - import save_load + from . import save_load from .. import io # Determine if this is a .pty file @@ -772,7 +772,7 @@ def load_run(cls, runfile, load_data=True): logger.info('Regenerating exit waves') P.exit.reformat() - P.model._initialize_exit(P.pods.values()) + P.model._initialize_exit(list(P.pods.values())) if load_data: logger.info('Loading data') @@ -798,7 +798,7 @@ def save_run(self, alt_file=None, kind='minimal', force_overwrite=True): - *'full_flat'*, (almost) complete environment """ - import save_load + from . import save_load from .. import io dest_file = None @@ -816,13 +816,13 @@ def save_run(self, alt_file=None, kind='minimal', force_overwrite=True): import os if os.path.exists(dest_file): if force_overwrite: - logger.warn('Save file exists but will be overwritten ' + logger.warning('Save file exists but will be overwritten ' '(force_overwrite is True)') elif not force_overwrite: raise RuntimeError('File %s exists! Operation cancelled.' % dest_file) elif force_overwrite is None: - ans = raw_input('File %s exists! Overwrite? [Y]/N' + ans = input('File %s exists! Overwrite? [Y]/N' % dest_file) if ans and ans.upper() != 'Y': raise RuntimeError('Operation cancelled by user.') @@ -876,7 +876,7 @@ def save_run(self, alt_file=None, kind='minimal', force_overwrite=True): try: defaults_tree['ptycho'].validate(self.p) # check the parameters are actually able to be read back in except RuntimeError: - logger.warn("The parameters we are saving won't pass a validator check!") + logger.warning("The parameters we are saving won't pass a validator check!") dump.pars = self.p.copy() # _to_dict(Recursive=True) dump.runtime = self.runtime.copy() # Discard some bits of runtime to save space @@ -907,7 +907,7 @@ def save_run(self, alt_file=None, kind='minimal', force_overwrite=True): try: defaults_tree['ptycho'].validate(self.p) # check the parameters are actually able to be read back in except RuntimeError: - logger.warn("The parameters we are saving won't pass a validator check!") + logger.warning("The parameters we are saving won't pass a validator check!") minimal.pars = self.p.copy() # _to_dict(Recursive=True) minimal.runtime = self.runtime.copy() @@ -938,7 +938,7 @@ def print_stats(self, table_format=None, detail='summary'): '-' * 80 + '\n'] header = True - for ID, C in self.containers.iteritems(): + for ID, C in self.containers.items(): info.append(C.formatted_report(table_format, offset, include_header=header)) @@ -947,7 +947,7 @@ def print_stats(self, table_format=None, detail='summary'): info.append('\n') if str(detail) != 'summary': - for ID, C in self.containers.iteritems(): + for ID, C in self.containers.items(): info.append(C.report()) logger.info(''.join(info), extra={'allprocesses': True}) diff --git a/ptypy/core/sample.py b/ptypy/core/sample.py index 6173c897d..a425953e9 100644 --- a/ptypy/core/sample.py +++ b/ptypy/core/sample.py @@ -196,7 +196,7 @@ def init_storage(storage, sample_pars=None, energy=None): sam = sample_pars p = DEFAULT.copy(depth=3) 
model = None - if hasattr(sam, 'items') or hasattr(sam, 'iteritems'): + if hasattr(sam, 'items') or hasattr(sam, 'items'): # This is a dict p.update(sam, in_place_depth=3) diff --git a/ptypy/core/save_load.py b/ptypy/core/save_load.py index f4a93e834..24f050f1b 100644 --- a/ptypy/core/save_load.py +++ b/ptypy/core/save_load.py @@ -55,16 +55,11 @@ def _pool(obj): # If object contains references, make shallow copy and # recursively iterate over the copy - # if type(obj) in _dict_like: - # pool[ID] = dict(obj) - # nobj = pool[ID] - # for k, v in nobj.iteritems(): - # nobj[k] = _pool(v) - if hasattr(obj, 'iteritems'): + if hasattr(obj, 'items'): # pool[ID] = {} nobj = {} pool[ID] = nobj - for k, v in obj.iteritems(): + for k, v in obj.items(): # pool[ID][k] = _pool(v) nobj[k] = _pool(v) @@ -111,7 +106,7 @@ def link(pool, replace_objects_only=False, preserve_input_pool=True): pool = pool.copy() # First replace all occurrences of object dictionaries with their # respective objects. Since all objects appear only once, this is a safe. - for k, v in pool.iteritems(): + for k, v in pool.items(): # At this point we can make copies of objects, # since they are uniquely referenced here if preserve_input_pool: @@ -138,17 +133,16 @@ def link(pool, replace_objects_only=False, preserve_input_pool=True): return pool calls = [] - keys = pool.keys() + keys = list(pool.keys()) def _unpool(obj): calls.append(None) - # for k,v in pool.iteritems(): if str(obj) in keys: # Replace key by object. As these keys ALWAYS refer to objects # and not to other keys, no further checking is needed obj = pool[obj] if type(obj) in _dict_like and id(obj) not in used: used.append(id(obj)) - for k, v in obj.iteritems(): + for k, v in obj.items(): obj[k] = _unpool(v) elif type(obj) in _list_like and id(obj) not in used: used.append(id(obj)) @@ -156,7 +150,7 @@ def _unpool(obj): obj[k] = _unpool(v) elif type(obj) in _ptypy and id(obj) not in used: used.append(id(obj)) - for k, v in obj.__dict__.iteritems(): + for k, v in obj.__dict__.items(): obj.__dict__[k]= _unpool(v) return obj diff --git a/ptypy/core/xy.py b/ptypy/core/xy.py index 3aeddebcf..e5899b02d 100644 --- a/ptypy/core/xy.py +++ b/ptypy/core/xy.py @@ -79,7 +79,7 @@ def from_pars(xypars=None): """ p = DEFAULT.copy(depth=3) model = None - if hasattr(xypars, 'items') or hasattr(xypars, 'iteritems'): + if hasattr(xypars, 'items') or hasattr(xypars, 'items'): # This is a dict p.update(xypars, in_place_depth=3) elif xypars is None: @@ -109,7 +109,7 @@ def from_pars(xypars=None): pos = np.asarray(p.model) elif p.model == 'round': e, l, s = _complete(p.extent, p.steps, p.spacing) - pos = round_scan(s[0], l[0]/2) + pos = round_scan(s[0], l[0]//2) elif p.model == 'spiral': e, l, s = _complete(p.extent, p.steps, p.spacing) pos = spiral_scan(s[0], e[0]/2) @@ -292,7 +292,7 @@ def spiral_scan(dr=1.5e-6, r=7.5e-6, maxpts=None): maxpts = 100000 positions = [] - for k in xrange(maxpts): + for k in range(maxpts): theta = alpha * np.sqrt(k) rr = beta * theta if rr > r: diff --git a/ptypy/engines/Bragg3d_engines.py b/ptypy/engines/Bragg3d_engines.py index d3f3aa858..2314eccab 100644 --- a/ptypy/engines/Bragg3d_engines.py +++ b/ptypy/engines/Bragg3d_engines.py @@ -129,7 +129,7 @@ def object_update(self): return # access object storage and geometry through any active pod - for name, pod in self.pods.iteritems(): + for name, pod in self.pods.items(): if pod.active: break geo = pod.geometry @@ -200,7 +200,7 @@ def object_update(self): shigh = s[len(sprofile) - 1] for i in range(1, 
len(sprofile)-icenter-1): if parallel.master: - print sprofile[icenter+i] / sprofile[icenter] + print(sprofile[icenter+i] / sprofile[icenter]) if (sprofile[icenter+i] / sprofile[icenter] < cutoff or (self.p.sample_support.shrinkwrap.monotonic and sprofile[icenter+i] > sprofile[icenter+i-1])): @@ -243,7 +243,7 @@ def _fill_runtime(self): try: assert self.p.sample_support.shrinkwrap.plot - except AttributeError, AssertionError: + except (AttributeError, AssertionError): return try: diff --git a/ptypy/engines/DM.py b/ptypy/engines/DM.py index e414541cb..1aace76c6 100644 --- a/ptypy/engines/DM.py +++ b/ptypy/engines/DM.py @@ -179,14 +179,14 @@ def engine_prepare(self): """ self.pbound = {} mean_power = 0. - for name, s in self.di.storages.iteritems(): + for name, s in self.di.storages.items(): self.pbound[name] = ( .25 * self.p.fourier_relax_factor**2 * s.pbound_stub) mean_power += s.mean_power self.mean_power = mean_power / len(self.di.storages) # Fill object with coverage of views - for name, s in self.ob_viewcover.storages.iteritems(): + for name, s in self.ob_viewcover.storages.items(): s.fill(s.get_view_coverage()) def engine_iterate(self, num=1): @@ -255,7 +255,7 @@ def fourier_update(self): DM Fourier constraint update (including DM step). """ error_dct = {} - for name, di_view in self.di.views.iteritems(): + for name, di_view in self.di.views.items(): if not di_view.active: continue pbound = self.pbound[di_view.storage.ID] @@ -298,7 +298,7 @@ def overlap_update(self): def center_probe(self): if self.p.probe_center_tol is not None: - for name, s in self.pr.storages.iteritems(): + for name, s in self.pr.storages.items(): c1 = u.mass_center(u.abs2(s.data).sum(0)) c2 = np.asarray(s.shape[-2:]) // 2 # fft convention should however use geometry instead @@ -325,7 +325,7 @@ def object_update(self): ob.fill(0.0) ob_nrm.fill(0.) else: - for name, s in self.ob.storages.iteritems(): + for name, s in self.ob.storages.items(): # The amplitude of the regularization term has to be scaled with the # power of the probe (which is estimated from the power in diffraction patterns). # This estimate assumes that the probe power is uniformly distributed through the @@ -346,14 +346,14 @@ def object_update(self): ob_nrm.storages[name].fill(cfact) # DM update per node - for name, pod in self.pods.iteritems(): + for name, pod in self.pods.items(): if not pod.active: continue pod.object += pod.probe.conj() * pod.exit * pod.object_weight ob_nrm[pod.ob_view] += u.cabs2(pod.probe) * pod.object_weight # Distribute result with MPI - for name, s in self.ob.storages.iteritems(): + for name, s in self.ob.storages.items(): # Get the np arrays nrm = ob_nrm.storages[name].data parallel.allreduce(s.data) @@ -387,7 +387,7 @@ def probe_update(self): # BE: was this asymmetric in original code # only because of the number of MPI nodes ? if parallel.master: - for name, s in pr.storages.iteritems(): + for name, s in pr.storages.items(): # Instead of Npts_scan, the number of views should be considered # Please note that a call to s.views may be # slow for many views in the probe. @@ -399,7 +399,7 @@ def probe_update(self): pr_nrm.fill(0.0) # DM update per node - for name, pod in self.pods.iteritems(): + for name, pod in self.pods.items(): if not pod.active: continue pod.probe += pod.object.conj() * pod.exit * pod.probe_weight @@ -408,7 +408,7 @@ def probe_update(self): change = 0. 
# Distribute result with MPI - for name, s in pr.storages.iteritems(): + for name, s in pr.storages.items(): # MPI reduction of results nrm = pr_nrm.storages[name].data parallel.allreduce(s.data) diff --git a/ptypy/engines/DM_simple.py b/ptypy/engines/DM_simple.py index aedd3ee73..c99bb3ff2 100644 --- a/ptypy/engines/DM_simple.py +++ b/ptypy/engines/DM_simple.py @@ -107,7 +107,7 @@ def engine_iterate(self, num): # fourier update error_dct = {} - for name, di_view in self.di.V.iteritems(): + for name, di_view in self.di.V.items(): if not di_view.active: continue error_dct[name] = basic_fourier_update( @@ -165,14 +165,14 @@ def object_update(self): self.ob_nrm.fill(0.) # DM update per node: sum over all the positions - for name, pod in self.pods.iteritems(): + for name, pod in self.pods.items(): if not pod.active: continue pod.object += pod.probe.conj() * pod.exit self.ob_nrm[pod.ob_view] += u.cabs2(pod.probe) # Distribute result with MPI - for name, s in self.ob.S.iteritems(): + for name, s in self.ob.S.items(): nrm = self.ob_nrm.S[name].data parallel.allreduce(s.data) parallel.allreduce(nrm) @@ -188,7 +188,7 @@ def probe_update(self): self.pr_nrm.fill(0.0) # DM update per node: sum over all the positions - for name, pod in self.pods.iteritems(): + for name, pod in self.pods.items(): if not pod.active: continue pod.probe += pod.object.conj() * pod.exit @@ -196,7 +196,7 @@ def probe_update(self): # Distribute result with MPI and keep track of the overlap convergence. change = 0. - for name, s in self.pr.S.iteritems(): + for name, s in self.pr.S.items(): # MPI reduction of results nrm = self.pr_nrm.S[name].data parallel.allreduce(s.data) diff --git a/ptypy/engines/ML.py b/ptypy/engines/ML.py index cea7b65ba..87c1f95a6 100644 --- a/ptypy/engines/ML.py +++ b/ptypy/engines/ML.py @@ -183,7 +183,7 @@ def engine_prepare(self): when new data arrives. """ # - # fill object with coverage of views - # - for name,s in self.ob_viewcover.S.iteritems(): + # - for name,s in self.ob_viewcover.S.items(): # - s.fill(s.get_view_coverage()) pass @@ -204,7 +204,7 @@ def engine_iterate(self, num=1): if self.p.probe_update_start <= self.curiter: # Apply probe support if needed - for name, s in new_pr_grad.storages.iteritems(): + for name, s in new_pr_grad.storages.items(): support = self.probe_support.get(name) if support is not None: s.data *= support @@ -214,7 +214,7 @@ def engine_iterate(self, num=1): # Smoothing preconditioner if self.smooth_gradient: self.smooth_gradient.sigma *= (1. - self.p.smooth_gradient_decay) - for name, s in new_ob_grad.storages.iteritems(): + for name, s in new_ob_grad.storages.items(): s.data[:] = self.smooth_gradient(s.data) # probe/object rescaling @@ -260,7 +260,7 @@ def engine_iterate(self, num=1): # Smoothing preconditioner if self.smooth_gradient: - for name, s in self.ob_h.storages.iteritems(): + for name, s in self.ob_h.storages.items(): s.data[:] -= self.smooth_gradient(self.ob_grad.storages[name].data) else: self.ob_h -= self.ob_grad @@ -374,7 +374,7 @@ def __del__(self): del self.pr_grad # Remove working attributes - for name, diff_view in self.di.views.iteritems(): + for name, diff_view in self.di.views.items(): if not diff_view.active: continue try: @@ -416,7 +416,7 @@ def __init__(self, MLengine): self.weights = self.engine.di.copy(self.engine.di.ID + '_weights') # FIXME: This part needs to be updated once statistical weights are properly # supported in the data preparation. 
- for name, di_view in self.di.views.iteritems(): + for name, di_view in self.di.views.items(): if not di_view.active: continue self.weights[di_view] = (self.Irenorm * di_view.pod.ma_view.data @@ -445,7 +445,7 @@ def new_grad(self): error_dct = {} # Outer loop: through diffraction patterns - for dname, diff_view in self.di.views.iteritems(): + for dname, diff_view in self.di.views.items(): if not diff_view.active: continue @@ -457,7 +457,7 @@ def new_grad(self): f = {} # First pod loop: compute total intensity - for name, pod in diff_view.pods.iteritems(): + for name, pod in diff_view.pods.items(): if not pod.active: continue f[name] = pod.fw(pod.probe * pod.object) @@ -473,7 +473,7 @@ def new_grad(self): # Second pod loop: gradients computation LLL = np.sum((w * DI**2).astype(np.float64)) - for name, pod in diff_view.pods.iteritems(): + for name, pod in diff_view.pods.items(): if not pod.active: continue xi = pod.bw(w * DI * f[name]) @@ -491,7 +491,7 @@ def new_grad(self): # Object regularizer if self.regularizer: - for name, s in self.ob.storages.iteritems(): + for name, s in self.ob.storages.items(): self.ob_grad.storages[name].data += self.regularizer.grad( s.data) LL += self.regularizer.LL @@ -510,7 +510,7 @@ def poly_line_coeffs(self, ob_h, pr_h): Brenorm = 1. / self.LL[0]**2 # Outer loop: through diffraction patterns - for dname, diff_view in self.di.views.iteritems(): + for dname, diff_view in self.di.views.items(): if not diff_view.active: continue @@ -522,7 +522,7 @@ def poly_line_coeffs(self, ob_h, pr_h): A1 = None A2 = None - for name, pod in diff_view.pods.iteritems(): + for name, pod in diff_view.pods.items(): if not pod.active: continue f = pod.fw(pod.probe * pod.object) @@ -554,7 +554,7 @@ def poly_line_coeffs(self, ob_h, pr_h): # Object regularizer if self.regularizer: - for name, s in self.ob.storages.iteritems(): + for name, s in self.ob.storages.items(): B += Brenorm * self.regularizer.poly_line_coeffs( ob_h.storages[name].data, s.data) @@ -575,7 +575,7 @@ def __init__(self, MLengine): BaseModel.__init__(self, MLengine) from scipy import special self.LLbase = {} - for name, di_view in self.di.views.iteritems(): + for name, di_view in self.di.views.items(): if not di_view.active: continue self.LLbase[name] = special.gammaln(di_view.data+1).sum() @@ -595,7 +595,7 @@ def new_grad(self): error_dct = {} # Outer loop: through diffraction patterns - for dname, diff_view in self.di.views.iteritems(): + for dname, diff_view in self.di.views.items(): if not diff_view.active: continue @@ -607,7 +607,7 @@ def new_grad(self): f = {} # First pod loop: compute total intensity - for name, pod in diff_view.pods.iteritems(): + for name, pod in diff_view.pods.items(): if not pod.active: continue f[name] = pod.fw(pod.probe * pod.object) @@ -623,7 +623,7 @@ def new_grad(self): # Second pod loop: gradients computation LLL = self.LLbase[dname] + (m * (Imodel - I * np.log(Imodel))).sum().astype(np.float64) - for name, pod in diff_view.pods.iteritems(): + for name, pod in diff_view.pods.items(): if not pod.active: continue xi = pod.bw(DI * f[name]) @@ -641,7 +641,7 @@ def new_grad(self): # Object regularizer if self.regularizer: - for name, s in self.ob.storages.iteritems(): + for name, s in self.ob.storages.items(): self.ob_grad.storages[name].data += self.regularizer.grad( s.data) LL += self.regularizer.LL @@ -659,7 +659,7 @@ def poly_line_coeffs(self, ob_h, pr_h): Brenorm = 1/(self.tot_measpts * self.LL[0])**2 # Outer loop: through diffraction patterns - for dname, diff_view in 
self.di.views.iteritems(): + for dname, diff_view in self.di.views.items(): if not diff_view.active: continue @@ -671,7 +671,7 @@ def poly_line_coeffs(self, ob_h, pr_h): A1 = None A2 = None - for name, pod in diff_view.pods.iteritems(): + for name, pod in diff_view.pods.items(): if not pod.active: continue f = pod.fw(pod.probe * pod.object) @@ -705,7 +705,7 @@ def poly_line_coeffs(self, ob_h, pr_h): # Object regularizer if self.regularizer: - for name, s in self.ob.storages.iteritems(): + for name, s in self.ob.storages.items(): B += Brenorm * self.regularizer.poly_line_coeffs( ob_h.storages[name].data, s.data) @@ -789,7 +789,7 @@ def prepare_smoothing_preconditioner(amplitude): if amplitude == 0.: return None - class GaussFilt: + class GaussFilt(object): def __init__(self, sigma): self.sigma = sigma diff --git a/ptypy/engines/MLtmp.py b/ptypy/engines/MLtmp.py deleted file mode 100644 index f76ec81ce..000000000 --- a/ptypy/engines/MLtmp.py +++ /dev/null @@ -1,659 +0,0 @@ -# -*- coding: utf-8 -*- -""" -Maximum Likelihood reconstruction engine. - -TODO: - * Implement other regularizers - -This file is part of the PTYPY package. - - :copyright: Copyright 2014 by the PTYPY team, see AUTHORS. - :license: GPLv2, see LICENSE for details. -""" -import numpy as np -import time -from .. import utils as u -from ..utils.verbose import logger -from ..utils import parallel -from utils import Cnorm2, Cdot -from . import BaseEngine -from .. import defaults_tree - -__all__ = ['ML'] - -@register() -class ML(BaseEngine): - """ - Maximum likelihood reconstruction engine. - - - Defaults: - - [name] - default = ML - type = str - help = - doc = - - [ML_type] - default = 'gaussian' - type = str - help = Likelihood model - choices = ['gaussian','poisson','euclid'] - doc = One of ‘gaussian’, poisson’ or ‘euclid’. Only 'gaussian' is implemented. - - [floating_intensities] - default = False - type = bool - help = Adaptive diffraction pattern rescaling - doc = If True, allow for adaptative rescaling of the diffraction pattern intensities (to correct for incident beam intensity fluctuations). - - [intensity_renormalization] - default = 1. - type = float - lowlim = 0.0 - help = Rescales the intensities so they can be interpreted as Poisson counts. - - [reg_del2] - default = False - type = bool - help = Whether to use a Gaussian prior (smoothing) regularizer - - [reg_del2_amplitude] - default = .01 - type = float - lowlim = 0.0 - help = Amplitude of the Gaussian prior if used - - [smooth_gradient] - default = 0.0 - type = float - help = Smoothing preconditioner - doc = Sigma for gaussian filter (turned off if 0.) - - [smooth_gradient_decay] - default = 0. - type = float - help = Decay rate for smoothing preconditioner - doc = Sigma for gaussian filter will reduce exponentially at this rate - - [scale_precond] - default = False - type = bool - help = Whether to use the object/probe scaling preconditioner - doc = This parameter can give faster convergence for weakly scattering samples. - - [scale_probe_object] - default = 1. - type = float - lowlim = 0.0 - help = Relative scale of probe to object - - [probe_update_start] - default = 2 - type = int - lowlim = 0 - help = Number of iterations before probe update starts - - """ - - def __init__(self, ptycho_parent, pars=None): - """ - Maximum likelihood reconstruction engine. 
- """ - super(ML, self).__init__(ptycho_parent, pars) - - p = self.DEFAULT.copy() - if pars is not None: - p.update(pars) - self.p = p - - # Instance attributes - - # Object gradient - self.ob_grad = None - - # Object minimization direction - self.ob_h = None - - # Probe gradient - self.pr_grad = None - - # Probe minimization direction - self.pr_h = None - - # Other - self.tmin = None - self.ML_model = None - self.smooth_gradient = None - self.scale_p_o = None - self.scale_p_o_memory = .9 - - def engine_initialize(self): - """ - Prepare for ML reconstruction. - """ - - # Object gradient and minimization direction - self.ob_grad = self.ob.copy(self.ob.ID + '_grad', fill=0.) - self.ob_h = self.ob.copy(self.ob.ID + '_h', fill=0.) - - # Probe gradient and minimization direction - self.pr_grad = self.pr.copy(self.pr.ID + '_grad', fill=0.) - self.pr_h = self.pr.copy(self.pr.ID + '_h', fill=0.) - - self.tmin = 1. - - # Create noise model - if self.p.ML_type.lower() == "gaussian": - self.ML_model = ML_Gaussian(self) - elif self.p.ML_type.lower() == "poisson": - self.ML_model = ML_Gaussian(self) - elif self.p.ML_type.lower() == "euclid": - self.ML_model = ML_Gaussian(self) - else: - raise RuntimeError("Unsupported ML_type: '%s'" % self.p.ML_type) - - # Other options - self.smooth_gradient = prepare_smoothing_preconditioner( - self.p.smooth_gradient) - - def engine_prepare(self): - """ - Last minute initialization, everything, that needs to be recalculated, - when new data arrives. - """ - # - # fill object with coverage of views - # - for name,s in self.ob_viewcover.S.iteritems(): - # - s.fill(s.get_view_coverage()) - pass - - def engine_iterate(self, num=1): - """ - Compute `num` iterations. - """ - ######################## - # Compute new gradient - ######################## - tg = 0. - tc = 0. - ta = time.time() - for it in range(num): - t1 = time.time() - new_ob_grad, new_pr_grad, error_dct = self.ML_model.new_grad() - tg += time.time() - t1 - - if self.p.probe_update_start <= self.curiter: - # Apply probe support if needed - for name, s in new_pr_grad.storages.iteritems(): - support = self.probe_support.get(name) - if support is not None: - s.data *= support - else: - new_pr_grad.fill(0.) - - # Smoothing preconditioner - if self.smooth_gradient: - self.smooth_gradient.sigma *= (1. - self.p.smooth_gradient_decay) - for name, s in new_ob_grad.storages.iteritems(): - s.data[:] = self.smooth_gradient(s.data) - - # probe/object rescaling - if self.p.scale_precond: - cn2_new_pr_grad = Cnorm2(new_pr_grad) - if cn2_new_pr_grad > 1e-5: - scale_p_o = (self.p.scale_probe_object * Cnorm2(new_ob_grad) - / Cnorm2(new_pr_grad)) - else: - scale_p_o = self.p.scale_probe_object - if self.scale_p_o is None: - self.scale_p_o = scale_p_o - else: - self.scale_p_o = self.scale_p_o ** self.scale_p_o_memory - self.scale_p_o *= scale_p_o ** (1-self.scale_p_o_memory) - logger.debug('Scale P/O: %6.3g' % scale_p_o) - else: - self.scale_p_o = self.p.scale_probe_object - - ############################ - # Compute next conjugate - ############################ - if self.curiter == 0: - bt = 0. 
- else: - bt_num = (self.scale_p_o - * (Cnorm2(new_pr_grad) - - np.real(Cdot(new_pr_grad, self.pr_grad))) - + (Cnorm2(new_ob_grad) - - np.real(Cdot(new_ob_grad, self.ob_grad)))) - - bt_denom = self.scale_p_o*Cnorm2(self.pr_grad) + Cnorm2(self.ob_grad) - - bt = max(0, bt_num/bt_denom) - - # verbose(3,'Polak-Ribiere coefficient: %f ' % bt) - - self.ob_grad << new_ob_grad - self.pr_grad << new_pr_grad - """ - for name, s in self.ob_grad.storages.iteritems(): - s.data[:] = new_ob_grad.storages[name].data - for name, s in self.pr_grad.storages.iteritems(): - s.data[:] = new_pr_grad.storages[name].data - """ - # 3. Next conjugate - self.ob_h *= bt / self.tmin - - # Smoothing preconditioner - if self.smooth_gradient: - for name, s in self.ob_h.storages.iteritems(): - s.data[:] -= self.smooth_gradient(self.ob_grad.storages[name].data) - else: - self.ob_h -= self.ob_grad - self.pr_h *= bt / self.tmin - self.pr_grad *= self.scale_p_o - self.pr_h -= self.pr_grad - """ - for name,s in self.ob_h.storages.iteritems(): - s.data *= bt - s.data -= self.ob_grad.storages[name].data - - for name,s in self.pr_h.storages.iteritems(): - s.data *= bt - s.data -= scale_p_o * self.pr_grad.storages[name].data - """ - # 3. Next conjugate - # ob_h = self.ob_h - # ob_h *= bt - - # Smoothing preconditioner not implemented. - # if self.smooth_gradient: - # ob_h -= object_smooth_filter(grad_obj) - # else: - # ob_h -= ob_grad - - # ob_h -= ob_grad - # pr_h *= bt - # pr_h -= scale_p_o * pr_grad - - # Minimize - for now always use quadratic approximation - # (i.e. single Newton-Raphson step) - # In principle, the way things are now programmed this part - # could be iterated over in a real NR style. - t2 = time.time() - B = self.ML_model.poly_line_coeffs(self.ob_h, self.pr_h) - tc += time.time() - t2 - - if np.isinf(B).any() or np.isnan(B).any(): - logger.warning( - 'Warning! inf or nan found! Trying to continue...') - B[np.isinf(B)] = 0. - B[np.isnan(B)] = 0. - - self.tmin = -.5 * B[1] / B[2] - self.ob_h *= self.tmin - self.pr_h *= self.tmin - self.ob += self.ob_h - self.pr += self.pr_h - """ - for name,s in self.ob.storages.iteritems(): - s.data += tmin*self.ob_h.storages[name].data - for name,s in self.pr.storages.iteritems(): - s.data += tmin*self.pr_h.storages[name].data - """ - # Newton-Raphson loop would end here - - # increase iteration counter - self.curiter +=1 - - logger.info('Time spent in gradient calculation: %.2f' % tg) - logger.info(' .... in coefficient calculation: %.2f' % tc) - return error_dct # np.array([[self.ML_model.LL[0]] * 3]) - - def engine_finalize(self): - """ - Delete temporary containers. - """ - del self.ptycho.containers[self.ob_grad.ID] - del self.ob_grad - del self.ptycho.containers[self.ob_h.ID] - del self.ob_h - del self.ptycho.containers[self.pr_grad.ID] - del self.pr_grad - del self.ptycho.containers[self.pr_h.ID] - del self.pr_h - - -class ML_Gaussian(object): - """ - """ - - def __init__(self, MLengine): - """ - Core functions for ML computation using a Gaussian model. - """ - self.engine = MLengine - - # Transfer commonly used attributes from ML engine - self.di = self.engine.di - self.p = self.engine.p - self.ob = self.engine.ob - self.pr = self.engine.pr - - if self.p.intensity_renormalization is None: - self.Irenorm = 1. - else: - self.Irenorm = self.p.intensity_renormalization - - # Create working variables - # New object gradient - self.ob_grad = self.engine.ob.copy(self.ob.ID + '_ngrad', fill=0.) 
- # New probe gradient - self.pr_grad = self.engine.pr.copy(self.pr.ID + '_ngrad', fill=0.) - self.LL = 0. - - # Gaussian model requires weights - # TODO: update this part of the code once actual weights are passed in the PODs - self.weights = self.engine.di.copy(self.engine.di.ID + '_weights') - # FIXME: This part needs to be updated once statistical weights are properly - # supported in the data preparation. - for name, di_view in self.di.views.iteritems(): - if not di_view.active: - continue - self.weights[di_view] = (self.Irenorm * di_view.pod.ma_view.data - / (1./self.Irenorm + di_view.data)) - - # Useful quantities - self.tot_measpts = sum(s.data.size - for s in self.di.storages.values()) - self.tot_power = self.Irenorm * sum(s.tot_power - for s in self.di.storages.values()) - # Prepare regularizer - if self.p.reg_del2: - obj_Npix = self.ob.size - expected_obj_var = obj_Npix / self.tot_power # Poisson - reg_rescale = self.tot_measpts / (8. * obj_Npix * expected_obj_var) - logger.debug( - 'Rescaling regularization amplitude using ' - 'the Poisson distribution assumption.') - logger.debug('Factor: %8.5g' % reg_rescale) - reg_del2_amplitude = self.p.reg_del2_amplitude * reg_rescale - self.regularizer = Regul_del2(amplitude=reg_del2_amplitude) - else: - self.regularizer = None - - def __del__(self): - """ - Clean up routine - """ - # Delete containers - del self.engine.ptycho.containers[self.weights.ID] - del self.weights - del self.engine.ptycho.containers[self.ob_grad.ID] - del self.ob_grad - del self.engine.ptycho.containers[self.pr_grad.ID] - del self.pr_grad - - # Remove working attributes - for name, diff_view in self.di.views.iteritems(): - if not diff_view.active: - continue - try: - del diff_view.float_intens_coeff - del diff_view.error - except: - pass - - def new_grad(self): - """ - Compute a new gradient direction according to a Gaussian noise model. - - Note: The negative log-likelihood and local errors are also computed - here. - """ - self.ob_grad.fill(0.) - self.pr_grad.fill(0.) - - # We need an array for MPI - LL = np.array([0.]) - error_dct = {} - - # Outer loop: through diffraction patterns - for dname, diff_view in self.di.views.iteritems(): - if not diff_view.active: - continue - - # Weights and intensities for this view - w = self.weights[diff_view] - I = diff_view.data - - Imodel = np.zeros_like(I) - f = {} - - # First pod loop: compute total intensity - for name, pod in diff_view.pods.iteritems(): - if not pod.active: - continue - f[name] = pod.fw(pod.probe * pod.object) - Imodel += u.abs2(f[name]) - - # Floating intensity option - if self.p.floating_intensities: - diff_view.float_intens_coeff = ((w * Imodel * I).sum() - / (w * Imodel**2).sum()) - Imodel *= diff_view.float_intens_coeff - - DI = Imodel - I - - # Second pod loop: gradients computation - LLL = np.sum((w * DI**2).astype(np.float64)) - for name, pod in diff_view.pods.iteritems(): - if not pod.active: - continue - xi = pod.bw(w * DI * f[name]) - self.ob_grad[pod.ob_view] += 2. * xi * pod.probe.conj() - self.pr_grad[pod.pr_view] += 2. 
* xi * pod.object.conj() - - # Negative log-likelihood term - # LLL += (w * DI**2).sum() - - # LLL - diff_view.error = LLL - error_dct[dname] = np.array([0, LLL / np.prod(DI.shape), 0]) - LL += LLL - - # MPI reduction of gradients - self.ob_grad.allreduce() - self.pr_grad.allreduce() - """ - for name, s in ob_grad.storages.iteritems(): - parallel.allreduce(s.data) - for name, s in pr_grad.storages.iteritems(): - parallel.allreduce(s.data) - """ - parallel.allreduce(LL) - - # Object regularizer - if self.regularizer: - for name, s in self.ob.storages.iteritems(): - self.ob_grad.storages[name].data += self.regularizer.grad( - s.data) - LL += self.regularizer.LL - - self.LL = LL / self.tot_measpts - - return self.ob_grad, self.pr_grad, error_dct - - def poly_line_coeffs(self, ob_h, pr_h): - """ - Compute the coefficients of the polynomial for line minimization - in direction h - """ - - B = np.zeros((3,), dtype=np.longdouble) - Brenorm = 1. / self.LL[0]**2 - - # Outer loop: through diffraction patterns - for dname, diff_view in self.di.views.iteritems(): - if not diff_view.active: - continue - - # Weights and intensities for this view - w = self.weights[diff_view] - I = diff_view.data - - A0 = None - A1 = None - A2 = None - - for name, pod in diff_view.pods.iteritems(): - if not pod.active: - continue - f = pod.fw(pod.probe * pod.object) - a = pod.fw(pod.probe * ob_h[pod.ob_view] - + pr_h[pod.pr_view] * pod.object) - b = pod.fw(pr_h[pod.pr_view] * ob_h[pod.ob_view]) - - if A0 is None: - A0 = u.abs2(f).astype(np.longdouble) - A1 = 2 * np.real(f * a.conj()).astype(np.longdouble) - A2 = (2 * np.real(f * b.conj()).astype(np.longdouble) - + u.abs2(a).astype(np.longdouble)) - else: - A0 += u.abs2(f) - A1 += 2 * np.real(f * a.conj()) - A2 += 2 * np.real(f * b.conj()) + u.abs2(a) - - if self.p.floating_intensities: - A0 *= diff_view.float_intens_coeff - A1 *= diff_view.float_intens_coeff - A2 *= diff_view.float_intens_coeff - A0 -= I - - B[0] += np.dot(w.flat, (A0**2).flat) * Brenorm - B[1] += np.dot(w.flat, (2 * A0 * A1).flat) * Brenorm - B[2] += np.dot(w.flat, (A1**2 + 2*A0*A2).flat) * Brenorm - - parallel.allreduce(B) - - # Object regularizer - if self.regularizer: - for name, s in self.ob.storages.iteritems(): - B += Brenorm * self.regularizer.poly_line_coeffs( - ob_h.storages[name].data, s.data) - - self.B = B - - return B - -# Regul class does not exist, replace by objectclass -# class Regul_del2(Regul): - - -class Regul_del2(object): - """\ - Squared gradient regularizer (Gaussian prior). - - This class applies to any numpy array. - """ - def __init__(self, amplitude, axes=[-2, -1]): - # Regul.__init__(self, axes) - self.axes = axes - self.amplitude = amplitude - self.delxy = None - self.g = None - self.LL = None - - def grad(self, x): - """ - Compute and return the regularizer gradient given the array x. - """ - ax0, ax1 = self.axes - del_xf = u.delxf(x, axis=ax0) - del_yf = u.delxf(x, axis=ax1) - del_xb = u.delxb(x, axis=ax0) - del_yb = u.delxb(x, axis=ax1) - - self.delxy = [del_xf, del_yf, del_xb, del_yb] - self.g = 2. 
* self.amplitude*(del_xb + del_yb - del_xf - del_yf) - - self.LL = self.amplitude * (u.norm2(del_xf) - + u.norm2(del_yf) - + u.norm2(del_xb) - + u.norm2(del_yb)) - - return self.g - - def poly_line_coeffs(self, h, x=None): - ax0, ax1 = self.axes - if x is None: - del_xf, del_yf, del_xb, del_yb = self.delxy - else: - del_xf = u.delxf(x, axis=ax0) - del_yf = u.delxf(x, axis=ax1) - del_xb = u.delxb(x, axis=ax0) - del_yb = u.delxb(x, axis=ax1) - - hdel_xf = u.delxf(h, axis=ax0) - hdel_yf = u.delxf(h, axis=ax1) - hdel_xb = u.delxb(h, axis=ax0) - hdel_yb = u.delxb(h, axis=ax1) - - c0 = self.amplitude * (u.norm2(del_xf) - + u.norm2(del_yf) - + u.norm2(del_xb) - + u.norm2(del_yb)) - - c1 = 2 * self.amplitude * np.real(np.vdot(del_xf, hdel_xf) - + np.vdot(del_yf, hdel_yf) - + np.vdot(del_xb, hdel_xb) - + np.vdot(del_yb, hdel_yb)) - - c2 = self.amplitude * (u.norm2(hdel_xf) - + u.norm2(hdel_yf) - + u.norm2(hdel_xb) - + u.norm2(hdel_yb)) - - self.coeff = np.array([c0, c1, c2]) - return self.coeff - - -def prepare_smoothing_preconditioner(amplitude): - """ - Factory for smoothing preconditioner. - """ - if amplitude == 0.: - return None - - class GaussFilt: - def __init__(self, sigma): - self.sigma = sigma - - def __call__(self, x): - return u.c_gf(x, [0, self.sigma, self.sigma]) - - # from scipy.signal import correlate2d - # class HannFilt: - # def __call__(self, x): - # y = np.empty_like(x) - # sh = x.shape - # xf = x.reshape((-1,) + sh[-2:]) - # yf = y.reshape((-1,) + sh[-2:]) - # for i in range(len(xf)): - # yf[i] = correlate2d(xf[i], - # np.array([[.0625, .125, .0625], - # [.125, .25, .125], - # [.0625, .125, .0625]]), - # mode='same') - # return y - - if amplitude > 0.: - logger.debug( - 'Using a smooth gradient filter (Gaussian blur - only for ML)') - return GaussFilt(amplitude) - - elif amplitude < 0.: - raise RuntimeError('Hann filter not implemented (negative smoothing amplitude not supported)') - # logger.debug( - # 'Using a smooth gradient filter (Hann window - only for ML)') - # return HannFilt() diff --git a/ptypy/engines/__init__.py b/ptypy/engines/__init__.py index 72dc96a57..8a45180d7 100644 --- a/ptypy/engines/__init__.py +++ b/ptypy/engines/__init__.py @@ -35,7 +35,7 @@ def by_name(name): raise RuntimeError('Unknown engine: %s' % name) return ENGINES[name] -from base import BaseEngine, DEFAULT_iter_info +from .base import BaseEngine, DEFAULT_iter_info # These imports should be executable separately from . 
import DM @@ -47,5 +47,5 @@ def by_name(name): # dynamic load, maybe discarded in future -dynamic_load('./', ['BaseEngine', 'PositionCorrectionEngine'] + ENGINES.keys(), True) -dynamic_load('~/.ptypy/', ['BaseEngine', 'PositionCorrectionEngine'] + ENGINES.keys(), True) +dynamic_load('./', ['BaseEngine', 'PositionCorrectionEngine'] + list(ENGINES.keys()), True) +dynamic_load('~/.ptypy/', ['BaseEngine', 'PositionCorrectionEngine'] + list(ENGINES.keys()), True) diff --git a/ptypy/engines/base.py b/ptypy/engines/base.py index 025be4316..cb61af9be 100644 --- a/ptypy/engines/base.py +++ b/ptypy/engines/base.py @@ -137,14 +137,14 @@ def prepare(self): # in the dict self.probe_support supp = self.p.probe_support if supp is not None: - for name, s in self.pr.storages.iteritems(): + for name, s in self.pr.storages.items(): sh = s.data.shape ll, xx, yy = u.grids(sh, FFTlike=False) support = (np.pi * (xx**2 + yy**2) < supp * sh[1] * sh[2]) self.probe_support[name] = support # Make sure all the pods are supported - for label_, pod_ in self.pods.iteritems(): + for label_, pod_ in self.pods.items(): if not pod_.model.__class__ in self.SUPPORTED_MODELS: raise Exception('Model %s not supported by engine' % pod_.model.__class__) @@ -154,6 +154,7 @@ def prepare(self): def iterate(self, num=None): """ Compute one or several iterations. + num : None, int number of iterations. If None or num<1, a single iteration is performed. """ @@ -186,7 +187,7 @@ def iterate(self, num=None): # Check if engine did things right. if it >= self.curiter: - logger.warn("""Engine %s did not increase iteration counter + logger.warning("""Engine %s did not increase iteration counter `self.curiter` internally. Accessing this attribute in that engine is inaccurate""" % self.__class__.__name__) @@ -215,7 +216,7 @@ def iterate(self, num=None): def _fill_runtime(self): local_error = u.parallel.gather_dict(self.error) if local_error: - error = np.array(local_error.values()).mean(0) + error = np.array(list(local_error.values())).mean(0) else: error = np.zeros((1,)) info = dict( @@ -332,7 +333,7 @@ def engine_initialize(self): # Enlarge object arrays, # This can be skipped though if the boundary is less important for name, s in self.ob.storages.items(): - s.padding = int(self.p.position_refinement.max_shift / np.max(s.psize)) + s.padding = int(self.p.position_refinement.max_shift // np.max(s.psize)) s.reformat() # this could be some kind of dictionary lookup if we want to add more @@ -363,7 +364,7 @@ def position_update(self): self.position_refinement.update_constraints(self.curiter) # this stays here # Iterate through all diffraction views - for dname, di_view in self.di.views.iteritems(): + for dname, di_view in self.di.views.items(): # Check for new coordinates if di_view.active: #self.position_refinement.update_view_position(di_view) @@ -395,7 +396,7 @@ def prepare(self): raise NotImplementedError # Make sure all the pods are supported - for label_, pod_ in self.pods.iteritems(): + for label_, pod_ in self.pods.items(): if not pod_.model.__class__ in self.SUPPORTED_MODELS: raise Exception('Model %s not supported by engine' % pod_.model.__class__) diff --git a/ptypy/engines/dummy.py b/ptypy/engines/dummy.py index de531a36b..08b8ff3f2 100644 --- a/ptypy/engines/dummy.py +++ b/ptypy/engines/dummy.py @@ -78,7 +78,7 @@ def engine_iterate(self,numiter): time.sleep(self.itertime) # virtual error reduces 10% error_dct = error = {} - for dname, diff_view in self.di.views.iteritems(): + for dname, diff_view in self.di.views.items(): 
error_dct[dname] = [0., 0.9**self.ntimescalled, 0.] self.ntimescalled+=1 return error_dct diff --git a/ptypy/engines/ePIE.py b/ptypy/engines/ePIE.py index 5a1267d6c..d78cdcfe4 100644 --- a/ptypy/engines/ePIE.py +++ b/ptypy/engines/ePIE.py @@ -170,10 +170,10 @@ def engine_prepare(self): # mark the pixels covered per node self.ob_nodecover.fill(0.0) - for name, pod in self.pods.iteritems(): + for name, pod in self.pods.items(): if pod.active: self.ob_nodecover[pod.ob_view] = 1 - self.nodemask = np.array(self.ob_nodecover.S.values()[0].data[0], + self.nodemask = np.array(list(self.ob_nodecover.S.values())[0].data[0], dtype=np.bool) # communicate this over MPI @@ -187,7 +187,7 @@ def engine_prepare(self): # plt.show() # if parallel.master: # import matplotlib.pyplot as plt - # plt.imshow(self.ob_nodecover.S.values()[0].data[0].real) + # plt.imshow(list(self.ob_nodecover.S.values())[0].data[0].real) # plt.colorbar() # plt.show() @@ -195,7 +195,7 @@ def engine_iterate(self, num=1): """ Compute `num` iterations. """ - pod_order = self.pods.keys() + pod_order = list(self.pods.keys()) pod_order.sort() to = 0.0 tf = 0.0 @@ -209,7 +209,7 @@ def engine_iterate(self, num=1): # object smooting prior to update, if requested if self.p.obj_smooth_std is not None: - for name, s in self.ob.S.iteritems(): + for name, s in self.ob.S.items(): # u.c_gf is a complex wrapper around # scipy.ndimage.gaussian_filter() std = self.p.obj_smooth_std @@ -256,7 +256,7 @@ def engine_iterate(self, num=1): if do_update_probe: logger.debug(pre_str + '----- ePIE probe update -----') object_max = np.max( - np.abs(self.ob.S.values()[0].data.max())**2) + np.abs(list(self.ob.S.values())[0].data.max())**2) pod.probe += (self.p.beta * np.conj(pod.object) / object_max * (pod.exit - exit_)) @@ -271,7 +271,7 @@ def engine_iterate(self, num=1): # clip the object, if requested if self.p.clip_object is not None: low, high = self.p.clip_object - for name, s in self.ob.S.iteritems(): + for name, s in self.ob.S.items(): phase = np.angle(s.data) ampl = np.abs(s.data) under = (ampl < low) @@ -290,18 +290,18 @@ def engine_iterate(self, num=1): # only share the part of the object which whis node has # contributed to, and zero the rest to avoid weird # feedback. - self.ob.S.values()[0].data[0] *= self.nodemask + list(self.ob.S.values())[0].data[0] *= self.nodemask parallel.allreduceC(self.ob) # the reduced sum should be an average, and the # denominator (the number of contributing nodes) varies # across the object. 
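# --- Editor's illustrative sketch (not part of the patch) --------------------
# The context comments above describe how the MPI-summed object is turned back
# into an average: each node contributes only where its coverage mask is 1, so
# the reduced sum is divided by the per-pixel number of contributors. A minimal
# numpy-only stand-in for what allreduceC plus the division achieves (the array
# values below are made up for illustration):
#
#   import numpy as np
#
#   local_objs = [np.array([[1., 1., 0.]]), np.array([[0., 3., 5.]])]  # per-node masked estimates
#   coverage   = [np.array([[1., 1., 0.]]), np.array([[0., 1., 1.]])]  # 1 where a node contributed
#
#   summed  = sum(local_objs)                    # what allreduceC would produce across nodes
#   counts  = sum(coverage)                      # contributing nodes per pixel
#   average = summed / (np.abs(counts) + 1e-5)   # same epsilon guard as in ePIE.py
#   # average is ~[[1., 2., 5.]]: pixels covered by both nodes are averaged,
#   # pixels covered by a single node are left (almost) unchanged.
# ------------------------------------------------------------------------------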
- for name, s in self.ob.S.iteritems(): + for name, s in self.ob.S.items(): s.data /= (np.abs(self.ob_nodecover.S[name].data) + 1e-5) # average the probe across nodes, if requested if self.p.average_probe and do_update_probe: - for name, s in self.pr.S.iteritems(): + for name, s in self.pr.S.items(): parallel.allreduce(s.data) s.data /= parallel.size t3 = time.time() @@ -346,10 +346,10 @@ def _redestribute_data(self): t0 = time.time() # get the range of positions and define the size of each node's domain - pod = self.pods.values()[0] + pod = list(self.pods.values())[0] xlims = [pod.ob_view.coord[1], ] * 2 # min, max ylims = [pod.ob_view.coord[0], ] * 2 # min, max - for name, pod in self.pods.iteritems(): + for name, pod in self.pods.items(): xlims = [min(xlims[0], pod.ob_view.coord[1]), max(xlims[1], pod.ob_view.coord[1])] ylims = [min(ylims[0], pod.ob_view.coord[0]), @@ -363,13 +363,13 @@ def _redestribute_data(self): # now, the node number corresponding to a coordinate (x, y) is def __node(x, y): - return (int((x - xlims[0]) / dx) - + layout[1] * int((y - ylims[0]) / dy)) + return ((x - xlims[0]) // dx + + layout[1] * (y - ylims[0]) // dy) # now, each node works out which of its own pods to send off, # and the result is communicated to all other nodes as a dict. destinations = {} - for name, pod in self.pods.iteritems(): + for name, pod in self.pods.items(): if not pod.active: continue y, x = pod.ob_view.coord @@ -382,7 +382,7 @@ def __node(x, y): # prepare (enlarge) the storages on the receiving nodes sendpods = [] - for name, dest in destinations.iteritems(): + for name, dest in destinations.items(): if self.pods[name].active: # sending this pod, so add it to a temporary list sendpods.append(name) @@ -396,7 +396,7 @@ def __node(x, y): # transfer data transferred = 0 - for name, dest in destinations.iteritems(): + for name, dest in destinations.items(): if name in sendpods: # your turn to send parallel.send(self.pods[name].diff, dest=dest) @@ -431,15 +431,15 @@ def _best_decomposition(self, N): if N % i == 0: solutions.append(i) i = max(solutions) - assert (i * (N / i) == N) - return [i, N / i] + assert (i * (N // i) == N) + return [i, N // i] def center_probe(self): """ Stolen in its entirety from the DM engine. 
""" if self.p.probe_center_tol is not None: - for name, s in self.pr.S.iteritems(): + for name, s in self.pr.S.items(): c1 = u.mass_center(u.abs2(s.data).sum(0)) # fft convention should however use geometry instead c2 = np.asarray(s.shape[-2:]) // 2 diff --git a/ptypy/engines/posref.py b/ptypy/engines/posref.py index 4e57db2f4..c6f1d939b 100644 --- a/ptypy/engines/posref.py +++ b/ptypy/engines/posref.py @@ -93,7 +93,7 @@ def fourier_error(self, di_view, obj): ''' af2 = np.zeros_like(di_view.data) - for name, pod in di_view.pods.iteritems(): + for name, pod in di_view.pods.items(): af2 += u.abs2(pod.fw(pod.probe*obj)) return np.sum(di_view.pod.mask * (np.sqrt(af2) - np.sqrt(np.abs(di_view.data)))**2) diff --git a/ptypy/engines/utils.py b/ptypy/engines/utils.py index 8e39981d3..a34eea842 100644 --- a/ptypy/engines/utils.py +++ b/ptypy/engines/utils.py @@ -22,7 +22,7 @@ def dynamic_load(path, baselist, fail_silently = True): import os import glob import re - import imp + from importlib.machinery import SourceFileLoader # Loop through paths engine_path = {} @@ -58,11 +58,12 @@ def dynamic_load(path, baselist, fail_silently = True): % (classname, filename)) # Load engines that have been found - for classname, mf in engine_path.iteritems(): + for classname, mf in engine_path.items(): # Import module modname, filename = mf - engine_module = imp.load_source(modname, filename) + print(modname, filename) + engine_module = SourceFileLoader(modname, filename).load_module() except Exception as e: if not fail_silently: @@ -121,7 +122,7 @@ def basic_fourier_update(diff_view, pbound=None, alpha=1., LL_error=True): # For log likelihood error if LL_error is True: LL = np.zeros_like(diff_view.data) - for name, pod in diff_view.pods.iteritems(): + for name, pod in diff_view.pods.items(): LL += u.abs2(pod.fw(pod.probe * pod.object)) err_phot = (np.sum(fmask * (LL - I)**2 / (I + 1.)) / np.prod(LL.shape)) @@ -129,7 +130,7 @@ def basic_fourier_update(diff_view, pbound=None, alpha=1., LL_error=True): err_phot = 0. # Propagate the exit waves - for name, pod in diff_view.pods.iteritems(): + for name, pod in diff_view.pods.items(): if not pod.active: continue f[name] = pod.fw((1 + alpha) * pod.probe * pod.object @@ -148,7 +149,7 @@ def basic_fourier_update(diff_view, pbound=None, alpha=1., LL_error=True): if pbound is None: # No power bound fm = (1 - fmask) + fmask * fmag / (af + 1e-10) - for name, pod in diff_view.pods.iteritems(): + for name, pod in diff_view.pods.items(): if not pod.active: continue df = pod.bw(fm * f[name]) - pod.probe * pod.object @@ -158,7 +159,7 @@ def basic_fourier_update(diff_view, pbound=None, alpha=1., LL_error=True): # Power bound is applied renorm = np.sqrt(pbound / err_fmag) fm = (1 - fmask) + fmask * (fmag + fdev * renorm) / (af + 1e-10) - for name, pod in diff_view.pods.iteritems(): + for name, pod in diff_view.pods.items(): if not pod.active: continue df = pod.bw(fm * f[name]) - pod.probe * pod.object @@ -166,7 +167,7 @@ def basic_fourier_update(diff_view, pbound=None, alpha=1., LL_error=True): err_exit += np.mean(u.abs2(df)) else: # Within power bound so no constraint applied. - for name, pod in diff_view.pods.iteritems(): + for name, pod in diff_view.pods.items(): if not pod.active: continue df = alpha * (pod.probe * pod.object - pod.exit) @@ -193,7 +194,7 @@ def Cnorm2(c): ptypy.utils.math_utils.norm2 """ r = 0. 
- for name, s in c.storages.iteritems(): + for name, s in c.storages.items(): r += u.norm2(s.data) return r @@ -207,6 +208,6 @@ def Cdot(c1, c2): :returns: The dot product (*scalar*) """ r = 0. - for name, s in c1.storages.iteritems(): + for name, s in c1.storages.items(): r += np.vdot(c1.storages[name].data.flat, c2.storages[name].data.flat) return r diff --git a/ptypy/experiment/Bragg3dSim.py b/ptypy/experiment/Bragg3dSim.py index 9f5ba5056..5eebb217e 100644 --- a/ptypy/experiment/Bragg3dSim.py +++ b/ptypy/experiment/Bragg3dSim.py @@ -231,7 +231,7 @@ def simulate(self): # shuffle everything as a test if self.p.shuffle: - order = range(len(self.diff)) + order = list(range(len(self.diff))) from random import shuffle shuffle(order) self.diff = [self.diff[i] for i in order] diff --git a/ptypy/experiment/ID16Anfp.py b/ptypy/experiment/ID16Anfp.py index a9d755df9..e272ef00d 100644 --- a/ptypy/experiment/ID16Anfp.py +++ b/ptypy/experiment/ID16Anfp.py @@ -337,7 +337,7 @@ def load_common(self): if self.info.recipe.use_bpm5_ct: print('Reading the values of the bpm5 ct') bpm5_ct_val = np.zeros(self.num_frames) - for ii in xrange(self.num_frames): + for ii in range(self.num_frames): projobj = io.h5read(self.frame_format.format(ii),self.h5_path)[self.h5_path] #projobj = io.h5read(self._index_to_frame(ii),self.h5_path)[self.h5_path] # metadata diff --git a/ptypy/experiment/UCL.py b/ptypy/experiment/UCL.py index 21aa62f02..155747e7d 100644 --- a/ptypy/experiment/UCL.py +++ b/ptypy/experiment/UCL.py @@ -194,12 +194,8 @@ class UCLLaserScan(PtyScan): def __init__(self, pars=None, **kwargs): """ Initializes parent class. - - :param pars: dict - - contains parameter tree. - :param kwargs: key-value pair - - additional parameters. """ + p = self.DEFAULT.copy(99) p.update(pars) pars = p @@ -309,8 +305,6 @@ def load_weight(self): Function description see parent class. - :return: weight2d - - np.array: Mask or weight if provided from file """ # FIXME: do something better here. (detector-dependent) # Load mask as weight @@ -322,8 +316,6 @@ def load_positions(self): """ Load the positions and return as an (N,2) array. - :return: positions - - np.array: contains scan positions. """ # Load positions from file if possible. motor_positions = io.h5read( @@ -345,8 +337,6 @@ def load_common(self): """ Load dark and flat. - :return: common - - dict: contains averaged dark and flat (np.array). """ common = u.Param() @@ -385,13 +375,6 @@ def load_common(self): def load(self, indices): """ Load frames given by the indices. - - :param indices: list - Frame indices available per node. - :return: raw, pos, weight - - dict: index matched data frames (np.array). - - dict: new positions. - - dict: new weights. """ raw = {} pos = {} @@ -411,29 +394,7 @@ def load(self, indices): return raw, pos, weights def correct(self, raw, weights, common): - """ - Apply corrections to frames. See below for possible options. - - Options for corrections: - - Hot pixel removal: - Replace outlier pixels in frames by median. - - Richardson–Lucy deconvolution: - Deconvolve frames from detector psf. - - Dark subtraction: - Subtract dark from frames. - - Flat division: - Divide frames by flat. - - :param raw: dict - - dict containing index matched data frames (np.array). - :param weights: dict - - dict containing possible weights. - :param common: dict - - dict containing possible dark and flat frames. - :return: data, weights - - dict: contains index matched corrected data frames (np.array). - - dict: contains modified weights. 
- """ + # Apply hot pixel removal if self.info.remove_hot_pixels.apply: u.log(3, 'Applying hot pixel removal...') @@ -471,7 +432,7 @@ def correct(self, raw, weights, common): else: gau_sum = 0 for k in ( - self.info.rl_deconvolution.gaussians.iteritems()): + self.info.rl_deconvolution.gaussians.items()): gau_sum += u.gaussian2D(raw[0].shape[0], k[1].std_x, k[1].std_y, diff --git a/ptypy/experiment/__init__.py b/ptypy/experiment/__init__.py index 5f58958bd..a6244b060 100644 --- a/ptypy/experiment/__init__.py +++ b/ptypy/experiment/__init__.py @@ -16,6 +16,7 @@ :copyright: Copyright 2014 by the PTYPY team, see AUTHORS. :license: GPLv2, see LICENSE for details. """ +from importlib import import_module from .. import defaults_tree from ..core.data import MoonFlowerScan, PtydScan, PtyScan, QuickScan from ..simulations import SimScan @@ -48,25 +49,24 @@ def _register_PtyScan_class(cls, name=None): return cls -ptyscan_modules = [('hdf5_loader', 'Hdf5Loader'), - ('diamond_nexus', 'DiamondNexus'), - ('cSAXS', 'cSAXSScan'), - ('savu', 'Savu'), - ('plugin', 'makeScanPlugin'), - ('ID16Anfp', 'ID16AScan'), - ('AMO_LCLS', 'AMOScan'), - ('DiProI_FERMI', 'DiProIFERMIScan'), - ('optiklabor', 'FliSpecScanMultexp'), - ('UCL', 'UCLLaserScan'), - ('nanomax', 'NanomaxStepscanMay2017'), - ('nanomax', 'NanomaxStepscanNov2016'), - ('nanomax', 'NanomaxFlyscanJune2017'), - ('ALS_5321', 'ALS5321Scan'), - ('Bragg3dSim', 'Bragg3dSimScan')] +ptyscan_modules = [('.hdf5_loader', 'Hdf5Loader'), + ('.cSAXS', 'cSAXSScan'), + ('.savu', 'Savu'), + ('.plugin', 'makeScanPlugin'), + ('.ID16Anfp', 'ID16AScan'), + ('.AMO_LCLS', 'AMOScan'), + ('.DiProI_FERMI', 'DiProIFERMIScan'), + ('.optiklabor', 'FliSpecScanMultexp'), + ('.UCL', 'UCLLaserScan'), + ('.nanomax', 'NanomaxStepscanMay2017'), + ('.nanomax', 'NanomaxStepscanNov2016'), + ('.nanomax', 'NanomaxFlyscanJune2017'), + ('.ALS_5321', 'ALS5321Scan'), + ('.Bragg3dSim', 'Bragg3dSimScan')] for module, obj in ptyscan_modules: try: - lib = __import__(module, globals(), locals()) + lib = import_module(module, 'ptypy.experiment') except ImportError as exception: log(2, 'Could not import experiment %s from %s, Reason: %s' % (obj, module, exception)) pass diff --git a/ptypy/experiment/cSAXS.py b/ptypy/experiment/cSAXS.py index d67e930ea..236dba49d 100644 --- a/ptypy/experiment/cSAXS.py +++ b/ptypy/experiment/cSAXS.py @@ -113,7 +113,7 @@ def get_position_path(inargs): positions_path = file_path + 'scan_%(scan_number)05d.dat' % pathargs # check that it exists if not os.path.isfile(positions_path): - print "File:%s does not exist." % positions_path + print("File:%s does not exist." % positions_path) exists = False else: exists = True @@ -166,7 +166,7 @@ def get_data_path(inargs): num_frames = len([name for name in os.listdir(file_path) if os.path.isfile(file_path + name)]) # check that it exists if not os.path.isfile(frame_path): - print "File:%s does not exist." % frame_path + print("File:%s does not exist." % frame_path) exists = False else: exists = True diff --git a/ptypy/experiment/diamond_nexus.py b/ptypy/experiment/diamond_nexus.py index 9f34b1794..1cb72c000 100644 --- a/ptypy/experiment/diamond_nexus.py +++ b/ptypy/experiment/diamond_nexus.py @@ -316,7 +316,7 @@ def get_corrected_intensities(self, index): There is a lot of logic here, I wonder if there is a better way to get rid of it. Limited a bit by the MPI, adn thinking about extension to large data size. 
''' - if isinstance(index, int): + if not hasattr(index, '__iter__'): index = (index,) indexed_frame_slices = tuple([slice(ix, ix+1, 1) for ix in index]) indexed_frame_slices += self.frame_slices @@ -333,15 +333,15 @@ def get_corrected_intensities(self, index): if self.flatfield is not None: if self.flatfield_laid_out_like_data: - intensity /= self.flatfield[indexed_frame_slices].squeeze() + intensity[:] = intensity / self.flatfield[indexed_frame_slices].squeeze() else: - intensity /= self.flatfield[self.frame_slices].squeeze() + intensity[:] = intensity / self.flatfield[self.frame_slices].squeeze() if self.normalisation is not None: if self.normalisation_laid_out_like_positions: - intensity /= self.normalisation[index] + intensity[:] = intensity / self.normalisation[index] else: - intensity /= self.normalisation + intensity[:] = intensity / self.normalisation if self.mask is not None: if self.mask_laid_out_like_data: @@ -353,7 +353,6 @@ def get_corrected_intensities(self, index): return mask, intensity - def compute_scan_mapping_and_trajectory(self, data_shape, positions_fast_shape, positions_slow_shape): ''' This horrendous block of logic is all to do with making a semi-intelligent guess at what the data looks like. @@ -387,7 +386,7 @@ def compute_scan_mapping_and_trajectory(self, data_shape, positions_fast_shape, elif isinstance(fast_axis_bounds, (tuple, list)): fast_axis_bounds = set_fast_axis_bounds - indices = np.meshgrid(range(*fast_axis_bounds), range(*slow_axis_bounds)) + indices = np.meshgrid(list(range(*fast_axis_bounds)), list(range(*slow_axis_bounds))) self.preview_indices = np.array([indices[1].flatten(), indices[0].flatten()], dtype=int) self.num_frames = len(self.preview_indices[0]) else: @@ -400,7 +399,7 @@ def compute_scan_mapping_and_trajectory(self, data_shape, positions_fast_shape, elif isinstance(fast_axis_bounds, (tuple, list)): fast_axis_bounds = set_fast_axis_bounds self._scantype = "arb" - self.preview_indices = range(*fast_axis_bounds) + self.preview_indices = list(range(*fast_axis_bounds)) self.num_frames = len(self.preview_indices) elif ((len(positions_fast_shape)>1) and (len(positions_slow_shape)>1)) and data_shape[0] == np.prod(positions_fast_shape) == np.prod(positions_slow_shape): @@ -425,7 +424,7 @@ def compute_scan_mapping_and_trajectory(self, data_shape, positions_fast_shape, elif isinstance(fast_axis_bounds, (tuple, list)): fast_axis_bounds = set_fast_axis_bounds - indices = np.meshgrid(range(*fast_axis_bounds), range(*slow_axis_bounds)) + indices = np.meshgrid(list(range(*fast_axis_bounds)), list(range(*slow_axis_bounds))) self.preview_indices = np.array([indices[1].flatten(), indices[0].flatten()]) self.num_frames = len(self.preview_indices[0]) self._ismapped = False @@ -456,7 +455,7 @@ def compute_scan_mapping_and_trajectory(self, data_shape, positions_fast_shape, self.fast_axis, self.slow_axis = np.meshgrid(self.fast_axis[...], self.slow_axis[...]) - indices = np.meshgrid(range(*fast_axis_bounds), range(*slow_axis_bounds)) + indices = np.meshgrid(list(range(*fast_axis_bounds)), list(range(*slow_axis_bounds))) self.preview_indices = np.array([indices[1].flatten(), indices[0].flatten()], dtype=int) self.num_frames = np.prod(indices[0].shape) @@ -485,7 +484,7 @@ def compute_scan_mapping_and_trajectory(self, data_shape, positions_fast_shape, self.fast_axis, self.slow_axis = np.meshgrid(self.fast_axis[...], self.slow_axis[...]) - indices = np.meshgrid(range(*fast_axis_bounds), range(*slow_axis_bounds)) + indices = 
np.meshgrid(list(range(*fast_axis_bounds)), list(range(*slow_axis_bounds))) self.preview_indices = np.array([indices[1].flatten(), indices[0].flatten()], dtype=int) self.num_frames = np.prod(indices[0].shape) diff --git a/ptypy/experiment/hdf5_loader.py b/ptypy/experiment/hdf5_loader.py index 2f2c76094..93616fe1b 100644 --- a/ptypy/experiment/hdf5_loader.py +++ b/ptypy/experiment/hdf5_loader.py @@ -388,7 +388,7 @@ def __init__(self, pars=None, **kwargs): log(3, "No normalisation will be applied.") if None not in [self.p.recorded_energy.file, self.p.recorded_energy.key]: - print self.p.recorded_energy.multiplier + print(self.p.recorded_energy.multiplier) self.p.energy = np.float(h5.File(self.p.recorded_energy.file, 'r')[self.p.recorded_energy.key][()] * self.p.recorded_energy.multiplier) self.meta.energy = self.p.energy log(3, "loading energy={} from file".format(self.p.energy)) @@ -492,7 +492,7 @@ def get_corrected_intensities(self, index): There is a lot of logic here, I wonder if there is a better way to get rid of it. Limited a bit by the MPI, adn thinking about extension to large data size. ''' - if isinstance(index, int): + if not hasattr(index, '__iter__'): index = (index,) indexed_frame_slices = tuple([slice(ix, ix+1, 1) for ix in index]) indexed_frame_slices += self.frame_slices @@ -509,15 +509,15 @@ def get_corrected_intensities(self, index): if self.flatfield is not None: if self.flatfield_laid_out_like_data: - intensity /= self.flatfield[indexed_frame_slices].squeeze() + intensity[:] = intensity / self.flatfield[indexed_frame_slices].squeeze() else: - intensity /= self.flatfield[self.frame_slices].squeeze() + intensity[:] = intensity / self.flatfield[self.frame_slices].squeeze() if self.normalisation is not None: if self.normalisation_laid_out_like_positions: - intensity /= self.normalisation[index] + intensity[:] = intensity / self.normalisation[index] else: - intensity /= self.normalisation + intensity[:] = intensity / self.normalisation if self.mask is not None: if self.mask_laid_out_like_data: @@ -563,7 +563,7 @@ def compute_scan_mapping_and_trajectory(self, data_shape, positions_fast_shape, elif isinstance(fast_axis_bounds, (tuple, list)): fast_axis_bounds = set_fast_axis_bounds - indices = np.meshgrid(range(*fast_axis_bounds), range(*slow_axis_bounds)) + indices = np.meshgrid(list(range(*fast_axis_bounds)), list(range(*slow_axis_bounds))) self.preview_indices = np.array([indices[1].flatten(), indices[0].flatten()], dtype=int) self.num_frames = len(self.preview_indices[0]) else: @@ -576,7 +576,7 @@ def compute_scan_mapping_and_trajectory(self, data_shape, positions_fast_shape, elif isinstance(fast_axis_bounds, (tuple, list)): fast_axis_bounds = set_fast_axis_bounds self._scantype = "arb" - self.preview_indices = range(*fast_axis_bounds) + self.preview_indices = list(range(*fast_axis_bounds)) self.num_frames = len(self.preview_indices) elif ((len(positions_fast_shape)>1) and (len(positions_slow_shape)>1)) and data_shape[0] == np.prod(positions_fast_shape) == np.prod(positions_slow_shape): @@ -601,7 +601,7 @@ def compute_scan_mapping_and_trajectory(self, data_shape, positions_fast_shape, elif isinstance(fast_axis_bounds, (tuple, list)): fast_axis_bounds = set_fast_axis_bounds - indices = np.meshgrid(range(*fast_axis_bounds), range(*slow_axis_bounds)) + indices = np.meshgrid(list(range(*fast_axis_bounds)), list(range(*slow_axis_bounds))) self.preview_indices = np.array([indices[1].flatten(), indices[0].flatten()]) self.num_frames = len(self.preview_indices[0]) self._ismapped = 
False @@ -632,7 +632,7 @@ def compute_scan_mapping_and_trajectory(self, data_shape, positions_fast_shape, self.fast_axis, self.slow_axis = np.meshgrid(self.fast_axis[...], self.slow_axis[...]) - indices = np.meshgrid(range(*fast_axis_bounds), range(*slow_axis_bounds)) + indices = np.meshgrid(list(range(*fast_axis_bounds)), list(range(*slow_axis_bounds))) self.preview_indices = np.array([indices[1].flatten(), indices[0].flatten()], dtype=int) self.num_frames = np.prod(indices[0].shape) @@ -661,7 +661,7 @@ def compute_scan_mapping_and_trajectory(self, data_shape, positions_fast_shape, self.fast_axis, self.slow_axis = np.meshgrid(self.fast_axis[...], self.slow_axis[...]) - indices = np.meshgrid(range(*fast_axis_bounds), range(*slow_axis_bounds)) + indices = np.meshgrid(list(range(*fast_axis_bounds)), list(range(*slow_axis_bounds))) self.preview_indices = np.array([indices[1].flatten(), indices[0].flatten()], dtype=int) self.num_frames = np.prod(indices[0].shape) diff --git a/ptypy/experiment/legacy/DLS.py b/ptypy/experiment/legacy/DLS.py index da5733d35..676e3ffe7 100644 --- a/ptypy/experiment/legacy/DLS.py +++ b/ptypy/experiment/legacy/DLS.py @@ -246,7 +246,7 @@ def check(self, frames, start): frames_accessible = min((frames, num_avail)) stop = f[NEXUS_PATHS.finished_pattern][0] and (self.num_frames == start) f.close() - print "HERE",frames_accessible, stop + print("HERE",frames_accessible, stop) return frames_accessible,stop def load(self, indices): diff --git a/ptypy/experiment/legacy/DLS_mapping.py b/ptypy/experiment/legacy/DLS_mapping.py index 72212b031..66e378b5f 100644 --- a/ptypy/experiment/legacy/DLS_mapping.py +++ b/ptypy/experiment/legacy/DLS_mapping.py @@ -111,7 +111,7 @@ def load_positions(self): # Load positions from file if possible. # print self.info.recipe stage_path = NEXUS_PATHS.instrument % self.info.recipe - print stage_path + print(stage_path) instrument = h5.File(self.data_file, 'r', libver='latest', swmr=True)[stage_path] if self.info.recipe.israster: self.position_shape = instrument[0].shape @@ -169,7 +169,7 @@ def load(self, indices): key = NEXUS_PATHS.frame_pattern % self.info.recipe if not self.info.recipe.israster: for j in indices: - print j + print(j) if not self.info.recipe.is_swmr: dataset = h5.File(self.data_file)[key] try: diff --git a/ptypy/experiment/legacy/I08.py b/ptypy/experiment/legacy/I08.py index fc60b97b2..52ebbf8e1 100644 --- a/ptypy/experiment/legacy/I08.py +++ b/ptypy/experiment/legacy/I08.py @@ -202,14 +202,14 @@ def load_common(self): return common._to_dict() def load_positions(self): - """ + """ Load the positions and return as an (N,2) array """ base_path = self.info.base_path mmult = u.expect2(self.info.motors_multiplier) keyx = STXM_PATHS.motors+str(self.info.motors[0]) keyy=STXM_PATHS.motors+str(self.info.motors[1]) - print "file name is:%s" % self.stxm_filename + print("file name is:%s" % self.stxm_filename) x1 = io.h5read(self.stxm_filename,keyx) y1 = io.h5read(self.stxm_filename,keyy) x ,y = np.meshgrid(x1[keyx],y1[keyy]) # grab out the positions- they are the demand positions rather than readback and are not a list of co-ords. Meshgrid to get almost the thing we need. diff --git a/ptypy/experiment/legacy/I13_nfp.py b/ptypy/experiment/legacy/I13_nfp.py index 807e8b4d9..dafb63621 100644 --- a/ptypy/experiment/legacy/I13_nfp.py +++ b/ptypy/experiment/legacy/I13_nfp.py @@ -407,9 +407,9 @@ def load_common(self): # Load dark. 
if self.info.dark_number is not None: key = NEXUS_PATHS.frame_pattern % self.info - dark_indices = range(len( + dark_indices = list(range(len( io.h5read(self.dark_file, NEXUS_PATHS.frame_pattern - % self.info)[key])) + % self.info)[key]))) dark = [io.h5read(self.dark_file, NEXUS_PATHS.frame_pattern % self.info, slice=j)[key][ @@ -425,9 +425,9 @@ def load_common(self): # Load flat. if self.info.flat_number is not None: key = NEXUS_PATHS.frame_pattern % self.info - flat_indices = range(len( + flat_indices = list(range(len( io.h5read(self.flat_file, NEXUS_PATHS.frame_pattern - % self.info)[key])) + % self.info)[key]))) flat = [io.h5read(self.flat_file, NEXUS_PATHS.frame_pattern % self.info, slice=j)[key][ @@ -544,8 +544,7 @@ def correct(self, raw, weights, common): # Create fake psf as a sum of gaussians from parameters else: gau_sum = 0 - for k in ( - self.info.rl_deconvolution.gaussians.iteritems()): + for k in self.info.rl_deconvolution.gaussians.items(): gau_sum += u.gaussian2D(raw[0].shape[0], k[1].std_x, k[1].std_y, diff --git a/ptypy/experiment/nanomax3d.py b/ptypy/experiment/nanomax3d.py index 6343a878c..c4e120baf 100644 --- a/ptypy/experiment/nanomax3d.py +++ b/ptypy/experiment/nanomax3d.py @@ -78,14 +78,14 @@ def load_common(self): for a complete POD. We also have to specify a single number for the rocking step size. """ - print '*** load_common' + print('*** load_common') angles = [] for scannr in self.p.scans: with h5py.File(self.p.datapath + self.p.datafile) as fp: angles.append(float(fp.get('entry%d'%scannr + '/measurement/gonphi').value)) - print angles + print(angles) step = np.mean(np.diff(sorted(angles))) - print step + print(step) return { 'rocking_step': step, 'n_rocking_positions': len(angles), @@ -103,7 +103,7 @@ def load_positions(self): acquired: x fastest, then y, then scan number in the order provided. """ - print '*** load_positions' + print('*** load_positions') # first, calculate mean x and y positions for all scans, they # have to match anyway so may as well average them. @@ -117,7 +117,7 @@ def load_positions(self): for i in range(tmp.shape[1]): if np.allclose(tmp[:, i:], 0.0): cutoff = i - print 'using %i samx values' % cutoff + print('using %i samx values' % cutoff) break x.append(np.array(fp[entry + '/measurement/AdLinkAI_buff'][:, :cutoff])) y.append(np.array(fp[entry + '/measurement/samy'])) @@ -127,7 +127,7 @@ def load_positions(self): Ny = x_mean.shape[0] Nxy = Nx * Ny assert Ny == y_mean.shape[0] - print 'Scan positions are Nx=%d, Ny=%d, Nxy=%d' % (Nx, Ny, Nxy) + print('Scan positions are Nx=%d, Ny=%d, Nxy=%d' % (Nx, Ny, Nxy)) # save these numbers for the diff image loader self.Nx = Nx @@ -161,7 +161,7 @@ def load(self, indices): viewed along the beam, i e they have (-q1, q2) indexing. PtyScan can always flip/rotate images. 
""" - print '*** load' + print('*** load') raw, positions, weights = {}, {}, {} for ind in indices: @@ -175,7 +175,7 @@ def load(self, indices): return raw, positions, weights def load_weight(self): - print '*** load_weight' + print('*** load_weight') with h5py.File(self.p.maskfile) as fp: mask = np.array(fp['mask']) return mask diff --git a/ptypy/experiment/optiklabor.py b/ptypy/experiment/optiklabor.py index 2e77a1fe6..79f69242d 100644 --- a/ptypy/experiment/optiklabor.py +++ b/ptypy/experiment/optiklabor.py @@ -179,7 +179,7 @@ def correct(self, raw, weights, common): weights = {} expos = common['exposures'] darks = common['darks'] - for j,rr in raw.iteritems(): + for j, rr in raw.items(): data_hdr,lmask=u.hdr_image(rr, expos, thresholds=self.hdr_thresholds, dark_list=darks, avg_type='highest') data[j] = data_hdr weights[j] = lmask[-1] @@ -191,7 +191,7 @@ def correct(self, raw, weights, common): RS = RawScan(p,num_frames=50,roi=512 ) RS.initialize() RS.report() - print 'loading data' + print('loading data') msg = True for i in range(200): if msg is False: diff --git a/ptypy/experiment/plugin.py b/ptypy/experiment/plugin.py index 655c86103..45f8be669 100644 --- a/ptypy/experiment/plugin.py +++ b/ptypy/experiment/plugin.py @@ -12,7 +12,7 @@ """ import os -import imp +from importlib.machinery import SourceFileLoader import inspect from .. import utils as u from ..core.data import PtyScan @@ -46,21 +46,21 @@ def makeScanPlugin(pars=None): raise IOError('Plugin file %s is not a python file.' % str(filename)) # Load plugin - plugin = imp.load_source(plugin_name, filename) + plugin = SourceFileLoader(plugin_name, filename).load_module() # Find the PtyScan class if rinfo.classname is None: # We try to find the class ptyscan_objects = {} - for name, obj in plugin.__dict__.iteritems(): + for name, obj in plugin.__dict__.items(): if inspect.isclass(obj) and issubclass(obj, PtyScan) and obj is not PtyScan: ptyscan_objects[name] = obj if not ptyscan_objects: raise RuntimeError('Failed to find a PtyScan subclass in plugin %s' % plugin_name) elif len(ptyscan_objects) > 1: - raise RuntimeError('Multiple PtyScan subclasses in plugin %s: %s' % (plugin_name, str(ptyscan_objects.keys()))) + raise RuntimeError('Multiple PtyScan subclasses in plugin %s: %s' % (plugin_name, str(list(ptyscan_objects.keys())))) # Class found - ptyscan_obj_name = ptyscan_objects.keys()[0] + ptyscan_obj_name = list(ptyscan_objects.keys())[0] ptyscan_obj = ptyscan_objects[ptyscan_obj_name] else: ptyscan_obj_name = rinfo.classname diff --git a/ptypy/experiment/spec.py b/ptypy/experiment/spec.py index 615975cb7..c1ef67d80 100644 --- a/ptypy/experiment/spec.py +++ b/ptypy/experiment/spec.py @@ -20,7 +20,7 @@ def verbose(n,s): This function should be replaced by the real verbose class after import. It is here for convenience since this module has no other external dependencies. 
""" - print s + print(s) class SpecScan(object): pass @@ -51,7 +51,7 @@ def parse(self, rehash=False): continue_reading = True while continue_reading: try: - line = f.next(); lnum += 1 + line = next(f); lnum += 1 except StopIteration: break if line.startswith('#O0'): @@ -59,7 +59,7 @@ def parse(self, rehash=False): mlist = line.split(' ',1)[1].strip().split() motordefs.append(lnum) while True: - line = f.next(); lnum += 1 + line = next(f); lnum += 1 if not line.startswith('#O'): break mlist.extend( line.split(' ',1)[1].strip().split() ) # This updates the current list of motor names @@ -69,7 +69,7 @@ def parse(self, rehash=False): _,scannr,scancmd = line.split(' ', 2) scannr = int(scannr) scancmd = scancmd.strip() - if (not rehash) and self.scans.has_key(scannr): + if (not rehash) and scannr in self.scans: #print('Skipping known scan #S %d' % scannr) continue #print line.strip() @@ -89,12 +89,12 @@ def parse(self, rehash=False): # Get a new line, exit everything if we are # at the end of a file try: - line = f.next(); lnum += 1 + line = next(f); lnum += 1 except StopIteration: continue_reading = False break if line.startswith('#START_TIME'): - line = f.next(); lnum += 1 + line = next(f); lnum += 1 if line.startswith('#'): # We have the beginning of a new section label = line[1] @@ -155,14 +155,14 @@ def parse(self, rehash=False): # Data try: - data = zip(*[[float(x) for x in Lline.split()] for Lline in scanstr['L'][1:]]) + data = list(zip(*[[float(x) for x in Lline.split()] for Lline in scanstr['L'][1:]])) scan.data = dict(zip(scan.counternames, data)) except Exception as e: good_scan = False verbose(1, 'Error extracting counter values for scan number %d.' % scannr) verbose(1, e.message) - scan.valid = good_scan + scan.valid = good_scan scans[scannr] = scan self.scans = scans diff --git a/ptypy/io/__init__.py b/ptypy/io/__init__.py index f2cee42d7..e81674c3a 100644 --- a/ptypy/io/__init__.py +++ b/ptypy/io/__init__.py @@ -14,5 +14,5 @@ from .. import __has_zmq__ as hzmq if hzmq: - import interaction + from . 
import interaction del hzmq diff --git a/ptypy/io/edfIO.py b/ptypy/io/edfIO.py index ebbe586ef..99ebc8f09 100644 --- a/ptypy/io/edfIO.py +++ b/ptypy/io/edfIO.py @@ -218,9 +218,9 @@ def readHeader(filename, headerlength=None): # convert to dictionary hdict = dict(hlist[0:-1]) # convert counter and motor settings in separate dictionaries inside hdict - if hdict.has_key('counter_mne'): + if 'counter_mne' in hdict: hdict["counter"] = dict(zip(hdict["counter_mne"].split(" "),[convertStr(elem) for elem in hdict["counter_pos"].split(" ")])) - if hdict.has_key('motor_mne'): + if 'motor_mne' in hdict: hdict["motor"] = dict(zip(hdict["motor_mne"].split(" "),[convertStr(elem) for elem in hdict["motor_pos"].split(" ")])) # add header length in meta-data hdict["headerlength"] = headerlength diff --git a/ptypy/io/h5rw.py b/ptypy/io/h5rw.py index 690f71380..a446bce35 100644 --- a/ptypy/io/h5rw.py +++ b/ptypy/io/h5rw.py @@ -9,12 +9,11 @@ """ import h5py import numpy as np -import cPickle import time import os import glob from collections import OrderedDict - +import pickle from ..utils import Param from ..utils.verbose import logger @@ -23,8 +22,7 @@ h5options = dict( H5RW_VERSION='0.1', H5PY_VERSION=h5py.version.version, - # UNSUPPORTED = 'pickle' - # UNSUPPORTED = 'ignore' + # UNSUPPORTED = 'ignore', UNSUPPORTED='fail', SLASH_ESCAPE='_SLASH_') STR_CONVERT = [type] @@ -36,7 +34,7 @@ def sdebug(f): """ def newf(*args, **kwds): - print '{0:20} {1:20}'.format(f.func_name, args[2]) + print('{0:20} {1:20}'.format(f.__name__, args[2])) return f(*args, **kwds) newf.__doc__ = f.__doc__ @@ -63,7 +61,7 @@ def _h5write(filename, mode, *args, **kwargs): Writes variables var1, var2, ... to file filename. The file mode can be chosen according to the h5py documentation. The key-value - arguments have precedence on the provided dictionnary. + arguments have precedence on the provided dictionary. supported variable types are: * scalars @@ -72,8 +70,7 @@ def _h5write(filename, mode, *args, **kwargs): * lists * dictionaries - (if the option UNSUPPORTED is equal to 'pickle', any other type - is pickled and saved. UNSUPPORTED = 'ignore' silently eliminates + (Setting the option UNSUPPORTED equal to 'ignore' eliminates unsupported types. Default is 'fail', which raises an error.) The file mode can be chosen according to the h5py documentation. 
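# --- Editor's illustrative sketch (not part of the patch) --------------------
# Typical round trip with the supported types listed in the docstring above,
# mirroring the calls used in ptypy/test/io_tests/*; the filename is made up:
#
#   import numpy as np
#   import ptypy.io as io
#
#   data = {'a_scalar': 3.0,
#           'an_array': np.ones((4, 4)),
#           'a_string': 'hello',
#           'a_list': [1, 2, 3],
#           'nested': {'flag': True}}
#
#   io.h5options['UNSUPPORTED'] = 'fail'              # default: unsupported types raise
#   io.h5write('example.h5', content=data)            # keyword args take precedence over a dict
#   loaded = io.h5read('example.h5', 'content')['content']
# ------------------------------------------------------------------------------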
@@ -85,7 +82,7 @@ def _h5write(filename, mode, *args, **kwargs): ctime = time.asctime() mtime = ctime - # Update input dictionnary + # Update input dictionary if args: d = args[0].copy() # shallow copy else: @@ -119,14 +116,8 @@ def _store_numpy(group, a, name, compress=True): # @sdebug def _store_string(group, s, name): - dset = group.create_dataset(name, data=np.asarray(s), dtype=dt) - dset.attrs['type'] = 'string' - return dset - - # @sdebug - def _store_unicode(group, s, name): dset = group.create_dataset(name, data=np.asarray(s.encode('utf8')), dtype=dt) - dset.attrs['type'] = 'unicode' + dset.attrs['type'] = 'string' return dset # @sdebug @@ -163,11 +154,11 @@ def _store_tuple(group, t, name): # @sdebug def _store_dict(group, d, name): check_id(id(d)) - if any([type(k) not in [str, unicode] for k in d.keys()]): + if any([type(k) not in [str,] for k in d.keys()]): raise RuntimeError('Only dictionaries with string keys are supported.') dset = group.create_group(name) dset.attrs['type'] = 'dict' - for k, v in d.iteritems(): + for k, v in d.items(): if k.find('/') > -1: k = k.replace('/', h5options['SLASH_ESCAPE']) ndset = _store(dset, v, k) @@ -181,11 +172,11 @@ def _store_dict(group, d, name): # @sdebug def _store_ordered_dict(group, d, name): check_id(id(d)) - if any([type(k) not in [str, unicode] for k in d.keys()]): + if any([type(k) not in [str,] for k in d.keys()]): raise RuntimeError('Only dictionaries with string keys are supported.') dset = group.create_group(name) dset.attrs['type'] = 'ordered_dict' - for k, v in d.iteritems(): + for k, v in d.items(): if k.find('/') > -1: k = k.replace('/', h5options['SLASH_ESCAPE']) ndset = _store(dset, v, k) @@ -207,37 +198,36 @@ def _store_dict_new(group, d, name): check_id(id(d)) dset = group.create_group(name) dset.attrs['type'] = 'dict' - for i, kv in enumerate(d.iteritems()): + for i, kv in enumerate(d.items()): _store(dset, kv, '%05d' % i) pop_id(id(d)) return dset + # @sdebug + + def _store_pickle(group, a, name): + apic = pickle.dumps(a) + group[name] = np.string_(apic) + group[name].attrs['type'] = 'pickle' + return group[name] + # @sdebug def _store_None(group, a, name): dset = group.create_dataset(name, data=np.zeros((1,))) dset.attrs['type'] = 'None' return dset - # @sdebug - def _store_pickle(group, a, name): - apic = cPickle.dumps(a) - dset = group.create_dataset(name, data=np.asarray(apic), dtype=dt) - dset.attrs['type'] = 'pickle' - return dset - # @sdebug def _store_numpy_record_array(group, a, name): - apic = cPickle.dumps(a) - dset = group.create_dataset(name, data=np.asarray(apic), dtype=h5py.special_dtype(vlen=unicode)) - dset.attrs['type'] = 'record_array' - return dset + dumped_array = a.dumps() + group[name] =np.string_(dumped_array) + group[name].attrs['type'] = 'record_array' + return group[name] # @sdebug def _store(group, a, name): if type(a) is str: dset = _store_string(group, a, name) - elif type(a) is unicode: - dset = _store_unicode(group, a, name) elif type(a) is dict: dset = _store_dict(group, a, name) elif type(a) is OrderedDict: @@ -277,7 +267,7 @@ def _store(group, a, name): f.attrs['h5rw_version'] = h5options['H5RW_VERSION'] f.attrs['ctime'] = ctime f.attrs['mtime'] = mtime - for k, v in d.iteritems(): + for k, v in d.items(): # if the first group key exists, make an overwrite, i.e. 
delete group `k` # Otherwise it was not possible in this framework to write # into an existing file, where a key is already occupied, @@ -296,7 +286,7 @@ def h5write(filename, *args, **kwargs): h5write(filename, dict, var1=..., var2=...) Writes variables var1, var2, ... to file filename. The key-value - arguments have precedence on the provided dictionnary. + arguments have precedence on the provided dictionary. supported variable types are: * scalars @@ -305,8 +295,7 @@ def h5write(filename, *args, **kwargs): * lists * dictionaries - (if the option UNSUPPORTED is equal to 'pickle', any other type - is pickled and saved. UNSUPPORTED = 'ignore' silently eliminates + (Setting the option UNSUPPORTED equal to 'ignore' eliminates unsupported types. Default is 'fail', which raises an error.) The file mode can be chosen according to the h5py documentation. @@ -324,7 +313,7 @@ def h5append(filename, *args, **kwargs): h5append(filename, dict, var1=..., var2=...) Appends variables var1, var2, ... to file filename. The - key-value arguments have precedence on the provided dictionnary. + key-value arguments have precedence on the provided dictionary. supported variable types are: * scalars @@ -333,8 +322,7 @@ def h5append(filename, *args, **kwargs): * lists * dictionaries - (if the option UNSUPPORTED is equal to 'pickle', any other type - is pickled and saved. UNSUPPORTED = 'ignore' silently eliminates + (Setting the option UNSUPPORTED equal to 'ignore' eliminates unsupported types. Default is 'fail', which raises an error.) The file mode can be chosen according to the h5py documentation. @@ -407,7 +395,7 @@ def h5read(filename, *args, **kwargs): # Define helper functions def _load_dict_new(dset): d = {} - keys = dset.keys() + keys = list(dset.keys()) keys.sort() for k in keys: dk, dv = _load(dset[k]) @@ -426,7 +414,7 @@ def _load_dict(dset, depth): def _load_list(dset, depth): l = [] if depth > 0: - keys = dset.keys() + keys = list(dset.keys()) keys.sort() for k in keys: l.append(_load(dset[k], depth - 1)) @@ -453,24 +441,26 @@ def _load_ordered_dict(dset, depth): d[k] = _load(v, depth - 1) return d - def _load_numpy_record_array(dset): - #return cPickle.loads(dset.value.encode('utf-8')) - return cPickle.loads(dset[()].encode('utf-8')) - def _load_str(dset): - #return str(dset.value) return str(dset[()]) def _load_unicode(dset): - #return dset.value.decode('utf-8') return dset[()].decode('utf-8') def _load_pickle(dset): #return cPickle.loads(dset.value) - return cPickle.loads(dset[()]) + return pickle.loads(dset[()]) + + def _load_numpy_record_array(dset): + d = dset[()] + if isinstance(d, str): + d = d.encode() + return pickle.loads(d) def _load(dset, depth, sl=None): dset_type = dset.attrs.get('type', None) + if isinstance(dset_type, bytes): + dset_type = dset_type.decode() # Treat groups as dicts if (dset_type is None) and (type(dset) is h5py.Group): @@ -488,8 +478,6 @@ def _load(dset, depth, sl=None): val = val[sl] elif dset_type == 'ordered_dict': val = _load_ordered_dict(dset, depth) - elif dset_type == 'record_array': - val = _load_numpy_record_array(dset) elif dset_type == 'array': val = _load_numpy(dset, sl) elif dset_type == 'arraylist': @@ -508,6 +496,8 @@ def _load(dset, depth, sl=None): val = _load_str(dset) if sl is not None: val = val[sl] + elif dset_type == 'record_array': + val = _load_numpy_record_array(dset) elif dset_type == 'unicode': val = _load_unicode(dset) if sl is not None: @@ -537,7 +527,7 @@ def _load(dset, depth, sl=None): try: f = h5py.File(filename, 'r') except: - print 
'Error when opening file %s.' % filename + print('Error when opening file %s.' % filename) raise else: with f: @@ -546,12 +536,12 @@ def _load(dset, depth, sl=None): # print('Warning: this file does not seem to follow h5read format.') ctime = f.attrs.get('ctime', None) if ctime is not None: - logger.debug('File created : ' + ctime) + logger.debug('File created : ' + str(ctime)) if len(args) == 0: # no input arguments - load everything if slice is not None: raise RuntimeError('A variable name must be given when slicing.') - key_list = f.keys() + key_list = list(f.keys()) else: if (len(args) == 1) and (type(args[0]) is list): # input argument is a list of object names @@ -610,7 +600,7 @@ def _format_dict(d, key, dset, isParam=False): def _format_list(d, key, dset): stringout = ' ' * key[0] + ' * %s [list %d]:\n' % (key[1], len(dset)) if d > 0: - keys = dset.keys() + keys = list(dset.keys()) keys.sort() for k in keys: stringout += _format(d - 1, (key[0] + indent, ''), dset[k]) @@ -619,7 +609,7 @@ def _format_list(d, key, dset): def _format_tuple(key, dset): stringout = ' ' * key[0] + ' * %s [tuple]:\n' % key[1] if d > 0: - keys = dset.keys() + keys = list(dset.keys()) keys.sort() for k in keys: stringout += _format(d - 1, (key[0] + indent, ''), dset[k]) @@ -678,10 +668,6 @@ def _format_unicode(key, dset): stringout = ' ' * key[0] + ' * ' + key[1] + ' [unicode = "' + s + '"]\n' return stringout - def _format_pickle(key, dset): - stringout = ' ' * key[0] + ' * ' + key[1] + ' [pickled object]\n' - return stringout - def _format_None(key, dset): stringout = ' ' * key[0] + ' * ' + key[1] + ' [None]\n' return stringout @@ -719,8 +705,6 @@ def _format(d, key, dset): stringout = _format_scalar(key, dset) elif dset_type == 'None': stringout = _format_None(key, dset) - elif dset_type == 'pickle': - stringout = _format_pickle(dset) elif dset_type is None: stringout = _format_numpy(key, dset) else: @@ -735,12 +719,12 @@ def _format(d, key, dset): if ctime is not None: print('File created : ' + ctime) if not path.endswith('/'): path += '/' - key_list = f[path].keys() + key_list = list(f[path].keys()) outstring = '' for k in key_list: outstring += _format(depth, (0, k), f[path + k]) - print outstring + print(outstring) # return string if output variable passed as option if output != None: diff --git a/ptypy/io/image_read.py b/ptypy/io/image_read.py index ecf8d40ae..dddf9c69f 100644 --- a/ptypy/io/image_read.py +++ b/ptypy/io/image_read.py @@ -47,7 +47,7 @@ def image_read(filename, *args, **kwargs): elif ext == '.h5': h5_image = h5read(filename, *args, **kwargs) def look_for_ndarray(d): - for k, v in d.iteritems(): + for k, v in d.items(): if isinstance(v, np.ndarray): return k, v elif isinstance(v, dict): diff --git a/ptypy/io/image_read.py~ b/ptypy/io/image_read.py~ deleted file mode 100644 index 184343f5a..000000000 --- a/ptypy/io/image_read.py~ +++ /dev/null @@ -1,66 +0,0 @@ -import sys -if sys.platform!='win32': - import cbfIO - from cbfIO import cbfread -import numpy as np -from imageIO import imread -from edfIO import edfread -from rawIO import rawread - -__all__ = ['image_read'] - - -def image_read(filename, *args, **kwargs): - """\ - Attempts to import image data from any file. 
- """ - import os - import glob - - use_imread = True - special_format = ['.raw','.cbf', '.edf', '.h5'] - - if glob.has_magic(filename): - # Extra check in case the filename's extension is a wildcard - all_ext = set([os.path.splitext(f)[1].lower() for f in glob.glob(filename)]) - subset = all_ext.intersection(special_format) - if len(subset) == 1: - ext = subset.pop() - use_imread = False - filename = os.path.splitext(filename)[0] + ext - else: - ext = os.path.splitext(filename)[1].lower() - if ext in special_format: - use_imread = False - if use_imread: - return imread(filename, *args, **kwargs) - if ext == '.cbf': - return cbfread(filename, *args, **kwargs) - elif ext == '.edf': - return edfread(filename, *args, **kwargs) - elif ext == '.raw': - return rawread(filename, *args, **kwargs) - elif ext == '.h5': - h5_image = h5read(filename, *args, **kwargs) - def look_for_ndarray(d): - for k,v in d.iteritems(): - if type(v) is np.ndarray: - return k,v - elif type(v) is type({}): - out = look_for_ndarray(v) - if out is not None: return (k,) +out - else: pass - return None - if isinstance(h5_image, list): - h5_arrays = [] - h5_metas = [] - for h5s in h5_image: - h5a = look_for_ndarray(h5s) - h5_arrays.append(h5a[-1]) - h5_metas.append({'filename':filename, 'path': '/'.join(h5a[0:-1])}) - return h5_arrays, h5_metas - else: - h5_array = look_for_ndarray(h5_image) - return h5_array[-1], {'filename':filename, 'path': '/'.join(h5_array[0:-1])} - else: - raise RuntimeError('Unkown file type') diff --git a/ptypy/io/interaction.py b/ptypy/io/interaction.py index e2b19a436..884619588 100644 --- a/ptypy/io/interaction.py +++ b/ptypy/io/interaction.py @@ -10,14 +10,13 @@ :license: GPLv2, see LICENSE for details. """ -from __future__ import print_function import zmq import time import string import random import sys from threading import Thread, Event -import Queue +import queue import numpy as np import re import json @@ -33,7 +32,7 @@ # DEBUG = print def ID_generator(size=6, chars=string.ascii_uppercase + string.digits): - """\ + """ Generate a random ID string made of capital letters and digits. size [default=6] is the length of the string. """ @@ -41,7 +40,7 @@ def ID_generator(size=6, chars=string.ascii_uppercase + string.digits): def is_str(s): - """\ + """ Test if s behaves like a string. """ try: @@ -53,7 +52,7 @@ def is_str(s): class NumpyEncoder(json.JSONEncoder): - """\ + """ Custom JSON Encoder class that take out numpy arrays from a structure and replace them with a code string. """ @@ -88,7 +87,7 @@ def default(self, obj): def numpy_replace(obj, arraylist): - """\ + """ Takes an object decoded by JSON and replaces the arrays where they should be. (this function is recursive). """ @@ -99,7 +98,7 @@ def numpy_replace(obj, arraylist): return obj elif isinstance(obj, dict): newobj = {} - for k, v in obj.iteritems(): + for k, v in obj.items(): newobj[k] = numpy_replace(v, arraylist) return newobj elif isinstance(obj, list): @@ -109,7 +108,7 @@ def numpy_replace(obj, arraylist): def numpy_zmq_send(out_socket, obj): - """\ + """ Send the given object using JSON, taking care of numpy arrays. 
""" @@ -142,7 +141,7 @@ def numpy_zmq_send(out_socket, obj): def numpy_zmq_recv(in_socket): - """\ + """ Receive a JSON object, taking care of numpy arrays """ numpy_container = in_socket.recv_json() @@ -152,7 +151,7 @@ def numpy_zmq_recv(in_socket): arraylist = [] for arrayinfo in numpy_container['arraylist']: msg = in_socket.recv() - buf = buffer(msg) + buf = memoryview(msg) arraylist.append(np.frombuffer(buf, dtype=arrayinfo['dtype']).reshape(arrayinfo['shape'])) return numpy_replace(message, arraylist) else: @@ -279,7 +278,7 @@ def __init__(self, pars=None, **kwargs): self.pingtime = time.time() # Command queue - self.queue = Queue.Queue() + self.queue = queue.Queue() # Initialize flags to communicate state between threads. self._need_process = False @@ -304,7 +303,7 @@ def __init__(self, pars=None, **kwargs): def make_ID_pool(self): - port_range = range(self.port+1,self.port+self.p.connections+1) + port_range = list(range(self.port+1,self.port+self.p.connections+1)) # Initial ID pool IDlist = [] # This loop ensures all IDs are unique @@ -312,7 +311,7 @@ def make_ID_pool(self): newID = ID_generator() if newID not in IDlist: IDlist.append(newID) - self.ID_pool = zip(IDlist, port_range) + self.ID_pool = list(zip(IDlist, port_range)) def activate(self): """ @@ -449,7 +448,7 @@ def _checkping(self): if now - self.pingtime > self.pinginterval: # Time to check todisconnect = [] - for ID, lastping in self.pings.iteritems(): + for ID, lastping in self.pings.items(): if now - lastping > self.pingtimeout: # Timeout! Force disconnection todisconnect.append(ID) @@ -518,7 +517,7 @@ def _cmd_avail(self, ID, args): Send available objects. """ DEBUG('Processing an AVAIL command') - return {'status': 'ok', 'avail': self.objects.keys()} + return {'status': 'ok', 'avail': list(self.objects.keys())} def _cmd_ping(self, ID, args): """\ @@ -599,7 +598,7 @@ def _process(self): while True: try: q = self.queue.get_nowait() - except Queue.Empty: + except queue.Empty: break # Keep track of ticket number @@ -666,7 +665,7 @@ def register(self, obj, name): For now this is equivalent to Interactor.object[name] = obj, but maybe use weakref in the future? """ - if self.objects.has_key(name): + if name in self.objects: logger.debug('Warning an object called %s already there.' % name) self.objects[name] = obj @@ -694,7 +693,6 @@ class Client(object): """ Basic but complete client to interact with the server. - Defaults: [address] @@ -724,7 +722,7 @@ class Client(object): doc = Interval with which to check pings, in seconds. [connection_timeout] - default = 3600000. + default = 3600000.0 type = float help = Timeout for dead server doc = Timeout for dead server, in milliseconds. @@ -732,40 +730,12 @@ class Client(object): """ def __init__(self, pars=None, **kwargs): - """ - Parameters - ---------- - pars : dict or Param - Parameter set for the client, see :py:attr:`DEFAULT` - - Keyword Arguments - ----------------- - address : str - Primary address of the remote server. - - port : int - Primary port of the remote server. - - poll_timeout : float - Network polling interval (in milliseconds!). - - pinginterval : float - Interval to check pings (in seconds). 
- - """ p = self.DEFAULT.copy() p.update(pars) p.update(kwargs) self.p = p - """ - # sanity check for port range: - if str(p.port_range)==p.port_range: - from ptypy.utils import str2range - p.port_range = str2range(p.port_range) - """ - self.req_address = p.address self.req_port = p.port self.poll_timeout = p.poll_timeout @@ -843,7 +813,7 @@ def _run(self): fulladdress = self.bind_address + ':' + str(self.bind_port) self.bind_socket = self.context.socket(zmq.SUB) self.bind_socket.connect(fulladdress) - self.bind_socket.setsockopt(zmq.SUBSCRIBE, "") + self.bind_socket.setsockopt_string(zmq.SUBSCRIBE, "") # Initialize poller self.poller = zmq.Poller() @@ -864,7 +834,7 @@ def _run(self): self._listen() def _listen(self): - """\ + """ Main event loop (running on a thread). """ while not self._stopping: @@ -906,7 +876,7 @@ def _listen(self): self.connected = False def _ping(self): - """\ + """ Send a ping """ now = time.time() @@ -916,19 +886,19 @@ def _ping(self): return def _send(self, out_socket, obj): - """\ + """ Send the given object using JSON, taking care of numpy arrays. """ numpy_zmq_send(out_socket, obj) def _recv(self, in_socket): - """\ + """ Receive a JSON object, taking care of numpy arrays """ return numpy_zmq_recv(in_socket) def _read_message(self): - """\ + """ Read the message sent by the server and store the accompanying data if needed. """ @@ -966,7 +936,7 @@ def flush(self): self.datatag = {} def poll(self, ticket=None, tag=None): - """\ + """ Returns true if the transaction for a given ticket is completed. If ticket and tag are None, returns true only if no transaction is pending """ @@ -979,7 +949,7 @@ def poll(self, ticket=None, tag=None): return ticket in self.completed def wait(self, ticket=None, tag=None, timeout=None): - """\ + """ Blocks and return True only when the transaction for a given ticket is completed. If ticket is None, returns only when no more transaction are pending. If timeout is a positive number, wait will return False after timeout seconds if the ticket(s) @@ -1002,13 +972,13 @@ def wait(self, ticket=None, tag=None, timeout=None): return False def newdata(self, ticket): - """\ + """ Meant to be replaced, e.g. to send signals to a GUI. """ pass def unexpected_ticket(self, ticket): - """\ + """ Used to deal with warnings sent by the server. """ logger.debug(str(ticket) + ': ' + str(self.data[ticket])) @@ -1029,7 +999,7 @@ def stop(self): self._thread.join(3) def avail(self): - """\ + """ Queries the server for the name of objects available. ! Synchronous call ! """ @@ -1039,7 +1009,7 @@ def avail(self): return self.last_reply def do(self, execstr, timeout=0, tag=None): - """\ + """ Modify and object using an exec string. This function returns the "ticket number" which identifies the object once it will have been transmitted. If timeout > 0 and the requested object has @@ -1059,7 +1029,7 @@ def do(self, execstr, timeout=0, tag=None): return ticket def get(self, evalstr, timeout=0, tag=None): - """\ + """ Requests an object (or part of it) using an eval string. This function returns the "ticket number" which identifies the object once it will have been transmitted. If timeout > 0 and the requested object has @@ -1079,7 +1049,7 @@ def get(self, evalstr, timeout=0, tag=None): return ticket def set(self, varname, varvalue, timeout=0, tag=None): - """\ + """ Sets an object named varname to the value varvalue. 
""" ticket = self.masterticket + 1 diff --git a/ptypy/simulations/__init__.py b/ptypy/simulations/__init__.py index df088f532..f32e110dd 100644 --- a/ptypy/simulations/__init__.py +++ b/ptypy/simulations/__init__.py @@ -7,6 +7,6 @@ :copyright: Copyright 2014 by the PTYPY team, see AUTHORS. :license: GPLv2, see LICENSE for details. """ -from ptysim_utils import * -import detector -from simscan import SimScan +from .ptysim_utils import * +from . import detector +from .simscan import SimScan diff --git a/ptypy/simulations/detector.py b/ptypy/simulations/detector.py index 1d13357c6..05742785b 100644 --- a/ptypy/simulations/detector.py +++ b/ptypy/simulations/detector.py @@ -101,7 +101,7 @@ def __init__(self,pars=None): self.shape = expect2(self.shape) self._make_mask() if self.center is None: - self.center = expect2(self._mask.shape)/2 + self.center = expect2(self._mask.shape)//2 def _update(self,pars=None): if pars is not None: diff --git a/ptypy/simulations/simscan.py b/ptypy/simulations/simscan.py index 7ec3cf857..9e7ca414e 100644 --- a/ptypy/simulations/simscan.py +++ b/ptypy/simulations/simscan.py @@ -13,7 +13,7 @@ if __name__ == "__main__": from ptypy import utils as u - from detector import Detector, conv + from .detector import Detector, conv from ptypy.core.data import PtyScan from ptypy.core.ptycho import Ptycho from ptypy.core.manager import Full as ScanModel @@ -23,7 +23,7 @@ from ptypy import defaults_tree else: from .. import utils as u - from detector import Detector, conv + from .detector import Detector, conv from ..core.data import PtyScan from ..core.ptycho import Ptycho from ..core.manager import Full as ScanModel @@ -166,7 +166,7 @@ def __init__(self, pars=None, **kwargs): # Simulate diffraction signal logger.info('Propagating exit waves.') - for name,pod in P.pods.iteritems(): + for name,pod in P.pods.items(): if not pod.active: continue pod.diff += conv(u.abs2(pod.fw(pod.exit)), self.info.psf) @@ -185,7 +185,7 @@ def __init__(self, pars=None, **kwargs): self.pos = {} - ID,Sdiff = P.diff.S.items()[0] + ID,Sdiff = list(P.diff.S.items())[0] logger.info('Collecting simulated `raw` data.') for view in Sdiff.views: ind = view.layer diff --git a/ptypy/test/core_tests/classes_test.py b/ptypy/test/core_tests/classes_test.py index 56ac708fc..7aae2856f 100644 --- a/ptypy/test/core_tests/classes_test.py +++ b/ptypy/test/core_tests/classes_test.py @@ -30,10 +30,6 @@ :copyright: Copyright 2014 by the PTYPY team, see AUTHORS. :license: GPLv2, see LICENSE for details. """ -# Python 2/3 compatibility -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function # Modules import unittest @@ -182,7 +178,7 @@ def test_post_dict_import(self): def test_to_dict(self): """Extract information from container object and store in a dict""" self.assertListEqual( - self.basic_base._to_dict().keys(), + list(self.basic_base._to_dict().keys()), self.basic_base.__slots__, 'Converting container object information to dictionary failed.' ) @@ -713,32 +709,32 @@ def test__imul__(self): 'Testing __imul__ function failed.' 
) - def test__idiv__(self): - """Testing __idiv__ function""" + def test__truediv__(self): + """Testing __truediv__ function""" # Test container case # Not ideal, as value needs to be added first for reasonable check self.basic_container_ifte.__iadd__(24) - self.basic_container_ifte.__idiv__(self.basic_container_iftf) + self.basic_container_ifte.__truediv__(self.basic_container_iftf) self.assertTrue( np.array_equal( self.basic_container_ifte.storages['S0000'].data, np.array([[[1. + 0.j]]]) ), - 'Testing __idiv__ function failed.' + 'Testing __truediv__ function failed.' ) # Test scalar case # Not ideal, as value needs to be added first for reasonable check self.basic_container_dpt.__iadd__(25) - self.basic_container_dpt.__idiv__(25) + self.basic_container_dpt.__truediv__(25) self.assertTrue( np.array_equal( self.basic_container_dpt.storages['S0000'].data, np.array([[[1. + 0.j]]]) ), - 'Testing __idiv__ function failed.' + 'Testing __truediv__ function failed.' ) def test__lshift__(self): diff --git a/ptypy/test/core_tests/core_test.py b/ptypy/test/core_tests/core_test.py index f17f78889..30f5dd7c6 100644 --- a/ptypy/test/core_tests/core_test.py +++ b/ptypy/test/core_tests/core_test.py @@ -29,7 +29,7 @@ class CoreTest(unittest.TestCase): data = S1[V1] V1.coord = (0.28, 0.28) S1.update_views(V1) - print V1.storageID + print(V1.storageID) mn = S1[V1].mean() S1.fill_value = mn S1.reformat() diff --git a/ptypy/test/core_tests/geometry_bragg_test.py b/ptypy/test/core_tests/geometry_bragg_test.py index 73fcab3c5..f81da33bd 100644 --- a/ptypy/test/core_tests/geometry_bragg_test.py +++ b/ptypy/test/core_tests/geometry_bragg_test.py @@ -31,7 +31,7 @@ def testViews(self): positions = np.array([np.arange(10), np.arange(10), np.arange(10)]).T for pos_ in positions: View(C, storageID='S0', psize=.2, coord=pos_, shape=12) - S = C.storages.values()[0] + S = list(C.storages.values())[0] S.reformat() cov = np.array(np.real(S.get_view_coverage()), dtype=int) # some numerical checks diff --git a/ptypy/test/core_tests/geometry_test.py b/ptypy/test/core_tests/geometry_test.py index 445505c0c..84e4c868d 100644 --- a/ptypy/test/core_tests/geometry_test.py +++ b/ptypy/test/core_tests/geometry_test.py @@ -29,11 +29,11 @@ def set_up_farfield(self): def test_geometry_farfield_init(self): G = self.set_up_farfield() - print G.resolution + print(G.resolution) def test_geometry_farfield_resolution(self): G = self.set_up_farfield() - print G.resolution + print(G.resolution) assert (np.round(G.resolution*1e5,2) == np.array([1.30, 1.30])).all(), "geometry resolution incorrect for the far-field" def set_up_nearfield(self): diff --git a/ptypy/test/io_tests/file_saving_test.py b/ptypy/test/io_tests/file_saving_test.py index aaaf3d358..f4642086f 100644 --- a/ptypy/test/io_tests/file_saving_test.py +++ b/ptypy/test/io_tests/file_saving_test.py @@ -145,7 +145,7 @@ def test_output_file_saving_separate_save_run_kind_minimal(self): PtychoOutput = tu.EngineTestRunner(engine_params,propagator='farfield', output_path=outpath, output_file=None) file_path = outpath + 'reconstruction.h5' - print "now I am saving with save_run" + print("now I am saving with save_run") PtychoOutput.save_run(file_path, kind='minimal') diff --git a/ptypy/test/io_tests/h5rw_load_test.py b/ptypy/test/io_tests/h5rw_load_test.py index bca0ce680..bdc3c7786 100644 --- a/ptypy/test/io_tests/h5rw_load_test.py +++ b/ptypy/test/io_tests/h5rw_load_test.py @@ -11,7 +11,6 @@ import ptypy.io as io import ptypy.utils as u import numpy as np -import cPickle import 
collections @@ -136,5 +135,4 @@ def test_pickle_unsupported(self): io.h5write(self.filepath % "load_pickle_test", content=content) out = io.h5read(self.filepath % "load_pickle_test", "content")["content"] np.testing.assert_equal(type(out['pickle data']), type(content['pickle data'])) - np.testing.assert_equal(out['pickle data'].__dict__, content['pickle data'].__dict__) - + np.testing.assert_equal(out['pickle data'].__dict__, content['pickle data'].__dict__) \ No newline at end of file diff --git a/ptypy/test/io_tests/h5rw_store_test.py b/ptypy/test/io_tests/h5rw_store_test.py index 37cc9e320..1d39c8c41 100644 --- a/ptypy/test/io_tests/h5rw_store_test.py +++ b/ptypy/test/io_tests/h5rw_store_test.py @@ -11,9 +11,10 @@ import ptypy.io as io import ptypy.utils as u import numpy as np -import cPickle import collections +class owntype(object): + pass class H5rwStoreTest(unittest.TestCase): @@ -56,7 +57,7 @@ def test_store_ordered_dict(self): data["flower"] = 2.0 data['an array'] = np.ones((3,3)) content = {'ordered dict data': data} - print self.filepath % "store_ordered_dict_test" + print(self.filepath % "store_ordered_dict_test") try: io.h5write(self.filepath % "store_ordered_dict_test", content=content) except: @@ -109,7 +110,6 @@ def test_store_numpy_record_array(self): try: io.h5write(self.filepath % "store_record_array_test", content=content) except: - raise self.fail(msg="Couldn't store a record array type") def test_store_scalar(self): @@ -138,11 +138,8 @@ def test_store_STR_CONVERT(self): pass def test_fail_unsupported(self): - class owntype: - pass def test_func(): - data = owntype() content = {'Owntype data': data} io.h5write(self.filepath % "store_dummytype_test", content=content) @@ -152,9 +149,6 @@ def test_func(): def test_ignore_unsupported(self): io.h5options['UNSUPPORTED'] = 'ignore' - class owntype: - pass - def test_func(): data = owntype() content = {'Owntype data': data} @@ -164,13 +158,9 @@ def test_func(): except: self.fail(msg="This should not have produced an exception!") - def test_pickle_unsupported(self): io.h5options['UNSUPPORTED'] = 'pickle' - class owntype: - pass - def test_func(): data = owntype() content = {'pickle data': data} @@ -181,3 +171,5 @@ def test_func(): except: self.fail(msg="This should not have produced an exception!") +if __name__=='__main__': + unittest.main() \ No newline at end of file diff --git a/ptypy/test/io_tests/load_run_test.py b/ptypy/test/io_tests/load_run_test.py index 76c70c4d9..48b6930af 100644 --- a/ptypy/test/io_tests/load_run_test.py +++ b/ptypy/test/io_tests/load_run_test.py @@ -86,23 +86,23 @@ def test_load_run(self): # set_vals = P.p._to_dict(Recursive=True) # print set_vals # file_vals = content.pars._to_dict(Recursive=True) - # for name, val in file_vals.iteritems(): + # for name, val in file_vals.items(): # self.assertEqual(file_vals[name], set_vals[name]) # self.assertDictEqual(content.pars._to_dict(Recursive=True), set_vals) np.random.seed(1) b = Ptycho.load_run(file_path) np.testing.assert_equal(type(b), type(Pcomp)) - for name, st in b.mask.storages.iteritems(): + for name, st in b.mask.storages.items(): np.testing.assert_equal(st.data, P.mask.storages[name].data) - for name, st in b.diff.storages.iteritems(): + for name, st in b.diff.storages.items(): np.testing.assert_equal(st.data, P.diff.storages[name].data) - for name, st in b.probe.storages.iteritems(): + for name, st in b.probe.storages.items(): np.testing.assert_equal(st.data, P.probe.storages[name].data) - for name, st in b.obj.storages.iteritems(): + for 
name, st in b.obj.storages.items(): np.testing.assert_equal(st.data, P.obj.storages[name].data) diff --git a/ptypy/test/util_tests/descriptor_test.py b/ptypy/test/util_tests/descriptor_test.py index fad1803ef..60633c785 100644 --- a/ptypy/test/util_tests/descriptor_test.py +++ b/ptypy/test/util_tests/descriptor_test.py @@ -592,4 +592,4 @@ def test_load_json(self): pass if __name__ == "__main__": - unittest.main() \ No newline at end of file + unittest.main() diff --git a/ptypy/utils/__init__.py b/ptypy/utils/__init__.py index 5dc473e5d..e8e8b13b0 100644 --- a/ptypy/utils/__init__.py +++ b/ptypy/utils/__init__.py @@ -13,7 +13,7 @@ from .scripts import * from .parameters import Param, asParam from .verbose import * -from citations import * +from .citations import * from . import descriptor from . import parallel from .. import __has_matplotlib__ as hmpl diff --git a/ptypy/utils/array_utils.py b/ptypy/utils/array_utils.py index e641fb87e..2f373aaea 100644 --- a/ptypy/utils/array_utils.py +++ b/ptypy/utils/array_utils.py @@ -99,7 +99,7 @@ def rebin_2d(A, rebin=1): rebin """ sh = np.asarray(A.shape[-2:]) - newdim = sh / rebin + newdim = sh // rebin if not (sh % rebin == 0).all(): raise ValueError('Last two axes %s of input array `A` cannot be binned by %s' % (str(tuple(sh)),str(rebin))) else: @@ -170,7 +170,7 @@ def rebin(a, *args,**kwargs): Returns ------- out : ndarray - Rebined array. + Rebinned array. Examples -------- @@ -185,13 +185,13 @@ def rebin(a, *args,**kwargs): """ shape = a.shape lenShape = a.ndim - factor = np.asarray(shape)/np.asarray(args) + factor = np.asarray(shape)//np.asarray(args) evList = ['a.reshape('] + \ ['args[%d],factor[%d],'%(i,i) for i in range(lenShape)] + \ [')'] + ['.sum(%d)'%(i+1) for i in range(lenShape)] + \ ['*( 1.'] + ['/factor[%d]'%i for i in range(lenShape)] + [')'] if kwargs.get('verbose',False): - print ''.join(evList) + print(''.join(evList)) return eval(''.join(evList)) def _confine(A): @@ -381,9 +381,9 @@ def fill3D(A,B,offset=[0,0,0]): Ao[Ao<0]=0 Bo = -off.copy() Bo[Bo<0]=0 - print Ao,Bo + print(Ao,Bo) if (Bo > Blim).any() or (Ao > Alim).any(): - print "misfit" + print("misfit") pass else: A[Ao[0]:min(off[0]+Blim[0],Alim[0]),Ao[1]:min(off[1]+Blim[1],Alim[1]),Ao[2]:min(off[2]+Blim[2],Alim[2])] \ @@ -573,7 +573,7 @@ def crop_pad_axis(A,hplanes,axis=-1,roll=0,fillpar=0.0, filltype='scalar'): """ if np.isscalar(hplanes): hplanes=int(hplanes) - r=np.abs(hplanes) / 2 * np.sign(hplanes) + r=np.abs(hplanes) // 2 * np.sign(hplanes) l=hplanes - r elif len(hplanes)==2: l=int(hplanes[0]) diff --git a/ptypy/utils/descriptor.py b/ptypy/utils/descriptor.py index 37aad58b4..e587a9e57 100644 --- a/ptypy/utils/descriptor.py +++ b/ptypy/utils/descriptor.py @@ -24,7 +24,7 @@ __all__ = ['Descriptor', 'ArgParseDescriptor', 'EvalDescriptor'] -class CODES: +class CODES(object): PASS = 1 FAIL = 0 UNKNOWN = 2 @@ -97,7 +97,7 @@ def __init__(self, name, parent=None, separator='.'): @property def option_keys(self): - return self._all_options.keys() + return list(self._all_options.keys()) @property def is_child(self): @@ -381,16 +381,16 @@ def load_conf_parser(self, fbuffer, **kwargs): Keyword arguments are forwarded to `ConfigParser.RawConfigParser` """ - from ConfigParser import RawConfigParser as Parser + from configparser import RawConfigParser as Parser #kwargs['empty_lines_in_values'] = True # This will only work in Python3 parser = Parser(**kwargs) - parser.readfp(fbuffer) + parser.read_file(fbuffer) for num, sec in enumerate(parser.sections()): desc = 
self.new_child(name=sec, options=dict(parser.items(sec))) return parser - def from_string(self, s, **kwargs): + def from_string(self, s, strict=False, **kwargs): """ Load Parameter from string using Python's ConfigParser @@ -399,9 +399,9 @@ def from_string(self, s, **kwargs): Keyword arguments are forwarded to `ConfigParser.RawConfigParser` """ - from StringIO import StringIO + from io import StringIO s = textwrap.dedent(s) - return self.load_conf_parser(StringIO(s), **kwargs) + return self.load_conf_parser(StringIO(s), strict=strict, **kwargs) def save_conf_parser(self, fbuffer, print_optional=True): """ @@ -410,7 +410,7 @@ def save_conf_parser(self, fbuffer, print_optional=True): Each parameter occupies its own section. Separator characters in sections names map to a tree-hierarchy. """ - from ConfigParser import RawConfigParser as Parser + from configparser import RawConfigParser as Parser parser = Parser() for name, desc in self.descendants: parser.add_section(name) @@ -425,8 +425,8 @@ def to_string(self): """ Return the full content of descriptor as a string in configparser format. """ - import StringIO - s = StringIO.StringIO() + import io + s = io.StringIO() self.save_conf_parser(s) return s.getvalue().strip() @@ -434,13 +434,13 @@ def __str__(self): """ Pretty-print the Parameter options in ConfigParser format. """ - from ConfigParser import RawConfigParser as Parser - import StringIO + from configparser import RawConfigParser as Parser + import io parser = Parser() parser.add_section(self.name) for k, v in self.options.items(): parser.set(self.name, k, v) - s = StringIO.StringIO() + s = io.StringIO() parser.write(s) return s.getvalue().strip() @@ -489,7 +489,7 @@ def eval(self, val): """ try: return ast.literal_eval(val) - except ValueError or SyntaxError as e: + except (ValueError, SyntaxError) as e: msg = e.args[0] + ". 
could not read %s for parameter %s" % (val, self.path) raise ValueError(msg) except SyntaxError as e: @@ -570,7 +570,7 @@ def __call__(self, parser, namespace, values, option_string=None): return CustomAction # Add all arguments - for argname, desc in ndesc.iteritems(): + for argname, desc in ndesc.items(): if desc.name in excludes or argname in groups: continue @@ -829,7 +829,7 @@ def _walk(self, depth=0, pars=None, ignore_symlinks=False, ignore_wildcards=Fals return # Detect wildcard - wildcard = (self.children.keys() == ['*']) + wildcard = (list(self.children.keys()) == ['*']) # Grab or check children if wildcard: @@ -914,11 +914,17 @@ def check(self, pars, depth=99): if (lowlim is None) or (path not in pars) or (pars[path] is None): out[path]['lowlim'] = CODES.PASS else: - out[path]['lowlim'] = CODES.PASS if (pars[path] >= lowlim) else CODES.FAIL + if hasattr(pars[path], "__iter__"): + out[path]['lowlim'] = CODES.PASS if all([(ix>= lowlim) for ix in pars[path]]) else CODES.FAIL + else: + out[path]['lowlim'] = CODES.PASS if (pars[path] >= lowlim) else CODES.FAIL if uplim is None or pars[path] is None: out[path]['uplim'] = CODES.PASS else: - out[path]['uplim'] = CODES.PASS if (pars[path] <= uplim) else CODES.FAIL + if hasattr(pars[path], "__iter__"): + out[path]['uplim'] = CODES.PASS if all([(ix <= uplim) for ix in pars[path]]) else CODES.FAIL + else: + out[path]['uplim'] = CODES.PASS if (pars[path] <= uplim) else CODES.FAIL elif res['status'] == 'wrongtype': # Wrong type out[path]['type'] = CODES.INVALID diff --git a/ptypy/utils/math_utils.py b/ptypy/utils/math_utils.py index 79a61fd1f..f35e58268 100644 --- a/ptypy/utils/math_utils.py +++ b/ptypy/utils/math_utils.py @@ -127,8 +127,8 @@ def gaussian2D(size, std_x=1.0, std_y=1.0, off_x=0.0, off_y=0.0): raise RuntimeError('Input size has to be integer.') y, x = np.mgrid[0:size, 0:size] - x = x - size / 2 - y = y - size / 2 + x = x - size // 2 + y = y - size // 2 xpart = (x - off_x)**2 / (2 * std_x**2) ypart = (y - off_y)**2 / (2 * std_y**2) return np.exp(-(xpart + ypart)) / (2 * np.pi * std_x * std_y) @@ -158,7 +158,7 @@ def delxf(a, axis=-1, out=None): Derived array. """ nd = a.ndim - axis = range(nd)[axis] + axis = list(range(nd))[axis] slice1 = [slice(1, None) if i == axis else slice(None) for i in range(nd)] slice2 = [slice(None, -1) if i == axis else slice(None) for i in range(nd)] @@ -199,7 +199,7 @@ def delxb(a, axis=-1): """ nd = a.ndim - axis = range(nd)[axis] + axis = list(range(nd))[axis] slice1 = [slice(1, None) if i == axis else slice(None) for i in range(nd)] slice2 = [slice(None, -1) if i == axis else slice(None) for i in range(nd)] b = np.zeros_like(a) @@ -229,7 +229,7 @@ def delxc(a,axis=-1): Derived array. 
""" nd = a.ndim - axis = range(nd)[axis] + axis = list(range(nd))[axis] slice_middle = [slice(1,-1) if i==axis else slice(None) for i in range(nd)] b = delxf(a, axis) + delxb(a, axis) b[slice_middle] *= 0.5 diff --git a/ptypy/utils/misc.py b/ptypy/utils/misc.py index 765fcdf7a..5ec395316 100644 --- a/ptypy/utils/misc.py +++ b/ptypy/utils/misc.py @@ -42,13 +42,13 @@ def __init__(self,dct,name='pods'): self._record_factory_from_dict(dct) def _record_factory_from_dict(self,dct,suffix='_record'): - self._record_factory = namedtuple(self._table_name+suffix,dct.keys()) - self._record_default = self._record_factory._make(dct.values()) + self._record_factory = namedtuple(self._table_name+suffix,list(dct.keys())) + self._record_default = self._record_factory._make(list(dct.values())) self._record_dtype = [np.array(v).dtype for v in self._record_default] def new_table(self, records = 0): r = self._record_default - dtype = zip(r._fields,self._record_dtype) + dtype = list(zip(r._fields,self._record_dtype)) self._table = np.array([tuple(self._record_default)] * records,dtype) def new_fields(self,**kwargs): @@ -69,15 +69,15 @@ def new_fields(self,**kwargs): def pull_records(self,record_ids=None): if record_ids is None: - return map(self._record_factory._make, self._table) + return list(map(self._record_factory._make, self._table)) else: - return map(self._record_factory._make, self._table[record_ids]) + return list(map(self._record_factory._make, self._table[record_ids])) def add_records(self,records): """ Add records at the end of the table. """ start = len(self._table) stop = len(records)+start - record_ids = range(start,stop) + record_ids = list(range(start,stop)) self._table.resize((len(self._table)+len(records),)) self._table[start:stop]=records @@ -93,7 +93,7 @@ def select_func(self,func,fields=None): Arguments to the function are selected by `fields`. The search function will always receive the record_id as first argument. 
""" - a = range(len(self._table)) + a = list(range(len(self._table))) if fields is None: res = [n for n in a if func(a)] else: @@ -178,7 +178,7 @@ def str2range(s): elif len(il)==3: start, stop, step = il - return range(start,stop,step) + return list(range(start,stop,step)) def str2int(A): """ diff --git a/ptypy/utils/parallel.py b/ptypy/utils/parallel.py index f58b4aebc..eaf9c4156 100644 --- a/ptypy/utils/parallel.py +++ b/ptypy/utils/parallel.py @@ -207,7 +207,7 @@ def allreduceC(c): -------- ptypy.utils.parallel.allreduce """ - for s in c.S.itervalues(): + for s in c.S.values(): allreduce(s.data) def _MPIop(a, op, axis=None): @@ -578,7 +578,7 @@ def gather_dict(dct, target=0): # your turn to send l = len(dct) comm.send(l, dest=target,tag=9999) - for k,v in dct.iteritems(): + for k,v in dct.items(): #print rank,str(k),v #send(k, dest=target) comm.send(k, dest=target,tag=9999) @@ -899,7 +899,7 @@ def _gather_dict(dct, target=0): # your turn to send l = len(dct) comm.send(l, dest=target) - for item in dct.iteritems(): + for item in dct.items(): send(item, dest=target) barrier() diff --git a/ptypy/utils/parameters.py b/ptypy/utils/parameters.py index d40efb0c0..adc6a4dcc 100644 --- a/ptypy/utils/parameters.py +++ b/ptypy/utils/parameters.py @@ -48,7 +48,7 @@ def __init__(self, __d__=None, **kwargs): self.update(kwargs) def __getstate__(self): - return self.__dict__.items() + return list(self.__dict__.items()) def __setstate__(self, items): for key, val in items: @@ -111,7 +111,7 @@ def copy(self, depth=0): """ d = Param(self) if depth > 0: - for k, v in d.iteritems(): + for k, v in d.items(): if isinstance(v, self.__class__): d[k] = v.copy(depth - 1) return d @@ -125,8 +125,7 @@ def __dir__(self): True. """ if self._display_items_as_attributes: - return self.keys() - # return [item.__dict__.get('name',str(key)) for key,item in self.iteritems()] + return list(self.keys()) else: return [] @@ -169,7 +168,7 @@ def _k_v_update(k, v): # If an element is itself a dict, convert it to Param if Convert and hasattr(v, 'keys') and not isinstance(v, self.__class__): v = Param(v) - v.update(v.items(), in_place_depth - 1, Convert) + v.update(list(v.items()), in_place_depth - 1, Convert) # new key if k not in self: @@ -195,7 +194,7 @@ def _k_v_update(k, v): if __d__ is not None: if hasattr(__d__, 'keys'): # Iterate through dict-like argument - for k, v in __d__.iteritems(): + for k, v in __d__.items(): _k_v_update(k, v) else: @@ -203,7 +202,7 @@ def _k_v_update(k, v): for (k, v) in __d__: _k_v_update(k, v) - for k, v in kwargs.iteritems(): + for k, v in kwargs.items(): _k_v_update(k, v) return None @@ -216,7 +215,7 @@ def _to_dict(self, Recursive=False): return dict(self) else: d = dict(self) - for k, v in d.iteritems(): + for k, v in d.items(): if isinstance(v, self.__class__): d[k] = v._to_dict(Recursive) return d @@ -240,7 +239,7 @@ def validate_standard_param(sp, p=None, prefix=None): """ if p is None: good = True - for k, v in sp.iteritems(): + for k, v in sp.items(): if k.startswith('_'): continue if type(v) == type(sp): pref = k if prefix is None else '.'.join([prefix, k]) @@ -250,15 +249,15 @@ def validate_standard_param(sp, p=None, prefix=None): try: a, b, c = v if prefix is not None: - print ' %s.%s = %s' % (prefix, k, str(v)) + print(' %s.%s = %s' % (prefix, k, str(v))) else: - print ' %s = %s' % (k, str(v)) + print(' %s = %s' % (k, str(v))) except: good = False if prefix is not None: - print '!!! %s.%s = %s <--- Incorrect' % (prefix, k, str(v)) + print('!!! 
%s.%s = %s <--- Incorrect' % (prefix, k, str(v))) else: - print '!!! %s = %s <--- Incorrect' % (k, str(v)) + print('!!! %s = %s <--- Incorrect' % (k, str(v))) return good else: @@ -271,8 +270,8 @@ def format_standard_param(p): """ lines = [] if not validate_standard_param(p): - print 'Standard parameter does not' - for k, v in p.iteritems(): + print('Standard parameter does not') + for k, v in p.items(): if k.startswith('_'): continue if type(v) == type(p): sublines = format_standard_param(v) diff --git a/ptypy/utils/plot_client.py b/ptypy/utils/plot_client.py index bd2e5fa01..60d860334 100644 --- a/ptypy/utils/plot_client.py +++ b/ptypy/utils/plot_client.py @@ -228,7 +228,7 @@ def _initialize(self): log(self.log_level,'Client ready') # Get the list of object IDs - ob_IDs = self.client.get_now("Ptycho.obj.S.keys()") + ob_IDs = self.client.get_now("list(Ptycho.obj.S.keys())") log(self.log_level,'1 object to plot.' if len(ob_IDs) == 1 else '%d objects to plot.' % len(ob_IDs)) # Prepare the data requests @@ -240,7 +240,7 @@ def _initialize(self): self.cmd_dct["Ptycho.obj.S['%s'].center" % str(ID)] = [None, S, 'center'] # Get the list of probe IDs - pr_IDs = self.client.get_now("Ptycho.probe.S.keys()") + pr_IDs = self.client.get_now("list(Ptycho.probe.S.keys())") log(self.log_level,'1 probe to plot.' if len(pr_IDs) == 1 else '%d probes to plot.' % len(pr_IDs)) # Prepare the data requests @@ -265,7 +265,7 @@ def _request_data(self): """ Request all data to the server (asynchronous). """ - for cmd, item in self.cmd_dct.iteritems(): + for cmd, item in self.cmd_dct.items(): item[0] = self.client.get(cmd) def _store_data(self): @@ -273,7 +273,7 @@ def _store_data(self): Transfer all data from the client to local attributes. """ with self._lock: - for cmd, item in self.cmd_dct.iteritems(): + for cmd, item in self.cmd_dct.items(): item[1][item[2]] = self.client.data[item[0]] # An extra step for the error. This should be handled differently at some point. 
# self.error = np.array([info['error'].sum(0) for info in self.runtime.iter_info]) @@ -361,7 +361,7 @@ def simplify_aspect_ratios(sh): if layers is None: layers = cont.data.shape[0] if np.isscalar(layers): - layers = range(layers) + layers = list(range(layers)) plot.layers = layers plot.axes_index = len(num_shape_list) num_shape = [len(layers)*len(plot.auto_display)+int(plot.local_error), sh] @@ -390,7 +390,7 @@ def simplify_aspect_ratios(sh): if layers is None: layers = cont.data.shape[0] if np.isscalar(layers): - layers = range(layers) + layers = list(range(layers)) plot.layers = layers plot.axes_index = len(num_shape_list) num_shape = [len(layers)*len(plot.auto_display), sh] @@ -402,10 +402,8 @@ def simplify_aspect_ratios(sh): w, h, l, r, b, t = self.p.gridspecpars gs.update(wspace=w*sy, hspace=h*sx, left=l, right=r, bottom=b, top=t) self.draw() - plot_fig.hold(False) for axes in axes_list: for pl in axes: - pl.hold(False) plt.setp(pl.get_xticklabels(), fontsize=8) plt.setp(pl.get_yticklabels(), fontsize=8) self.plot_fig = plot_fig @@ -506,10 +504,9 @@ def plot_error(self): err_fmag = error[:, 0] err_phot = error[:, 1] err_exit = error[:, 2] - axis.hold(False) + axis.clear() fmag = err_fmag/np.max(err_fmag) axis.plot(fmag, label='err_fmag %2.2f%% of %.2e' % (fmag[-1]*100, np.max(err_fmag))) - axis.hold(True) phot = err_phot/np.max(err_phot) axis.plot(phot, label='err_phot %2.2f%% of %.2e' % (phot[-1]*100, np.max(err_phot))) ex = err_exit/np.max(err_exit) @@ -749,9 +746,9 @@ def plot_shrinkwrap(self): def plot_object(self): - data = self.ob.values()[0]['data'][0] - center = self.ob.values()[0]['center'] - psize = self.ob.values()[0]['psize'] + data = list(self.ob.values())[0]['data'][0] + center = list(self.ob.values())[0]['center'] + psize = list(self.ob.values())[0]['psize'] lims_r3 = (-center[0] * psize[0], (data.shape[0] - center[0]) * psize[0]) lims_r1 = (-center[1] * psize[1], (data.shape[1] - center[1]) * psize[1]) lims_r2 = (-center[2] * psize[2], (data.shape[2] - center[2]) * psize[2]) @@ -842,7 +839,7 @@ def spawn_MPLClient(client_pars, autoplot_pars, home=None): except KeyboardInterrupt: pass finally: - print 'Stopping plot client...' + print('Stopping plot client...') mplc.pc.stop() if __name__ =='__main__': diff --git a/ptypy/utils/plot_utils.py b/ptypy/utils/plot_utils.py index 5a5a0974d..08002dd57 100644 --- a/ptypy/utils/plot_utils.py +++ b/ptypy/utils/plot_utils.py @@ -69,10 +69,10 @@ def __init__(self, timeout, message): def run(self): sys.stdout.flush() if self.timeout < 0: - raw_input(self.message) + input(self.message) else: if self.message is not None: - print self.message + print(self.message) time.sleep(self.timeout) self.ct = False @@ -122,10 +122,10 @@ def pause(timeout=-1, message=None): if timeout < 0: if message is None: message = 'Paused. Hit return to continue.' - raw_input(message) + input(message) else: if message is not None: - print message + print(message) time.sleep(timeout) # FIXME: Is this still needed? @@ -743,9 +743,9 @@ def _update(self,renew_image=False): # determine number of points. 
v, h = self.shape steps = [1, 2, 5, 10, 20, 50, 100, 200, 500, 1000, 1500, 2000, 3000] - Nindex = steps[max([v / s <= 4 for s in steps].index(True) - 1, 0)] + Nindex = steps[max([v // s <= 4 for s in steps].index(True) - 1, 0)] self.ax.yaxis.set_major_locator(mpl.ticker.IndexLocator(Nindex, 0.5)) - Nindex = steps[max([h / s <= 4 for s in steps].index(True) - 1, 0)] + Nindex = steps[max([h // s <= 4 for s in steps].index(True) - 1, 0)] self.ax.xaxis.set_major_locator(mpl.ticker.IndexLocator(Nindex, 0.5)) else: self.ax.images[0].set_data(pilim) diff --git a/ptypy/utils/scripts.py b/ptypy/utils/scripts.py index 19e763236..afbf74c53 100644 --- a/ptypy/utils/scripts.py +++ b/ptypy/utils/scripts.py @@ -7,8 +7,8 @@ :license: GPLv2, see LICENSE for details. """ import numpy as np -import parallel -import urllib2 # TODO: make compatible with python 3.5 +from . import parallel +import urllib.request, urllib.error, urllib.parse from scipy import ndimage as ndi from . import array_utils as au @@ -364,7 +364,7 @@ def png2mpg(listoffiles, framefile='frames.txt', fps=5, bitrate=2000, try: os.remove(line) except OSError: - print OSError + print(OSError) print('Removing %s failed .. continuing' % line) continue nff.close() @@ -463,7 +463,7 @@ def rectint(x, a, b): z = ind[1] + 1j*ind[0] spokeint, spokestep = np.linspace(0.0 * np.pi, 1.0 * np.pi, - spokes / 2, + spokes // 2, False, True) spokeint += spokestep / 2 @@ -557,7 +557,7 @@ def radial_distribution(A, radii=None): """ if radii is None: - radii = range(1, np.min(A.shape) / 2) + radii = list(range(1, np.min(A.shape) // 2)) coords = np.indices(A.shape) - np.reshape(mass_center(A), (A.ndim,) + A.ndim * (1,)) @@ -607,7 +607,7 @@ def stxm_analysis(storage, probe=None): t2 = 0. # Pick a single probe view for preparation purpose: v = s.views[0] - pp = v.pods.values()[0].pr_view + pp = list(v.pods.values())[0].pr_view if probe is None: pr = np.abs(pp.data) # .sum(0) elif np.isscalar(probe): @@ -618,7 +618,7 @@ def stxm_analysis(storage, probe=None): assert (pr.shape == pp.shape[-2:]), 'stxm probe has not the same shape as a view to this storage' for v in s.views: - pod = v.pods.values()[0] + pod = list(v.pods.values())[0] if not pod.active: continue t = pod.diff.sum() @@ -632,7 +632,7 @@ def stxm_analysis(storage, probe=None): m = mass_center(pod.diff) # + 1. 
q = pod.di_view.storage._to_phys(m) dpc_row[ss] += q[0] * v.psize[0] * pr * 2 * np.pi / pod.geometry.lz - dpc_col[ss] += q[1] * v.psize[1] * pr * 2 * np.pi/pod.geometry.lz + dpc_col[ss] += q[1] * v.psize[1] * pr * 2 * np.pi / pod.geometry.lz trans[ss] += np.sqrt(t) * pr nrm[ss] += pr @@ -697,7 +697,7 @@ def load_from_ptyr(filename, what='probe', ID=None, layer=None): else: address = 'content/' + str(what) conti = io.h5read(filename, address)[address] - storage = conti.values()[0] + storage = list(conti.values())[0] if layer is None: return storage['data'] else: @@ -794,15 +794,16 @@ def cxro_iref(formula, energy, density=-1, npts=100): url = cxro_iref.cxro_server + '/cgi-bin/getdb.pl' #u.logger.info('Querying CRXO database...') - req = urllib2.Request(url, data) - response = urllib2.urlopen(req) - t = response.read() + data = data.encode("utf-8") + req = urllib.request.Request(url) + response = urllib.request.urlopen(req, data=data) + t = response.read().decode() datafile = t[t.find('/tmp/'):].split('"')[0] url = cxro_iref.cxro_server + datafile - req = urllib2.Request(url) - response = urllib2.urlopen(req) - data = response.read() + req = urllib.request.Request(url) + response = urllib.request.urlopen(req) + data = response.read().decode() d = data.split('\n') dt = np.array([[float(x) for x in dd.split()] for dd in d[2:] if dd]) diff --git a/ptypy/utils/verbose.py b/ptypy/utils/verbose.py index dbb556a0d..6e6ae300c 100644 --- a/ptypy/utils/verbose.py +++ b/ptypy/utils/verbose.py @@ -4,7 +4,7 @@ Use as: from verbose import logger -logger.warn('This is a warning') +logger.warning('This is a warning') logger.info('This is an information') ... @@ -151,14 +151,14 @@ def _(label, value): def headerline(info='',align = 'c',fill='-'): li = len(info) if li>=60: - return headerline(info[li/2:],align,fill)+'\n'+headerline(info[:li/2],align,fill) + return headerline(info[li//2:],align,fill)+'\n'+headerline(info[:li//2],align,fill) else: if li != 0: li+=2 info = ' '+info+' ' empty = LINEMAX-li if align=='c': - left = empty/2 + left = empty // 2 right = empty-left elif align=='l': left = 4 @@ -196,7 +196,7 @@ def _format_dict(label, level, obj): header+= ' %s(%d)' % (extra,len(obj)) + hn if level <= depth: #level +=1 - for k,v in obj.iteritems(): + for k,v in obj.items(): header += _format(k,level+1,v) return header @@ -236,7 +236,7 @@ def _format_None(key,level, obj): return _(key, level, obj)[0] + ' None\n' def _format(key,level, obj): - if hasattr(obj,'iteritems'): + if hasattr(obj,'items'): stringout = _format_dict(key,level, obj) elif type(obj) is np.ndarray: stringout = _format_numpy(key,level, obj) diff --git a/release_notes.md b/release_notes.md index bac602b98..a028abaf1 100644 --- a/release_notes.md +++ b/release_notes.md @@ -1,4 +1,16 @@ -# Ptypy 0.3 release notes +# PtyPy 0.4 release notes + +After quite some work we announce ptypy 0.4. Apart from including all the fixes and improvements from 0.3.0 to 0.3.1, it includes two bigger changes: + 1. Ptypy has now been converted to python 3 and will be **python 3 only** in the future. The python 2 version will not be actively maintained anymore; we will keep a branch for it for a while, but we don't expect to put in many fixes and certainly not any new features. Teamwork by Julio, Alex, Bjoern and Aaron. + *Please note: all branches that haven’t been converted to python 3 by the end of 2019 will most likely be removed during 2020.* Please rebase your effort on version 0.4. If you need help rebasing your efforts, please let us know soon. + 2.
Position correction is now supported in most engines. It has been implemented by Wilhelm Eschen following the annealing approach introduced by A.M. Maiden et al. (Ultramicroscopy, Volume 120, 2012, Pages 64-72). Bjoern, Benedikt and Aaron helped refine and test it. + +## Roadmap + +The next release will focus on scalability for HPC applications and GPU acceleration. + + +# PtyPy 0.3 release notes We are happy to announce that ptypy 0.3 is now out. If you have been using the ptypy 0.2 (from the master branch), the transition should be smooth but far from automatic - see below. The essence of this new release is 1. a redesign of ptypy's internal structure, especially the introduction of an extendable [`ScanModel`](https://github.com/ptycho/ptypy/blob/master/ptypy/core/manager.py), which should make new ideas and new algorithms easier to implement (a big collective effort involving A. Björling, A. Parsons, B. Enders and P. Thibault), diff --git a/scripts/ptypy.csv2cp b/scripts/ptypy.csv2cp index 2f9eb071b..19b9890ba 100644 --- a/scripts/ptypy.csv2cp +++ b/scripts/ptypy.csv2cp @@ -31,7 +31,7 @@ def print_param(entry, parent=None, depth=50): else: entry_name = '' if entry.children and depth > 0: - for childname, child in entry.children.iteritems(): + for childname, child in entry.children.items(): print_param(child, entry_name, depth=depth-1) return diff --git a/scripts/ptypy.inspect b/scripts/ptypy.inspect index 59b6da59d..da57c0bf8 100644 --- a/scripts/ptypy.inspect +++ b/scripts/ptypy.inspect @@ -1,20 +1,19 @@ #!/usr/bin/env python2 -import sys +from ptypy import utils as u +from ptypy import io import argparse parser = argparse.ArgumentParser(description='Shows a summary of the content of h5 compatible file (.ptyr,.ptyd) in terminal') parser.add_argument('h5file', type=str, help='path to hdf5 compatible file') -parser.add_argument('-p','--path',dest='path', type=str, help='path within that hdf5 compatible file',default='/') -parser.add_argument('--report', dest='report', action='store_true' , - help='use ptypy.utils.verbose.report instead of ptypy.io.h5info (it will load everything to ram).') -parser.add_argument('-d','--max-depth', dest='depth', type=int , - help='maximum depth for inspection (not implemented yet)') -args=parser.parse_args() -from ptypy import utils as u -from ptypy import io +parser.add_argument('-p', '--path', dest='path', type=str, help='path within that hdf5 compatible file', default='/') +parser.add_argument('--report', dest='report', action='store_true', + help='use ptypy.utils.verbose.report instead of ptypy.io.h5info (it will load everything to ram).') +parser.add_argument('-d', '--max-depth', dest='depth', type=int, + help='maximum depth for inspection (not implemented yet)') +args = parser.parse_args() if args.report: - print u.verbose.report(io.h5read(args.h5file,args.path,depth=args.depth).values()[0],noheader=True) + print(u.verbose.report(list(io.h5read(args.h5file, args.path, depth=args.depth).values())[0], noheader=True)) else: - io.h5info(args.h5file,args.path, depth=args.depth) + io.h5info(args.h5file, args.path, depth=args.depth) diff --git a/scripts/ptypy.plot b/scripts/ptypy.plot index 6f2ca4cc9..a644695aa 100644 --- a/scripts/ptypy.plot +++ b/scripts/ptypy.plot @@ -33,7 +33,7 @@ header = io.h5read(filename,'header')['header'] if str(header['kind']) == 'fullflat': raise NotImplementedError('Loading specific data from flattened dump not yet supported') else: - content = io.h5read(filename,'content').values()[0]#['content'] + content = 
list(io.h5read(filename,'content').values())[0]#['content'] runtime = content['runtime'] probes = u.Param() probes.update(content['probe'], Convert = True) diff --git a/scripts/ptypy.run b/scripts/ptypy.run index f2fd78eaf..4726e36b9 100644 --- a/scripts/ptypy.run +++ b/scripts/ptypy.run @@ -50,7 +50,7 @@ def _byteify(data, ignore_dicts = False): if isinstance(data, dict) and not ignore_dicts: return { _byteify(key, ignore_dicts=True): _byteify(value, ignore_dicts=True) - for key, value in data.iteritems() + for key, value in data.items() } # if it's anything else, return it in its original form return data diff --git a/setup.py b/setup.py index 216c9f497..8973af37b 100644 --- a/setup.py +++ b/setup.py @@ -13,8 +13,8 @@ """ MAJOR = 0 -MINOR = 3 -MICRO = 1 +MINOR = 4 +MICRO = 0 ISRELEASED = True VERSION = '%d.%d.%d' % (MAJOR, MINOR, MICRO) @@ -50,6 +50,7 @@ def write_version_py(filename='ptypy/version.py'): if __name__ == '__main__': write_version_py() + write_version_py('doc/version.py') try: execfile('ptypy/version.py') vers = version @@ -61,7 +62,7 @@ def write_version_py(filename='ptypy/version.py'): version=VERSION, author='Pierre Thibault, Bjoern Enders, Martin Dierolf and others', description='Ptychographic reconstruction toolbox', - long_description=file('README.rst', 'r').read(), + long_description=open('README.rst', 'r').read(), #install_requires = ['numpy>=1.8',\ #'h5py>=2.2',\ #'matplotlib>=1.3',\ diff --git a/templates/bragg_field_of_view.py b/templates/bragg_field_of_view.py index e8a9ed19e..4376e6ebc 100644 --- a/templates/bragg_field_of_view.py +++ b/templates/bragg_field_of_view.py @@ -41,10 +41,10 @@ S_true = P.model.scans['scan01'].ptyscan.simulated_object # We can grab the object storage from the Ptycho instance -S = P.obj.storages.values()[0] +S = list(P.obj.storages.values())[0] # Similarly, we can find a view of the probe -probeView = P.probe.views.values()[0] +probeView = list(P.probe.views.values())[0] # Let's define an object view to study objView = S.views[1] @@ -64,7 +64,7 @@ # coordinate system, which is good but hard to understand. We can # convert to orthogonal (z, x, y) space by using a method on the # geometry object, found from any of the pods. 
-geo = P.pods.values()[0].geometry +geo = list(P.pods.values())[0].geometry S_display_cart = geo.coordinate_shift(S_display, input_system='natural', input_space='real', keep_dims=True) # Plot some slices diff --git a/templates/minimal_prep_and_run_ML_Poisson.py b/templates/minimal_prep_and_run_ML_Poisson.py index f7c6ae227..374622217 100644 --- a/templates/minimal_prep_and_run_ML_Poisson.py +++ b/templates/minimal_prep_and_run_ML_Poisson.py @@ -19,7 +19,7 @@ p.io.home = "/tmp/ptypy/" p.io.autosave = None p.io.autoplot = u.Param() -p.io.autoplot.active = False +p.io.autoplot.active = True # max 100 frames (128x128px) of diffraction data p.scans = u.Param() diff --git a/templates/minimal_prep_and_run_probe_from_array.py b/templates/minimal_prep_and_run_probe_from_array.py index 33f9ae910..b089bc443 100644 --- a/templates/minimal_prep_and_run_probe_from_array.py +++ b/templates/minimal_prep_and_run_probe_from_array.py @@ -31,7 +31,7 @@ p.scans.MF.data.save = None p.scans.MF.illumination = u.Param() p.scans.MF.illumination.model = np.random.rand(*u.expect2(p.scans.MF.data.shape)) -print type(p.scans.MF.illumination.model) +print(type(p.scans.MF.illumination.model)) # position distance in fraction of illumination frame p.scans.MF.data.density = 0.2 # total number of photon in empty beam diff --git a/templates/on_the_fly_ptyd.py b/templates/on_the_fly_ptyd.py index 23aed062f..4752e2bf9 100644 --- a/templates/on_the_fly_ptyd.py +++ b/templates/on_the_fly_ptyd.py @@ -33,7 +33,7 @@ data.orientation = None # optionally validate the parameter tree -u.descriptor.defaults_tree['scandata.MoonFlowerScan'].validate(data) +ptypy.defaults_tree['scandata.MoonFlowerScan'].validate(data) # create PtyScan instance MF = ptypy.core.data.MoonFlowerScan(data) diff --git a/templates/pars_few_alldoc.py b/templates/pars_few_alldoc.py index 3457e499f..10f160397 100644 --- a/templates/pars_few_alldoc.py +++ b/templates/pars_few_alldoc.py @@ -10,7 +10,7 @@ ### Ptypy parameter tree ### -p = Param() +p = u.Param() # Verbosity level diff --git a/templates/position_refinement.py b/templates/position_refinement.py index 9e8d16b8e..2395c8ffb 100644 --- a/templates/position_refinement.py +++ b/templates/position_refinement.py @@ -56,7 +56,7 @@ a = 0. 
coords = [] -for pname, pod in P.pods.iteritems(): +for pname, pod in P.pods.items(): # Save real position coords.append(np.copy(pod.ob_view.coord)) before = pod.ob_view.coord diff --git a/templates/probe_sharing.py b/templates/probe_sharing.py index 17b93101d..caa2a4dd6 100644 --- a/templates/probe_sharing.py +++ b/templates/probe_sharing.py @@ -81,7 +81,7 @@ def make_sample(outpath): P = Ptycho(p,level=3) -s1, s2 = P.probe.storages.values() +s1, s2 = list(P.probe.storages.values()) # Transfer views for v in s2.views: v.storage = s1 diff --git a/templates/ptypy_laser_logo_focussed_632nm.py b/templates/ptypy_laser_logo_focussed_632nm.py index fc2dd15b7..937265217 100644 --- a/templates/ptypy_laser_logo_focussed_632nm.py +++ b/templates/ptypy_laser_logo_focussed_632nm.py @@ -4,6 +4,7 @@ import ptypy.simulations as sim import numpy as np + ### PTYCHO PARAMETERS p = u.Param() p.verbose_level = 3 @@ -42,8 +43,9 @@ sim.illumination.propagation.parallel = 0.03 sim.illumination.propagation.spot_size = None +ptypy_path = ptypy.__file__.strip('ptypy.__init__.py') sim.sample = u.Param() -sim.sample.model = -u.rgb2complex(u.imload('../resources/ptypy_logo_1M.png')[::-1,:,:-1]) +sim.sample.model = -u.rgb2complex(u.imload('%s/resources/ptypy_logo_1M.png' % ptypy_path)[::-1,:,:-1]) sim.sample.process = u.Param() sim.sample.process.offset = (0,0) sim.sample.process.zoom = 0.5 diff --git a/tutorial/bragg3d_initial.py b/tutorial/bragg3d_initial.py index 5f480cd71..fdf0e3dcd 100644 --- a/tutorial/bragg3d_initial.py +++ b/tutorial/bragg3d_initial.py @@ -25,7 +25,7 @@ # three dimensions. The first element of the shape is the number of # rocking curve positions, the first element of the psize denotes theta # step in degrees. -print g +print(g) # Set up scan positions along y, perpendicular to the incoming beam and # to the thin layer stripes. @@ -170,7 +170,7 @@ scaling.data[:] = 1 / scaling.data # then iterate with the appropriate update rule for i in range(100): - print i + print(i) criterion_ = 0.0 obj_error_ = 0.0 for j in range(len(views)): @@ -206,7 +206,7 @@ errors = [] ferrors = [] for i in range(10): - print i + print(i) ferrors_ = [] for j in range(len(views)): exit_ = views[j].data * probeView.data @@ -255,7 +255,7 @@ # iterate for i in range(100): - print i + print(i) ferrors_ = [] # fourier update, updates all the exit waves for j in range(len(views)): @@ -286,4 +286,4 @@ plt.draw() plt.pause(.01) -plt.show() \ No newline at end of file +plt.show() diff --git a/tutorial/minimal_script.py b/tutorial/minimal_script.py index 94cfc33ca..34492b115 100644 --- a/tutorial/minimal_script.py +++ b/tutorial/minimal_script.py @@ -65,14 +65,6 @@ # ptychographic scan. # PtyPy is designed to support reconstruction from mutliple scans. -# The ``scan`` branch of the tree holds all *common* parameters for scans -# and acts as a default template when there are more than one scan. -# Scan-specific parameters have to be placed in another branch called ``scans``. -# If there is only one scan, parameters can be given in either branch. -# In this tutorial we do not bother to enter parameters here so we leave the branch empty -# (It will be filled with the defaults of :py:data:`.scan` ) . -p.scan = u.Param() - # Each individual scan is represented by a branch in ``scans``. The parameters # in these branches are those that differ from the *defaults* in the ``scan`` # branch mentioned above. 
diff --git a/tutorial/ownengine.py b/tutorial/ownengine.py index a363ed6f3..5e3da091d 100644 --- a/tutorial/ownengine.py +++ b/tutorial/ownengine.py @@ -40,7 +40,7 @@ P = ptypy.core.Ptycho(p, level=2) # A quick look at the diffraction data -diff_storage = P.diff.storages.values()[0] +diff_storage = list(P.diff.storages.values())[0] fig = u.plot_storage(diff_storage, 0, slices=(slice(2), slice(None), slice(None)), modulus='log') fig.savefig('ownengine_%d.png' % fig.number, dpi=300) # Plot of simulated diffraction data for the first two positions. @@ -51,8 +51,8 @@ # Probe and object are not so exciting to look at for now. As default, # probes are initialized with an aperture like support. -probe_storage = P.probe.storages.values()[0] -fig = u.plot_storage(P.probe.S.values()[0], 1) +probe_storage = list(P.probe.storages.values())[0] +fig = u.plot_storage(list(P.probe.S.values())[0], 1) fig.savefig('ownengine_%d.png' % fig.number, dpi=300) # Plot of the starting guess for the probe. @@ -70,7 +70,7 @@ def fourier_update(pods): import numpy as np - pod = pods.values()[0] + pod = list(pods.values())[0] # Get Magnitude and Mask mask = pod.mask modulus = np.sqrt(np.abs(pod.diff)) @@ -79,13 +79,13 @@ def fourier_update(pods): err = 0. Dphi = {} # Propagate the exit waves - for gamma, pod in pods.iteritems(): + for gamma, pod in pods.items(): Dphi[gamma] = pod.fw(2*pod.probe*pod.object - pod.exit) Imodel += np.abs(Dphi[gamma] * Dphi[gamma].conj()) # Calculate common correction factor factor = (1-mask) + mask * modulus / (np.sqrt(Imodel) + 1e-10) # Apply correction and propagate back - for gamma, pod in pods.iteritems(): + for gamma, pod in pods.items(): df = pod.bw(factor*Dphi[gamma]) - pod.probe*pod.object pod.exit += df err += np.mean(np.abs(df*df.conj())) @@ -99,7 +99,7 @@ def probe_update(probe, norm, pods, fill=0.): """ probe *= fill norm << fill + 1e-10 - for name, pod in pods.iteritems(): + for name, pod in pods.items(): if not pod.active: continue probe[pod.pr_view] += pod.object.conj() * pod.exit norm[pod.pr_view] += pod.object * pod.object.conj() @@ -115,7 +115,7 @@ def object_update(obj, norm, pods, fill=0.): """ obj *= fill norm << fill + 1e-10 - for pod in pods.itervalues(): + for pod in pods.values(): if not pod.active: continue pod.object += pod.probe.conj() * pod.exit norm[pod.ob_view] += pod.probe * pod.probe.conj() @@ -131,7 +131,7 @@ def iterate(Ptycho, num): for i in range(num): err = 0 # fourier update - for di_view in Ptycho.diff.V.itervalues(): + for di_view in Ptycho.diff.V.values(): if not di_view.active: continue err += fourier_update(di_view.pods) # probe update @@ -140,7 +140,7 @@ def iterate(Ptycho, num): object_update(Ptycho.obj, obj_norm, Ptycho.pods) # print error errors.append(err) - if i % 3==0: print err + if i % 3==0: print(err) # cleanup P.obj.delete_copy() P.probe.delete_copy() @@ -153,14 +153,14 @@ def iterate(Ptycho, num): # We note that the error (here only displayed for 3 iterations) is # already declining. That is a good sign. # Let us have a look how the probe has developed. -fig = u.plot_storage(P.probe.S.values()[0], 2) +fig = u.plot_storage(list(P.probe.S.values())[0], 2) fig.savefig('ownengine_%d.png' % fig.number, dpi=300) # Plot of the reconstructed probe after 9 iterations. We observe that # the actaul illumination of the sample must be larger than the initial # guess. # Looks like the probe is on a good way. How about the object? 
-fig = u.plot_storage(P.obj.S.values()[0], 3, slices='0,120:-120,120:-120') +fig = u.plot_storage(list(P.obj.S.values())[0], 3, slices='0,120:-120,120:-120') fig.savefig('ownengine_%d.png' % fig.number, dpi=300) # Plot of the reconstructed object after 9 iterations. It is not quite # clear what object is reconstructed @@ -170,13 +170,13 @@ def iterate(Ptycho, num): # Error is still on a steady descent. Let us look at the final # reconstructed probe and object. -fig = u.plot_storage(P.probe.S.values()[0], 4) +fig = u.plot_storage(list(P.probe.S.values())[0], 4) fig.savefig('ownengine_%d.png' % fig.number, dpi=300) # Plot of the reconstructed probe after a total of 45 iterations. # It's a moon ! -fig = u.plot_storage(P.obj.S.values()[0], 5, slices='0,120:-120,120:-120') +fig = u.plot_storage(list(P.obj.S.values())[0], 5, slices='0,120:-120,120:-120') fig.savefig('ownengine_%d.png' % fig.number, dpi=300) # Plot of the reconstructed object after a total of 45 iterations. # It's a bunch of flowers ! diff --git a/tutorial/ptypyclasses.py b/tutorial/ptypyclasses.py index e37b6a629..40ae7d941 100644 --- a/tutorial/ptypyclasses.py +++ b/tutorial/ptypyclasses.py @@ -59,15 +59,15 @@ # As we haven't specified an ID the Container class picks one for ``S1`` # In this case that will be ``S0000`` where the *S* refers to the class type. -print S1.ID +print(S1.ID) # Let's have a look at what kind of data Storage holds. -print S1.formatted_report()[0] +print(S1.formatted_report()[0]) # Apart from the ID on the left we discover a few other entries, for # example the quantity ``psize`` which refers to the physical pixel size # for the last two dimensions of the stored data. -print S1.psize +print(S1.psize) # Many attributes of a :any:`Storage` are in fact *properties*. Changing # their value may have an impact on other methods or attributes of the @@ -76,19 +76,19 @@ # that creates coordinate grids for the last two dimensions (see also # :py:func:`ptypy.utils.array_utils.grids`) y, x = S1.grids() -print y -print x +print(y) +print(x) # which are coordinate grids for vertical and horizontal axes respectively. # We note that these coordinates have their center at -print S1.center +print(S1.center) # Now we change a few properties. For example, S1.center = (2, 2) S1.psize = 0.1 g = S1.grids() -print g[0] -print g[1] +print(g[0]) +print(g[1]) # We observe that the center has in fact moved one pixel up and one left. # The :py:func:`~ptypy.core.classes.Storage.center` property uses pixel @@ -98,15 +98,15 @@ # which shifts the center to a new position. S1.origin -= 0.12 y, x = S1.grids() -print y -print x -print S1.center +print(y) +print(x) +print(S1.center) # Up until now our actual *data* numpy array located at ``S1.data`` is # still filled with ones, i.e. flat. We can use # :any:`Storage.fill` to fill that container with an array. S1.fill(x+y) -print S1.data +print(S1.data) # We can have visual check on the data using # :py:func:`~ptypy.utils.plot_utils.plot_storage` @@ -122,7 +122,7 @@ # :any:`View` instance. The View invocation is a bit more complex. from ptypy.core.classes import DEFAULT_ACCESSRULE ar = DEFAULT_ACCESSRULE.copy() -print ar +print(ar) # Let's say we want a 4x4 view on Storage ``S1`` around the origin. # We set @@ -137,35 +137,35 @@ V1 = View(C1, ID=None, accessrule=ar) # We see that a number of the accessrule items appear in the View now. 
-print V1.shape -print V1.coord -print V1.storageID +print(V1.shape) +print(V1.coord) +print(V1.storageID) # A few others were set by the automatic update of Storage ``S1``. -print V1.psize -print V1.storage +print(V1.psize) +print(V1.storage) # The update also set new attributes of the View which all start with # a lower ``d`` and are locally cached information about data access. -print V1.dlayer, V1.dlow, V1.dhigh +print(V1.dlayer, V1.dlow, V1.dhigh) # Finally, we can retrieve the data subset # by applying the View to the storage. data = S1[V1] -print data +print(data) # It does not matter if we apply the View to Storage ``S1`` or the # container ``C1``, or use the View internal # View.\ :py:meth:`~ptypy.core.classes.View.data` property. -print np.allclose(data, C1[V1]) -print np.allclose(data, V1.data) +print(np.allclose(data, C1[V1])) +print(np.allclose(data, V1.data)) # The first access yielded a similar result because the # :py:attr:`~ptypy.core.classes.View.storageID` ``S0000`` is in ``C1`` # and the second acces method worked because it uses the View's # :py:attr:`~ptypy.core.classes.View.storage` attribute. -print V1.storage is S1 -print V1.storageID in C1.S.keys() +print(V1.storage is S1) +print(V1.storageID in C1.S.keys()) # We observe that the coordinate [0.0,0.0] is not part of the grid # in S1 anymore. Consequently, the View was put as close to [0.0,0.0] @@ -174,7 +174,7 @@ # :py:meth:`~ptypy.core.classes.View.dcoord` is the closest data coordinate. # The difference is held by :py:meth:`~ptypy.core.classes.View.sp` such # that a subpixel correction may be applied if needed (future release) -print V1.dcoord, V1.pcoord, V1.sp +print(V1.dcoord, V1.pcoord, V1.sp) # .. note:: # Please note that we cannot guarantee any API stability for other @@ -187,32 +187,32 @@ # unwanted feedback loops. V1.coord = (0.08, 0.08) S1.update_views(V1) -print V1.dcoord, V1.pcoord, V1.sp +print(V1.dcoord, V1.pcoord, V1.sp) # We observe that the high range limit of the View is close to the border # of the data buffer. -print V1.dhigh +print(V1.dhigh) # What happens if we push the coordinate further? V1.coord = (0.28, 0.28) S1.update_views(V1) -print V1.dhigh +print(V1.dhigh) # Now the higher range limit of the View is off bounds for sure. # Applying this View to the Storage may lead to undesired behavior, i.e. # array concatenation or data access errors. -print S1[V1] -print S1[V1].shape, V1.shape +print(S1[V1]) +print(S1[V1].shape, V1.shape) # One important feature of the :any:`Storage` class is that it can detect # all out-of-bounds accesses and reformat the data buffer accordingly. # A simple call to # *Storage*.\ :py:meth:`~ptypy.core.classes.Storage.reformat` should do. -print S1.shape +print(S1.shape) mn = S1[V1].mean() S1.fill_value = mn S1.reformat() -print S1.shape +print(S1.shape) # Oh no, the Storage data buffer has shrunk! But don't worry, that is # intended behavior. A call to *.reformat()* crops and pads the data @@ -226,7 +226,7 @@ V2 = View(C1, ID=None, accessrule=ar2) S1.fill_value = 0. S1.reformat() -print S1.shape +print(S1.shape) # Ok, we note that the buffer has grown in size. Now, we give the new # View some copied values of the other view to make the View appear @@ -262,6 +262,6 @@ V3=View(C1, ID=None, accessrule=ar) # Finally we have a look at the mischief we managed so far. 
-print C1.formatted_report() +print(C1.formatted_report()) diff --git a/tutorial/scipt2rst.py b/tutorial/scipt2rst.py index 8fc2fb9bd..4c0f26b14 100644 --- a/tutorial/scipt2rst.py +++ b/tutorial/scipt2rst.py @@ -1,12 +1,12 @@ import sys -import StringIO +import io import contextlib """ @contextlib.contextmanager def stdoutIO(stdout=None): old = sys.stdout if stdout is None: - stdout = StringIO.StringIO() + stdout = io.StringIO() sys.stdout = stdout yield stdout sys.stdout = old diff --git a/tutorial/simupod.py b/tutorial/simupod.py index 627ded6aa..c1bab0ccc 100644 --- a/tutorial/simupod.py +++ b/tutorial/simupod.py @@ -58,11 +58,11 @@ class Base2(Base): # example, we find forward and backward propagators at ``G.propagator.fw`` # and ``G.propagator.bw``. It has also calculated the appropriate # pixel size in the sample plane (aka resolution), -print G.resolution +print(G.resolution) # which sets the shifting frame to be of the following size: fsize = G.shape * G.resolution -print "%.2fx%.2fmm" % tuple(fsize*1e3) +print("%.2fx%.2fmm" % tuple(fsize*1e3)) # Create probing illumination # --------------------------- @@ -111,7 +111,7 @@ class Base2(Base): # photons to 1 billion for pp in [pr, pr2, pr3]: pp.data *= np.sqrt(1e9/np.sum(pp.data*pp.data.conj())) -print u.norm2(pr.data) +print(u.norm2(pr.data)) # and we quickly check if the propagation works. ill = pr.data[0] @@ -164,7 +164,7 @@ class Base2(Base): # include all Views. Conveniently, this can be initiated from the top # with Container.\ :py:meth:`~ptypy.core.classes.Container.reformat` P.obj.reformat() -print P.obj.formatted_report() +print(P.obj.formatted_report()) # At last we fill the object Storage ``S00`` with a complex transmission. # Again there is a convenience transmission function in the resources @@ -179,8 +179,8 @@ class Base2(Base): # A single coherent propagation in |ptypy| is represented by # an instance of the :py:class:`~ptypy.core.classes.POD` class. -print POD.__doc__ -print POD.__init__.__doc__ +print(POD.__doc__) +print(POD.__init__.__doc__) # For creating a single POD we need a # :py:class:`~ptypy.core.classes.View` to *probe*, *object*, @@ -193,7 +193,7 @@ class Base2(Base): P.mask = Container(P, 'Cmask', data_type='real') # We start with one POD and its views. -objviews = P.obj.views.values() +objviews = list(P.obj.views.values()) obview = objviews[0] # We construct the probe View. @@ -237,7 +237,7 @@ class Base2(Base): # The result of the calculation above is stored in the appropriate # storage of ``P.exit``. # Therefore we can use this command to plot the result. -exit_storage = P.exit.storages.values()[0] +exit_storage = list(P.exit.storages.values())[0] fig = u.plot_storage(exit_storage, 6) fig.savefig('%s_%d.png' % (scriptname, fig.number), dpi=300) # Simulated exit wave using a pod @@ -246,7 +246,7 @@ class Base2(Base): pod.diff = np.abs(pod.fw(pod.exit))**2 # The result is stored in the diffraction container. -diff_storage = P.diff.storages.values()[0] +diff_storage = list(P.diff.storages.values())[0] fig = u.plot_storage(diff_storage, 7, modulus='log') fig.savefig('%s_%d.png' % (scriptname, fig.number), dpi=300) diff --git a/tutorial/subclassptyscan.py b/tutorial/subclassptyscan.py index a592129ae..b26c98eed 100644 --- a/tutorial/subclassptyscan.py +++ b/tutorial/subclassptyscan.py @@ -204,7 +204,7 @@ def load_positions(self): # The last step is to overwrite the actual loading of data. 
# Loading happens (MPI-compatible) in # :py:meth:`~ptypy.core.data.PtyScan.load` -print PtyScan.load.__doc__ +print(PtyScan.load.__doc__) # Load seems a bit more complex than ``self.load_positions`` for its # return values. However, we can opt-out of providing weights (masks) @@ -281,8 +281,8 @@ def load(self, indices): # with :py:func:`ptypy.utils.verbose.report`. The information is # concatenated, but the length of iterables or dicts is always indicated # in parantheses. -print u.verbose.report(NPS.auto(80), noheader=True) -print u.verbose.report(NPS.auto(80), noheader=True) +print(u.verbose.report(NPS.auto(80), noheader=True)) +print(u.verbose.report(NPS.auto(80), noheader=True)) # We observe the second chunk was not 80 frames deep but 34 # as we only had 114 frames of data. @@ -302,5 +302,5 @@ def load(self, indices): # We can analyse the saved ``npy.ptyd`` with # :py:func:`~ptypy.io.h5IO.h5info` from ptypy.io import h5info -print h5info(NPS.info.dfile) +print(h5info(NPS.info.dfile))
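A note on point 2 of the PtyPy 0.4 release notes above: the annealing approach of A. M. Maiden et al. (Ultramicroscopy 120, 2012) refines each scan position by testing a few random trial shifts and keeping the shift whose modelled diffraction best matches the measured frame. The following is a minimal, illustrative numpy sketch of that idea only, not ptypy's engine code; `model_intensity` and `measured` are hypothetical stand-ins for a forward simulation and the recorded diffraction frames.

```python
import numpy as np

def anneal_positions(positions, measured, model_intensity,
                     max_shift=2.0, trials=4, rng=None):
    # Illustrative position refinement in the spirit of Maiden et al. (2012).
    # positions:       (N, 2) array of scan coordinates (e.g. in pixels)
    # measured:        sequence of N recorded diffraction intensities
    # model_intensity: hypothetical forward model, returns the simulated
    #                  intensity for a trial position
    rng = np.random.default_rng() if rng is None else rng
    refined = np.array(positions, dtype=float)
    for i in range(len(refined)):
        best_pos = refined[i].copy()
        best_err = np.sum((model_intensity(best_pos) - measured[i]) ** 2)
        for _ in range(trials):
            trial = refined[i] + rng.uniform(-max_shift, max_shift, size=2)
            err = np.sum((model_intensity(trial) - measured[i]) ** 2)
            if err < best_err:  # keep the shift that explains the data best
                best_err, best_pos = err, trial
        refined[i] = best_pos
    return refined
```

In the actual engines the trial amplitude is typically reduced from iteration to iteration (hence "annealing"), and the comparison uses the engine's Fourier error metric rather than the plain squared difference used here.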