diff --git a/.pylintrc b/.pylintrc index 6bb0238..f7eb5b1 100644 --- a/.pylintrc +++ b/.pylintrc @@ -63,87 +63,12 @@ confidence= # --enable=similarities". If you want to run only the classes checker, but have # no Warning level messages displayed, use "--disable=all --enable=classes # --disable=W". -disable=print-statement, - parameter-unpacking, - unpacking-in-except, - old-raise-syntax, - backtick, - long-suffix, - old-ne-operator, - old-octal-literal, - import-star-module-level, - non-ascii-bytes-literal, - raw-checker-failed, - bad-inline-option, - locally-disabled, - file-ignored, +disable=file-ignored, suppressed-message, useless-suppression, - deprecated-pragma, use-symbolic-message-instead, - apply-builtin, - basestring-builtin, - buffer-builtin, - cmp-builtin, - coerce-builtin, - execfile-builtin, - file-builtin, - long-builtin, - raw_input-builtin, - reduce-builtin, - standarderror-builtin, - unicode-builtin, - xrange-builtin, - coerce-method, - delslice-method, - getslice-method, - setslice-method, - no-absolute-import, - old-division, - dict-iter-method, - dict-view-method, - next-method-called, - metaclass-assignment, - indexing-exception, - raising-string, - reload-builtin, - oct-method, - hex-method, - nonzero-method, - cmp-method, - input-builtin, - round-builtin, - intern-builtin, - unichr-builtin, - map-builtin-not-iterating, - zip-builtin-not-iterating, - range-builtin-not-iterating, - filter-builtin-not-iterating, - using-cmp-argument, - eq-without-hash, - div-method, - idiv-method, - rdiv-method, - exception-message-attribute, - invalid-str-codec, - sys-max-int, - bad-python3-import, - deprecated-string-function, - deprecated-str-translate-call, - deprecated-itertools-function, - deprecated-types-field, - next-method-defined, - dict-items-not-iterating, - dict-keys-not-iterating, - dict-values-not-iterating, - deprecated-operator-function, - deprecated-urllib-function, - xreadlines-attribute, - deprecated-sys-function, - exception-escape, - comprehension-escape, - fixme, # disabled as TODOs would show up as warnings - logging-fstring-interpolation # fstrings inside logging + fixme, # disabled as TODOs would show up as warnings + logging-fstring-interpolation # fstrings inside logging # Enable the message, report, category or checker with the given id(s). You can # either give multiple identifier separated by comma (,) or put this option @@ -331,12 +256,13 @@ max-line-length=100 # Maximum number of lines in a module. max-module-lines=1000 +; Pylint 2.6: The no-space-check option has been removed. # List of optional constructs for which whitespace checking is disabled. `dict- # separator` is used to allow tabulation in dicts, etc.: {1 : 1,\n222: 2}. # `trailing-comma` allows a space between comma and closing bracket: (a, ). # `empty-line` allows space-only lines. -no-space-check=trailing-comma, - dict-separator +; no-space-check=trailing-comma, +; dict-separator # Allow the body of a class to be on the same line as the declaration if body # contains single statement.
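Editor's note, not part of the patch: the trimmed disable list above assumes Pylint >= 2.6, where the old Python 2/3 porting checks and the no-space-check option were removed. A minimal sketch for confirming the rcfile still parses cleanly under the locally installed Pylint; the package path pyEPR and the exact message names are assumptions about your local setup:

import subprocess

# Lint with the updated rcfile; any message name left in `disable=` that the
# installed Pylint does not recognize is flagged as bad-option-value (E0012,
# older Pylint) or unknown-option-value (W0012, newer Pylint).
proc = subprocess.run(
    ["pylint", "--rcfile=.pylintrc", "pyEPR"],
    capture_output=True,
    text=True,
)
for line in proc.stdout.splitlines():
    if "bad-option-value" in line or "unknown-option-value" in line:
        print(line)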
diff --git a/.readthedocs.yml b/.readthedocs.yml new file mode 100644 index 0000000..ed1d30b --- /dev/null +++ b/.readthedocs.yml @@ -0,0 +1,31 @@ +# .readthedocs.yml +# Read the Docs configuration file for Sphinx projects +# See https://docs.readthedocs.io/en/stable/config-file/v2.html for details + +# Required +version: 2 + +# Set the OS, Python version and other tools you might need +build: + os: ubuntu-22.04 + tools: + python: "3.8" + +# Build documentation in the docs/source/ directory with Sphinx +sphinx: + configuration: docs/source/conf.py + +# Optionally build your docs in additional formats such as PDF and ePub +# formats: # - pdf + +# Declare the Python requirements required to build your docs # See https://docs.readthedocs.io/en/stable/guides/reproducible-builds.html +python: + install: + - requirements: requirements.txt + - path: . # - method: pip # path: . # extra_requirements: # - docs \ No newline at end of file diff --git a/_tutorial_notebooks/1A Startup_example_hfss_files/startup_example_script.py b/_tutorial_notebooks/1A Startup_example_hfss_files/startup_example_script.py index 0467682..ab3e408 100644 --- a/_tutorial_notebooks/1A Startup_example_hfss_files/startup_example_script.py +++ b/_tutorial_notebooks/1A Startup_example_hfss_files/startup_example_script.py @@ -15,24 +15,37 @@ from pyEPR import ProjectInfo, DistributedAnalysis, QuantumAnalysis # 1. Project and design. Open link to HFSS controls. -project_info = ProjectInfo('c:/sims', - project_name = 'two_qubit_one_cavity', # Project file name (string). "None" will get the current active one. - design_name = 'Alice_Bob' # Design name (string). "None" will get the current active one. - ) +project_info = ProjectInfo( + "c:/sims", + project_name="two_qubit_one_cavity", # Project file name (string). "None" will get the current active one. + design_name="Alice_Bob", # Design name (string). "None" will get the current active one. +) # 2a. Junctions. Specify junctions in HFSS model -project_info.junctions['jAlice'] = {'Lj_variable':'LJAlice', 'rect':'qubitAlice', 'line': 'alice_line', 'length':0.0001} -project_info.junctions['jBob'] = {'Lj_variable':'LJBob', 'rect':'qubitBob', 'line': 'bob_line', 'length':0.0001} +project_info.junctions["jAlice"] = { + "Lj_variable": "LJAlice", + "rect": "qubitAlice", + "line": "alice_line", + "length": 0.0001, +} +project_info.junctions["jBob"] = { + "Lj_variable": "LJBob", + "rect": "qubitBob", + "line": "bob_line", + "length": 0.0001, +} # 2b. Dissipative elements. -project_info.dissipative['dielectrics_bulk'] = ['si_substrate'] # supply names here, there are more options in project_info.dissipative. -project_info.dissipative['dielectric_surfaces'] = ['interface'] +project_info.dissipative["dielectrics_bulk"] = [ + "si_substrate" +] # supply names here, there are more options in project_info.dissipative. +project_info.dissipative["dielectric_surfaces"] = ["interface"] # 3. Run analysis epr_hfss = DistributedAnalysis(project_info) epr_hfss.do_EPR_analysis() # 4.
Hamiltonian analysis -epr = QuantumAnalysis(epr_hfss.data_filename) -epr.analyze_all_variations(cos_trunc = 8, fock_trunc = 7) -epr.plot_hamiltonian_results() \ No newline at end of file +epr = QuantumAnalysis(epr_hfss.data_filename) +epr.analyze_all_variations(cos_trunc=8, fock_trunc=7) +epr.plot_hamiltonian_results() diff --git a/pyEPR/__config_user_old.py b/pyEPR/__config_user_old.py index 1e90320..4f7fe1d 100644 --- a/pyEPR/__config_user_old.py +++ b/pyEPR/__config_user_old.py @@ -12,50 +12,40 @@ from . import Dict config = Dict( - # Folder to save result data to. # PLEASE CHANGE THIS - root_dir=r'C:\data-pyEPR', - + root_dir=r"C:\data-pyEPR", # Loss properties of various materials and surfaces dissipation=Dict( - ################################################## # Bulk dielectric # refs: https://arxiv.org/abs/1308.1743 # http://arxiv.org/pdf/1509.01854.pdf tan_delta_sapp=1e-6, # tan(delta) for bulk surface - epsi=10, # dielectric - + epsi=10, # dielectric ################################################## # Surface dielectric # ref: http://arxiv.org/pdf/1509.01854.pdf - # Surface dielectric (dirt) thickness # units: meters th=3e-9, - # Surface dielectric (dirt) constant # units: relative permittivity eps_r=10, - # Surface dielectric (dirt) loss tangent # units: unitless, since this is tan(delta) tan_delta_surf=1e-3, - ################################################## # Thin-film surface loss # units: Ohms # ref: https://arxiv.org/abs/1308.1743 surface_Rs=250e-9, - ################################################## # Seam current loss # units: per Ohm meter; i.e., seam conductance # ref: http://arxiv.org/pdf/1509.01119.pdf gseam=1.0e3, ), - ansys=Dict( # method_calc_P_mj sets the method used to calculate the participation ratio in eigenmode. # Valid values: @@ -66,16 +56,14 @@ # Current is integrated average of J_surf by default: (zkm 3/29/16) # Will calculate the Pj matrix for the selected modes for the given junctions # junc_rect array & length of junctions - method_calc_P_mj='line_voltage', - + method_calc_P_mj="line_voltage", ), - plotting=Dict( # Default color map for plotting. Better if made into a string name # taken from matplotlib.cm - default_color_map='viridis', # pylint: disable=no-member + default_color_map="viridis", # pylint: disable=no-member ), ) -__all__ = ['config'] +__all__ = ["config"] diff --git a/pyEPR/__init__.py b/pyEPR/__init__.py index 34253b7..27349a4 100644 --- a/pyEPR/__init__.py +++ b/pyEPR/__init__.py @@ -68,7 +68,7 @@ # pylint: disable= wrong-import-position, invalid-name # Compatibility with python 2.7 and 3 -#from __future__ import division, print_function, absolute_import +# from __future__ import division, print_function, absolute_import import logging import warnings @@ -82,14 +82,18 @@ __author__ = "Zlatko Minev, Zaki Leghtas, and the pyEPR team" __copyright__ = "Copyright 2015-2020, pyEPR team" __credits__ = [ - "Zlatko Minev", "Zaki Leghtas,", "Phil Rheinhold", "Asaf Diringer", - "Will Livingston", "Steven Touzard" + "Zlatko Minev", + "Zaki Leghtas", + "Phil Reinhold", + "Asaf Diringer", + "Will Livingston", + "Steven Touzard", ] __license__ = "BSD-3-Clause" __version__ = "0.9.0" __maintainer__ = "Zlatko K.
Minev and Asaf Diringer" __email__ = "zlatko.minev@aya.yale.edu" -__url__ = r'https://github.com/zlatko-minev/pyEPR' +__url__ = r"https://github.com/zlatko-minev/pyEPR" __status__ = "Dev-Production" ############################################################################## @@ -100,9 +104,10 @@ ############################################################################## # Set up logging -- only on first loading of module, not on reloading. -logger = logging.getLogger('pyEPR') # singleton +logger = logging.getLogger("pyEPR") # singleton if not len(logger.handlers): from .toolbox._logging import set_up_logger + set_up_logger(logger) del set_up_logger @@ -112,13 +117,15 @@ try: import pandas as pd - warnings.filterwarnings('ignore', - category=pd.io.pytables.PerformanceWarning) + + warnings.filterwarnings("ignore", category=pd.io.pytables.PerformanceWarning) del pd except (ImportError, ModuleNotFoundError): if config.internal.warn_missing_import: - logger.warning("IMPORT WARNING: `pandas` python package not found. %s", - config.internal.error_msg_missing_import) + logger.warning( + "IMPORT WARNING: `pandas` python package not found. %s", + config.internal.error_msg_missing_import, + ) # Check for a few usually troublesome packages if config.internal.warn_missing_import: @@ -126,16 +133,20 @@ # Check for qutip try: import qutip + del qutip except (ImportError, ModuleNotFoundError): logger.warning( """IMPORT WARNING: `qutip` package not found. Numerical diagonalization will not work. Please install, e.g.: $ conda install -c conda-forge qutip - %s""", config.internal.error_msg_missing_import) + %s""", + config.internal.error_msg_missing_import, + ) try: import pythoncom + del pythoncom except (ImportError, ModuleNotFoundError): logger.warning( @@ -144,27 +155,34 @@ It is used in communicating with HFSS on PCs. If you wish to do this, please set it up. For Linux, check the HFSS python linux files for the com module used. It is equivalent, and can be used just as well. - %s""", config.internal.error_msg_missing_import) + %s""", + config.internal.error_msg_missing_import, + ) try: from win32com.client import Dispatch, CDispatch + del Dispatch del CDispatch except (ImportError, ModuleNotFoundError): logger.warning( """IMPORT WARNING: Could not load from 'win32com.client'. The communication to hfss won't work. If you want to use it, you need to set it up. - %s""", config.internal.error_msg_missing_import) + %s""", + config.internal.error_msg_missing_import, + ) try: import pint # units + del pint except (ImportError, ModuleNotFoundError): logger.error( """IMPORT ERROR: Python package 'pint' could not be loaded. It is used in communicating with HFSS. Try: $ conda install -c conda-forge pint \n%s""", - config.internal.error_msg_missing_import) + config.internal.error_msg_missing_import, + ) # remove unused del Path, warnings, logging @@ -178,25 +196,31 @@ from . 
import core from .ansys import parse_units, parse_units_user, parse_entry -from .core import ProjectInfo, DistributedAnalysis, QuantumAnalysis,\ - Project_Info, pyEPR_HFSSAnalysis, pyEPR_Analysis # names to be deprecated +from .core import ( + ProjectInfo, + DistributedAnalysis, + QuantumAnalysis, + Project_Info, + pyEPR_HFSSAnalysis, + pyEPR_Analysis, +) # names to be deprecated __all__ = [ - 'logger', - 'config', - 'toolbox', - 'calcs', - 'ansys', - 'core', - 'ProjectInfo', - 'DistributedAnalysis', - 'QuantumAnalysis', - 'Project_Info', - 'pyEPR_HFSSAnalysis', - 'pyEPR_Analysis', # names to be deprecated - 'parse_units', - 'parse_units_user', - 'parse_entry' + "logger", + "config", + "toolbox", + "calcs", + "ansys", + "core", + "ProjectInfo", + "DistributedAnalysis", + "QuantumAnalysis", + "Project_Info", + "pyEPR_HFSSAnalysis", + "pyEPR_Analysis", # names to be deprecated + "parse_units", + "parse_units_user", + "parse_entry", ] # TODO: Add "about" method. Add to tutorial diff --git a/pyEPR/_config_default.py b/pyEPR/_config_default.py index 9438672..a0915cd 100644 --- a/pyEPR/_config_default.py +++ b/pyEPR/_config_default.py @@ -15,15 +15,13 @@ from . import Dict # If we are reloading the package, then config will already be defined, then do not overwrite it. -__config_defined__ = 'config' in locals() +__config_defined__ = "config" in locals() -config = Dict( # pylint: disable=invalid-name - +config = Dict( # pylint: disable=invalid-name # Folder to save result data to. - root_dir=r'C:\data-pyEPR', - save_format=r'%Y-%m-%d %H-%M-%S', - + root_dir=r"C:\data-pyEPR", + save_format=r"%Y-%m-%d %H-%M-%S", ansys=Dict( # method_calc_P_mj sets the method used to calculate the participation ratio in eigenmode. # Valid values: @@ -34,14 +32,11 @@ # Current is integrated average of J_surf by default: (zkm 3/29/16) # Will calculate the Pj matrix for the selected modes for the given junctions # junc_rect array & length of junctions - method_calc_P_mj='line_voltage', - + method_calc_P_mj="line_voltage", # To save or not the mesh statistics from an HFSS run save_mesh_stats=True, ), - - epr = Dict( - + epr=Dict( # Define the participation renormalization method # False : no extra renormalization to enforce # can be more problematic for large pj, when sim isn't well converged @@ -50,82 +45,64 @@ # 2 : use enforcement of U_J_total to be U_mode-U_H (i.e., 1) # only when the total participation is above a certain threshold # preferred method. 
- renorm_pj = 2, + renorm_pj=2, ), - # Loss properties of various materials and surfaces dissipation=Dict( - ################################################## # Bulk dielectric # refs: https://arxiv.org/abs/1308.1743 # http://arxiv.org/pdf/1509.01854.pdf tan_delta_sapp=1e-6, # tan(delta) for bulk surface - epsi=10, # dielectric - + epsi=10, # dielectric ################################################## # Surface dielectric # ref: http://arxiv.org/pdf/1509.01854.pdf - # Surface dielectric (dirt) thickness # units: meters th=3e-9, - # Surface dielectric (dirt) constant # units: relative permittivity eps_r=10, - # Surface dielectric (dirt) loss tangent # units: unitless, since this is tan(delta) tan_delta_surf=1e-3, - ################################################## # Thin-film surface loss # units: Ohms # ref: https://arxiv.org/abs/1308.1743 surface_Rs=250e-9, - ################################################## # Seam current loss # units: per Ohm meter; i.e., seam conductance # ref: http://arxiv.org/pdf/1509.01119.pdf gseam=1.0e3, ), - plotting=Dict( # Default color map for plotting. Better if made into a string name # taken from matplotlib.cm - default_color_map='viridis', # pylint: disable=no-member + default_color_map="viridis", # pylint: disable=no-member ), - # Not to be used by the user. Just internal internal=Dict( - # Are we using ipython ipython=None, - # Error message for loading packages error_msg_missing_import="""\N{face with head-bandage} If you need a part of pyEPR that uses this package, then please install it. Then add it to the system path (if needed). See online setup instructions at http://www.github.com/zlatko-minev/pyEPR""", - # Warn on missing import warn_missing_import=False, ), - # Logging log=Dict( - # '%(name)s - %(levelname)s - %(message)s\n ::%(pathname)s:%(lineno)d: %(funcName)s\n') - format='%(levelname)s %(asctime)s [%(funcName)s]: %(message)s', - - datefmt='%I:%M%p', #'%I:%M%p %Ss' - - level='INFO' - ) - + format="%(levelname)s %(asctime)s [%(funcName)s]: %(message)s", + datefmt="%I:%M%p", #'%I:%M%p %Ss' + level="INFO", + ), ) @@ -142,7 +119,7 @@ def is_using_ipython(): return False -def update_recursive(d:collections.abc.Mapping, u:collections.abc.Mapping): +def update_recursive(d: collections.abc.Mapping, u: collections.abc.Mapping): """Recursive update of dictionaries. Arguments: @@ -159,6 +136,7 @@ def update_recursive(d:collections.abc.Mapping, u:collections.abc.Mapping): d[k] = v return d + def get_config(): """Returns the config pointer. @@ -172,15 +150,16 @@ def get_config(): Dict : the config dictionary """ if __config_defined__: - #print('Config is already defined.') # not sure we ever make it here + # print('Config is already defined.') # not sure we ever make it here return config else: # Config is only loaded for the first time, set it up. - #print('First time load of config') + # print('First time load of config') # Update with user config from . import _config_user + _config = update_recursive(config, _config_user.config) # Add to config any bootup params @@ -189,4 +168,4 @@ def get_config(): return config -__all__ = ['get_config'] +__all__ = ["get_config"] diff --git a/pyEPR/_config_user.py b/pyEPR/_config_user.py index 932bcd9..035b9c7 100644 --- a/pyEPR/_config_user.py +++ b/pyEPR/_config_user.py @@ -17,67 +17,48 @@ from . import Dict -config = Dict( # pylint: disable=invalid-name - +config = Dict( # pylint: disable=invalid-name # Folder to save result data to. 
# PLEASE CHANGE THIS - root_dir=r'C:\data-pyEPR', # Not all machines have a D drive so substituting D with C here - + root_dir=r"C:\data-pyEPR", # Not all machines have a D drive so substituting D with C here # Loss properties of various materials and surfaces dissipation=Dict( - ################################################## # Bulk dielectric # refs: https://arxiv.org/abs/1308.1743 # http://arxiv.org/pdf/1509.01854.pdf tan_delta_sapp=1e-6, # tan(delta) for bulk surface - epsi=10, # dielectric - + epsi=10, # dielectric ################################################## # Surface dielectric # ref: http://arxiv.org/pdf/1509.01854.pdf - # Surface dielectric (dirt) thickness # units: meters th=3e-9, - # Surface dielectric (dirt) constant # units: relative permittivity eps_r=10, - # Surface dielectric (dirt) loss tangent # units: unitless, since this is tan(delta) tan_delta_surf=1e-3, - ################################################## # Surface object specific dielectric properties. # These will override ones above when applicable dielectric_surfaces=Dict( - trace=Dict( - tan_delta_surf=0.001, - th=5e-9, - eps_r=10 - ), - gap=Dict( - tan_delta_surf=0.001, - th=2e-9, - eps_r=10 - ) + trace=Dict(tan_delta_surf=0.001, th=5e-9, eps_r=10), + gap=Dict(tan_delta_surf=0.001, th=2e-9, eps_r=10), ), - ################################################## # Thin-film surface loss # units: Ohms # ref: https://arxiv.org/abs/1308.1743 surface_Rs=250e-9, - ################################################## # Seam current loss # units: per Ohm meter; i.e., seam conductance # ref: http://arxiv.org/pdf/1509.01119.pdf gseam=1.0e3, ), - ansys=Dict( # method_calc_P_mj sets the method used to calculate the participation ratio in eigenmode. # Valid values: @@ -88,16 +69,14 @@ # Current is integrated average of J_surf by default: (zkm 3/29/16) # Will calculate the Pj matrix for the selected modes for the given junctions # junc_rect array & length of junctions - method_calc_P_mj='line_voltage', - + method_calc_P_mj="line_voltage", ), - plotting=Dict( # Default color map for plotting. Better if made into a string name # taken from matplotlib.cm - default_color_map='viridis', # pylint: disable=no-member + default_color_map="viridis", # pylint: disable=no-member ), ) -__all__ = ['config'] +__all__ = ["config"] diff --git a/pyEPR/ansys.py b/pyEPR/ansys.py index f95759d..27ecae8 100644 --- a/pyEPR/ansys.py +++ b/pyEPR/ansys.py @@ -1,4 +1,4 @@ -''' +""" pyEPR.ansys 2014-present @@ -10,10 +10,10 @@ Originally contributed by Phil Reinhold. Developed further by Zlatko Minev, Zaki Leghtas, and the pyEPR team. For the base version of hfss.py, see https://github.com/PhilReinhold/pyHFSS -''' +""" # Python 2.7 and 3 compatibility -from __future__ import (division, print_function) +from __future__ import division, print_function from typing import List @@ -41,41 +41,37 @@ try: import pythoncom except (ImportError, ModuleNotFoundError): - pass #raise NameError ("pythoncom module not installed. Please install.") + pass # raise NameError ("pythoncom module not installed. Please install.") try: # TODO: Replace `win32com` with Linux compatible package. # See Ansys python files in IronPython internal. from win32com.client import Dispatch, CDispatch except (ImportError, ModuleNotFoundError): - pass #raise NameError ("win32com module not installed. Please install.") + pass # raise NameError ("win32com module not installed. 
Please install.") try: from pint import UnitRegistry + ureg = UnitRegistry() Q = ureg.Quantity except (ImportError, ModuleNotFoundError): - pass # raise NameError ("Pint module not installed. Please install.") + pass # raise NameError ("Pint module not installed. Please install.") ############################################################################## ### -BASIS_ORDER = { - "Zero Order": 0, - "First Order": 1, - "Second Order": 2, - "Mixed Order": -1 -} +BASIS_ORDER = {"Zero Order": 0, "First Order": 1, "Second Order": 2, "Mixed Order": -1} # UNITS # LENGTH_UNIT --- HFSS UNITS # #Assumed default input units for ansys hfss -LENGTH_UNIT = 'meter' +LENGTH_UNIT = "meter" # LENGTH_UNIT_ASSUMED --- USER UNITS # if a user inputs a blank number with no units in `parse_fix`, # we can assume the following using -LENGTH_UNIT_ASSUMED = 'mm' +LENGTH_UNIT_ASSUMED = "mm" def simplify_arith_expr(expr): @@ -123,10 +119,10 @@ def extract_value_dim(expr): def parse_entry(entry, convert_to_unit=LENGTH_UNIT): - ''' + """ Should take a list of tuple of list... of int, float or str... For iterables, returns lists - ''' + """ if not isinstance(entry, list) and not isinstance(entry, tuple): return extract_value_unit(entry, convert_to_unit) else: @@ -138,14 +134,14 @@ def parse_entry(entry, convert_to_unit=LENGTH_UNIT): def fix_units(x, unit_assumed=None): - ''' + """ Convert all numbers to string and append the assumed units if needed. For an iterable, returns a list - ''' + """ unit_assumed = LENGTH_UNIT_ASSUMED if unit_assumed is None else unit_assumed if isinstance(x, str): # Check if there are already units defined, assume of form 2.46mm or 2.0 or 4. - if x[-1].isdigit() or x[-1] == '.': # number + if x[-1].isdigit() or x[-1] == ".": # number return x + unit_assumed else: # units are already applied return x @@ -160,7 +156,7 @@ def fix_units(x, unit_assumed=None): def parse_units(x): - ''' + """ Convert number, string, and lists/arrays/tuples to numbers scaled in HFSS units. @@ -168,28 +164,27 @@ def parse_units(x): Assumes input units LENGTH_UNIT_ASSUMED = mm [USER UNITS] [USER UNITS] ----> [HFSS UNITS] - ''' + """ return parse_entry(fix_units(x)) def unparse_units(x): - ''' - Undo effect of parse_unit. + """ + Undo effect of parse_unit. - Converts to LENGTH_UNIT_ASSUMED = mm [USER UNITS] - Assumes input units LENGTH_UNIT = meters [HFSS UNITS] + Converts to LENGTH_UNIT_ASSUMED = mm [USER UNITS] + Assumes input units LENGTH_UNIT = meters [HFSS UNITS] - [HFSS UNITS] ----> [USER UNITS] - ''' - return parse_entry(fix_units(x, unit_assumed=LENGTH_UNIT), - LENGTH_UNIT_ASSUMED) + [HFSS UNITS] ----> [USER UNITS] + """ + return parse_entry(fix_units(x, unit_assumed=LENGTH_UNIT), LENGTH_UNIT_ASSUMED) def parse_units_user(x): - ''' - Convert from user assumed units to user assumed units - [USER UNITS] ----> [USER UNITS] - ''' + """ + Convert from user assumed units to user assumed units + [USER UNITS] ----> [USER UNITS] + """ return parse_entry(fix_units(x, LENGTH_UNIT_ASSUMED), LENGTH_UNIT_ASSUMED) @@ -255,9 +250,9 @@ def _add_release_fn(fn): def release(): - ''' + """ Release COM connection to Ansys. 
- ''' + """ global _release_fns for fn in _release_fns: fn() @@ -292,25 +287,27 @@ def make_str_prop(name, prop_tab=None, prop_server=None): def make_int_prop(name, prop_tab=None, prop_server=None): - return make_prop(name, - prop_tab=prop_tab, - prop_server=prop_server, - prop_args=["MustBeInt:=", True]) + return make_prop( + name, + prop_tab=prop_tab, + prop_server=prop_server, + prop_args=["MustBeInt:=", True], + ) def make_float_prop(name, prop_tab=None, prop_server=None): - return make_prop(name, - prop_tab=prop_tab, - prop_server=prop_server, - prop_args=["MustBeInt:=", False]) + return make_prop( + name, + prop_tab=prop_tab, + prop_server=prop_server, + prop_args=["MustBeInt:=", False], + ) def make_prop(name, prop_tab=None, prop_server=None, prop_args=None): - def set_prop(self, - value, - prop_tab=prop_tab, - prop_server=prop_server, - prop_args=prop_args): + def set_prop( + self, value, prop_tab=prop_tab, prop_server=prop_server, prop_args=prop_args + ): prop_tab = self.prop_tab if prop_tab is None else prop_tab prop_server = self.prop_server if prop_server is None else prop_server if isinstance(prop_tab, types.FunctionType): @@ -319,16 +316,19 @@ def set_prop(self, prop_server = prop_server(self) if prop_args is None: prop_args = [] - self.prop_holder.ChangeProperty([ - "NAME:AllTabs", + self.prop_holder.ChangeProperty( [ - "NAME:" + prop_tab, ["NAME:PropServers", prop_server], + "NAME:AllTabs", [ - "NAME:ChangedProps", - ["NAME:" + name, "Value:=", value] + prop_args - ] + "NAME:" + prop_tab, + ["NAME:PropServers", prop_server], + [ + "NAME:ChangedProps", + ["NAME:" + name, "Value:=", value] + prop_args, + ], + ], ] - ]) + ) def get_prop(self, prop_tab=prop_tab, prop_server=prop_server): prop_tab = self.prop_tab if prop_tab is None else prop_tab @@ -342,42 +342,40 @@ def get_prop(self, prop_tab=prop_tab, prop_server=prop_server): return property(get_prop, set_prop) -def set_property(prop_holder, - prop_tab, - prop_server, - name, - value, - prop_args=None): - ''' +def set_property(prop_holder, prop_tab, prop_server, name, value, prop_args=None): + """ More general non obj oriented, functional version prop_args = [] by default - ''' + """ if not isinstance(prop_server, list): prop_server = [prop_server] - return prop_holder.ChangeProperty([ - "NAME:AllTabs", + return prop_holder.ChangeProperty( [ - "NAME:" + prop_tab, ["NAME:PropServers", *prop_server], + "NAME:AllTabs", [ - "NAME:ChangedProps", - ["NAME:" + name, "Value:=", value] + (prop_args or []) - ] + "NAME:" + prop_tab, + ["NAME:PropServers", *prop_server], + [ + "NAME:ChangedProps", + ["NAME:" + name, "Value:=", value] + (prop_args or []), + ], + ], ] - ]) + ) class HfssApp(COMWrapper): - def __init__(self, ProgID='AnsoftHfss.HfssScriptInterface'): - ''' - Connect to IDispatch-based COM object. - Parameter is the ProgID or CLSID of the COM object. - This is found in the regkey. + def __init__(self, ProgID="AnsoftHfss.HfssScriptInterface"): + """ + Connect to IDispatch-based COM object. + Parameter is the ProgID or CLSID of the COM object. + This is found in the regkey. 
- Version changes for Ansys HFSS for the main object - v2016 - 'Ansoft.ElectronicsDesktop' - v2017 and subsequent - 'AnsoftHfss.HfssScriptInterface' + Version changes for Ansys HFSS for the main object + v2016 - 'Ansoft.ElectronicsDesktop' + v2017 and subsequent - 'AnsoftHfss.HfssScriptInterface' - ''' + """ super(HfssApp, self).__init__() self._app = Dispatch(ProgID) @@ -451,7 +449,7 @@ def new_project(self): return HfssProject(self, self._desktop.NewProject()) def open_project(self, path): - ''' returns error if already open ''' + """returns error if already open""" return HfssProject(self, self._desktop.OpenProject(path)) def set_active_project(self, name): @@ -491,7 +489,7 @@ def __init__(self, desktop, project): super(HfssProject, self).__init__() self.parent = desktop self._project = project - #self.name = project.GetName() + # self.name = project.GetName() self._ansys_version = self.parent.version def close(self): @@ -522,7 +520,7 @@ def rename_design(self, design, rename): if design in self.get_designs(): design.rename_design(design.name, rename) else: - raise ValueError('%s design does not exist' % design.name) + raise ValueError("%s design does not exist" % design.name) def duplicate_design(self, target, source): src_design = self.get_design(source) @@ -532,7 +530,7 @@ def get_variable_names(self): return [VariableString(s) for s in self._project.GetVariables()] def get_variables(self): - """ Returns the project variables only, which start with $. These are global variables. """ + """Returns the project variables only, which start with $. These are global variables.""" return { VariableString(s): self.get_variable_value(s) for s in self._project.GetVariables() @@ -542,20 +540,27 @@ def get_variable_value(self, name): return self._project.GetVariableValue(name) def create_variable(self, name, value): - self._project.ChangeProperty([ - "NAME:AllTabs", + self._project.ChangeProperty( [ - "NAME:ProjectVariableTab", - ["NAME:PropServers", "ProjectVariables"], + "NAME:AllTabs", [ - "Name:NewProps", + "NAME:ProjectVariableTab", + ["NAME:PropServers", "ProjectVariables"], [ - "NAME:" + name, "PropType:=", "VariableProp", - "UserDef:=", True, "Value:=", value - ] - ] + "Name:NewProps", + [ + "NAME:" + name, + "PropType:=", + "VariableProp", + "UserDef:=", + True, + "Value:=", + value, + ], + ], + ], ] - ]) + ) def set_variable(self, name, value): if name not in self._project.GetVariables(): @@ -568,16 +573,19 @@ def get_path(self): if self._project: return self._project.GetPath() else: - raise Exception('''Error: HFSS Project does not have a path. - Either there is no HFSS project open, or it is not saved.''') + raise Exception( + """Error: HFSS Project does not have a path. + Either there is no HFSS project open, or it is not saved.""" + ) def new_design(self, design_name, solution_type, design_type="HFSS"): design_name_int = increment_name( - design_name, [d.GetName() for d in self._project.GetDesigns()]) + design_name, [d.GetName() for d in self._project.GetDesigns()] + ) return HfssDesign( self, - self._project.InsertDesign(design_type, design_name_int, - solution_type, "")) + self._project.InsertDesign(design_type, design_name_int, solution_type, ""), + ) def get_design(self, name): return HfssDesign(self, self._project.GetDesign(name)) @@ -629,9 +637,9 @@ def __init__(self, project, design): self.solution_type = design.GetSolutionType() except Exception as e: logger.debug( - f'Exception occurred at design.GetSolutionType() {e}. 
Assuming Q3D design' + f"Exception occurred at design.GetSolutionType() {e}. Assuming Q3D design" ) - self.solution_type = 'Q3D' + self.solution_type = "Q3D" if design is None: return @@ -644,8 +652,7 @@ def __init__(self, project, design): self._modeler = design.SetActiveEditor("3D Modeler") self._optimetrics = design.GetModule("Optimetrics") self._mesh = design.GetModule("MeshSetup") - self.modeler = HfssModeler(self, self._modeler, self._boundaries, - self._mesh) + self.modeler = HfssModeler(self, self._modeler, self._boundaries, self._mesh) self.optimetrics = Optimetrics(self) def add_message(self, message: str, severity: int = 0): @@ -662,19 +669,31 @@ def add_message(self, message: str, severity: int = 0): def save_screenshot(self, path: str = None, show: bool = True): if not path: - path = Path().absolute() / 'ansys.png' # TODO find better + path = Path().absolute() / "ansys.png" # TODO find better self._modeler.ExportModelImageToFile( str(path), 0, 0, # can be 0 For the default, use 0, 0. For higher resolution, set desired and , for example for 8k export as: 7680, 4320. [ - "NAME:SaveImageParams", "ShowAxis:=", "True", "ShowGrid:=", - "True", "ShowRuler:=", "True", "ShowRegion:=", "Default", - "Selections:=", "", "Orientation:=", "" - ]) + "NAME:SaveImageParams", + "ShowAxis:=", + "True", + "ShowGrid:=", + "True", + "ShowRuler:=", + "True", + "ShowRegion:=", + "Default", + "Selections:=", + "", + "Orientation:=", + "", + ], + ) if show: from IPython.display import display, Image + display(Image(str(path))) return path @@ -708,8 +727,7 @@ def get_setup(self, name=None): if name is None: name = setups[0] elif name not in setups: - raise EnvironmentError("Setup {} not found: {}".format( - name, setups)) + raise EnvironmentError("Setup {} not found: {}".format(name, setups)) if self.solution_type == "Eigenmode": return HfssEMSetup(self, name) @@ -720,102 +738,176 @@ def get_setup(self, name=None): elif self.solution_type == "Q3D": return AnsysQ3DSetup(self, name) - def create_q3d_setup(self, - freq_ghz=5., - name="Setup", - save_fields=False, - enabled=True, - max_passes=15, - min_passes=2, - min_converged_passes=2, - percent_error=0.5, - percent_refinement=30, - auto_increase_solution_order=True, - solution_order="High", - solver_type='Iterative'): + def create_q3d_setup( + self, + freq_ghz=5.0, + name="Setup", + save_fields=False, + enabled=True, + max_passes=15, + min_passes=2, + min_converged_passes=2, + percent_error=0.5, + percent_refinement=30, + auto_increase_solution_order=True, + solution_order="High", + solver_type="Iterative", + ): name = increment_name(name, self.get_setup_names()) - self._setup_module.InsertSetup("Matrix", [ - f"NAME:{name}", "AdaptiveFreq:=", f"{freq_ghz}GHz", "SaveFields:=", - save_fields, "Enabled:=", enabled, + self._setup_module.InsertSetup( + "Matrix", [ - "NAME:Cap", "MaxPass:=", max_passes, "MinPass:=", min_passes, - "MinConvPass:=", min_converged_passes, "PerError:=", - percent_error, "PerRefine:=", percent_refinement, - "AutoIncreaseSolutionOrder:=", auto_increase_solution_order, - "SolutionOrder:=", solution_order, "Solver Type:=", solver_type - ] - ]) + f"NAME:{name}", + "AdaptiveFreq:=", + f"{freq_ghz}GHz", + "SaveFields:=", + save_fields, + "Enabled:=", + enabled, + [ + "NAME:Cap", + "MaxPass:=", + max_passes, + "MinPass:=", + min_passes, + "MinConvPass:=", + min_converged_passes, + "PerError:=", + percent_error, + "PerRefine:=", + percent_refinement, + "AutoIncreaseSolutionOrder:=", + auto_increase_solution_order, + "SolutionOrder:=", + 
solution_order, + "Solver Type:=", + solver_type, + ], + ], + ) return AnsysQ3DSetup(self, name) - def create_dm_setup(self, - freq_ghz=1, - name="Setup", - max_delta_s=0.1, - max_passes=10, - min_passes=1, - min_converged=1, - pct_refinement=30, - basis_order=-1): + def create_dm_setup( + self, + freq_ghz=1, + name="Setup", + max_delta_s=0.1, + max_passes=10, + min_passes=1, + min_converged=1, + pct_refinement=30, + basis_order=-1, + ): name = increment_name(name, self.get_setup_names()) - self._setup_module.InsertSetup("HfssDriven", [ - "NAME:" + name, "Frequency:=", - str(freq_ghz) + "GHz", "MaxDeltaS:=", max_delta_s, - "MaximumPasses:=", max_passes, "MinimumPasses:=", min_passes, - "MinimumConvergedPasses:=", min_converged, "PercentRefinement:=", - pct_refinement, "IsEnabled:=", True, "BasisOrder:=", basis_order - ]) + self._setup_module.InsertSetup( + "HfssDriven", + [ + "NAME:" + name, + "Frequency:=", + str(freq_ghz) + "GHz", + "MaxDeltaS:=", + max_delta_s, + "MaximumPasses:=", + max_passes, + "MinimumPasses:=", + min_passes, + "MinimumConvergedPasses:=", + min_converged, + "PercentRefinement:=", + pct_refinement, + "IsEnabled:=", + True, + "BasisOrder:=", + basis_order, + ], + ) return HfssDMSetup(self, name) - def create_dt_setup(self, - freq_ghz=1, - name="Setup", - max_delta_s=0.1, - max_passes=10, - min_passes=1, - min_converged=1, - pct_refinement=30, - basis_order=-1): + def create_dt_setup( + self, + freq_ghz=1, + name="Setup", + max_delta_s=0.1, + max_passes=10, + min_passes=1, + min_converged=1, + pct_refinement=30, + basis_order=-1, + ): name = increment_name(name, self.get_setup_names()) - self._setup_module.InsertSetup("HfssDriven", [ - "NAME:" + name, "Frequency:=", - str(freq_ghz) + "GHz", "MaxDeltaS:=", max_delta_s, - "MaximumPasses:=", max_passes, "MinimumPasses:=", min_passes, - "MinimumConvergedPasses:=", min_converged, "PercentRefinement:=", - pct_refinement, "IsEnabled:=", True, "BasisOrder:=", basis_order - ]) + self._setup_module.InsertSetup( + "HfssDriven", + [ + "NAME:" + name, + "Frequency:=", + str(freq_ghz) + "GHz", + "MaxDeltaS:=", + max_delta_s, + "MaximumPasses:=", + max_passes, + "MinimumPasses:=", + min_passes, + "MinimumConvergedPasses:=", + min_converged, + "PercentRefinement:=", + pct_refinement, + "IsEnabled:=", + True, + "BasisOrder:=", + basis_order, + ], + ) return HfssDTSetup(self, name) - def create_em_setup(self, - name="Setup", - min_freq_ghz=1, - n_modes=1, - max_delta_f=0.1, - max_passes=10, - min_passes=1, - min_converged=1, - pct_refinement=30, - basis_order=-1): + def create_em_setup( + self, + name="Setup", + min_freq_ghz=1, + n_modes=1, + max_delta_f=0.1, + max_passes=10, + min_passes=1, + min_converged=1, + pct_refinement=30, + basis_order=-1, + ): name = increment_name(name, self.get_setup_names()) - self._setup_module.InsertSetup("HfssEigen", [ - "NAME:" + name, "MinimumFrequency:=", - str(min_freq_ghz) + "GHz", "NumModes:=", n_modes, "MaxDeltaFreq:=", - max_delta_f, "ConvergeOnRealFreq:=", True, "MaximumPasses:=", - max_passes, "MinimumPasses:=", min_passes, - "MinimumConvergedPasses:=", min_converged, "PercentRefinement:=", - pct_refinement, "IsEnabled:=", True, "BasisOrder:=", basis_order - ]) + self._setup_module.InsertSetup( + "HfssEigen", + [ + "NAME:" + name, + "MinimumFrequency:=", + str(min_freq_ghz) + "GHz", + "NumModes:=", + n_modes, + "MaxDeltaFreq:=", + max_delta_f, + "ConvergeOnRealFreq:=", + True, + "MaximumPasses:=", + max_passes, + "MinimumPasses:=", + min_passes, + "MinimumConvergedPasses:=", + min_converged, + 
"PercentRefinement:=", + pct_refinement, + "IsEnabled:=", + True, + "BasisOrder:=", + basis_order, + ], + ) return HfssEMSetup(self, name) def delete_setup(self, name): if name in self.get_setup_names(): self._setup_module.DeleteSetups(name) - def delete_full_variation(self, - DesignVariationKey="All", - del_linked_data=False): + def delete_full_variation(self, DesignVariationKey="All", del_linked_data=False): """ DeleteFullVariation Use: Use to selectively make deletions or delete all solution data. @@ -847,42 +939,49 @@ def create_variable(self, name, value, postprocessing=False): else: variableprop = "VariableProp" - self._design.ChangeProperty([ - "NAME:AllTabs", + self._design.ChangeProperty( [ - "NAME:LocalVariableTab", - ["NAME:PropServers", "LocalVariables"], + "NAME:AllTabs", [ - "Name:NewProps", + "NAME:LocalVariableTab", + ["NAME:PropServers", "LocalVariables"], [ - "NAME:" + name, "PropType:=", variableprop, - "UserDef:=", True, "Value:=", value - ] - ] + "Name:NewProps", + [ + "NAME:" + name, + "PropType:=", + variableprop, + "UserDef:=", + True, + "Value:=", + value, + ], + ], + ], ] - ]) + ) - def _variation_string_to_variable_list(self, - variation_string: str, - for_prop_server=True): + def _variation_string_to_variable_list( + self, variation_string: str, for_prop_server=True + ): """Example: - Takes - "Cj='2fF' Lj='13.5nH'" - for for_prop_server=True into - [['NAME:Cj', 'Value:=', '2fF'], ['NAME:Lj', 'Value:=', '13.5nH']] - or for for_prop_server=False into - [['Cj', '2fF'], ['Lj', '13.5nH']] + Takes + "Cj='2fF' Lj='13.5nH'" + for for_prop_server=True into + [['NAME:Cj', 'Value:=', '2fF'], ['NAME:Lj', 'Value:=', '13.5nH']] + or for for_prop_server=False into + [['Cj', '2fF'], ['Lj', '13.5nH']] """ s = variation_string - s = s.split(' ') + s = s.split(" ") s = [s1.strip().strip("''").split("='") for s1 in s] if for_prop_server: local, project = [], [] for arr in s: - to_add = [f'NAME:{arr[0]}', "Value:=", arr[1]] - if arr[0][0] == '$': + to_add = [f"NAME:{arr[0]}", "Value:=", arr[1]] + if arr[0][0] == "$": project += [to_add] # global variable else: local += [to_add] # local variable @@ -903,27 +1002,32 @@ def set_variables(self, variation_string: str): assert isinstance(variation_string, str) content = ["NAME:ChangedProps"] - local, project = self._variation_string_to_variable_list( - variation_string) - #print('\nlocal=', local, '\nproject=', project) + local, project = self._variation_string_to_variable_list(variation_string) + # print('\nlocal=', local, '\nproject=', project) if len(project) > 0: - self._design.ChangeProperty([ - "NAME:AllTabs", + self._design.ChangeProperty( [ - "NAME:ProjectVariableTab", - ["NAME:PropServers", "ProjectVariables"], content + project + "NAME:AllTabs", + [ + "NAME:ProjectVariableTab", + ["NAME:PropServers", "ProjectVariables"], + content + project, + ], ] - ]) + ) if len(local) > 0: - self._design.ChangeProperty([ - "NAME:AllTabs", + self._design.ChangeProperty( [ - "NAME:LocalVariableTab", - ["NAME:PropServers", "LocalVariables"], content + local + "NAME:AllTabs", + [ + "NAME:LocalVariableTab", + ["NAME:PropServers", "LocalVariables"], + content + local, + ], ] - ]) + ) def set_variable(self, name: str, value: str, postprocessing=False): """Warning: THis is case sensitive, @@ -950,28 +1054,30 @@ def set_variable(self, name: str, value: str, postprocessing=False): return VariableString(name) def get_variable_value(self, name): - """ Can only access the design variables, i.e., the local ones - Cannot access the project (global) 
variables, which start with $. """ + """Can only access the design variables, i.e., the local ones + Cannot access the project (global) variables, which start with $.""" return self._design.GetVariableValue(name) def get_variable_names(self): - """ Returns the local design variables. - Does not return the project (global) variables, which start with $. """ + """Returns the local design variables. + Does not return the project (global) variables, which start with $.""" return [ - VariableString(s) for s in self._design.GetVariables() + - self._design.GetPostProcessingVariables() + VariableString(s) + for s in self._design.GetVariables() + + self._design.GetPostProcessingVariables() ] def get_variables(self): - """ Returns dictionary of local design variables and their values. - Does not return the project (global) variables and their values, - whose names start with $. """ - local_variables = self._design.GetVariables( - ) + self._design.GetPostProcessingVariables() + """Returns dictionary of local design variables and their values. + Does not return the project (global) variables and their values, + whose names start with $.""" + local_variables = ( + self._design.GetVariables() + self._design.GetPostProcessingVariables() + ) return {lv: self.get_variable_value(lv) for lv in local_variables} def copy_design_variables(self, source_design): - ''' does not check that variables are all present ''' + """does not check that variables are all present""" # don't care about values source_variables = source_design.get_variables() @@ -993,16 +1099,16 @@ def _evaluate_variable_expression(self, expr, units): except SyntaxError: return Q(expr).to(units).magnitude - sub_exprs = { - fs: self.get_variable_value(fs.name) - for fs in sexp.free_symbols - } + sub_exprs = {fs: self.get_variable_value(fs.name) for fs in sexp.free_symbols} return float( - sexp.subs({ - fs: self._evaluate_variable_expression(e, units) - for fs, e in sub_exprs.items() - })) + sexp.subs( + { + fs: self._evaluate_variable_expression(e, units) + for fs, e in sub_exprs.items() + } + ) + ) def eval_expr(self, expr, units="mm"): return str(self._evaluate_variable_expression(expr, units)) + units @@ -1011,7 +1117,7 @@ def Clear_Field_Clac_Stack(self): self._fields_calc.CalcStack("Clear") def clean_up_solutions(self): - self._design.DeleteFullVariation('All', True) # Delete existing solutions + self._design.DeleteFullVariation("All", True) # Delete existing solutions class HfssSetup(HfssPropertyObject): @@ -1041,13 +1147,13 @@ def __init__(self, design, setup: str): self._solutions = design._solutions self.name = setup self.solution_name = setup + " : LastAdaptive" - #self.solution_name_pass = setup + " : AdaptivePass" + # self.solution_name_pass = setup + " : AdaptivePass" self.prop_server = "AnalysisSetup:" + setup self.expression_cache_items = [] self._ansys_version = self.parent._ansys_version def analyze(self, name=None): - ''' + """ Use: Solves a single solution setup and all of its frequency sweeps. Command: Right-click a solution setup in the project tree, and then click Analyze on the shortcut menu. @@ -1058,14 +1164,14 @@ def analyze(self, name=None): Will block the until the analysis is completely done. Will raise a com_error if analysis is aborted in HFSS. - ''' + """ if name is None: name = self.name - logger.info(f'Analyzing setup {name}') + logger.info(f"Analyzing setup {name}") return self.parent._design.Analyze(name) def solve(self, name=None): - ''' + """ Use: Performs a blocking simulation. 
The next script command will not be executed until the simulation is complete. @@ -1085,21 +1191,23 @@ def solve(self, name=None): HFSS abort: still returns 0 , since termination by user. - ''' + """ if name is None: name = self.name return self.parent._design.Solve(name) - def insert_sweep(self, - start_ghz, - stop_ghz, - count=None, - step_ghz=None, - name="Sweep", - type="Fast", - save_fields=False): - - if not type in ['Fast', 'Interpolating', 'Discrete']: + def insert_sweep( + self, + start_ghz, + stop_ghz, + count=None, + step_ghz=None, + name="Sweep", + type="Fast", + save_fields=False, + ): + + if not type in ["Fast", "Interpolating", "Discrete"]: logger.error( "insert_sweep: Error type was not in ['Fast', 'Interpolating', 'Discrete']" ) @@ -1107,45 +1215,62 @@ def insert_sweep(self, name = increment_name(name, self.get_sweep_names()) params = [ "NAME:" + name, - "IsEnabled:=", True, - "Type:=", type, - "SaveFields:=", save_fields, - "SaveRadFields:=", False, + "IsEnabled:=", + True, + "Type:=", + type, + "SaveFields:=", + save_fields, + "SaveRadFields:=", + False, # "GenerateFieldsForAllFreqs:=" - "ExtrapToDC:=", False, + "ExtrapToDC:=", + False, ] # not sure when exactly this changed between 2016 and 2019 - if self._ansys_version >= '2019': + if self._ansys_version >= "2019": if count: - params.extend([ - "RangeType:=", 'LinearCount', "RangeStart:=", - f"{start_ghz:f}GHz", "RangeEnd:=", f"{stop_ghz:f}GHz", - "RangeCount:=", count - ]) + params.extend( + [ + "RangeType:=", + "LinearCount", + "RangeStart:=", + f"{start_ghz:f}GHz", + "RangeEnd:=", + f"{stop_ghz:f}GHz", + "RangeCount:=", + count, + ] + ) if step_ghz: - params.extend([ - "RangeType:=", 'LinearStep', "RangeStart:=", - f"{start_ghz:f}GHz", "RangeEnd:=", f"{stop_ghz:f}GHz", - "RangeStep:=", step_ghz - ]) + params.extend( + [ + "RangeType:=", + "LinearStep", + "RangeStart:=", + f"{start_ghz:f}GHz", + "RangeEnd:=", + f"{stop_ghz:f}GHz", + "RangeStep:=", + step_ghz, + ] + ) if (count and step_ghz) or ((not count) and (not step_ghz)): logger.error( - 'ERROR: you should provide either step_ghz or count \ + "ERROR: you should provide either step_ghz or count \ when inserting an HFSS driven model freq sweep. \ - YOu either provided both or neither! See insert_sweep.') + YOu either provided both or neither! See insert_sweep." 
+ ) else: - params.extend([ - "StartValue:=", - "%fGHz" % start_ghz, "StopValue:=", - "%fGHz" % stop_ghz - ]) + params.extend( + ["StartValue:=", "%fGHz" % start_ghz, "StopValue:=", "%fGHz" % stop_ghz] + ) if step_ghz is not None: - params.extend([ - "SetupType:=", "LinearSetup", "StepSize:=", - "%fGHz" % step_ghz - ]) + params.extend( + ["SetupType:=", "LinearSetup", "StepSize:=", "%fGHz" % step_ghz] + ) else: params.extend(["SetupType:=", "LinearCount", "Count:=", count]) @@ -1156,31 +1281,30 @@ def insert_sweep(self, def delete_sweep(self, name): self._setup_module.DeleteSweep(self.name, name) - -# def add_fields_convergence_expr(self, expr, pct_delta, phase=0): -# """note: because of hfss idiocy, you must call "commit_convergence_exprs" -# after adding all exprs""" -# assert isinstance(expr, NamedCalcObject) -# self.expression_cache_items.append( -# ["NAME:CacheItem", -# "Title:=", expr.name+"_conv", -# "Expression:=", expr.name, -# "Intrinsics:=", "Phase='{}deg'".format(phase), -# "IsConvergence:=", True, -# "UseRelativeConvergence:=", 1, -# "MaxConvergenceDelta:=", pct_delta, -# "MaxConvergeValue:=", "0.05", -# "ReportType:=", "Fields", -# ["NAME:ExpressionContext"]]) - -# def commit_convergence_exprs(self): -# """note: this will eliminate any convergence expressions not added -# through this interface""" -# args = [ -# "NAME:"+self.name, -# ["NAME:ExpressionCache", self.expression_cache_items] -# ] -# self._setup_module.EditSetup(self.name, args) + # def add_fields_convergence_expr(self, expr, pct_delta, phase=0): + # """note: because of hfss idiocy, you must call "commit_convergence_exprs" + # after adding all exprs""" + # assert isinstance(expr, NamedCalcObject) + # self.expression_cache_items.append( + # ["NAME:CacheItem", + # "Title:=", expr.name+"_conv", + # "Expression:=", expr.name, + # "Intrinsics:=", "Phase='{}deg'".format(phase), + # "IsConvergence:=", True, + # "UseRelativeConvergence:=", 1, + # "MaxConvergenceDelta:=", pct_delta, + # "MaxConvergeValue:=", "0.05", + # "ReportType:=", "Fields", + # ["NAME:ExpressionContext"]]) + + # def commit_convergence_exprs(self): + # """note: this will eliminate any convergence expressions not added + # through this interface""" + # args = [ + # "NAME:"+self.name, + # ["NAME:ExpressionCache", self.expression_cache_items] + # ] + # self._setup_module.EditSetup(self.name, args) def get_sweep_names(self): return self._setup_module.GetSweeps(self.name) @@ -1192,110 +1316,134 @@ def get_sweep(self, name=None): if name is None: name = sweeps[0] elif name not in sweeps: - raise EnvironmentError("Sweep {} not found in {}".format( - name, sweeps)) + raise EnvironmentError("Sweep {} not found in {}".format(name, sweeps)) return HfssFrequencySweep(self, name) def add_fields_convergence_expr(self, expr, pct_delta, phase=0): """note: because of hfss idiocy, you must call "commit_convergence_exprs" after adding all exprs""" assert isinstance(expr, NamedCalcObject) - self.expression_cache_items.append([ - "NAME:CacheItem", "Title:=", expr.name + "_conv", "Expression:=", - expr.name, "Intrinsics:=", "Phase='{}deg'".format(phase), - "IsConvergence:=", True, "UseRelativeConvergence:=", 1, - "MaxConvergenceDelta:=", pct_delta, "MaxConvergeValue:=", "0.05", - "ReportType:=", "Fields", ["NAME:ExpressionContext"] - ]) + self.expression_cache_items.append( + [ + "NAME:CacheItem", + "Title:=", + expr.name + "_conv", + "Expression:=", + expr.name, + "Intrinsics:=", + "Phase='{}deg'".format(phase), + "IsConvergence:=", + True, + "UseRelativeConvergence:=", + 
1, + "MaxConvergenceDelta:=", + pct_delta, + "MaxConvergeValue:=", + "0.05", + "ReportType:=", + "Fields", + ["NAME:ExpressionContext"], + ] + ) def commit_convergence_exprs(self): """note: this will eliminate any convergence expressions not added through this interface""" args = [ "NAME:" + self.name, - ["NAME:ExpressionCache", self.expression_cache_items] + ["NAME:ExpressionCache", self.expression_cache_items], ] self._setup_module.EditSetup(self.name, args) def get_convergence(self, variation="", pre_fn_args=[], overwrite=True): - ''' + """ Returns converge as a dataframe Variation should be in the form variation = "scale_factor='1.2001'" ... - ''' + """ # TODO: (Daniel) I think this data should be store in a more comfortable datatype (dictionary maybe?) # Write file temp = tempfile.NamedTemporaryFile() temp.close() - temp = temp.name + '.conv' - self.parent._design.ExportConvergence(self.name, variation, - *pre_fn_args, temp, overwrite) + temp = temp.name + ".conv" + self.parent._design.ExportConvergence( + self.name, variation, *pre_fn_args, temp, overwrite + ) # Read File temp = Path(temp) if not temp.is_file(): logger.error( - f'''ERROR! Error in trying to read temporary convergence file. + f"""ERROR! Error in trying to read temporary convergence file. `get_convergence` did not seem to have the file written {str(temp)}. Perhaps there was no convergence? Check to see if there is a CONV available for this current variation. If the nominal design is not solved, it will not have a CONV., but will show up as a variation Check for error messages in HFSS. - Retuning None''') - return None, '' + Retuning None""" + ) + return None, "" text = temp.read_text() # Parse file - text2 = text.split(r'==================') + text2 = text.split(r"==================") if len(text) >= 3: - df = pd.read_csv(io.StringIO(text2[3].strip()), - sep='|', - skipinitialspace=True, - index_col=0).drop('Unnamed: 3', axis=1) + df = pd.read_csv( + io.StringIO(text2[3].strip()), + sep="|", + skipinitialspace=True, + index_col=0, + ).drop("Unnamed: 3", axis=1) else: - logger.error(f'ERROR IN reading in {temp}:\n{text}') + logger.error(f"ERROR IN reading in {temp}:\n{text}") df = None return df, text def get_mesh_stats(self, variation=""): - ''' variation should be in the form - variation = "scale_factor='1.2001'" ... - ''' + """variation should be in the form + variation = "scale_factor='1.2001'" ... + """ temp = tempfile.NamedTemporaryFile() temp.close() # print(temp.name0 # seems broken in 2016 because of extra text added to the top of the file - self.parent._design.ExportMeshStats(self.name, variation, - temp.name + '.mesh', True) + self.parent._design.ExportMeshStats( + self.name, variation, temp.name + ".mesh", True + ) try: - df = pd.read_csv(temp.name + '.mesh', - delimiter='|', - skipinitialspace=True, - skiprows=7, - skipfooter=1, - skip_blank_lines=True, - engine='python') - df = df.drop('Unnamed: 9', axis=1) + df = pd.read_csv( + temp.name + ".mesh", + delimiter="|", + skipinitialspace=True, + skiprows=7, + skipfooter=1, + skip_blank_lines=True, + engine="python", + ) + df = df.drop("Unnamed: 9", axis=1) except Exception as e: print("ERROR in MESH reading operation.") print(e) print( - 'ERROR! Error in trying to read temporary MESH file ' + - temp.name + - '\n. Check to see if there is a mesh available for this current variation.\ + "ERROR! Error in trying to read temporary MESH file " + + temp.name + + "\n. 
Check to see if there is a mesh available for this current variation.\ If the nominal design is not solved, it will not have a mesh., \ - but will show up as a variation.') + but will show up as a variation." + ) df = None return df def get_profile(self, variation=""): fn = tempfile.mktemp() self.parent._design.ExportProfile(self.name, variation, fn, False) - df = pd.read_csv(fn, - delimiter='\t', - skipinitialspace=True, - skiprows=6, - skipfooter=1, - skip_blank_lines=True, - engine='python') + df = pd.read_csv( + fn, + delimiter="\t", + skipinitialspace=True, + skiprows=6, + skipfooter=1, + skip_blank_lines=True, + engine="python", + ) # just broken down by new lines return df @@ -1307,14 +1455,15 @@ class HfssDMSetup(HfssSetup): """ Driven modal setup """ + solution_freq = make_float_prop("Solution Freq") delta_s = make_float_prop("Delta S") solver_type = make_str_prop("Solver Type") def setup_link(self, linked_setup): - ''' - type: linked_setup - ''' + """ + type: linked_setup + """ args = [ "NAME:" + self.name, [ @@ -1335,7 +1484,7 @@ def setup_link(self, linked_setup): self._setup_module.EditSetup(self.name, args) def _map_variables_by_name(self): - ''' does not check that variables are all present ''' + """does not check that variables are all present""" # don't care about values project_variables = self.parent.parent.get_variable_names() design_variables = self.parent.get_variable_names() @@ -1353,8 +1502,8 @@ def _map_variables_by_name(self): def get_solutions(self): return HfssDMDesignSolutions(self, self.parent._solutions) -class HfssDTSetup(HfssDMSetup): +class HfssDTSetup(HfssDMSetup): def get_solutions(self): return HfssDTDesignSolutions(self, self.parent._solutions) @@ -1363,6 +1512,7 @@ class HfssEMSetup(HfssSetup): """ Eigenmode setup """ + min_freq = make_float_prop("Min Freq") n_modes = make_int_prop("Modes") delta_f = make_float_prop("Delta F") @@ -1375,38 +1525,40 @@ class AnsysQ3DSetup(HfssSetup): """ Q3D setup """ + prop_tab = "CG" max_pass = make_int_prop("Max. Number of Passes") min_pass = make_int_prop("Min. Number of Passes") pct_error = make_int_prop("Percent Error") - frequency = make_str_prop("Adaptive Freq", 'General') # e.g., '5GHz' + frequency = make_str_prop("Adaptive Freq", "General") # e.g., '5GHz' n_modes = 0 # for compatibility with eigenmode def get_frequency_Hz(self): - return int(ureg(self.frequency).to('Hz').magnitude) + return int(ureg(self.frequency).to("Hz").magnitude) def get_solutions(self): return HfssQ3DDesignSolutions(self, self.parent._solutions) def get_convergence(self, variation=""): - ''' + """ Returns df # Triangle Delta % Pass 1 164 NaN - ''' - return super().get_convergence(variation, pre_fn_args=['CG']) + """ + return super().get_convergence(variation, pre_fn_args=["CG"]) def get_matrix( - self, - variation='', - pass_number=0, - frequency=None, - MatrixType='Maxwell', - solution_kind='LastAdaptive', # AdaptivePass - ACPlusDCResistance=False, - soln_type="C"): - ''' + self, + variation="", + pass_number=0, + frequency=None, + MatrixType="Maxwell", + solution_kind="LastAdaptive", # AdaptivePass + ACPlusDCResistance=False, + soln_type="C", + ): + """ Arguments: ----------- variation: an empty string returns nominal variation. 
@@ -1421,29 +1573,45 @@ def get_matrix( Returns: --------------------- df_cmat, user_units, (df_cond, units_cond), design_variation - ''' + """ if frequency is None: frequency = self.get_frequency_Hz() temp = tempfile.NamedTemporaryFile() temp.close() - path = temp.name + '.txt' + path = temp.name + ".txt" # , , , , , , # , , , , , , # - logger.info(f'Exporting matrix data to ({path}, {soln_type}, {variation}, ' - f'{self.name}:{solution_kind}, ' - '"Original", "ohm", "nH", "fF", ' - f'"mSie", {frequency}, {MatrixType}, ' - f'{pass_number}, {ACPlusDCResistance}') - self.parent._design.ExportMatrixData(path, soln_type, variation, - f'{self.name}:{solution_kind}', - "Original", "ohm", "nH", "fF", - "mSie", frequency, MatrixType, - pass_number, ACPlusDCResistance) - - df_cmat, user_units, (df_cond, units_cond), design_variation = \ - self.load_q3d_matrix(path) + logger.info( + f"Exporting matrix data to ({path}, {soln_type}, {variation}, " + f"{self.name}:{solution_kind}, " + '"Original", "ohm", "nH", "fF", ' + f'"mSie", {frequency}, {MatrixType}, ' + f"{pass_number}, {ACPlusDCResistance}" + ) + self.parent._design.ExportMatrixData( + path, + soln_type, + variation, + f"{self.name}:{solution_kind}", + "Original", + "ohm", + "nH", + "fF", + "mSie", + frequency, + MatrixType, + pass_number, + ACPlusDCResistance, + ) + + ( + df_cmat, + user_units, + (df_cond, units_cond), + design_variation, + ) = self.load_q3d_matrix(path) return df_cmat, user_units, (df_cond, units_cond), design_variation @staticmethod @@ -1490,43 +1658,46 @@ def _readin_Q3D_matrix(path: str): text = Path(path).read_text() - s1 = text.split('Capacitance Matrix') + s1 = text.split("Capacitance Matrix") assert len(s1) == 2, "Could not split text to `Capacitance Matrix`" - s2 = s1[1].split('Conductance Matrix') + s2 = s1[1].split("Conductance Matrix") + + df_cmat = pd.read_csv( + io.StringIO(s2[0].strip()), + delim_whitespace=True, + skipinitialspace=True, + index_col=0, + ) + units = re.findall(r"C Units:(.*?),", text)[0] - df_cmat = pd.read_csv(io.StringIO(s2[0].strip()), - delim_whitespace=True, - skipinitialspace=True, - index_col=0) - units = re.findall(r'C Units:(.*?),', text)[0] - if len(s2) > 1: - df_cond = pd.read_csv(io.StringIO(s2[1].strip()), - delim_whitespace=True, - skipinitialspace=True, - index_col=0) - units_cond = re.findall(r'G Units:(.*?)\n', text)[0] + df_cond = pd.read_csv( + io.StringIO(s2[1].strip()), + delim_whitespace=True, + skipinitialspace=True, + index_col=0, + ) + units_cond = re.findall(r"G Units:(.*?)\n", text)[0] else: df_cond = None units_cond = None - var = re.findall(r'DesignVariation:(.*?)\n', - text) # this changed circa v2020 + var = re.findall(r"DesignVariation:(.*?)\n", text) # this changed circa v2020 if len(var) < 1: # didnt find - var = re.findall(r'Design Variation:(.*?)\n', text) + var = re.findall(r"Design Variation:(.*?)\n", text) if len(var) < 1: # didnt find # May not be present if there are no design variations to begin # with and no variables in the design. - pass #logger.error(f'Failed to parse Q3D matrix Design Variation:\nFile:{path}\nText:{text}') + pass # logger.error(f'Failed to parse Q3D matrix Design Variation:\nFile:{path}\nText:{text}') - var = [''] + var = [""] design_variation = var[0] return df_cmat, units, design_variation, df_cond, units_cond @staticmethod - def load_q3d_matrix(path, user_units='fF'): + def load_q3d_matrix(path, user_units="fF"): """Load Q3D capacitance file exported as Maxwell matrix. Exports also conductance conductance. 
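        For instance (an illustrative sketch; the file path is hypothetical):

            df_cmat, units, (df_cond, units_cond), variation = \
                AnsysQ3DSetup.load_q3d_matrix('C:/temp/q3d_export.txt', user_units='fF')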
Units are read in automatically and converted to user units. @@ -1539,34 +1710,39 @@ def load_q3d_matrix(path, user_units='fF'): dataframes: df_cmat, df_cond """ - df_cmat, Cunits, design_variation, df_cond, units_cond = AnsysQ3DSetup._readin_Q3D_matrix( - path) + ( + df_cmat, + Cunits, + design_variation, + df_cond, + units_cond, + ) = AnsysQ3DSetup._readin_Q3D_matrix(path) # Unit convert q = ureg.parse_expression(Cunits).to(user_units) df_cmat = df_cmat * q.magnitude # scale to user units - #print("Imported capacitance matrix with UNITS: [%s] now converted to USER UNITS:[%s] from file:\n\t%s"%(Cunits, user_units, path)) + # print("Imported capacitance matrix with UNITS: [%s] now converted to USER UNITS:[%s] from file:\n\t%s"%(Cunits, user_units, path)) return df_cmat, user_units, (df_cond, units_cond), design_variation class HfssDesignSolutions(COMWrapper): def __init__(self, setup, solutions): - ''' + """ :type setup: HfssSetup - ''' + """ super(HfssDesignSolutions, self).__init__() self.parent = setup self._solutions = solutions self._ansys_version = self.parent._ansys_version def get_valid_solution_list(self): - ''' - Gets all available solution names that exist in a design. - Return example: - ('Setup1 : AdaptivePass', 'Setup1 : LastAdaptive') - ''' + """ + Gets all available solution names that exist in a design. + Return example: + ('Setup1 : AdaptivePass', 'Setup1 : LastAdaptive') + """ return self._solutions.GetValidISolutionList() def list_variations(self, setup_name: str = None): @@ -1594,13 +1770,13 @@ def list_variations(self, setup_name: str = None): class HfssEMDesignSolutions(HfssDesignSolutions): def eigenmodes(self, lv=""): - ''' + """ Returns the eigenmode data of freq and kappa/2p - ''' + """ fn = tempfile.mktemp() - #print(self.parent.solution_name, lv, fn) + # print(self.parent.solution_name, lv, fn) self._solutions.ExportEigenmodes(self.parent.solution_name, lv, fn) - data = np.genfromtxt(fn, dtype='str') + data = np.genfromtxt(fn, dtype="str") # Update to Py 3: # np.loadtxt and np.genfromtxt operate in byte mode, which is the default string type in Python 2. # But Python 3 uses unicode, and marks bytestrings with this b. @@ -1654,8 +1830,8 @@ def eigenmodes(self, lv=""): self._solutions.ExportEigenmodes(soln_name, ['Pass:=5'], fn) # ['Pass:=5'] fails can do with '' """ - def set_mode(self, n, phase=0, FieldType='EigenStoredEnergy'): - ''' + def set_mode(self, n, phase=0, FieldType="EigenStoredEnergy"): + """ Indicates which source excitations should be used for fields post processing. HFSS>Fields>Edit Sources @@ -1666,46 +1842,50 @@ def set_mode(self, n, phase=0, FieldType='EigenStoredEnergy'): No error is thrown if a number exceeding number of modes is set FieldType -- EigenStoredEnergy or EigenPeakElecticField - ''' + """ n_modes = int(self.parent.n_modes) if n < 1: - err = f'ERROR: You tried to set a mode < 1. {n}/{n_modes}' + err = f"ERROR: You tried to set a mode < 1. 
{n}/{n_modes}" logger.error(err) raise Exception(err) if n > n_modes: - err = f'ERROR: You tried to set a mode > number of modes {n}/{n_modes}' + err = f"ERROR: You tried to set a mode > number of modes {n}/{n_modes}" logger.error(err) raise Exception(err) - if self._ansys_version >= '2019': + if self._ansys_version >= "2019": # THIS WORKS FOR v2019R2 self._solutions.EditSources( - [["FieldType:=", "EigenPeakElectricField"], - [ - "Name:=", "Modes", "Magnitudes:=", - ["1" if i + 1 == n else "0" for i in range(n_modes)], - "Phases:=", - [ - str(phase) if i + 1 == n else "0" - for i in range(n_modes) - ] - ]]) + [ + ["FieldType:=", "EigenPeakElectricField"], + [ + "Name:=", + "Modes", + "Magnitudes:=", + ["1" if i + 1 == n else "0" for i in range(n_modes)], + "Phases:=", + [str(phase) if i + 1 == n else "0" for i in range(n_modes)], + ], + ] + ) else: # The syntax has changed for AEDT 18.2. # see https://ansyshelp.ansys.com/account/secured?returnurl=/Views/Secured/Electronics/v195//Subsystems/HFSS/Subsystems/HFSS%20Scripting/HFSS%20Scripting.htm self._solutions.EditSources( - "EigenStoredEnergy", ["NAME:SourceNames", "EigenMode"], - ["NAME:Modes", n_modes], ["NAME:Magnitudes"] + - [1 if i + 1 == n else 0 - for i in range(n_modes)], ["NAME:Phases"] + - [phase if i + 1 == n else 0 for i in range(n_modes)], - ["NAME:Terminated"], ["NAME:Impedances"]) + "EigenStoredEnergy", + ["NAME:SourceNames", "EigenMode"], + ["NAME:Modes", n_modes], + ["NAME:Magnitudes"] + [1 if i + 1 == n else 0 for i in range(n_modes)], + ["NAME:Phases"] + [phase if i + 1 == n else 0 for i in range(n_modes)], + ["NAME:Terminated"], + ["NAME:Impedances"], + ) def has_fields(self, variation_string=None): - ''' + """ Determine if fields exist for a particular solution. variation_string : str | None @@ -1713,21 +1893,16 @@ def has_fields(self, variation_string=None): the string of variables, such as "Cj='2fF' Lj='12.75nH'" If None, gets the nominal variation - ''' + """ if variation_string is None: variation_string = self.parent.parent.get_nominal_variation() return bool( - self._solutions.HasFields(self.parent.solution_name, - variation_string)) - - def create_report(self, - plot_name, - xcomp, - ycomp, - params, - pass_name='LastAdaptive'): - ''' + self._solutions.HasFields(self.parent.solution_name, variation_string) + ) + + def create_report(self, plot_name, xcomp, ycomp, params, pass_name="LastAdaptive"): + """ pass_name: AdaptivePass, LastAdaptive Example @@ -1739,24 +1914,32 @@ def create_report(self, ycomp = [f"re(Mode({i}))" for i in range(1,1+epr_hfss.n_modes)] params = ["Pass:=", ["All"]]+variation setup.create_report("Freq. vs. 
pass", "Pass", ycomp, params, pass_name='AdaptivePass') - ''' + """ assert isinstance(ycomp, list) assert isinstance(params, list) setup = self.parent reporter = setup._reporter return reporter.CreateReport( - plot_name, "Eigenmode Parameters", "Rectangular Plot", - f"{setup.name} : {pass_name}", [], params, - ["X Component:=", xcomp, "Y Component:=", ycomp], []) + plot_name, + "Eigenmode Parameters", + "Rectangular Plot", + f"{setup.name} : {pass_name}", + [], + params, + ["X Component:=", xcomp, "Y Component:=", ycomp], + [], + ) class HfssDMDesignSolutions(HfssDesignSolutions): pass + class HfssDTDesignSolutions(HfssDesignSolutions): pass + class HfssQ3DDesignSolutions(HfssDesignSolutions): pass @@ -1790,7 +1973,7 @@ def get_network_data(self, formats): formats = formats.split(",") formats = [f.upper() for f in formats] - fmts_lists = {'S': [], 'Y': [], 'Z': []} + fmts_lists = {"S": [], "Y": [], "Z": []} for f in formats: fmts_lists[f[0]].append((int(f[1]), int(f[2]))) @@ -1801,8 +1984,18 @@ def get_network_data(self, formats): if list: fn = tempfile.mktemp() self.parent._solutions.ExportNetworkData( - [], self.parent.name + " : " + self.name, 2, fn, ["all"], - False, 0, data_type, -1, 1, 15) + [], + self.parent.name + " : " + self.name, + 2, + fn, + ["all"], + False, + 0, + data_type, + -1, + 1, + 15, + ) with open(fn) as f: f.readline() colnames = f.readline().split() @@ -1813,10 +2006,8 @@ def get_network_data(self, formats): # TODO: If Ansys version is 2019, use 'Real' and 'Imag' # in place of 'Re' and 'Im for i, j in list: - real_idx = colnames.index("%s[%d,%d]_Re" % - (data_type, i, j)) - imag_idx = colnames.index("%s[%d,%d]_Im" % - (data_type, i, j)) + real_idx = colnames.index("%s[%d,%d]_Re" % (data_type, i, j)) + imag_idx = colnames.index("%s[%d,%d]_Im" % (data_type, i, j)) c_arr = array[:, real_idx] + 1j * array[:, imag_idx] ret[formats.index("%s%d%d" % (data_type, i, j))] = c_arr @@ -1826,13 +2017,17 @@ def create_report(self, name, expr): existing = self.parent._reporter.GetAllReportNames() name = increment_name(name, existing) var_names = self.parent.parent.get_variable_names() - var_args = sum([["%s:=" % v_name, ["Nominal"]] - for v_name in var_names], []) + var_args = sum([["%s:=" % v_name, ["Nominal"]] for v_name in var_names], []) self.parent._reporter.CreateReport( - name, "Modal Solution Data", "Rectangular Plot", - self.solution_name, ["Domain:=", "Sweep"], + name, + "Modal Solution Data", + "Rectangular Plot", + self.solution_name, + ["Domain:=", "Sweep"], ["Freq:=", ["All"]] + var_args, - ["X Component:=", "Freq", "Y Component:=", [expr]], []) + ["X Component:=", "Freq", "Y Component:=", [expr]], + [], + ) return HfssReport(self.parent.parent, name) def get_report_arrays(self, expr): @@ -1857,7 +2052,7 @@ def export_to_file(self, filename): def get_arrays(self): fn = tempfile.mktemp(suffix=".csv") self.export_to_file(fn) - return np.loadtxt(fn, skiprows=1, delimiter=',').transpose() + return np.loadtxt(fn, skiprows=1, delimiter=",").transpose() # warning for python 3 probably need to use genfromtxt @@ -1876,6 +2071,7 @@ class Optimetrics(COMWrapper): Note that running optimetrics requires the license for Optimetrics by Ansys. 
""" + def __init__(self, design): super(Optimetrics, self).__init__() @@ -1904,20 +2100,22 @@ def solve_setup(self, setup_name: str): """ return self._optimetrics.SolveSetup(setup_name) - def create_setup(self, - variable, - swp_params, - name="ParametricSetup1", - swp_type='linear_step', - setup_name=None, - save_fields=True, - copy_mesh=True, - solve_with_copied_mesh_only=True, - setup_type='parametric'): + def create_setup( + self, + variable, + swp_params, + name="ParametricSetup1", + swp_type="linear_step", + setup_name=None, + save_fields=True, + copy_mesh=True, + solve_with_copied_mesh_only=True, + setup_type="parametric", + ): """ Inserts a new parametric setup of one variable. Either with sweep definition or from file. - + *Synchronized* sweeps (more than one variable changing at once) can be implemented by giving a list of variables to ``variable`` and corresponding lists to ``swp_params`` and ``swp_type``. @@ -1953,7 +2151,7 @@ def create_setup(self, For swp_type='linear_step' swp_params is start, stop, step: swp_params = ("12.8nH", "13.6nH", "0.2nH") - + All other types swp_params is start, stop, count: swp_params = ("12.8nH", "13.6nH", 4) The definition of count varies amongst the available types. @@ -1980,20 +2178,21 @@ def create_setup(self, f"Inserting optimetrics setup `{name}` for simulation setup: `{setup_name}`" ) - if setup_type == 'parametric': + if setup_type == "parametric": type_map = { - 'linear_count': 'LINC', - 'decade_count': 'DEC', - 'octave_count': 'OCT', - 'exponential_count': 'ESTP', + "linear_count": "LINC", + "decade_count": "DEC", + "octave_count": "OCT", + "exponential_count": "ESTP", } - valid_swp_types = {'single_value', 'linear_step'} | set(type_map.keys()) + valid_swp_types = {"single_value", "linear_step"} | set(type_map.keys()) if isinstance(variable, Iterable) and not isinstance(variable, str): # synchronized sweep, check that data is in correct format - assert len(swp_params) == len(swp_type) == len(variable), \ - 'Incorrect swp_params or swp_type format for synchronised sweep.' + assert ( + len(swp_params) == len(swp_type) == len(variable) + ), "Incorrect swp_params or swp_type format for synchronised sweep." synchronize = True else: # convert all to lists as we can reuse same code for synchronized @@ -2007,63 +2206,96 @@ def create_setup(self, else: swp_str = list() for i, e in enumerate(swp_type): - if e == 'single_value': + if e == "single_value": # Single takes string of single variable no swp_type_name swp_str.append(f"{swp_params[i]}") else: # correct number of inputs - assert len(swp_params[i]) == 3, "Incorrect number of sweep parameters." + assert ( + len(swp_params[i]) == 3 + ), "Incorrect number of sweep parameters." # Not checking for compatible unit types - if e == 'linear_step': + if e == "linear_step": swp_type_name = "LIN" else: # counts needs to be an integer number - assert isinstance(swp_params[i][2], int), "Count must be integer." + assert isinstance( + swp_params[i][2], int + ), "Count must be integer." 
swp_type_name = type_map[e] # prepare the string to pass to Ansys - swp_str.append(f"{swp_type_name} {swp_params[i][0]} {swp_params[i][1]} {swp_params[i][2]}") + swp_str.append( + f"{swp_type_name} {swp_params[i][0]} {swp_params[i][1]} {swp_params[i][2]}" + ) - self._optimetrics.InsertSetup("OptiParametric", [ - f"NAME:{name}", "IsEnabled:=", True, + self._optimetrics.InsertSetup( + "OptiParametric", [ - "NAME:ProdOptiSetupDataV2", - "SaveFields:=", - save_fields, - "CopyMesh:=", - copy_mesh, - "SolveWithCopiedMeshOnly:=", - solve_with_copied_mesh_only, - ], ["NAME:StartingPoint"], "Sim. Setups:=", [setup_name], - [ - "NAME:Sweeps", - *[[ - "NAME:SweepDefinition", "Variable:=", var_name, "Data:=", - swp, "OffsetF1:=", False, "Synchronize:=", int(synchronize) - ] for var_name, swp in zip(variable, swp_str)] - ], ["NAME:Sweep Operations"], ["NAME:Goals"] - ]) - elif setup_type == 'parametric_file': + f"NAME:{name}", + "IsEnabled:=", + True, + [ + "NAME:ProdOptiSetupDataV2", + "SaveFields:=", + save_fields, + "CopyMesh:=", + copy_mesh, + "SolveWithCopiedMeshOnly:=", + solve_with_copied_mesh_only, + ], + ["NAME:StartingPoint"], + "Sim. Setups:=", + [setup_name], + [ + "NAME:Sweeps", + *[ + [ + "NAME:SweepDefinition", + "Variable:=", + var_name, + "Data:=", + swp, + "OffsetF1:=", + False, + "Synchronize:=", + int(synchronize), + ] + for var_name, swp in zip(variable, swp_str) + ], + ], + ["NAME:Sweep Operations"], + ["NAME:Goals"], + ], + ) + elif setup_type == "parametric_file": # Uses the file name as the swp_params filename = swp_params - self._optimetrics.ImportSetup("OptiParametric", + self._optimetrics.ImportSetup( + "OptiParametric", [ - f"NAME:{name}", - filename, - ]) - self._optimetrics.EditSetup(f"{name}", + f"NAME:{name}", + filename, + ], + ) + self._optimetrics.EditSetup( + f"{name}", [ f"NAME:{name}", - [ - "NAME:ProdOptiSetupDataV2", - "SaveFields:=" , save_fields, - "CopyMesh:=" , copy_mesh, - "SolveWithCopiedMeshOnly:=", solve_with_copied_mesh_only, - ], - ]) + [ + "NAME:ProdOptiSetupDataV2", + "SaveFields:=", + save_fields, + "CopyMesh:=", + copy_mesh, + "SolveWithCopiedMeshOnly:=", + solve_with_copied_mesh_only, + ], + ], + ) else: raise NotImplementedError() @@ -2081,43 +2313,44 @@ def __init__(self, design, modeler, boundaries, mesh): def set_units(self, units, rescale=True): self._modeler.SetModelUnits( - ["NAME:Units Parameter", "Units:=", units, "Rescale:=", rescale]) + ["NAME:Units Parameter", "Units:=", units, "Rescale:=", rescale] + ) def get_units(self): """Get the model units. - Return Value: A string contains current model units. 
""" + Return Value: A string contains current model units.""" return str(self._modeler.GetModelUnits()) - def get_all_properties(self, obj_name, PropTab='Geometry3DAttributeTab'): - ''' - Get all properties for modeler PropTab, PropServer - ''' + def get_all_properties(self, obj_name, PropTab="Geometry3DAttributeTab"): + """ + Get all properties for modeler PropTab, PropServer + """ PropServer = obj_name properties = {} for key in self._modeler.GetProperties(PropTab, PropServer): - properties[key] = self._modeler.GetPropertyValue( - PropTab, PropServer, key) + properties[key] = self._modeler.GetPropertyValue(PropTab, PropServer, key) return properties def _attributes_array( - self, - name=None, - nonmodel=False, - wireframe=False, - color=None, - transparency=0.9, - material=None, # str - solve_inside=None, # bool - coordinate_system="Global"): + self, + name=None, + nonmodel=False, + wireframe=False, + color=None, + transparency=0.9, + material=None, # str + solve_inside=None, # bool + coordinate_system="Global", + ): arr = ["NAME:Attributes", "PartCoordinateSystem:=", coordinate_system] if name is not None: arr.extend(["Name:=", name]) if nonmodel or wireframe: - flags = 'NonModel' if nonmodel else '' # can be done smarter + flags = "NonModel" if nonmodel else "" # can be done smarter if wireframe: - flags += '#' if len(flags) > 0 else '' - flags += 'Wireframe' + flags += "#" if len(flags) > 0 else "" + flags += "Wireframe" arr.extend(["Flags:=", flags]) if color is not None: @@ -2134,12 +2367,8 @@ def _attributes_array( def _selections_array(self, *names): return ["NAME:Selections", "Selections:=", ",".join(names)] - def mesh_length(self, - name_mesh, - objects: list, - MaxLength='0.1mm', - **kwargs): - ''' + def mesh_length(self, name_mesh, objects: list, MaxLength="0.1mm", **kwargs): + """ "RefineInside:=" , False, "Enabled:=" , True, "RestrictElem:=" , False, @@ -2149,21 +2378,22 @@ def mesh_length(self, Example use: modeler.assign_mesh_length('mesh2', ["Q1_mesh"], MaxLength=0.1) - ''' + """ assert isinstance(objects, list) - arr = [ - f"NAME:{name_mesh}", "Objects:=", objects, 'MaxLength:=', MaxLength - ] + arr = [f"NAME:{name_mesh}", "Objects:=", objects, "MaxLength:=", MaxLength] ops = [ - 'RefineInside', 'Enabled', 'RestrictElem', 'NumMaxElem', - 'RestrictLength' + "RefineInside", + "Enabled", + "RestrictElem", + "NumMaxElem", + "RestrictLength", ] for key, val in kwargs.items(): if key in ops: - arr += [key + ':=', str(val)] + arr += [key + ":=", str(val)] else: - logger.error('KEY `{key}` NOT IN ops!') + logger.error("KEY `{key}` NOT IN ops!") self._mesh.AssignLengthOp(arr) @@ -2172,31 +2402,38 @@ def mesh_reassign(self, name_mesh, objects: list): self._mesh.ReassignOp(name_mesh, ["Objects:=", objects]) def mesh_get_names(self, kind="Length Based"): - ''' "Length Based", "Skin Depth Based", ...''' + """ "Length Based", "Skin Depth Based", ...""" return list(self._mesh.GetOperationNames(kind)) def mesh_get_all_props(self, mesh_name): # TODO: make mesh tis own class with properties - prop_tab = 'MeshSetupTab' - prop_server = f'MeshSetup:{mesh_name}' - prop_names = self.parent._design.GetProperties('MeshSetupTab', - prop_server) + prop_tab = "MeshSetupTab" + prop_server = f"MeshSetup:{mesh_name}" + prop_names = self.parent._design.GetProperties("MeshSetupTab", prop_server) dic = {} for name in prop_names: - dic[name] = self._modeler.GetPropertyValue(prop_tab, prop_server, - name) + dic[name] = self._modeler.GetPropertyValue(prop_tab, prop_server, name) return dic def 
draw_box_corner(self, pos, size, **kwargs): - name = self._modeler.CreateBox([ - "NAME:BoxParameters", "XPosition:=", - str(pos[0]), "YPosition:=", - str(pos[1]), "ZPosition:=", - str(pos[2]), "XSize:=", - str(size[0]), "YSize:=", - str(size[1]), "ZSize:=", - str(size[2]) - ], self._attributes_array(**kwargs)) + name = self._modeler.CreateBox( + [ + "NAME:BoxParameters", + "XPosition:=", + str(pos[0]), + "YPosition:=", + str(pos[1]), + "ZPosition:=", + str(pos[2]), + "XSize:=", + str(size[0]), + "YSize:=", + str(size[1]), + "ZSize:=", + str(size[2]), + ], + self._attributes_array(**kwargs), + ) return Box(name, self, pos, size) def draw_box_center(self, pos, size, **kwargs): @@ -2231,35 +2468,49 @@ def draw_polyline(self, points, closed=True, **kwargs): pointsStr = ["NAME:PolylinePoints"] indexsStr = ["NAME:PolylineSegments"] for ii, point in enumerate(points): - pointsStr.append([ - "NAME:PLPoint", "X:=", - str(point[0]), "Y:=", - str(point[1]), "Z:=", - str(point[2]) - ]) - indexsStr.append([ - "NAME:PLSegment", "SegmentType:=", "Line", "StartIndex:=", ii, - "NoOfPoints:=", 2 - ]) + pointsStr.append( + [ + "NAME:PLPoint", + "X:=", + str(point[0]), + "Y:=", + str(point[1]), + "Z:=", + str(point[2]), + ] + ) + indexsStr.append( + [ + "NAME:PLSegment", + "SegmentType:=", + "Line", + "StartIndex:=", + ii, + "NoOfPoints:=", + 2, + ] + ) if closed: - pointsStr.append([ - "NAME:PLPoint", "X:=", - str(points[0][0]), "Y:=", - str(points[0][1]), "Z:=", - str(points[0][2]) - ]) - params_closed = [ - "IsPolylineCovered:=", True, "IsPolylineClosed:=", True - ] + pointsStr.append( + [ + "NAME:PLPoint", + "X:=", + str(points[0][0]), + "Y:=", + str(points[0][1]), + "Z:=", + str(points[0][2]), + ] + ) + params_closed = ["IsPolylineCovered:=", True, "IsPolylineClosed:=", True] else: indexsStr = indexsStr[:-1] - params_closed = [ - "IsPolylineCovered:=", True, "IsPolylineClosed:=", False - ] + params_closed = ["IsPolylineCovered:=", True, "IsPolylineClosed:=", False] name = self._modeler.CreatePolyline( ["NAME:PolylineParameters", *params_closed, pointsStr, indexsStr], - self._attributes_array(**kwargs)) + self._attributes_array(**kwargs), + ) if closed: return Polyline(name, self, points) @@ -2270,16 +2521,26 @@ def draw_rect_corner(self, pos, x_size=0, y_size=0, z_size=0, **kwargs): size = [x_size, y_size, z_size] assert 0 in size axis = "XYZ"[size.index(0)] - w_idx, h_idx = {'X': (1, 2), 'Y': (2, 0), 'Z': (0, 1)}[axis] - - name = self._modeler.CreateRectangle([ - "NAME:RectangleParameters", "XStart:=", - str(pos[0]), "YStart:=", - str(pos[1]), "ZStart:=", - str(pos[2]), "Width:=", - str(size[w_idx]), "Height:=", - str(size[h_idx]), "WhichAxis:=", axis - ], self._attributes_array(**kwargs)) + w_idx, h_idx = {"X": (1, 2), "Y": (2, 0), "Z": (0, 1)}[axis] + + name = self._modeler.CreateRectangle( + [ + "NAME:RectangleParameters", + "XStart:=", + str(pos[0]), + "YStart:=", + str(pos[1]), + "ZStart:=", + str(pos[2]), + "Width:=", + str(size[w_idx]), + "Height:=", + str(size[h_idx]), + "WhichAxis:=", + axis, + ], + self._attributes_array(**kwargs), + ) return Rect(name, self, pos, size) def draw_rect_center(self, pos, x_size=0, y_size=0, z_size=0, **kwargs): @@ -2296,18 +2557,32 @@ def draw_rect_center(self, pos, x_size=0, y_size=0, z_size=0, **kwargs): z_size (int, optional): Width along the z direction]. Defaults to 0. """ corner_pos = [ - var(p) - var(s) / 2. 
for p, s in zip(pos, [x_size, y_size, z_size]) + var(p) - var(s) / 2.0 for p, s in zip(pos, [x_size, y_size, z_size]) ] - return self.draw_rect_corner(corner_pos, x_size, y_size, z_size, - **kwargs) + return self.draw_rect_corner(corner_pos, x_size, y_size, z_size, **kwargs) def draw_cylinder(self, pos, radius, height, axis, **kwargs): assert axis in "XYZ" - return self._modeler.CreateCylinder([ - "NAME:CylinderParameters", "XCenter:=", pos[0], "YCenter:=", - pos[1], "ZCenter:=", pos[2], "Radius:=", radius, "Height:=", - height, "WhichAxis:=", axis, "NumSides:=", 0 - ], self._attributes_array(**kwargs)) + return self._modeler.CreateCylinder( + [ + "NAME:CylinderParameters", + "XCenter:=", + pos[0], + "YCenter:=", + pos[1], + "ZCenter:=", + pos[2], + "Radius:=", + radius, + "Height:=", + height, + "WhichAxis:=", + axis, + "NumSides:=", + 0, + ], + self._attributes_array(**kwargs), + ) def draw_cylinder_center(self, pos, radius, height, axis, **kwargs): axis_idx = ["X", "Y", "Z"].index(axis) @@ -2315,82 +2590,160 @@ def draw_cylinder_center(self, pos, radius, height, axis, **kwargs): edge_pos[axis_idx] = var(pos[axis_idx]) - var(height) / 2 return self.draw_cylinder(edge_pos, radius, height, axis, **kwargs) - def draw_wirebond(self, - pos, - ori, - width, - height='0.1mm', - z=0, - wire_diameter="0.02mm", - NumSides=6, - **kwargs): - ''' - Args: - pos: 2D position vector (specify center point) - ori: should be normed - z: z position - - # TODO create Wirebond class - position is the origin of one point - ori is the orientation vector, which gets normalized - ''' + def draw_wirebond( + self, + pos, + ori, + width, + height="0.1mm", + z=0, + wire_diameter="0.02mm", + NumSides=6, + **kwargs, + ): + """ + Args: + pos: 2D position vector (specify center point) + ori: should be normed + z: z position + + # TODO create Wirebond class + position is the origin of one point + ori is the orientation vector, which gets normalized + """ p = np.array(pos) o = np.array(ori) - pad1 = p - o * width / 2. 
- name = self._modeler.CreateBondwire([ - "NAME:BondwireParameters", "WireType:=", "Low", "WireDiameter:=", - wire_diameter, "NumSides:=", NumSides, "XPadPos:=", pad1[0], - "YPadPos:=", pad1[1], "ZPadPos:=", z, "XDir:=", ori[0], "YDir:=", - ori[1], "ZDir:=", 0, "Distance:=", width, "h1:=", height, "h2:=", - "0mm", "alpha:=", "80deg", "beta:=", "80deg", "WhichAxis:=", "Z" - ], self._attributes_array(**kwargs)) + pad1 = p - o * width / 2.0 + name = self._modeler.CreateBondwire( + [ + "NAME:BondwireParameters", + "WireType:=", + "Low", + "WireDiameter:=", + wire_diameter, + "NumSides:=", + NumSides, + "XPadPos:=", + pad1[0], + "YPadPos:=", + pad1[1], + "ZPadPos:=", + z, + "XDir:=", + ori[0], + "YDir:=", + ori[1], + "ZDir:=", + 0, + "Distance:=", + width, + "h1:=", + height, + "h2:=", + "0mm", + "alpha:=", + "80deg", + "beta:=", + "80deg", + "WhichAxis:=", + "Z", + ], + self._attributes_array(**kwargs), + ) return name - def draw_region(self, - Padding, - PaddingType="Percentage Offset", - name='Region', - material="\"vacuum\""): + def draw_region( + self, + Padding, + PaddingType="Percentage Offset", + name="Region", + material='"vacuum"', + ): """ - PaddingType : 'Absolute Offset', "Percentage Offset" + PaddingType : 'Absolute Offset', "Percentage Offset" """ # TODO: Add option to modify these RegionAttributes = [ - "NAME:Attributes", "Name:=", name, "Flags:=", "Wireframe#", - "Color:=", "(255 0 0)", "Transparency:=", 1, - "PartCoordinateSystem:=", "Global", "UDMId:=", "", - "IsAlwaysHiden:=", False, "MaterialValue:=", material, - "SolveInside:=", True + "NAME:Attributes", + "Name:=", + name, + "Flags:=", + "Wireframe#", + "Color:=", + "(255 0 0)", + "Transparency:=", + 1, + "PartCoordinateSystem:=", + "Global", + "UDMId:=", + "", + "IsAlwaysHiden:=", + False, + "MaterialValue:=", + material, + "SolveInside:=", + True, ] - self._modeler.CreateRegion([ - "NAME:RegionParameters", "+XPaddingType:=", PaddingType, - "+XPadding:=", Padding[0][0], "-XPaddingType:=", PaddingType, - "-XPadding:=", Padding[0][1], "+YPaddingType:=", PaddingType, - "+YPadding:=", Padding[1][0], "-YPaddingType:=", PaddingType, - "-YPadding:=", Padding[1][1], "+ZPaddingType:=", PaddingType, - "+ZPadding:=", Padding[2][0], "-ZPaddingType:=", PaddingType, - "-ZPadding:=", Padding[2][1] - ], RegionAttributes) + self._modeler.CreateRegion( + [ + "NAME:RegionParameters", + "+XPaddingType:=", + PaddingType, + "+XPadding:=", + Padding[0][0], + "-XPaddingType:=", + PaddingType, + "-XPadding:=", + Padding[0][1], + "+YPaddingType:=", + PaddingType, + "+YPadding:=", + Padding[1][0], + "-YPaddingType:=", + PaddingType, + "-YPadding:=", + Padding[1][1], + "+ZPaddingType:=", + PaddingType, + "+ZPadding:=", + Padding[2][0], + "-ZPaddingType:=", + PaddingType, + "-ZPadding:=", + Padding[2][1], + ], + RegionAttributes, + ) def unite(self, names, keep_originals=False): self._modeler.Unite( self._selections_array(*names), - ["NAME:UniteParameters", "KeepOriginals:=", keep_originals]) + ["NAME:UniteParameters", "KeepOriginals:=", keep_originals], + ) return names[0] def intersect(self, names, keep_originals=False): self._modeler.Intersect( self._selections_array(*names), - ["NAME:IntersectParameters", "KeepOriginals:=", keep_originals]) + ["NAME:IntersectParameters", "KeepOriginals:=", keep_originals], + ) return names[0] def translate(self, name, vector): - self._modeler.Move(self._selections_array(name), [ - "NAME:TranslateParameters", "TranslateVectorX:=", vector[0], - "TranslateVectorY:=", vector[1], "TranslateVectorZ:=", vector[2] - ]) 
+ self._modeler.Move( + self._selections_array(name), + [ + "NAME:TranslateParameters", + "TranslateVectorX:=", + vector[0], + "TranslateVectorY:=", + vector[1], + "TranslateVectorZ:=", + vector[2], + ], + ) def get_boundary_assignment(self, boundary_name: str): # Gets a list of face IDs associated with the given boundary or excitation assignment. @@ -2400,10 +2753,10 @@ def get_boundary_assignment(self, boundary_name: str): return objects def append_PerfE_assignment(self, boundary_name: str, object_names: list): - ''' - This will create a new boundary if need, and will - otherwise append given names to an existing boundary - ''' + """ + This will create a new boundary if need, and will + otherwise append given names to an existing boundary + """ # enforce boundary_name = str(boundary_name) if isinstance(object_names, str): @@ -2411,32 +2764,34 @@ def append_PerfE_assignment(self, boundary_name: str, object_names: list): object_names = list(object_names) # enforce list # do actual work - if boundary_name not in self._boundaries.GetBoundaries( + if ( + boundary_name not in self._boundaries.GetBoundaries() ): # GetBoundariesOfType("Perfect E") # need to make a new boundary self.assign_perfect_E(object_names, name=boundary_name) else: # need to append objects = list(self.get_boundary_assignment(boundary_name)) - self._boundaries.ReassignBoundary([ - "NAME:" + boundary_name, "Objects:=", - list(set(objects + object_names)) - ]) - - def append_mesh(self, mesh_name: str, object_names: list, old_objs: list, - **kwargs): - ''' + self._boundaries.ReassignBoundary( + [ + "NAME:" + boundary_name, + "Objects:=", + list(set(objects + object_names)), + ] + ) + + def append_mesh(self, mesh_name: str, object_names: list, old_objs: list, **kwargs): + """ This will create a new boundary if need, and will otherwise append given names to an existing boundary old_obj = circ._mesh_assign - ''' + """ mesh_name = str(mesh_name) if isinstance(object_names, str): object_names = [object_names] object_names = list(object_names) # enforce list - if mesh_name not in self.mesh_get_names( - ): # need to make a new boundary + if mesh_name not in self.mesh_get_names(): # need to make a new boundary objs = object_names self.mesh_length(mesh_name, object_names, **kwargs) else: # need to append @@ -2445,46 +2800,54 @@ def append_mesh(self, mesh_name: str, object_names: list, old_objs: list, return objs - def assign_perfect_E(self, obj: List[str], name: str = 'PerfE'): - ''' + def assign_perfect_E(self, obj: List[str], name: str = "PerfE"): + """ Assign a boundary condition to a list of objects. Arg: objs (List[str]): Takes a name of an object or a list of object names. name(str): If `name` is not specified `PerfE` is appended to object name for the name. 
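        Example (a hypothetical sketch; the object names are illustrative):

            modeler.assign_perfect_E(['pad1', 'pad2'], name='ground_PerfE')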
- ''' + """ if not isinstance(obj, list): obj = [obj] - if name == 'PerfE': - name = str(obj) + '_' + name + if name == "PerfE": + name = str(obj) + "_" + name name = increment_name(name, self._boundaries.GetBoundaries()) self._boundaries.AssignPerfectE( - ["NAME:" + name, "Objects:=", obj, "InfGroundPlane:=", False]) + ["NAME:" + name, "Objects:=", obj, "InfGroundPlane:=", False] + ) def _make_lumped_rlc(self, r, l, c, start, end, obj_arr, name="LumpRLC"): name = increment_name(name, self._boundaries.GetBoundaries()) params = ["NAME:" + name] params += obj_arr - params.append([ - "NAME:CurrentLine", - # for some reason here it seems to switch to use the model units, rather than meters - "Start:=", - fix_units(start, unit_assumed=LENGTH_UNIT), - "End:=", - fix_units(end, unit_assumed=LENGTH_UNIT) - ]) + params.append( + [ + "NAME:CurrentLine", + # for some reason here it seems to switch to use the model units, rather than meters + "Start:=", + fix_units(start, unit_assumed=LENGTH_UNIT), + "End:=", + fix_units(end, unit_assumed=LENGTH_UNIT), + ] + ) params += [ - "UseResist:=", r != 0, "Resistance:=", r, "UseInduct:=", l != 0, - "Inductance:=", l, "UseCap:=", c != 0, "Capacitance:=", c + "UseResist:=", + r != 0, + "Resistance:=", + r, + "UseInduct:=", + l != 0, + "Inductance:=", + l, + "UseCap:=", + c != 0, + "Capacitance:=", + c, ] self._boundaries.AssignLumpedRLC(params) - def _make_lumped_port(self, - start, - end, - obj_arr, - z0="50ohm", - name="LumpPort"): + def _make_lumped_port(self, start, end, obj_arr, z0="50ohm", name="LumpPort"): start = fix_units(start, unit_assumed=LENGTH_UNIT) end = fix_units(end, unit_assumed=LENGTH_UNIT) @@ -2492,17 +2855,35 @@ def _make_lumped_port(self, params = ["NAME:" + name] params += obj_arr params += [ - "RenormalizeAllTerminals:=", True, "DoDeembed:=", False, + "RenormalizeAllTerminals:=", + True, + "DoDeembed:=", + False, [ "NAME:Modes", [ - "NAME:Mode1", "ModeNum:=", 1, "UseIntLine:=", True, - ["NAME:IntLine", "Start:=", start, "End:=", - end], "CharImp:=", "Zpi", "AlignmentGroup:=", 0, - "RenormImp:=", "50ohm" - ] - ], "ShowReporterFilter:=", False, "ReporterFilter:=", [True], - "FullResistance:=", z0, "FullReactance:=", "0ohm" + "NAME:Mode1", + "ModeNum:=", + 1, + "UseIntLine:=", + True, + ["NAME:IntLine", "Start:=", start, "End:=", end], + "CharImp:=", + "Zpi", + "AlignmentGroup:=", + 0, + "RenormImp:=", + "50ohm", + ], + ], + "ShowReporterFilter:=", + False, + "ReporterFilter:=", + [True], + "FullResistance:=", + z0, + "FullReactance:=", + "0ohm", ] self._boundaries.AssignLumpedPort(params) @@ -2511,13 +2892,13 @@ def get_face_ids(self, obj): return self._modeler.GetFaceIDs(obj) def get_object_name_by_face_id(self, ID: str): - ''' Gets an object name corresponding to the input face id. ''' + """Gets an object name corresponding to the input face id.""" return self._modeler.GetObjectNameByFaceID(ID) def get_vertex_ids(self, obj): """ - Get the vertex IDs of given an object name - oVertexIDs = oEditor.GetVertexIDsFromObject(“Box1”) + Get the vertex IDs of given an object name + oVertexIDs = oEditor.GetVertexIDsFromObject(“Box1”) """ return self._modeler.GetVertexIDsFromObject(obj) @@ -2544,19 +2925,23 @@ def set_working_coordinate_system(self, cs_name="Global"): Use: Sets the working coordinate system. 
Command: Modeler>Coordinate System>Set Working CS """ - self._modeler.SetWCS([ - "NAME:SetWCS Parameter", - "Working Coordinate System:=", - cs_name, - "RegionDepCSOk:=", - False # this one is prob not needed, but comes with the record tool - ]) + self._modeler.SetWCS( + [ + "NAME:SetWCS Parameter", + "Working Coordinate System:=", + cs_name, + "RegionDepCSOk:=", + False, # this one is prob not needed, but comes with the record tool + ] + ) - def create_relative_coorinate_system_both(self, - cs_name, - origin=["0um", "0um", "0um"], - XAxisVec=["1um", "0um", "0um"], - YAxisVec=["0um", "1um", "0um"]): + def create_relative_coorinate_system_both( + self, + cs_name, + origin=["0um", "0um", "0um"], + XAxisVec=["1um", "0um", "0um"], + YAxisVec=["0um", "1um", "0um"], + ): """ Use: Creates a relative coordinate system. Only the Name attribute of the parameter is supported. Command: Modeler>Coordinate System>Create>Relative CS->Offset @@ -2571,22 +2956,44 @@ def create_relative_coorinate_system_both(self, origin, XAxisVec, YAxisVec: 3-vectors You can also pass in params such as origin = [0,1,0] rather than ["0um","1um","0um"], but these will be interpreted in default units, so it is safer to be explicit. Explicit over implicit. """ - self._modeler.CreateRelativeCS([ - "NAME:RelativeCSParameters", "Mode:=", "Axis/Position", - "OriginX:=", origin[0], "OriginY:=", origin[1], "OriginZ:=", - origin[2], "XAxisXvec:=", XAxisVec[0], "XAxisYvec:=", XAxisVec[1], - "XAxisZvec:=", XAxisVec[2], "YAxisXvec:=", YAxisVec[0], - "YAxisYvec:=", YAxisVec[1], "YAxisZvec:=", YAxisVec[1] - ], ["NAME:Attributes", "Name:=", cs_name]) + self._modeler.CreateRelativeCS( + [ + "NAME:RelativeCSParameters", + "Mode:=", + "Axis/Position", + "OriginX:=", + origin[0], + "OriginY:=", + origin[1], + "OriginZ:=", + origin[2], + "XAxisXvec:=", + XAxisVec[0], + "XAxisYvec:=", + XAxisVec[1], + "XAxisZvec:=", + XAxisVec[2], + "YAxisXvec:=", + YAxisVec[0], + "YAxisYvec:=", + YAxisVec[1], + "YAxisZvec:=", + YAxisVec[1], + ], + ["NAME:Attributes", "Name:=", cs_name], + ) def subtract(self, blank_name, tool_names, keep_originals=False): selection_array = [ - "NAME:Selections", "Blank Parts:=", blank_name, "Tool Parts:=", - ",".join(tool_names) + "NAME:Selections", + "Blank Parts:=", + blank_name, + "Tool Parts:=", + ",".join(tool_names), ] self._modeler.Subtract( - selection_array, - ["NAME:UniteParameters", "KeepOriginals:=", keep_originals]) + selection_array, ["NAME:UniteParameters", "KeepOriginals:=", keep_originals] + ) return blank_name def _fillet(self, radius, vertex_index, obj): @@ -2596,16 +3003,25 @@ def _fillet(self, radius, vertex_index, obj): else: to_fillet = [int(vertices[vertex_index])] - -# print(vertices) -# print(radius) - self._modeler.Fillet(["NAME:Selections", "Selections:=", obj], [ - "NAME:Parameters", + # print(vertices) + # print(radius) + self._modeler.Fillet( + ["NAME:Selections", "Selections:=", obj], [ - "NAME:FilletParameters", "Edges:=", [], "Vertices:=", - to_fillet, "Radius:=", radius, "Setback:=", "0mm" - ] - ]) + "NAME:Parameters", + [ + "NAME:FilletParameters", + "Edges:=", + [], + "Vertices:=", + to_fillet, + "Radius:=", + radius, + "Setback:=", + "0mm", + ], + ], + ) def _fillet_edges(self, radius, edge_index, obj): edges = self._modeler.GetEdgeIDsFromObject(obj) @@ -2614,22 +3030,42 @@ def _fillet_edges(self, radius, edge_index, obj): else: to_fillet = [int(edges[edge_index])] - self._modeler.Fillet(["NAME:Selections", "Selections:=", obj], [ - "NAME:Parameters", + self._modeler.Fillet( + 
["NAME:Selections", "Selections:=", obj], [ - "NAME:FilletParameters", "Edges:=", to_fillet, "Vertices:=", - [], "Radius:=", radius, "Setback:=", "0mm" - ] - ]) + "NAME:Parameters", + [ + "NAME:FilletParameters", + "Edges:=", + to_fillet, + "Vertices:=", + [], + "Radius:=", + radius, + "Setback:=", + "0mm", + ], + ], + ) def _fillets(self, radius, vertices, obj): - self._modeler.Fillet(["NAME:Selections", "Selections:=", obj], [ - "NAME:Parameters", + self._modeler.Fillet( + ["NAME:Selections", "Selections:=", obj], [ - "NAME:FilletParameters", "Edges:=", [], "Vertices:=", vertices, - "Radius:=", radius, "Setback:=", "0mm" - ] - ]) + "NAME:Parameters", + [ + "NAME:FilletParameters", + "Edges:=", + [], + "Vertices:=", + vertices, + "Radius:=", + radius, + "Setback:=", + "0mm", + ], + ], + ) def _sweep_along_path(self, to_sweep, path_obj): """ @@ -2641,49 +3077,73 @@ def _sweep_along_path(self, to_sweep, path_obj): whose length is the desired resulting thickness path_obj (polyline): Original polyline; want to broaden this """ - self.rename_obj(path_obj, str(path_obj) + '_path') + self.rename_obj(path_obj, str(path_obj) + "_path") new_name = self.rename_obj(to_sweep, path_obj) - names = [path_obj, str(path_obj) + '_path'] - self._modeler.SweepAlongPath(self._selections_array(*names), [ - "NAME:PathSweepParameters", "DraftAngle:=", "0deg", "DraftType:=", - "Round", "CheckFaceFaceIntersection:=", False, "TwistAngle:=", - "0deg" - ]) + names = [path_obj, str(path_obj) + "_path"] + self._modeler.SweepAlongPath( + self._selections_array(*names), + [ + "NAME:PathSweepParameters", + "DraftAngle:=", + "0deg", + "DraftType:=", + "Round", + "CheckFaceFaceIntersection:=", + False, + "TwistAngle:=", + "0deg", + ], + ) return Polyline(new_name, self) def sweep_along_vector(self, names, vector): - self._modeler.SweepAlongVector(self._selections_array(*names), [ - "NAME:VectorSweepParameters", "DraftAngle:=", "0deg", - "DraftType:=", "Round", "CheckFaceFaceIntersection:=", False, - "SweepVectorX:=", vector[0], "SweepVectorY:=", vector[1], - "SweepVectorZ:=", vector[2] - ]) + self._modeler.SweepAlongVector( + self._selections_array(*names), + [ + "NAME:VectorSweepParameters", + "DraftAngle:=", + "0deg", + "DraftType:=", + "Round", + "CheckFaceFaceIntersection:=", + False, + "SweepVectorX:=", + vector[0], + "SweepVectorY:=", + vector[1], + "SweepVectorZ:=", + vector[2], + ], + ) def rename_obj(self, obj, name): - self._modeler.ChangeProperty([ - "NAME:AllTabs", + self._modeler.ChangeProperty( [ - "NAME:Geometry3DAttributeTab", ["NAME:PropServers", - str(obj)], - ["NAME:ChangedProps", ["NAME:Name", "Value:=", - str(name)]] + "NAME:AllTabs", + [ + "NAME:Geometry3DAttributeTab", + ["NAME:PropServers", str(obj)], + ["NAME:ChangedProps", ["NAME:Name", "Value:=", str(name)]], + ], ] - ]) + ) return name class ModelEntity(str, HfssPropertyObject): prop_tab = "Geometry3DCmdTab" model_command = None - transparency = make_float_prop("Transparent", - prop_tab="Geometry3DAttributeTab", - prop_server=lambda self: self) - material = make_str_prop("Material", - prop_tab="Geometry3DAttributeTab", - prop_server=lambda self: self) - wireframe = make_float_prop("Display Wireframe", - prop_tab="Geometry3DAttributeTab", - prop_server=lambda self: self) + transparency = make_float_prop( + "Transparent", prop_tab="Geometry3DAttributeTab", prop_server=lambda self: self + ) + material = make_str_prop( + "Material", prop_tab="Geometry3DAttributeTab", prop_server=lambda self: self + ) + wireframe = make_float_prop( + "Display 
Wireframe", + prop_tab="Geometry3DAttributeTab", + prop_server=lambda self: self, + ) coordinate_system = make_str_prop("Coordinate System") def __new__(self, val, *args, **kwargs): @@ -2694,8 +3154,9 @@ def __init__(self, val, modeler): :type val: str :type modeler: HfssModeler """ - super(ModelEntity, - self).__init__() # val) #Comment out keyword to match arguments + super( + ModelEntity, self + ).__init__() # val) #Comment out keyword to match arguments self.modeler = modeler self.prop_server = self + ":" + self.model_command + ":1" @@ -2740,9 +3201,9 @@ def __init__(self, name, modeler, corner, size): self.center = [c + s / 2 if s else c for c, s in zip(corner, size)] def make_center_line(self, axis): - ''' + """ Returns `start` and `end` list of 3 coordinates - ''' + """ axis_idx = ["x", "y", "z"].index(axis.lower()) start = [c for c in self.center] start[axis_idx] -= self.size[axis_idx] / 2 @@ -2754,25 +3215,21 @@ def make_center_line(self, axis): def make_rlc_boundary(self, axis, r=0, l=0, c=0, name="LumpRLC"): start, end = self.make_center_line(axis) - self.modeler._make_lumped_rlc(r, - l, - c, - start, - end, ["Objects:=", [self]], - name=name) + self.modeler._make_lumped_rlc( + r, l, c, start, end, ["Objects:=", [self]], name=name + ) def make_lumped_port(self, axis, z0="50ohm", name="LumpPort"): start, end = self.make_center_line(axis) - self.modeler._make_lumped_port(start, - end, ["Objects:=", [self]], - z0=z0, - name=name) + self.modeler._make_lumped_port( + start, end, ["Objects:=", [self]], z0=z0, name=name + ) class Polyline(ModelEntity): - ''' + """ Assume closed polyline, which creates a polygon. - ''' + """ model_command = "CreatePolyline" @@ -2786,14 +3243,13 @@ def __init__(self, name, modeler, points=None): pass # TODO: points = collection of points + # axis = find_orth_axis() -# axis = find_orth_axis() - -# TODO: find the plane of the polyline for now, assume Z -# def find_orth_axis(): -# X, Y, Z = (True, True, True) -# for point in points: -# X = + # TODO: find the plane of the polyline for now, assume Z + # def find_orth_axis(): + # X, Y, Z = (True, True, True) + # for point in points: + # X = def unite(self, list_other): union = self.modeler.unite(self + list_other) @@ -2807,33 +3263,28 @@ def make_center_line(self, axis): # Expects to act on a rectangle... 
center = [ center[0] + point[0] / self.n_points, center[1] + point[1] / self.n_points, - center[2] + point[2] / self.n_points + center[2] + point[2] / self.n_points, ] size = [ 2 * (center[0] - self.points[0][0]), 2 * (center[1] - self.points[0][1]), - 2 * (center[1] - self.points[0][2]) + 2 * (center[1] - self.points[0][2]), ] axis_idx = ["x", "y", "z"].index(axis.lower()) start = [c for c in center] start[axis_idx] -= size[axis_idx] / 2 - start = [ - self.modeler.eval_var_str(s, unit=LENGTH_UNIT) for s in start - ] # TODO + start = [self.modeler.eval_var_str(s, unit=LENGTH_UNIT) for s in start] # TODO end = [c for c in center] end[axis_idx] += size[axis_idx] / 2 end = [self.modeler.eval_var_str(s, unit=LENGTH_UNIT) for s in end] return start, end def make_rlc_boundary(self, axis, r=0, l=0, c=0, name="LumpRLC"): - name = str(self) + '_' + name + name = str(self) + "_" + name start, end = self.make_center_line(axis) - self.modeler._make_lumped_rlc(r, - l, - c, - start, - end, ["Objects:=", [self]], - name=name) + self.modeler._make_lumped_rlc( + r, l, c, start, end, ["Objects:=", [self]], name=name + ) def fillet(self, radius, vertex_index): self.modeler._fillet(radius, vertex_index, self) @@ -2842,14 +3293,14 @@ def vertices(self): return self.modeler.get_vertex_ids(self) def rename(self, new_name): - ''' - Warning: The increment_name only works if the sheet has not been stracted or used as a tool elsewhere. - These names are not checked; they require modifying get_objects_in_group. + """ + Warning: The increment_name only works if the sheet has not been stracted or used as a tool elsewhere. + These names are not checked; they require modifying get_objects_in_group. - ''' + """ new_name = increment_name( - new_name, self.modeler.get_objects_in_group( - "Sheets")) # this is for a closed polyline + new_name, self.modeler.get_objects_in_group("Sheets") + ) # this is for a closed polyline # check to get the actual new name in case there was a substracted object with that name face_ids = self.modeler.get_face_ids(str(self)) @@ -2861,9 +3312,11 @@ def rename(self, new_name): class OpenPolyline(ModelEntity): # Assume closed polyline model_command = "CreatePolyline" - show_direction = make_prop('Show Direction', - prop_tab="Geometry3DAttributeTab", - prop_server=lambda self: self) + show_direction = make_prop( + "Show Direction", + prop_tab="Geometry3DAttributeTab", + prop_server=lambda self: self, + ) def __init__(self, name, modeler, points=None): super(OpenPolyline, self).__init__(name, modeler) @@ -2874,14 +3327,13 @@ def __init__(self, name, modeler, points=None): else: pass + # axis = find_orth_axis() -# axis = find_orth_axis() - -# TODO: find the plane of the polyline for now, assume Z -# def find_orth_axis(): -# X, Y, Z = (True, True, True) -# for point in points: -# X = + # TODO: find the plane of the polyline for now, assume Z + # def find_orth_axis(): + # X, Y, Z = (True, True, True) + # for point in points: + # X = def vertices(self): return self.modeler.get_vertex_ids(self) @@ -2890,19 +3342,17 @@ def fillet(self, radius, vertex_index): self.modeler._fillet(radius, vertex_index, self) def fillets(self, radius, do_not_fillet=[]): - ''' - do_not_fillet : Index list of vertices to not fillete - ''' + """ + do_not_fillet : Index list of vertices to not fillete + """ raw_list_vertices = self.modeler.get_vertex_ids(self) list_vertices = [] for vertex in raw_list_vertices[1:-1]: # ignore the start and finish list_vertices.append(int(vertex)) list_vertices = list( - map( - int, - 
np.delete(list_vertices, - np.array(do_not_fillet, dtype=int) - 1))) - #print(list_vertices, type(list_vertices[0])) + map(int, np.delete(list_vertices, np.array(do_not_fillet, dtype=int) - 1)) + ) + # print(list_vertices, type(list_vertices[0])) if len(list_vertices) != 0: self.modeler._fillets(radius, list_vertices, self) else: @@ -2912,15 +3362,13 @@ def sweep_along_path(self, to_sweep): return self.modeler._sweep_along_path(to_sweep, self) def rename(self, new_name): - ''' - Warning: The increment_name only works if the sheet has not been stracted or used as a tool elsewher. - These names are not checked - They require modifying get_objects_in_group - ''' - new_name = increment_name(new_name, - self.modeler.get_objects_in_group("Lines")) + """ + Warning: The increment_name only works if the sheet has not been stracted or used as a tool elsewher. + These names are not checked - They require modifying get_objects_in_group + """ + new_name = increment_name(new_name, self.modeler.get_objects_in_group("Lines")) # , self.points) - return OpenPolyline(self.modeler.rename_obj(self, new_name), - self.modeler) + return OpenPolyline(self.modeler.rename_obj(self, new_name), self.modeler) def copy(self, new_name): new_obj = OpenPolyline(self.modeler.copy(self), self.modeler) @@ -2949,14 +3397,13 @@ def __init__(self, setup): self.ComplexMag_Jvol = NamedCalcObject("ComplexMag_Jvol", setup) self.P_J = NamedCalcObject("P_J", setup) - self.named_expression = { - } # dictionary to hold additional named expressions + self.named_expression = {} # dictionary to hold additional named expressions def clear_named_expressions(self): self.parent.parent._fields_calc.ClearAllNamedExpr() def declare_named_expression(self, name): - """" + """ " If a named expression has been created in the fields calculator, this function can be called to initialize the name to work with the fields object """ @@ -3082,43 +3529,47 @@ def integrate_line(self, name): return self._integrate(name, "EnterLine") def normal2surface(self, name): - ''' return the part normal to surface. - Complex Vector. ''' - stack = self.stack + [("EnterSurf", name), - ("CalcOp", "Normal")] + """return the part normal to surface. + Complex Vector.""" + stack = self.stack + [("EnterSurf", name), ("CalcOp", "Normal")] stack.append(("CalcOp", "Dot")) stack.append(("EnterSurf", name)) - stack.append(("CalcOp", "Normal")) + stack.append(("CalcOp", "Normal")) stack.append(("CalcOp", "*")) return CalcObject(stack, self.setup) def tangent2surface(self, name): - ''' return the part tangent to surface. - Complex Vector. ''' - stack = self.stack + [("EnterSurf", name), - ("CalcOp", "Normal")] + """return the part tangent to surface. 
+ Complex Vector.""" + stack = self.stack + [("EnterSurf", name), ("CalcOp", "Normal")] stack.append(("CalcOp", "Dot")) stack.append(("EnterSurf", name)) - stack.append(("CalcOp", "Normal")) + stack.append(("CalcOp", "Normal")) stack.append(("CalcOp", "*")) stack = self.stack + stack stack.append(("CalcOp", "-")) return CalcObject(stack, self.setup) def integrate_line_tangent(self, name): - ''' integrate line tangent to vector expression \n - name = of line to integrate over ''' - self.stack = self.stack + [("EnterLine", name), ("CalcOp", "Tangent"), - ("CalcOp", "Dot")] + """integrate line tangent to vector expression \n + name = of line to integrate over""" + self.stack = self.stack + [ + ("EnterLine", name), + ("CalcOp", "Tangent"), + ("CalcOp", "Dot"), + ] return self.integrate_line(name) def line_tangent_coor(self, name, coordinate): - ''' integrate line tangent to vector expression \n - name = of line to integrate over ''' - if coordinate not in ['X', 'Y', 'Z']: + """integrate line tangent to vector expression \n + name = of line to integrate over""" + if coordinate not in ["X", "Y", "Z"]: raise ValueError - self.stack = self.stack + [("EnterLine", name), ("CalcOp", "Tangent"), - ("CalcOp", "Scalar" + coordinate)] + self.stack = self.stack + [ + ("EnterLine", name), + ("CalcOp", "Tangent"), + ("CalcOp", "Scalar" + coordinate), + ] return self.integrate_line(name) def integrate_surf(self, name="AllObjects"): @@ -3127,8 +3578,8 @@ def integrate_surf(self, name="AllObjects"): def integrate_vol(self, name="AllObjects"): return self._integrate(name, "EnterVol") - def maximum_vol(self, name='AllObjects'): - return self._maximum(name, 'EnterVol') + def maximum_vol(self, name="AllObjects"): + return self._maximum(name, "EnterVol") def times_eps(self): stack = self.stack + [("ClcMaterial", ("Permittivity (epsi)", "mult"))] @@ -3140,7 +3591,7 @@ def times_mu(self): def write_stack(self): for fn, arg in self.stack: - if np.size(arg) > 1 and fn not in ['EnterVector']: + if np.size(arg) > 1 and fn not in ["EnterVector"]: getattr(self.calc_module, fn)(*arg) else: getattr(self.calc_module, fn)(arg) @@ -3155,10 +3606,10 @@ def save_as(self, name): def evaluate(self, phase=0, lv=None, print_debug=False): # , n_mode=1): self.write_stack() if print_debug: - print('---------------------') - print('writing to stack: OK') - print('-----------------') - #self.calc_module.set_mode(n_mode, 0) + print("---------------------") + print("writing to stack: OK") + print("-----------------") + # self.calc_module.set_mode(n_mode, 0) setup_name = self.setup.solution_name if lv is not None: @@ -3196,20 +3647,23 @@ def __init__(self, vec, setup): def get_active_project(): - ''' If you see the error: - "The requested operation requires elevation." - then you need to run your python as an admin. - ''' + """If you see the error: + "The requested operation requires elevation." + then you need to run your python as an admin. + """ import ctypes import os + try: is_admin = os.getuid() == 0 except AttributeError: is_admin = ctypes.windll.shell32.IsUserAnAdmin() != 0 if not is_admin: - print('\033[93m WARNING: you are not running as an admin! \ + print( + "\033[93m WARNING: you are not running as an admin! \ You need to run as an admin. 
You will probably get an error next.\ - \033[0m') + \033[0m" + ) app = HfssApp() desktop = app.get_app_desktop() @@ -3227,37 +3681,40 @@ def get_report_arrays(name: str): return r.get_arrays() -def load_ansys_project(proj_name: str, - project_path: str = None, - extension: str = '.aedt'): - ''' +def load_ansys_project( + proj_name: str, project_path: str = None, extension: str = ".aedt" +): + """ Utility function to load an Ansys project. Args: proj_name : None --> get active. (make sure 2 run as admin) extension : `aedt` is for 2016 version and newer - ''' + """ if project_path: # convert slashes correctly for system project_path = Path(project_path) # Checks - assert project_path.is_dir( + assert ( + project_path.is_dir() ), "ERROR! project_path is not a valid directory \N{loudly crying face}.\ Check the path, and especially \\ characters." project_path = Path(project_path, proj_name).with_suffix(extension) if (project_path).is_file(): - logger.info('\tFile path to HFSS project found.') + logger.info("\tFile path to HFSS project found.") else: raise Exception( "ERROR! Valid directory, but invalid project filename. \N{loudly crying face} Not found!\ - Please check your filename.\n%s\n" % project_path) + Please check your filename.\n%s\n" + % project_path + ) - if (project_path / '.lock').is_file(): + if (project_path / ".lock").is_file(): logger.warning( - '\t\tFile is locked. \N{fearful face} If connection fails, delete the .lock file.' + "\t\tFile is locked. \N{fearful face} If connection fails, delete the .lock file." ) app = HfssApp() @@ -3265,7 +3722,7 @@ def load_ansys_project(proj_name: str, desktop = app.get_app_desktop() logger.info(f"\tOpened Ansys Desktop v{desktop.get_version()}") - #logger.debug(f"\tOpen projects: {desktop.get_project_names()}") + # logger.debug(f"\tOpen projects: {desktop.get_project_names()}") if proj_name is not None: if proj_name in desktop.get_project_names(): diff --git a/pyEPR/calcs/back_box_numeric.py b/pyEPR/calcs/back_box_numeric.py index 22aeb9d..8fab76f 100644 --- a/pyEPR/calcs/back_box_numeric.py +++ b/pyEPR/calcs/back_box_numeric.py @@ -1,4 +1,4 @@ -''' +""" Numerical diagonalization of quantum Hamiltonian and parameter extraction. @@ -6,7 +6,7 @@ Original code on black_box_hamiltonian and make_dispersive functions by Phil Reinhold Revisions and updates by Zlatko Minev & Lysander Christakis -''' +""" # pylint: disable=invalid-name @@ -26,10 +26,12 @@ except (ImportError, ModuleNotFoundError): pass -__all__ = [ 'epr_numerical_diagonalization', - 'make_dispersive', - 'black_box_hamiltonian', - 'black_box_hamiltonian_nq'] +__all__ = [ + "epr_numerical_diagonalization", + "make_dispersive", + "black_box_hamiltonian", + "black_box_hamiltonian_nq", +] dot = MatrixOps.dot cos_approx = MatrixOps.cos_approx @@ -39,13 +41,18 @@ # ANALYSIS FUNCTIONS # ============================================================================== -def epr_numerical_diagonalization(freqs, Ljs, ϕzpf, - cos_trunc=8, - fock_trunc=9, - use_1st_order=False, - return_H=False, - non_linear_potential=None): - ''' + +def epr_numerical_diagonalization( + freqs, + Ljs, + ϕzpf, + cos_trunc=8, + fock_trunc=9, + use_1st_order=False, + return_H=False, + non_linear_potential=None, +): + """ Numerical diagonalization for pyEPR. Ask Zlatko for details. :param fs: (GHz, not radians) Linearized model, H_lin, normal mode frequencies in Hz, length M @@ -55,28 +62,42 @@ def epr_numerical_diagonalization(freqs, Ljs, ϕzpf, :return: Hamiltonian mode freq and dispersive shifts. Shifts are in MHz. 
Shifts have flipped sign so that down shift is positive. - ''' + """ freqs, Ljs, ϕzpf = map(np.array, (freqs, Ljs, ϕzpf)) - assert(all(freqs < 1E6) - ), "Please input the frequencies in GHz. \N{nauseated face}" - assert(all(Ljs < 1E-3) - ), "Please input the inductances in Henries. \N{nauseated face}" - - Hs = black_box_hamiltonian(freqs * 1E9, Ljs.astype(float), fluxQ*ϕzpf, - cos_trunc, fock_trunc, individual=use_1st_order, - non_linear_potential = non_linear_potential) + assert all(freqs < 1e6), "Please input the frequencies in GHz. \N{nauseated face}" + assert all( + Ljs < 1e-3 + ), "Please input the inductances in Henries. \N{nauseated face}" + + Hs = black_box_hamiltonian( + freqs * 1e9, + Ljs.astype(float), + fluxQ * ϕzpf, + cos_trunc, + fock_trunc, + individual=use_1st_order, + non_linear_potential=non_linear_potential, + ) f_ND, χ_ND, _, _ = make_dispersive( - Hs, fock_trunc, ϕzpf, freqs, use_1st_order=use_1st_order) - χ_ND = -1*χ_ND * 1E-6 # convert to MHz, and flip sign so that down shift is positive + Hs, fock_trunc, ϕzpf, freqs, use_1st_order=use_1st_order + ) + χ_ND = ( + -1 * χ_ND * 1e-6 + ) # convert to MHz, and flip sign so that down shift is positive return (f_ND, χ_ND, Hs) if return_H else (f_ND, χ_ND) - - -def black_box_hamiltonian(fs, ljs, fzpfs, cos_trunc=5, fock_trunc=8, individual=False, - non_linear_potential = None): +def black_box_hamiltonian( + fs, + ljs, + fzpfs, + cos_trunc=5, + fock_trunc=8, + individual=False, + non_linear_potential=None, +): r""" :param fs: Linearized model, H_lin, normal mode frequencies in Hz, length N :param ljs: junction linearized inductances in Henries, length M @@ -97,14 +118,15 @@ def black_box_hamiltonian(fs, ljs, fzpfs, cos_trunc=5, fock_trunc=8, individual= fzpfs = np.transpose(fzpfs) # Take from MxJ to JxM - assert np.isnan(fzpfs).any( - ) == False, "Phi ZPF has NAN, this is NOT allowed! Fix me. \n%s" % fzpfs - assert np.isnan(ljs).any( - ) == False, "Ljs has NAN, this is NOT allowed! Fix me." - assert np.isnan( - fs).any() == False, "freqs has NAN, this is NOT allowed! Fix me." - assert fzpfs.shape == (njuncs, n_modes), "incorrect shape for zpf array, {} not {}".format( - fzpfs.shape, (njuncs, n_modes)) + assert np.isnan(fzpfs).any() == False, ( + "Phi ZPF has NAN, this is NOT allowed! Fix me. \n%s" % fzpfs + ) + assert np.isnan(ljs).any() == False, "Ljs has NAN, this is NOT allowed! Fix me." + assert np.isnan(fs).any() == False, "freqs has NAN, this is NOT allowed! Fix me." + assert fzpfs.shape == ( + njuncs, + n_modes, + ), "incorrect shape for zpf array, {} not {}".format(fzpfs.shape, (njuncs, n_modes)) assert fs.shape == (n_modes,), "incorrect number of mode frequencies" assert ejs.shape == (njuncs,), "incorrect number of qubit frequencies" @@ -122,22 +144,25 @@ def tensor_out(op, loc): def cos(x): return cos_approx(x, cos_trunc=cos_trunc) - + if non_linear_potential is None: non_linear_potential = cos linear_part = dot(fs, mode_ns) - cos_interiors = [dot(fzpf_row/fluxQ, mode_fields) for fzpf_row in fzpfs] + cos_interiors = [dot(fzpf_row / fluxQ, mode_fields) for fzpf_row in fzpfs] nonlinear_part = dot(-fjs, map(non_linear_potential, cos_interiors)) if individual: return linear_part, nonlinear_part else: return linear_part + nonlinear_part + bbq_hmt = black_box_hamiltonian -def make_dispersive(H, fock_trunc, fzpfs=None, f0s=None, chi_prime=False, - use_1st_order=False): + +def make_dispersive( + H, fock_trunc, fzpfs=None, f0s=None, chi_prime=False, use_1st_order=False +): r""" Input: Hamiltonian Matrix. 
    Optional: phi_zpfs and normal mode frequencies, f0s.
@@ -149,14 +174,17 @@ def make_dispersive(H, fock_trunc, fzpfs=None, f0s=None, chi_prime=False,
     Based on the assignment of the excitations, the function returns the dressed mode
     frequencies :math:`\omega_m^\prime`, and the cross-Kerr matrix (including anharmonicities)
     extracted from the numerical diagonalization, as well as from 1st order perturbation theory.
     Note, the diagonal of the CHI matrix is directly the anharmonicity term.
     """
-    if hasattr(H, '__len__'):  # is it an array / list?
+    if hasattr(H, "__len__"):  # is it an array / list?
         [H_lin, H_nl] = H
         H = H_lin + H_nl
-    else:  # make sure its a quanutm object
+    else:  # make sure it's a quantum object
         from qutip import Qobj
+
-        if not isinstance(H, Qobj): # Validate that the input is a Qobj instance.
-            raise TypeError("Please pass in either a list of Qobjs or a Qobj for the Hamiltonian")
-        #assert type(
+        if not isinstance(H, Qobj):  # Validate that the input is a Qobj instance.
+            raise TypeError(
+                "Please pass in either a list of Qobjs or a Qobj for the Hamiltonian"
+            )
+        # assert type(
         #     H) == qutip.qobj.Qobj, "Please pass in either a list of Qobjs or Qobj for the Hamiltonian"

     print("Starting the diagonalization")
@@ -164,12 +192,14 @@ def make_dispersive(H, fock_trunc, fzpfs=None, f0s=None, chi_prime=False,
     print("Finished the diagonalization")
     evals -= evals[0]

-    N = int(np.log(H.shape[0]) / np.log(fock_trunc))  # number of modes
-    assert H.shape[0] == fock_trunc ** N
+    N = int(np.log(H.shape[0]) / np.log(fock_trunc))  # number of modes
+    assert H.shape[0] == fock_trunc**N

     def fock_state_on(d):
-        ''' d={mode number: # of photons} '''
-        return qutip.tensor(*[qutip.basis(fock_trunc, d.get(i, 0)) for i in range(N)])  # give me the value d[i] or 0 if d[i] does not exist
+        """d={mode number: # of photons}"""
+        return qutip.tensor(
+            *[qutip.basis(fock_trunc, d.get(i, 0)) for i in range(N)]
+        )  # give me the value d[i] or 0 if d[i] does not exist

     if use_1st_order:
         num_modes = N
@@ -177,69 +207,82 @@ def fock_state_on(d):

         def multi_index_2_vector(d, num_modes, fock_trunc):
             return tensor([basis(fock_trunc, d.get(i, 0)) for i in range(num_modes)])
-        '''this function creates a vector representation a given fock state given the data for excitations per
-        mode of the form d={mode number: # of photons}'''
+        """this function creates a vector representation of a given Fock state given the data for excitations per
+        mode of the form d={mode number: # of photons}"""

         def find_multi_indices(fock_trunc):
-            multi_indices = [{ind: item for ind, item in enumerate([i, j, k])} for i in range(fock_trunc)
-                             for j in range(fock_trunc)
-                             for k in range(fock_trunc)]
+            multi_indices = [
+                {ind: item for ind, item in enumerate([i, j, k])}
+                for i in range(fock_trunc)
+                for j in range(fock_trunc)
+                for k in range(fock_trunc)
+            ]
             return multi_indices
-        '''this function generates all possible multi-indices for three modes for a given fock_trunc'''
+        """this function generates all possible multi-indices for three modes for a given fock_trunc"""

         def get_expect_number(left, middle, right):
-            return (left.dag()*middle*right).data.toarray()[0, 0]
-        '''this function calculates the expectation value of an operator called "middle" '''
+            return (left.dag() * middle * right).data.toarray()[0, 0]
+        """this function calculates the expectation value of an operator called "middle" """

         def get_basis0(fock_trunc, num_modes):
             multi_indices = find_multi_indices(fock_trunc)
-            basis0 = [multi_index_2_vector(
-                multi_indices[i], num_modes, fock_trunc) for i in
range(len(multi_indices))] + basis0 = [ + multi_index_2_vector(multi_indices[i], num_modes, fock_trunc) + for i in range(len(multi_indices)) + ] evalues0 = [get_expect_number(v0, H_lin, v0).real for v0 in basis0] return multi_indices, basis0, evalues0 - '''this function creates a basis of fock states and their corresponding eigenvalues''' + """this function creates a basis of fock states and their corresponding eigenvalues""" def closest_state_to(vector0): - def PT_on_vector(original_vector, original_basis, pertub, energy0, evalue): new_vector = 0 * original_vector for i in range(len(original_basis)): - if (energy0[i]-evalue) > 1e-3: - new_vector += ((original_basis[i].dag()*H_nl*original_vector).data.toarray()[ - 0, 0])*original_basis[i]/(evalue-energy0[i]) + if (energy0[i] - evalue) > 1e-3: + new_vector += ( + ( + ( + original_basis[i].dag() * H_nl * original_vector + ).data.toarray()[0, 0] + ) + * original_basis[i] + / (evalue - energy0[i]) + ) else: pass - return (new_vector + original_vector)/(new_vector + original_vector).norm() - '''this function calculates the normalized vector with the first order correction term - from the non-linear hamiltonian ''' + return (new_vector + original_vector) / ( + new_vector + original_vector + ).norm() + """this function calculates the normalized vector with the first order correction term + from the non-linear hamiltonian """ - [multi_indices, basis0, evalues0] = get_basis0( - fock_trunc, num_modes) + [multi_indices, basis0, evalues0] = get_basis0(fock_trunc, num_modes) evalue0 = get_expect_number(vector0, H_lin, vector0) vector1 = PT_on_vector(vector0, basis0, H_nl, evalues0, evalue0) - index = np.argmax([(vector1.dag() * evec).norm() - for evec in evecs]) + index = np.argmax([(vector1.dag() * evec).norm() for evec in evecs]) return evals[index], evecs[index] else: + def closest_state_to(s): def distance(s2): return (s.dag() * s2[1]).norm() + return max(zip(evals, evecs), key=distance) f1s = [closest_state_to(fock_state_on({i: 1}))[0] for i in range(N)] - chis = [[0]*N for _ in range(N)] - chips = [[0]*N for _ in range(N)] + chis = [[0] * N for _ in range(N)] + chips = [[0] * N for _ in range(N)] for i in range(N): for j in range(i, N): - d = {k: 0 for k in range(N)} # put 0 photons in each mode (k) + d = {k: 0 for k in range(N)} # put 0 photons in each mode (k) d[i] += 1 d[j] += 1 # load ith mode and jth mode with 1 photon fs = fock_state_on(d) ev, evec = closest_state_to(fs) - chi = (ev - (f1s[i] + f1s[j])) + chi = ev - (f1s[i] + f1s[j]) chis[i][j] = chi chis[j][i] = chi @@ -247,17 +290,25 @@ def distance(s2): d[j] += 1 fs = fock_state_on(d) ev, evec = closest_state_to(fs) - chip = (ev - (f1s[i] + 2*f1s[j]) - 2 * chis[i][j]) + chip = ev - (f1s[i] + 2 * f1s[j]) - 2 * chis[i][j] chips[i][j] = chip chips[j][i] = chip if chi_prime: - return np.array(f1s), np.array(chis), np.array(chips), np.array(fzpfs), np.array(f0s) + return ( + np.array(f1s), + np.array(chis), + np.array(chips), + np.array(fzpfs), + np.array(f0s), + ) else: return np.array(f1s), np.array(chis), np.array(fzpfs), np.array(f0s) -def black_box_hamiltonian_nq(freqs, zmat, ljs, cos_trunc=6, fock_trunc=8, show_fit=False): +def black_box_hamiltonian_nq( + freqs, zmat, ljs, cos_trunc=6, fock_trunc=8, show_fit=False +): """ N-Qubit version of bbq, based on the full Z-matrix Currently reproduces 1-qubit data, untested on n-qubit data @@ -267,7 +318,7 @@ def black_box_hamiltonian_nq(freqs, zmat, ljs, cos_trunc=6, fock_trunc=8, show_f nj = len(ljs) assert zmat.shape == (nf, nj, nj) - imY = 
(1/zmat[:, 0, 0]).imag + imY = (1 / zmat[:, 0, 0]).imag # zeros where the sign changes from negative to positive (zeros,) = np.where((imY[:-1] <= 0) & (imY[1:] > 0)) @@ -277,25 +328,36 @@ def black_box_hamiltonian_nq(freqs, zmat, ljs, cos_trunc=6, fock_trunc=8, show_f f0s = np.zeros(nz) slopes = np.zeros((nj, nz)) import matplotlib.pyplot as plt + # Fit a second order polynomial in the region around the zero # Extract the exact location of the zero and the associated slope # If you need better than second order fit, you're not sampling finely enough for i, z in enumerate(zeros): - f0_guess = (freqs[z+1] + freqs[z]) / 2 + f0_guess = (freqs[z + 1] + freqs[z]) / 2 zero_polys = np.polyfit( - freqs[z-1:z+3] - f0_guess, imYs[:, z-1:z+3].transpose(), 2) + freqs[z - 1 : z + 3] - f0_guess, imYs[:, z - 1 : z + 3].transpose(), 2 + ) zero_polys = zero_polys.transpose() - f0s[i] = f0 = min(np.roots(zero_polys[0]), - key=lambda r: abs(r)) + f0_guess + f0s[i] = f0 = min(np.roots(zero_polys[0]), key=lambda r: abs(r)) + f0_guess for j, p in enumerate(zero_polys): slopes[j, i] = np.polyval(np.polyder(p), f0 - f0_guess) if show_fit: - plt.plot(freqs[z-1:z+3] - f0_guess, imYs[:, z-1:z + - 3].transpose(), lw=1, ls='--', marker='o', label=str(f0)) + plt.plot( + freqs[z - 1 : z + 3] - f0_guess, + imYs[:, z - 1 : z + 3].transpose(), + lw=1, + ls="--", + marker="o", + label=str(f0), + ) p = np.poly1d(zero_polys[0, :]) p2 = np.poly1d(zero_polys[1, :]) - plt.plot(freqs[z-1:z+3] - f0_guess, p(freqs[z-1:z+3] - f0_guess)) - plt.plot(freqs[z-1:z+3] - f0_guess, p2(freqs[z-1:z+3] - f0_guess)) + plt.plot( + freqs[z - 1 : z + 3] - f0_guess, p(freqs[z - 1 : z + 3] - f0_guess) + ) + plt.plot( + freqs[z - 1 : z + 3] - f0_guess, p2(freqs[z - 1 : z + 3] - f0_guess) + ) plt.legend(loc=0) zeffs = 2 / (slopes * f0s[np.newaxis, :]) @@ -306,4 +368,5 @@ def black_box_hamiltonian_nq(freqs, zmat, ljs, cos_trunc=6, fock_trunc=8, show_f H = black_box_hamiltonian(f0s, ljs, fzpfs, cos_trunc, fock_trunc) return make_dispersive(H, fock_trunc, fzpfs, f0s) + black_box_hamiltonian_nq = black_box_hamiltonian_nq diff --git a/pyEPR/calcs/basic.py b/pyEPR/calcs/basic.py index 4734935..6dd07ce 100644 --- a/pyEPR/calcs/basic.py +++ b/pyEPR/calcs/basic.py @@ -6,11 +6,11 @@ from numpy import sqrt from .. import logger -class CalcsBasic(): +class CalcsBasic: @staticmethod def epr_to_zpf(Pmj, SJ, Ω, EJ): - r''' + r""" Arguments, All as matrices (numpy arrays): :Pnj: MxJ energy-participation-ratio matrix, p_mj :SJ: MxJ sign matrix, s_mj @@ -19,16 +19,18 @@ def epr_to_zpf(Pmj, SJ, Ω, EJ): RETURNS: reduced zpf (in units of :math:`\phi_0`) - ''' + """ (Pmj, SJ, Ω, EJ) = map(np.array, (Pmj, SJ, Ω, EJ)) if (Pmj < 0).any(): - print('BAD!') - logger.error(f"""The simulation is not converged!!! \N{nauseated face} + print("BAD!") + logger.error( + f"""The simulation is not converged!!! \N{nauseated face} Some of the energy participations are less than zero. This happens when some participations are tiny 10^-8 or less or when not enough passes have been taken. The Pmj matrix is - {Pmj}""") + {Pmj}""" + ) # Technically, there the equation is hbar omega / 2J, but here we assume # that the hbar is absorbed in the units of omega, and omega and Ej have the same units. @@ -45,4 +47,4 @@ def epr_cap_to_nzpf(Pmj_cap, SJ, Ω, Ec): Experimental. 
    To be tested
    """
    (Pmj, SJ, Ω, EJ) = map(np.array, (Pmj_cap, SJ, Ω, Ec))
-    return SJ * sqrt(Ω @ Pmj @ np.linalg.inv(Ec) /(4*4))
+    return SJ * sqrt(Ω @ Pmj @ np.linalg.inv(Ec) / (4 * 4))
diff --git a/pyEPR/calcs/constants.py b/pyEPR/calcs/constants.py
index 211efb7..3d91d85 100644
--- a/pyEPR/calcs/constants.py
+++ b/pyEPR/calcs/constants.py
@@ -5,16 +5,21 @@
 """
 # pylint: disable=invalid-name

-from scipy.constants import Planck, elementary_charge, epsilon_0, pi  # pylint: disable=unused-import
+from scipy.constants import (
+    Planck,
+    elementary_charge,
+    epsilon_0,
+    pi,
+)  # pylint: disable=unused-import

 # Pi
 π = pi

 # Reduced Planck constant
-ħ = hbar = Planck/(2*pi)
+ħ = hbar = Planck / (2 * pi)

 # Reduced Flux Quantum (3.29105976 × 10^-16 Webers)
-ϕ0 = fluxQ = ħ / (2*elementary_charge)
+ϕ0 = fluxQ = ħ / (2 * elementary_charge)

 # Magnitude of the electric charge carried by a single electron
 e_el = elementary_charge
diff --git a/pyEPR/calcs/convert.py b/pyEPR/calcs/convert.py
index 29f8b7c..614019a 100644
--- a/pyEPR/calcs/convert.py
+++ b/pyEPR/calcs/convert.py
@@ -8,63 +8,67 @@
 @author: Zlatko Minev
 """

-from __future__ import (absolute_import,  # Python 2.7 and 3 compatibility
-                        division, print_function)
+from __future__ import (
+    absolute_import,  # Python 2.7 and 3 compatibility
+    division,
+    print_function,
+)

 import numpy as np
 import pandas as pd
 from numpy import sqrt

 from .basic import CalcsBasic
-from .constants import (Planck, e_el, elementary_charge, fluxQ, hbar, pi, ħ, π,
-                        ϕ0)
+from .constants import Planck, e_el, elementary_charge, fluxQ, hbar, pi, ħ, π, ϕ0


-class Convert():
-    '''
-    Static container class for conversions of units and variables.
+class Convert:
+    """
+    Static container class for conversions of units and variables.

-    TEST CONVERSION:
+    TEST CONVERSION:

-    .. code-block:: python
+    .. code-block:: python
+
+        from pyEPR.toolbox.conversions import Convert
+
+        Lj_nH, Cs_fF = 11, 60
+        Convert.transmon_print_all_params(Lj_nH, Cs_fF);
+    """

-        from pyEPR.toolbox.conversions import Convert
-
-        Lj_nH, Cs_fF = 11, 60
-        Convert.transmon_print_all_params(Lj_nH, Cs_fF);
-    '''

     # Known SI prefixes
-    _prefix = {'y': -24,  # yocto
-               'z': -21,  # zepto
-               'a': -18,  # atto
-               'f': -15,  # femto
-               'p': -12,  # pico
-               'n': -9,  # nano
-               'u': -6,  # micro
-               'm': -3,  # mili
-               'c': -2,  # centi
-               'd': -1,  # deci
-               ' ': 0,
-               'k': 3,  # kilo
-               'M': 6,  # mega
-               'G': 9,  # giga
-               'T': 12,  # tera
-               'P': 15,  # peta
-               'E': 18,  # exa
-               'Z': 21,  # zetta
-               'Y': 24,  # yotta
-               }
+    _prefix = {
+        "y": -24,  # yocto
+        "z": -21,  # zepto
+        "a": -18,  # atto
+        "f": -15,  # femto
+        "p": -12,  # pico
+        "n": -9,  # nano
+        "u": -6,  # micro
+        "m": -3,  # milli
+        "c": -2,  # centi
+        "d": -1,  # deci
+        " ": 0,
+        "k": 3,  # kilo
+        "M": 6,  # mega
+        "G": 9,  # giga
+        "T": 12,  # tera
+        "P": 15,  # peta
+        "E": 18,  # exa
+        "Z": 21,  # zetta
+        "Y": 24,  # yotta
+    }

     # Known SI units
-    _SI_units = ['H',  # Henries
-                 'F',  # Farads
-                 'Hz',  # Hertz
-                 'Ohm',  # Ohms
-                 'Ω',  # Ohms
-                 'Wb'  # Webers
-                 'J',  # Joules
-                 'A'  # Amps
-                 ]
+    _SI_units = [
+        "H",  # Henries
+        "F",  # Farads
+        "Hz",  # Hertz
+        "Ohm",  # Ohms
+        "Ω",  # Ohms
+        "Wb",  # Webers
+        "J",  # Joules
+        "A",  # Amps
+    ]

     @staticmethod
     def toSI(number, from_units: str):
@@ -74,9 +78,9 @@
         then the unit is assumed to be
         """
         if from_units in Convert._SI_units:
-            from_units = ' '
+            from_units = " "
         # else: we assume that the first letter is a prefix
-        return number*(10**Convert._prefix.get(from_units[0]))
+        return number * (10 ** Convert._prefix.get(from_units[0]))

     @staticmethod
     def fromSI(number, from_units: str):
@@ -90,13 +94,13 @@ def fromSI(number, from_units: str):
             numeric number, with units expanded
         """
         if from_units in Convert._SI_units:
-            from_units = ' '
+            from_units = " "
         # else: we assume that the first letter is a prefix
-        return number*(10**(-Convert._prefix.get(from_units[0])))
+        return number * (10 ** (-Convert._prefix.get(from_units[0])))

     @staticmethod
     def _convert_num(out_func, in_num, in_units, out_units):
-        in_num = 1.0*in_num  # to float
+        in_num = 1.0 * in_num  # to float
         # convert units of input number
         in_num = Convert.toSI(in_num, in_units)
         out_num = out_func(in_num)  # Assume func processes all in SI units
@@ -104,92 +108,105 @@ def _convert_num(out_func, in_num, in_units, out_units):
         return out_num

     @staticmethod
-    def Ej_from_Lj(Lj, units_in='nH', units_out='MHz'):
-        r'''
+    def Ej_from_Lj(Lj, units_in="nH", units_out="MHz"):
+        r"""
         Josephson Junction energy from Josephson inductance.
         Returns in MHz

         :math:`E_j = \phi_0^2 / L_J`
-        '''
+        """
         return Convert._convert_num(
             # Planck to go from Joules to Hz
-            lambda _Lj: Planck**-1 * (ϕ0**2)/_Lj,
-            Lj, units_in, units_out)
+            lambda _Lj: Planck**-1 * (ϕ0**2) / _Lj,
+            Lj,
+            units_in,
+            units_out,
+        )

     @staticmethod
-    def Lj_from_Ej(Ej, units_in='MHz', units_out='nH'):
-        r'''
+    def Lj_from_Ej(Ej, units_in="MHz", units_out="nH"):
+        r"""
         Josephson Junction inductance from Josephson energy in MHz.
        Returns in units of nano Henries by default

         :math:`E_j = \phi_0^2 / L_J`
-        '''
+        """
         return Convert._convert_num(
-            lambda _x: (ϕ0**2.)/(_x*Planck),  # Plank to go from Joules to Hz
-            Ej, units_in, units_out)
+            lambda _x: (ϕ0**2.0) / (_x * Planck),  # Planck to go from Joules to Hz
+            Ej,
+            units_in,
+            units_out,
+        )

     @staticmethod
-    def Ic_from_Lj(Lj, units_in='nH', units_out='nA'):
-        r'''
+    def Ic_from_Lj(Lj, units_in="nH", units_out="nA"):
+        r"""
         Josephson Junction crit. curr from Josephson inductance.

         :math:`E_j = \phi_0^2 / L_J = \phi_0 I_C`
-        '''
+        """
         return Convert._convert_num(
-            lambda _x: ϕ0/_x,  # Plank to go from Joules to Hz
-            Lj, units_in, units_out)
+            lambda _x: ϕ0 / _x, Lj, units_in, units_out  # I_c = ϕ0 / L_J
+        )

     @staticmethod
-    def Lj_from_Ic(Lj, units_in='nA', units_out='nH'):
-        r'''
+    def Lj_from_Ic(Lj, units_in="nA", units_out="nH"):
+        r"""
         Josephson Junction inductance from critical current.

         :math:`E_j = \phi_0^2 / L_J = \phi_0 I_C`
-        '''
+        """
         return Convert._convert_num(
-            lambda _x: ϕ0/_x,  # Plank to go from Joules to Hz
-            Lj, units_in, units_out)
+            lambda _x: ϕ0 / _x, Lj, units_in, units_out  # L_J = ϕ0 / I_c
+        )

     @staticmethod
-    def Ec_from_Cs(Cs, units_in='fF', units_out='MHz'):
-        r'''
+    def Ec_from_Cs(Cs, units_in="fF", units_out="MHz"):
+        r"""
         Charging energy :math:`4E_c n^2`, where :math:`n=Q/2e`
         Returns in MHz

         :math:`E_{C}=\frac{e^{2}}{2C}J`
-        '''
+        """
         return Convert._convert_num(
             # Planck to go from Joules to Hz
-            lambda _x: Planck**-1 * (e_el**2.)/(2.*_x),
-            Cs, units_in, units_out)
+            lambda _x: Planck**-1 * (e_el**2.0) / (2.0 * _x),
+            Cs,
+            units_in,
+            units_out,
+        )

     @staticmethod
-    def Cs_from_Ec(Ec, units_in='MHz', units_out='fF'):
-        r'''
+    def Cs_from_Ec(Ec, units_in="MHz", units_out="fF"):
+        r"""
         Charging energy :math:`4E_c n^2`, where :math:`n=Q/2e`
         Returns in SI units, in Farads.

         :math:`E_{C}=\frac{e^{2}}{2C}J`
-        '''
+        """
         return Convert._convert_num(
             # Planck to go from Joules to Hz
-            lambda _x: (e_el**2.)/(2.*_x*Planck),
-            Ec, units_in, units_out)
+            lambda _x: (e_el**2.0) / (2.0 * _x * Planck),
+            Ec,
+            units_in,
+            units_out,
+        )

     @staticmethod
     def ZPF_from_LC(L, C):
-        r'''
+        r"""
         Input units assumed to be identical

         Returns Phi_ZPF and Q_ZPF in SI units (not reduced units)
-        '''
-        Z = sqrt(L/C)
-        return (sqrt(hbar*Z/2.), sqrt(hbar/(2.*Z)))  # Phi , Q
+        """
+        Z = sqrt(L / C)
+        return (sqrt(hbar * Z / 2.0), sqrt(hbar / (2.0 * Z)))  # Phi , Q

     @staticmethod
-    def ZPF_from_EPR(hfss_freqs, hfss_epr_, hfss_signs, hfss_Ljs,
-                     Lj_units_in='H', to_df=False):
+    def ZPF_from_EPR(
+        hfss_freqs, hfss_epr_, hfss_signs, hfss_Ljs, Lj_units_in="H", to_df=False
+    ):
         r"""
         Parameters:
             Can be either Pandas or numpy arrays.
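For orientation between hunks: the Convert helpers reformatted above all funnel through _convert_num, which converts the input to SI via toSI, applies the physics lambda, and converts back via fromSI. A minimal usage sketch, not part of this patch — the import path is assumed from the file location pyEPR/calcs/convert.py, and the 11 nH / 60 fF values are purely illustrative:

    from pyEPR.calcs.convert import Convert

    Lj_nH, Cs_fF = 11, 60  # hypothetical junction inductance (nH) and shunt capacitance (fF)
    Ej_MHz = Convert.Ej_from_Lj(Lj_nH, units_in="nH", units_out="MHz")  # E_J = ϕ0^2 / L_J / h
    Ec_MHz = Convert.Ec_from_Cs(Cs_fF, units_in="fF", units_out="MHz")  # E_C = e^2 / (2C) / h
    print(f"Ej ~ {Ej_MHz / 1e3:.1f} GHz, Ec ~ {Ec_MHz:.0f} MHz")  # roughly 14.9 GHz and 323 MHz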
@@ -211,24 +228,25 @@ def ZPF_from_EPR(hfss_freqs, hfss_epr_, hfss_signs, hfss_Ljs, """ hfss_freqs, hfss_epr, hfss_signs, hfss_Ljs = map( - np.array, (hfss_freqs, hfss_epr_, hfss_signs, hfss_Ljs)) + np.array, (hfss_freqs, hfss_epr_, hfss_signs, hfss_Ljs) + ) Ωd = np.diagflat(hfss_freqs) - Ej = Convert.Ej_from_Lj( - hfss_Ljs, units_in=Lj_units_in, units_out='GHz') + Ej = Convert.Ej_from_Lj(hfss_Ljs, units_in=Lj_units_in, units_out="GHz") Ej = np.diagflat(Ej) ϕzpfs = CalcsBasic.epr_to_zpf(hfss_epr, hfss_signs, Ωd, Ej) if to_df: ϕzpfs = pd.DataFrame( - ϕzpfs, columns='ϕ'+hfss_epr_.columns.values, index=hfss_epr_.index) + ϕzpfs, columns="ϕ" + hfss_epr_.columns.values, index=hfss_epr_.index + ) return ϕzpfs, (Ωd, Ej, hfss_epr, hfss_signs) @staticmethod def Omega_from_LC(L, C): - r''' + r""" Calculate the resonant *angular* frequency - ''' - return sqrt(1./(L*C)) + """ + return sqrt(1.0 / (L * C)) diff --git a/pyEPR/calcs/hamiltonian.py b/pyEPR/calcs/hamiltonian.py index bb7b205..cca7c2e 100644 --- a/pyEPR/calcs/hamiltonian.py +++ b/pyEPR/calcs/hamiltonian.py @@ -3,18 +3,18 @@ Hamiltonian operations heavily draw on qutip package. This package must be installed for them to work. """ + try: import qutip from qutip import Qobj # basis, tensor, except (ImportError, ModuleNotFoundError): - Qobj=None + Qobj = None pass from ..toolbox.pythonic import fact class MatrixOps(object): - @staticmethod def cos(op_cos_arg: Qobj): """ @@ -23,40 +23,44 @@ def cos(op_cos_arg: Qobj): op_cos_arg (qutip.Qobj) : argument of the cosine """ - return 0.5*((1j*op_cos_arg).expm() + (-1j*op_cos_arg).expm()) + return 0.5 * ((1j * op_cos_arg).expm() + (-1j * op_cos_arg).expm()) @staticmethod def cos_approx(x, cos_trunc=5): """ Create a Taylor series matrix approximation of the cosine, up to some order. """ - return sum((-1)**i * x**(2*i) / float(fact(2*i)) for i in range(2, cos_trunc + 1)) + return sum( + (-1) ** i * x ** (2 * i) / float(fact(2 * i)) + for i in range(2, cos_trunc + 1) + ) @staticmethod def dot(ais, bis): """ Dot product """ - return sum(ai*bi for ai, bi in zip(ais, bis)) + return sum(ai * bi for ai, bi in zip(ais, bis)) class HamOps(object): - @staticmethod def fock_state_on(d: dict, fock_trunc: int, N_modes: int): - ''' d={mode number: # of photons} In the bare eigen basis - ''' + """d={mode number: # of photons} In the bare eigen basis""" # give me the value d[i] or 0 if d[i] does not exist - return qutip.tensor(*[qutip.basis(fock_trunc, d.get(i, 0)) - for i in range(N_modes)]) + return qutip.tensor( + *[qutip.basis(fock_trunc, d.get(i, 0)) for i in range(N_modes)] + ) @staticmethod def closest_state_to(s: Qobj, energyMHz, evecs): """ Returns the energy of the closest state to s """ + def distance(s2): return (s.dag() * s2[1]).norm() + return max(zip(energyMHz, evecs), key=distance) @staticmethod @@ -64,14 +68,14 @@ def closest_state_to_idx(s: Qobj, evecs): """ Returns the index """ + def distance(s2): return (s.dag() * s2[1]).norm() + return max(zip(range(len(evecs)), evecs), key=distance) @staticmethod - def identify_Fock_levels(fock_trunc: int, evecs, - N_modes=2, - Fock_max=4): + def identify_Fock_levels(fock_trunc: int, evecs, N_modes=2, Fock_max=4): """ Return quantum numbers in terms of the undiagonalized eigenbasis. 
""" diff --git a/pyEPR/calcs/quantum.py b/pyEPR/calcs/quantum.py index dc36d2b..ef9c034 100644 --- a/pyEPR/calcs/quantum.py +++ b/pyEPR/calcs/quantum.py @@ -5,13 +5,15 @@ import numpy as np + def create(n: int): """Returns matrix representation of an n-dimensional creation operator""" - diag = np.sqrt(np.arange(1,n)) + diag = np.sqrt(np.arange(1, n)) mat = np.zeros([n, n]) np.fill_diagonal(mat[1:], diag) return mat + def destroy(n: int): """Returns matrix representation of an n-dimensional annihilation operator""" diag = np.sqrt(np.arange(1, n)) @@ -19,13 +21,17 @@ def destroy(n: int): np.fill_diagonal(mat[:, 1:], diag) return mat + def number(n: int): """Returns matrix representation of an n-dimensional number operator""" mat = np.zeros([n, n]) np.fill_diagonal(mat, np.arange(n)) return mat -def basis(n: int, N: int): # Numpy does provide a method that does this but it's very slow + +def basis( + n: int, N: int +): # Numpy does provide a method that does this but it's very slow """Returns the n-th, N-dimensional basis vector""" vec = np.zeros([N, 1]) vec[n] = 1.0 diff --git a/pyEPR/calcs/transmon.py b/pyEPR/calcs/transmon.py index c2be374..2cb6703 100644 --- a/pyEPR/calcs/transmon.py +++ b/pyEPR/calcs/transmon.py @@ -2,6 +2,7 @@ Transmon calculations """ + import math import numpy as np @@ -13,8 +14,7 @@ from ..toolbox.pythonic import divide_diagonal_by_2 - -class CalcsTransmon(): +class CalcsTransmon: """ Common calculations and parameter reporting used for transmon qubits. """ @@ -48,10 +48,11 @@ def dispersiveH_params_PT_O1(Pmj, Ωm, Ej): f_0 = np.diag(Ωm) - χ_O1 = 0.25 * Ωm @ Pmj @ inv(Ej) @ Pmj.T @ Ωm * 1000. # GHz to MHz + χ_O1 = 0.25 * Ωm @ Pmj @ inv(Ej) @ Pmj.T @ Ωm * 1000.0 # GHz to MHz - f_O1 = f_0 - 0.5*np.ndarray.flatten(np.array(χ_O1.sum(1))) / \ - 1000. 
# 1st order PT expect freq to be dressed down by alpha + f_O1 = ( + f_0 - 0.5 * np.ndarray.flatten(np.array(χ_O1.sum(1))) / 1000.0 + ) # 1st order PT expect freq to be dressed down by alpha # Make the diagonals alpha χ_O1 = divide_diagonal_by_2(χ_O1) @@ -65,23 +66,30 @@ def transmon_get_all_params(Ej_MHz, Ec_MHz): Convenience func """ Ej, Ec = Ej_MHz, Ec_MHz - Lj_H, Cs_F = Convert.Lj_from_Ej( - Ej, 'MHz', 'H'), Convert.Cs_from_Ec(Ec, 'MHz', 'F') # SI units + Lj_H, Cs_F = Convert.Lj_from_Ej(Ej, "MHz", "H"), Convert.Cs_from_Ec( + Ec, "MHz", "F" + ) # SI units Phi_ZPF, Q_ZPF = Convert.ZPF_from_LC(Lj_H, Cs_F) - Omega_MHz = sqrt(1./(Lj_H*Cs_F)) * 1E-6 # MHz - f_MHz = Omega_MHz / (2*pi)*1E-3 - Z_Ohms = sqrt(Lj_H/Cs_F) - phi_ZPF = Phi_ZPF/fluxQ - n_ZPF = Q_ZPF / (2*e_el) - return {'Ej_MHz': Ej_MHz, 'Ec_MHz': Ec_MHz, - 'Lj_H': Lj_H, 'Cs_F': Cs_F, - 'Lj_nH': Lj_H*1E9, 'Cs_fF': Cs_F*1E15, - 'Phi_ZPF': Phi_ZPF, 'Q_ZPF': Q_ZPF, - 'phi_ZPF': phi_ZPF, 'n_ZPF': n_ZPF, - 'Omega_MHz': Omega_MHz, - 'f_MHz': f_MHz, - 'Z_Ohms': Z_Ohms, - } + Omega_MHz = sqrt(1.0 / (Lj_H * Cs_F)) * 1e-6 # MHz + f_MHz = Omega_MHz / (2 * pi) * 1e-3 + Z_Ohms = sqrt(Lj_H / Cs_F) + phi_ZPF = Phi_ZPF / fluxQ + n_ZPF = Q_ZPF / (2 * e_el) + return { + "Ej_MHz": Ej_MHz, + "Ec_MHz": Ec_MHz, + "Lj_H": Lj_H, + "Cs_F": Cs_F, + "Lj_nH": Lj_H * 1e9, + "Cs_fF": Cs_F * 1e15, + "Phi_ZPF": Phi_ZPF, + "Q_ZPF": Q_ZPF, + "phi_ZPF": phi_ZPF, + "n_ZPF": n_ZPF, + "Omega_MHz": Omega_MHz, + "f_MHz": f_MHz, + "Z_Ohms": Z_Ohms, + } @staticmethod def transmon_print_all_params(Lj_nH, Cs_fF): @@ -90,12 +98,14 @@ def transmon_print_all_params(Lj_nH, Cs_fF): Convenience func """ # Parameters - duplicates with transmon_get_all_params - Ej, Ec = Convert.Ej_from_Lj(Lj_nH, 'nH', 'MHz'), Convert.Ec_from_Cs( - Cs_fF, 'fF', 'MHz') # MHz - Lj_H, Cs_F = Convert.Lj_from_Ej(Ej, 'MHz', 'H'), Convert.Cs_from_Ec( - Ec, 'MHz', 'F') # SI units + Ej, Ec = Convert.Ej_from_Lj(Lj_nH, "nH", "MHz"), Convert.Ec_from_Cs( + Cs_fF, "fF", "MHz" + ) # MHz + Lj_H, Cs_F = Convert.Lj_from_Ej(Ej, "MHz", "H"), Convert.Cs_from_Ec( + Ec, "MHz", "F" + ) # SI units Phi_ZPF, Q_ZPF = Convert.ZPF_from_LC(Lj_H, Cs_F) - Omega_MHz = sqrt(1./(Lj_H*Cs_F)) * 1E-6 # MHz + Omega_MHz = sqrt(1.0 / (Lj_H * Cs_F)) * 1e-6 # MHz # Print text = r""" @@ -105,11 +115,19 @@ def transmon_print_all_params(Lj_nH, Cs_fF): \omega_0 &=2\pi\times %.2f \mathrm{\ GHz} & Z_0 &= %.0f \mathrm{\ \Omega} \\ \phi_\mathrm{ZPF} &= %.2f \ \ \phi_0 & n_\mathrm{ZPF} &=%.2f \ \ (2e) \\ \end{align} - """ % (Lj_H*1E9, Cs_F*1E15, Ej/1E3, Ec, - Omega_MHz / (2*pi)*1E-3, sqrt(Lj_H/Cs_F), - Phi_ZPF/fluxQ, Q_ZPF / (2*e_el)) + """ % ( + Lj_H * 1e9, + Cs_F * 1e15, + Ej / 1e3, + Ec, + Omega_MHz / (2 * pi) * 1e-3, + sqrt(Lj_H / Cs_F), + Phi_ZPF / fluxQ, + Q_ZPF / (2 * e_el), + ) from IPython.display import display, Math + display(Math(text)) return text @@ -120,5 +138,12 @@ def charge_dispersion_approx(m, Ec, Ej): Use Eq. (2.5) of Koch's paper. """ - return sqrt(2./pi) * Ec * (-1.)**(m) * 2.**(4.*m+5.) 
* exp(-sqrt(8*Ej/Ec)) * (Ej/(2*Ec))**(m/2.+3./4.)\ + return ( + sqrt(2.0 / pi) + * Ec + * (-1.0) ** (m) + * 2.0 ** (4.0 * m + 5.0) + * exp(-sqrt(8 * Ej / Ec)) + * (Ej / (2 * Ec)) ** (m / 2.0 + 3.0 / 4.0) / math.factorial(m) + ) diff --git a/pyEPR/core.py b/pyEPR/core.py index 5a54e4a..d7f038a 100644 --- a/pyEPR/core.py +++ b/pyEPR/core.py @@ -13,7 +13,6 @@ # pylint: disable=invalid-name, unused-import - from .project_info import ProjectInfo from .core_quantum_analysis import QuantumAnalysis from .core_distributed_analysis import DistributedAnalysis diff --git a/pyEPR/core_distributed_analysis.py b/pyEPR/core_distributed_analysis.py index 5af0181..63c6018 100644 --- a/pyEPR/core_distributed_analysis.py +++ b/pyEPR/core_distributed_analysis.py @@ -32,9 +32,12 @@ from .ansys import CalcObject, ConstantVecCalcObject, set_property, ureg from .calcs.constants import epsilon_0 from .project_info import ProjectInfo -from .reports import (plot_convergence_f_vspass, plot_convergence_max_df, - plot_convergence_maxdf_vs_sol, - plot_convergence_solved_elem) +from .reports import ( + plot_convergence_f_vspass, + plot_convergence_max_df, + plot_convergence_maxdf_vs_sol, + plot_convergence_solved_elem, +) from .toolbox.pythonic import print_NoNewLine @@ -59,7 +62,7 @@ class DistributedAnalysis(object): """ def __init__(self, *args, **kwargs): - ''' + """ Pass in the arguments for ProjectInfo. See help for `?ProjectInfo`. @@ -118,18 +121,20 @@ def __init__(self, *args, **kwargs): ("Height='0.06mm' Lj='13.5nH'", "Height='0.06mm' Lj='15.3nH'") - ''' + """ # Get the project info project_info = None - if (len(args) == 1) and (args[0].__class__.__name__ == 'ProjectInfo'): + if (len(args) == 1) and (args[0].__class__.__name__ == "ProjectInfo"): # isinstance(args[0], ProjectInfo): # fails on module repload with changes project_info = args[0] else: - assert len(args) == 0, '''Since you did not pass a ProjectInfo object + assert ( + len(args) == 0 + ), """Since you did not pass a ProjectInfo object as a argument, we now assume you are trying to create a project info object here by passing its arguments. See ProjectInfo. - It does not take any arguments, only kwargs. \N{face with medical mask}''' + It does not take any arguments, only kwargs. \N{face with medical mask}""" project_info = ProjectInfo(*args, **kwargs) # Input @@ -156,7 +161,7 @@ def __init__(self, *args, **kwargs): self.variations_analyzed = [] # : List of analyzed variations. List of indices # String identifier of variables, such as "Cj='2fF' Lj='12.5nH'" - self._nominal_variation = '' + self._nominal_variation = "" self._list_variations = ("",) # tuple set of variables # container for eBBQ list of variables; basically the same as _list_variations self._hfss_variables = Dict() @@ -165,9 +170,11 @@ def __init__(self, *args, **kwargs): self.update_ansys_info() - print('Design \"%s\" info:' % self.design.name) - print('\t%-15s %d\n\t%-15s %d' % ('# eigenmodes', self.n_modes, - '# variations', self.n_variations)) + print('Design "%s" info:' % self.design.name) + print( + "\t%-15s %d\n\t%-15s %d" + % ("# eigenmodes", self.n_modes, "# variations", self.n_variations) + ) # Setup data saving self.data_dir = None @@ -210,45 +217,45 @@ def project(self): @property def options(self): - """ Project info options""" + """Project info options""" return self.pinfo.options def setup_data(self): - ''' + """ Set up folder paths for saving data to. Sets the save filename with the current time. 
Saves to Path(config.root_dir) / self.project.name / self.design.name - ''' + """ if len(self.design.name) > 50: - logger.error('WARNING! DESIGN FILENAME MAY BE TOO LONG! ') + logger.error("WARNING! DESIGN FILENAME MAY BE TOO LONG! ") - self.data_dir = Path(config.root_dir) / \ - self.project.name / self.design.name - self.data_filename = self.data_dir / (time.strftime(config.save_format, - time.localtime()) + '.npz') + self.data_dir = Path(config.root_dir) / self.project.name / self.design.name + self.data_filename = self.data_dir / ( + time.strftime(config.save_format, time.localtime()) + ".npz" + ) if not self.data_dir.is_dir(): self.data_dir.mkdir(parents=True, exist_ok=True) def calc_p_junction_single(self, mode, variation, U_E=None, U_H=None): - ''' + """ This function is used in the case of a single junction only. For multiple junctions, see :func:`~pyEPR.DistributedAnalysis.calc_p_junction`. Assumes no lumped capacitive elements. - ''' + """ if U_E is None: U_E = self.calc_energy_electric(variation) if U_H is None: U_H = self.calc_energy_magnetic(variation) pj = OrderedDict() - pj_val = (U_E-U_H)/U_E - pj['pj_'+str(mode)] = np.abs(pj_val) - print(' p_j_' + str(mode) + ' = ' + str(pj_val)) + pj_val = (U_E - U_H) / U_E + pj["pj_" + str(mode)] = np.abs(pj_val) + print(" p_j_" + str(mode) + " = " + str(pj_val)) return pj # TODO: replace this method with the one below, here because some funcs use it still @@ -268,16 +275,17 @@ def get_freqs_bare(self, variation: str): freqs_bare_vals = [] freqs_bare_dict = OrderedDict() freqs, kappa_over_2pis = self.solutions.eigenmodes( - self.get_variation_string(variation)) + self.get_variation_string(variation) + ) for m in range(self.n_modes): - freqs_bare_dict['freq_bare_'+str(m)] = 1e9*freqs[m] - freqs_bare_vals.append(1e9*freqs[m]) + freqs_bare_dict["freq_bare_" + str(m)] = 1e9 * freqs[m] + freqs_bare_vals.append(1e9 * freqs[m]) if kappa_over_2pis is not None: - freqs_bare_dict['Q_'+str(m)] = freqs[m]/kappa_over_2pis[m] + freqs_bare_dict["Q_" + str(m)] = freqs[m] / kappa_over_2pis[m] else: - freqs_bare_dict['Q_'+str(m)] = 0 - #self.freqs_bare = freqs_bare_dict - #self.freqs_bare_vals = freqs_bare_vals + freqs_bare_dict["Q_" + str(m)] = 0 + # self.freqs_bare = freqs_bare_dict + # self.freqs_bare_vals = freqs_bare_vals return freqs_bare_dict, freqs_bare_vals def get_freqs_bare_pd(self, variation: str, frame=True): @@ -316,23 +324,22 @@ def get_freqs_bare_pd(self, variation: str, frame=True): Qs = freqs / pd.Series(kappa_over_2pis, index=range(len(freqs))) if frame: - df = pd.DataFrame({'Freq. (GHz)': freqs, 'Quality Factor': Qs}) - df.index.name = 'mode' + df = pd.DataFrame({"Freq. (GHz)": freqs, "Quality Factor": Qs}) + df.index.name = "mode" return df else: return freqs, Qs - def get_ansys_frequencies_all(self, vs='variation'): + def get_ansys_frequencies_all(self, vs="variation"): """ Return all ansys frequencies and quality factors vs a variation Returns a multi-index pandas DataFrame """ df = dict() - variable = None if vs == 'variation' else self.get_variable_vs_variations( - vs) + variable = None if vs == "variation" else self.get_variable_vs_variations(vs) for variation in self.variations: # just for the first 2 - if vs == 'variation': + if vs == "variation": label = variation else: label = variable[variation] @@ -341,7 +348,7 @@ def get_ansys_frequencies_all(self, vs='variation'): return pd.concat(df, names=[vs]) def _get_lv(self, variation=None): - ''' + """ List of variation variables in a format that is used when feeding back to ansys. 
Args: @@ -355,7 +362,7 @@ def _get_lv(self, variation=None): .. code-block:: python ['Lj1:=','13nH', 'QubitGap:=','100um'] - ''' + """ if variation is None: lv = self._nominal_variation # "Cj='2fF' Lj='12.5nH'" lv = self._parse_listvariations(lv) @@ -368,8 +375,8 @@ def _get_lv(self, variation=None): @property def n_variations(self): - """ Number of **solved** variations, corresponding to the - selected Setup. """ + """Number of **solved** variations, corresponding to the + selected Setup.""" return len(self._list_variations) def set_variation(self, variation: str): @@ -430,7 +437,7 @@ def _parse_listvariations(self, lv): """ lv = str(lv) lv = lv.replace("=", ":=,") - lv = lv.replace(' ', ',') + lv = lv.replace(" ", ",") lv = lv.replace("'", "") lv = lv.split(",") return lv @@ -446,8 +453,8 @@ def get_nominal_variation_index(self): try: return str(self._list_variations.index(self._nominal_variation)) except Exception: - print('WARNING: Unsure of the index, returning 0') - return '0' + print("WARNING: Unsure of the index, returning 0") + return "0" def get_ansys_variations(self): """ @@ -468,7 +475,7 @@ def get_ansys_variations(self): return self._list_variations def update_ansys_info(self): - '''' + """' Updates all information about the Ansys solved variations and variables. .. code-block:: python @@ -476,7 +483,7 @@ def update_ansys_info(self): n_modes, _list_variations, nominal_variation, n_variations - ''' + """ # from oDesign self._nominal_variation = self.design.get_nominal_variation() @@ -484,11 +491,12 @@ def update_ansys_info(self): # from oSetup -- only for the solved variations! self._list_variations = self.solutions.list_variations() - self.variations = [str(i) for i in range( - self.n_variations)] # TODO: change to integer? + self.variations = [ + str(i) for i in range(self.n_variations) + ] # TODO: change to integer? 
# eigenmodes - if self.design.solution_type == 'Eigenmode': + if self.design.solution_type == "Eigenmode": self.n_modes = int(self.setup.n_modes) else: self.n_modes = 0 @@ -502,7 +510,8 @@ def _update_ansys_variables(self, variations=None): variations = variations or self.variations for variation in variations: self._hfss_variables[variation] = pd.Series( - self.get_variables(variation=variation)) + self.get_variables(variation=variation) + ) return self._hfss_variables def get_ansys_variables(self): @@ -512,11 +521,11 @@ def get_ansys_variables(self): Returns: Return a dataframe of variables as index and columns as the variations """ - vs = 'variation' + vs = "variation" df = pd.DataFrame(self._hfss_variables, columns=self.variations) df.columns.name = vs - df.index = [x[1:] if x.startswith('_') else x for x in df.index] - #df.index.name = 'variable' + df.index = [x[1:] if x.startswith("_") else x for x in df.index] + # df.index.name = 'variable' return df def get_variables(self, variation=None): @@ -529,9 +538,9 @@ def get_variables(self, variation=None): """ lv = self._get_lv(variation) variables = OrderedDict() - for ii in range(int(len(lv)/2)): - variables['_'+lv[2*ii][:-2]] = lv[2*ii+1] - #self.variables = variables + for ii in range(int(len(lv) / 2)): + variables["_" + lv[2 * ii][:-2]] = lv[2 * ii + 1] + # self.variables = variables return variables def get_variable_vs_variations(self, variable: str, convert: bool = True): @@ -552,13 +561,15 @@ def get_variable_vs_variations(self, variable: str, convert: bool = True): s = s.apply(lambda x: ureg.Quantity(x).magnitude) return s - def calc_energy_electric(self, - variation: str = None, - obj: str = 'AllObjects', - volume: str = 'Deprecated', - smooth: bool = False, - obj_dims: int = 3): - r''' + def calc_energy_electric( + self, + variation: str = None, + obj: str = "AllObjects", + volume: str = "Deprecated", + smooth: bool = False, + obj_dims: int = 3, + ): + r""" Calculates two times the peak electric energy, or 4 times the RMS, :math:`4*\mathcal{E}_{\mathrm{elec}}` (since we do not divide by 2 and use the peak phasors). @@ -583,9 +594,11 @@ def calc_energy_electric(self, ℰ_substr = epr_hfss.calc_energy_electric(obj='Box1') print(f'Energy in substrate = {100*ℰ_substr/ℰ_total:.1f}%') - ''' - if volume != 'Deprecated': - logger.warning('The use of the "volume" argument is deprecated... use "obj" instead') + """ + if volume != "Deprecated": + logger.warning( + 'The use of the "volume" argument is deprecated... use "obj" instead' + ) obj = volume calcobject = CalcObject([], self.setup) @@ -605,19 +618,23 @@ def calc_energy_electric(self, elif obj_dims == 3: A = A.integrate_vol(name=obj) else: - logger.warning('Invalid object dimensions %s, using default of 3 (volume)' % obj_dims) + logger.warning( + "Invalid object dimensions %s, using default of 3 (volume)" % obj_dims + ) A = A.integrate_vol(name=obj) lv = self._get_lv(variation) return A.evaluate(lv=lv) - def calc_energy_magnetic(self, - variation: str = None, - obj: str = 'AllObjects', - volume: str = 'Deprecated', - smooth: bool = False, - obj_dims: int = 3): - ''' + def calc_energy_magnetic( + self, + variation: str = None, + obj: str = "AllObjects", + volume: str = "Deprecated", + smooth: bool = False, + obj_dims: int = 3, + ): + """ See calc_energy_electric. 
Args: @@ -626,9 +643,11 @@ def calc_energy_magnetic(self, volume (string | 'AllObjects'): Name of the volume to integrate over smooth (bool | False) : Smooth the electric field or not when performing calculation obj_dims (int | 3) : 1 - line, 2 - surface, 3 - volume. Default volume - ''' - if volume != 'Deprecated': - logger.warning('The use of the "volume" argument is deprecated... use "obj" instead') + """ + if volume != "Deprecated": + logger.warning( + 'The use of the "volume" argument is deprecated... use "obj" instead' + ) obj = volume calcobject = CalcObject([], self.setup) @@ -648,19 +667,18 @@ def calc_energy_magnetic(self, elif obj_dims == 3: A = A.integrate_vol(name=obj) else: - logger.warn(f'Invalid object dimensions {obj_dims}, using default of 3 (volume)') + logger.warn( + f"Invalid object dimensions {obj_dims}, using default of 3 (volume)" + ) A = A.integrate_vol(name=obj) lv = self._get_lv(variation) return A.evaluate(lv=lv) - def calc_p_electric_volume(self, - name_dielectric3D, - relative_to='AllObjects', - variation=None, - E_total=None - ): - r''' + def calc_p_electric_volume( + self, name_dielectric3D, relative_to="AllObjects", variation=None, E_total=None + ): + r""" Calculate the dielectric energy-participation ratio of a 3D object (one that has volume) relative to the dielectric energy of a list of objects. @@ -672,26 +690,26 @@ def calc_p_electric_volume(self, Returns: ℰ_object/ℰ_total, (ℰ_object, _total) - ''' + """ if E_total is None: - logger.debug('Calculating ℰ_total') + logger.debug("Calculating ℰ_total") ℰ_total = self.calc_energy_electric(obj=relative_to, variation=variation) else: ℰ_total = E_total - logger.debug('Calculating ℰ_object') + logger.debug("Calculating ℰ_object") ℰ_object = self.calc_energy_electric(obj=name_dielectric3D, variation=variation) - return ℰ_object/ℰ_total, (ℰ_object, ℰ_total) + return ℰ_object / ℰ_total, (ℰ_object, ℰ_total) def calc_current(self, fields, line: str): - ''' + """ Function to calculate Current based on line. Not in use. Args: line (str) : integration line between plates - name - ''' + """ self.design.Clear_Field_Clac_Stack() comp = fields.Vector_H exp = comp.integrate_line_tangent(line) @@ -700,7 +718,7 @@ def calc_current(self, fields, line: str): return I def calc_avg_current_J_surf_mag(self, variation: str, junc_rect: str, junc_line): - ''' Peak current I_max for mode J in junction J + """Peak current I_max for mode J in junction J The avg. is over the surface of the junction. I.e., spatial. 
Args: variation (str): A string identifier of the variation, @@ -709,23 +727,27 @@ def calc_avg_current_J_surf_mag(self, variation: str, junc_rect: str, junc_line) junc_line (str) : name of junction line to integrate over Returns: Value of peak current - ''' + """ lv = self._get_lv(variation) jl, uj = self.get_junc_len_dir(variation, junc_line) uj = ConstantVecCalcObject(uj, self.setup) calc = CalcObject([], self.setup) - #calc = calc.getQty("Jsurf").mag().integrate_surf(name = junc_rect) - calc = (((calc.getQty("Jsurf")).dot(uj)).imag() - ).integrate_surf(name=junc_rect) + # calc = calc.getQty("Jsurf").mag().integrate_surf(name = junc_rect) + calc = (((calc.getQty("Jsurf")).dot(uj)).imag()).integrate_surf(name=junc_rect) I = calc.evaluate(lv=lv) / jl # phase = 90 # self.design.Clear_Field_Clac_Stack() return I - def calc_current_using_line_voltage(self, variation: str, junc_line_name: str, - junc_L_Henries: float, Cj_Farads: float = None): - ''' + def calc_current_using_line_voltage( + self, + variation: str, + junc_line_name: str, + junc_L_Henries: float, + Cj_Farads: float = None, + ): + """ Peak current I_max for prespecified mode calculating line voltage across junction. Make sure that you have set the correct variation in HFSS before running this @@ -736,42 +758,55 @@ def calc_current_using_line_voltage(self, variation: str, junc_line_name: str, junc_L_Henries: junction inductance in henries Cj_Farads: junction cap in Farads TODO: Smooth? - ''' + """ lv = self._get_lv(variation) - v_calc_real = CalcObject([], self.setup).getQty( - "E").real().integrate_line_tangent(name=junc_line_name) - v_calc_imag = CalcObject([], self.setup).getQty( - "E").imag().integrate_line_tangent(name=junc_line_name) - V = np.sign(v_calc_real.evaluate(lv=lv)) * np.sqrt(v_calc_real.evaluate(lv=lv)**2 + - v_calc_imag.evaluate(lv=lv)**2) + v_calc_real = ( + CalcObject([], self.setup) + .getQty("E") + .real() + .integrate_line_tangent(name=junc_line_name) + ) + v_calc_imag = ( + CalcObject([], self.setup) + .getQty("E") + .imag() + .integrate_line_tangent(name=junc_line_name) + ) + V = np.sign(v_calc_real.evaluate(lv=lv)) * np.sqrt( + v_calc_real.evaluate(lv=lv) ** 2 + v_calc_imag.evaluate(lv=lv) ** 2 + ) # Get frequency - freq = CalcObject( - [('EnterOutputVar', ('Freq', "Complex"))], self.setup).real().evaluate() - omega = 2*np.pi*freq # in SI radian Hz units - - Z = omega*junc_L_Henries - if abs(float(Cj_Farads)) > 1E-29: # zero - #print('Non-zero Cj used in calc_current_using_line_voltage') - #Z += 1./(omega*Cj_Farads) + freq = ( + CalcObject([("EnterOutputVar", ("Freq", "Complex"))], self.setup) + .real() + .evaluate() + ) + omega = 2 * np.pi * freq # in SI radian Hz units + + Z = omega * junc_L_Henries + if abs(float(Cj_Farads)) > 1e-29: # zero + # print('Non-zero Cj used in calc_current_using_line_voltage') + # Z += 1./(omega*Cj_Farads) print( - '\t\t'f'Energy fraction (Lj over Lj&Cj)= {100./(1.+omega**2 *Cj_Farads*junc_L_Henries):.2f}%') + "\t\t" + f"Energy fraction (Lj over Lj&Cj)= {100./(1.+omega**2 *Cj_Farads*junc_L_Henries):.2f}%" + ) # f'Z_L= {omega*junc_L_Henries:.1f} Ohms Z_C= {1./(omega*Cj_Farads):.1f} Ohms') - I_peak = V/Z # I=V/(wL)s + I_peak = V / Z # I=V/(wL)s return I_peak, V, freq def calc_line_current(self, variation, junc_line_name): lv = self._get_lv(variation) calc = CalcObject([], self.setup) - calc = calc.getQty("H").imag().integrate_line_tangent( - name=junc_line_name) + calc = calc.getQty("H").imag().integrate_line_tangent(name=junc_line_name) # self.design.Clear_Field_Clac_Stack() 
return calc.evaluate(lv=lv) def get_junc_len_dir(self, variation: str, junc_line): - ''' + """ Return the length and direction of a junction defined by a line Args: @@ -782,49 +817,54 @@ def get_junc_len_dir(self, variation: str, junc_line): jl (float) : junction length uj (list of 3 floats): x,y,z coordinates of the unit vector tangent to the junction line - ''' + """ # lv = self._get_lv(variation) u = [] - for coor in ['X', 'Y', 'Z']: + for coor in ["X", "Y", "Z"]: calc = CalcObject([], self.setup) calc = calc.line_tangent_coor(junc_line, coor) u.append(calc.evaluate(lv=lv)) - jl = float(np.sqrt(u[0]**2+u[1]**2+u[2]**2)) - uj = [float(u[0]/jl), float(u[1]/jl), float(u[2]/jl)] + jl = float(np.sqrt(u[0] ** 2 + u[1] ** 2 + u[2] ** 2)) + uj = [float(u[0] / jl), float(u[1] / jl), float(u[2] / jl)] return jl, uj def get_Qseam(self, seam, mode, variation, U_H=None): - r''' + r""" Calculate the contribution to Q of a seam, by integrating the current in the seam with finite conductance: set in the config file ref: http://arxiv.org/pdf/1509.01119.pdf - ''' + """ if U_H is None: U_H = self.calc_energy_magnetic(variation) _, freqs_bare_vals = self.get_freqs_bare(variation) - self.omega = 2*np.pi*freqs_bare_vals[mode] + self.omega = 2 * np.pi * freqs_bare_vals[mode] lv = self._get_lv(variation) Qseam = OrderedDict() - print(f'Calculating Qseam_{seam} for mode {mode} ({mode}/{self.n_modes-1})') + print(f"Calculating Qseam_{seam} for mode {mode} ({mode}/{self.n_modes-1})") # overestimating the loss by taking norm2 of j, rather than jperp**2 j_2_norm = self.fields.Vector_Jsurf.norm_2() int_j_2 = j_2_norm.integrate_line(seam) int_j_2_val = int_j_2.evaluate(lv=lv, phase=90) - yseam = int_j_2_val/U_H/self.omega + yseam = int_j_2_val / U_H / self.omega - Qseam['Qseam_'+seam+'_' + - str(mode)] = config.dissipation.gseam/yseam + Qseam["Qseam_" + seam + "_" + str(mode)] = config.dissipation.gseam / yseam - print('Qseam_' + seam + '_' + str(mode), '=', str(config.dissipation.gseam/yseam)) + print( + "Qseam_" + seam + "_" + str(mode), + "=", + str(config.dissipation.gseam / yseam), + ) return pd.Series(Qseam) - def get_Qseam_sweep(self, seam, mode, variation, variable, values, unit, U_H=None, pltresult=True): + def get_Qseam_sweep( + self, seam, mode, variation, variable, values, unit, U_H=None, pltresult=True + ): """ Q due to seam loss. 
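The seam-loss model used by get_Qseam above and get_Qseam_sweep below (ref arXiv:1509.01119) reduces to two lines: the seam admittance per unit conductance is y_seam = ∫|J_surf|² dl / (ω U_H), and Q_seam = g_seam / y_seam. A standalone sketch with assumed, illustrative numbers — in the code, g_seam comes from config.dissipation.gseam and the integral from the Ansys field calculator:

    import numpy as np

    g_seam = 1.0e3               # assumed seam conductance, 1/(Ω·m)
    int_J2_dl = 3.77e-2          # assumed ∫ |J_surf|^2 dl along the seam, A^2/m
    U_H = 1.0e-9                 # assumed peak magnetic energy of the mode, J
    omega = 2 * np.pi * 6e9      # assumed angular mode frequency, rad/s

    y_seam = int_J2_dl / U_H / omega  # same expression as yseam in the method body
    Q_seam = g_seam / y_seam          # ~1e6 for these numbers
    print(f"Q_seam = {Q_seam:.3g}")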
@@ -835,70 +875,100 @@ def get_Qseam_sweep(self, seam, mode, variation, variable, values, unit, U_H=Non if U_H is None: U_H = self.calc_energy_magnetic(variation) - self.solutions.set_mode(mode+1, 0) + self.solutions.set_mode(mode + 1, 0) self.fields = self.setup.get_fields() freqs_bare_dict, freqs_bare_vals = self.get_freqs_bare(variation) - self.omega = 2*np.pi*freqs_bare_vals[mode] + self.omega = 2 * np.pi * freqs_bare_vals[mode] print(variation) print(type(variation)) print(ureg(variation)) lv = self._get_lv(variation) Qseamsweep = [] - print('Calculating Qseam_' + seam + ' for mode ' + str(mode) + - ' (' + str(mode) + '/' + str(self.n_modes-1) + ')') + print( + "Calculating Qseam_" + + seam + + " for mode " + + str(mode) + + " (" + + str(mode) + + "/" + + str(self.n_modes - 1) + + ")" + ) for value in values: - self.design.set_variable(variable, str(value)+unit) + self.design.set_variable(variable, str(value) + unit) # overestimating the loss by taking norm2 of j, rather than jperp**2 j_2_norm = self.fields.Vector_Jsurf.norm_2() int_j_2 = j_2_norm.integrate_line(seam) int_j_2_val = int_j_2.evaluate(lv=lv, phase=90) - yseam = int_j_2_val/U_H/self.omega - Qseamsweep.append(config.dissipation.gseam/yseam) -# Qseamsweep['Qseam_sweep_'+seam+'_'+str(mode)] = gseam/yseam - # Cprint 'Qseam_' + seam + '_' + str(mode) + str(' = ') + str(gseam/yseam) + yseam = int_j_2_val / U_H / self.omega + Qseamsweep.append(config.dissipation.gseam / yseam) + # Qseamsweep['Qseam_sweep_'+seam+'_'+str(mode)] = gseam/yseam + # Cprint 'Qseam_' + seam + '_' + str(mode) + str(' = ') + str(gseam/yseam) if pltresult: _, ax = plt.subplots() ax.plot(values, Qseamsweep) - ax.set_yscale('log') - ax.set_xlabel(variable+' ('+unit+')') - ax.set_ylabel('Q'+'_'+seam) + ax.set_yscale("log") + ax.set_xlabel(variable + " (" + unit + ")") + ax.set_ylabel("Q" + "_" + seam) return Qseamsweep def get_Qdielectric(self, dielectric, mode, variation, U_E=None): if U_E is None: - U_E = self.calc_energy_electric(variation) + U_E = self.calc_energy_electric(variation) Qdielectric = OrderedDict() - print('Calculating Qdielectric_' + dielectric + ' for mode ' + - str(mode) + ' (' + str(mode) + '/' + str(self.n_modes-1) + ')') + print( + "Calculating Qdielectric_" + + dielectric + + " for mode " + + str(mode) + + " (" + + str(mode) + + "/" + + str(self.n_modes - 1) + + ")" + ) U_dielectric = self.calc_energy_electric(variation, obj=dielectric) - p_dielectric = U_dielectric/U_E + p_dielectric = U_dielectric / U_E # TODO: Update make p saved sep. and get Q for diff materials, indep. specify in pinfo - Qdielectric['Qdielectric_' + dielectric] = 1/(p_dielectric*config.dissipation.tan_delta_sapp) - print('p_dielectric'+'_'+dielectric+'_' + str(mode) + ' = ' + str(p_dielectric)) + Qdielectric["Qdielectric_" + dielectric] = 1 / ( + p_dielectric * config.dissipation.tan_delta_sapp + ) + print( + "p_dielectric" + + "_" + + dielectric + + "_" + + str(mode) + + " = " + + str(p_dielectric) + ) return pd.Series(Qdielectric) def get_Qsurface(self, mode, variation, name, U_E=None, material_properties=None): - ''' + """ Calculate the contribution to Q of a dielectric layer of dirt on a given surface. 
Set the dirt thickness and loss tangent in the config file ref: http://arxiv.org/pdf/1509.01854.pdf - ''' + """ if U_E is None: U_E = self.calc_energy_electric(variation) if material_properties is None: material_properties = {} - th = material_properties.get('th', config.dissipation.th) - eps_r = material_properties.get('eps_r', config.dissipation.eps_r) - tan_delta_surf = material_properties.get('tan_delta_surf', config.dissipation.tan_delta_surf) + th = material_properties.get("th", config.dissipation.th) + eps_r = material_properties.get("eps_r", config.dissipation.eps_r) + tan_delta_surf = material_properties.get( + "tan_delta_surf", config.dissipation.tan_delta_surf + ) lv = self._get_lv(variation) Qsurf = OrderedDict() - print(f'Calculating Qsurface {name} for mode ({mode}/{self.n_modes-1})') + print(f"Calculating Qsurface {name} for mode ({mode}/{self.n_modes-1})") calcobject = CalcObject([], self.setup) vecE = calcobject.getQty("E") A = vecE @@ -908,46 +978,47 @@ def get_Qsurface(self, mode, variation, name, U_E=None, material_properties=None A = A.integrate_surf(name=name) U_surf = A.evaluate(lv=lv) U_surf *= th * epsilon_0 * eps_r - p_surf = U_surf/U_E - Qsurf[f'Qsurf_{name}'] = 1 / (p_surf * tan_delta_surf) - print(f'p_surf_{name}_{mode} = {p_surf}') + p_surf = U_surf / U_E + Qsurf[f"Qsurf_{name}"] = 1 / (p_surf * tan_delta_surf) + print(f"p_surf_{name}_{mode} = {p_surf}") return pd.Series(Qsurf) def get_Qsurface_all(self, mode, variation, U_E=None): - ''' + """ Calculate the contribution to Q of a dielectric layer of dirt on all surfaces. Set the dirt thickness and loss tangent in the config file ref: http://arxiv.org/pdf/1509.01854.pdf - ''' - return self.get_Qsurface(mode, variation, name='AllObjects', U_E=U_E) + """ + return self.get_Qsurface(mode, variation, name="AllObjects", U_E=U_E) - def calc_Q_external(self, variation, freq_GHz, U_E = None): - ''' + def calc_Q_external(self, variation, freq_GHz, U_E=None): + """ Calculate the coupling Q of mode m with each port p Expected that you have specified the mode before calling this Args: variation (str): A string identifier of the variation, such as '0', '1', ... - ''' + """ if U_E is None: U_E = self.calc_energy_electric(variation) - Qp = pd.Series({}, dtype='float64') + Qp = pd.Series({}, dtype="float64") freq = freq_GHz * 1e9 # freq in Hz for port_nm, port in self.pinfo.ports.items(): - I_peak = self.calc_avg_current_J_surf_mag(variation, port['rect'], - port['line']) - U_dissip = 0.5 * port['R'] * I_peak**2 * 1 / freq - p = U_dissip / (U_E/2) # U_E is 2x the peak electrical energy + I_peak = self.calc_avg_current_J_surf_mag( + variation, port["rect"], port["line"] + ) + U_dissip = 0.5 * port["R"] * I_peak**2 * 1 / freq + p = U_dissip / (U_E / 2) # U_E is 2x the peak electrical energy kappa = p * freq Q = 2 * np.pi * freq / kappa - Qp['Q_' + port_nm] = Q + Qp["Q_" + port_nm] = Q return Qp def calc_p_junction(self, variation, U_H, U_E, Ljs, Cjs): - ''' + """ For a single specific mode. Expected that you have specified the mode before calling this, :func:`~pyEPR.DistributedAnalysis.set_mode`. @@ -971,74 +1042,85 @@ def calc_p_junction(self, variation, U_H, U_E, Ljs, Cjs): .. 
warning:: Potential errors: If you dont have a line or rect by the right name you will prob - get an error of the type: com_error: (-2147352567, 'Exception occurred.', + get an error of the type: com_error: (-2147352567, 'Exception occurred.', (0, None, None, None, 0, -2147024365), None) - ''' + """ # ------------------------------------------------------------ # Calculate all peak voltage and currents for all junctions in a given mode method = self.pinfo.options.method_calc_P_mj I_peak_ = {} V_peak_ = {} - Sj = pd.Series({}, dtype='float64') + Sj = pd.Series({}, dtype="float64") for j_name, j_props in self.pinfo.junctions.items(): - logger.debug(f'Calculating participations for {(j_name, j_props)}') + logger.debug(f"Calculating participations for {(j_name, j_props)}") Lj = Ljs[j_name] Cj = Cjs[j_name] - line_name = j_props['line'] + line_name = j_props["line"] - if method == 'J_surf_mag': # old method + if method == "J_surf_mag": # old method _I_peak_1 = self.calc_avg_current_J_surf_mag( - variation, j_props['rect'], line_name) + variation, j_props["rect"], line_name + ) # could also use this to back out the V_peak using the impedances as in the line # below for now, keep both methods _I_peak_2, _V_peak_2, _ = self.calc_current_using_line_voltage( - variation, line_name, Lj, Cj) + variation, line_name, Lj, Cj + ) logger.debug( - f'Difference in I_Peak calculation ala the two methods: {(_I_peak_1,_I_peak_2)}') + f"Difference in I_Peak calculation ala the two methods: {(_I_peak_1,_I_peak_2)}" + ) V_peak = _V_peak_2 # make sure this is signed I_peak = _I_peak_1 - elif method == 'line_voltage': # new preferred method + elif method == "line_voltage": # new preferred method I_peak, V_peak, _ = self.calc_current_using_line_voltage( - variation, line_name, Lj, Cj) + variation, line_name, Lj, Cj + ) else: - raise NotImplementedError('Other calculation methods\ - (self.pinfo.options.method_calc_P_mj) are possible but not implemented here. ') + raise NotImplementedError( + "Other calculation methods\ + (self.pinfo.options.method_calc_P_mj) are possible but not implemented here. 
" + ) # save results I_peak_[j_name] = I_peak V_peak_[j_name] = V_peak - Sj['s_' + j_name] = _Smj = 1 if V_peak > 0 else - 1 + Sj["s_" + j_name] = _Smj = 1 if V_peak > 0 else -1 # REPORT preliminary - pmj_ind = 0.5*Ljs[j_name] * I_peak**2 / U_E - pmj_cap = 0.5*Cjs[j_name] * V_peak**2 / U_E - #print('\tpmj_ind=',pmj_ind, Ljs[j_name], U_E) + pmj_ind = 0.5 * Ljs[j_name] * I_peak**2 / U_E + pmj_cap = 0.5 * Cjs[j_name] * V_peak**2 / U_E + # print('\tpmj_ind=',pmj_ind, Ljs[j_name], U_E) self.I_peak = I_peak self.V_peak = V_peak self.Ljs = Ljs self.Cjs = Cjs print( - f'\t{j_name:<15} {pmj_ind:>8.6g}{("(+)"if _Smj else "(-)"):>5s} {pmj_cap:>8.6g}') - #print('\tV_peak=', V_peak) + f'\t{j_name:<15} {pmj_ind:>8.6g}{("(+)"if _Smj else "(-)"):>5s} {pmj_cap:>8.6g}' + ) + # print('\tV_peak=', V_peak) # ------------------------------------------------------------ # Calculate participation from the peak voltage and currents # # All junction capacitive and inductive lumped energies - all peak - U_J_inds = {j_name: 0.5*Ljs[j_name] * I_peak_[j_name] - ** 2 for j_name in self.pinfo.junctions} - U_J_caps = {j_name: 0.5*Cjs[j_name] * V_peak_[j_name] - ** 2 for j_name in self.pinfo.junctions} + U_J_inds = { + j_name: 0.5 * Ljs[j_name] * I_peak_[j_name] ** 2 + for j_name in self.pinfo.junctions + } + U_J_caps = { + j_name: 0.5 * Cjs[j_name] * V_peak_[j_name] ** 2 + for j_name in self.pinfo.junctions + } U_tot_ind = U_H + sum(list(U_J_inds.values())) # total U_tot_cap = U_E + sum(list(U_J_caps.values())) @@ -1046,33 +1128,49 @@ def calc_p_junction(self, variation, U_H, U_E, Ljs, Cjs): # what to use for the norm? U_tot_cap or the mean of U_tot_ind and U_tot_cap? # i.e., (U_tot_ind + U_tot_cap)/2 U_norm = U_tot_cap - U_diff = (U_tot_cap-U_tot_ind)/(U_tot_cap+U_tot_ind) - print("\t\t"f"(U_tot_cap-U_tot_ind)/mean={U_diff*100:.2f}%") + U_diff = (U_tot_cap - U_tot_ind) / (U_tot_cap + U_tot_ind) + print("\t\t" f"(U_tot_cap-U_tot_ind)/mean={U_diff*100:.2f}%") if abs(U_diff) > 0.15: - print('WARNING: This simulation must not have converged well!!!\ + print( + "WARNING: This simulation must not have converged well!!!\ The difference in the total cap and ind energies is larger than 10%.\ - Proceed with caution.') + Proceed with caution." 
+ ) - Pj = pd.Series(OrderedDict([(j_name, Uj_ind/U_norm) - for j_name, Uj_ind in U_J_inds.items()])) + Pj = pd.Series( + OrderedDict( + [(j_name, Uj_ind / U_norm) for j_name, Uj_ind in U_J_inds.items()] + ) + ) - PCj = pd.Series(OrderedDict([(j_name, Uj_cap/U_norm) - for j_name, Uj_cap in U_J_caps.items()])) + PCj = pd.Series( + OrderedDict( + [(j_name, Uj_cap / U_norm) for j_name, Uj_cap in U_J_caps.items()] + ) + ) # print('\t{:<15} {:>8.6g} {:>5s}'.format( # j_name, # Pj['p_' + j_name], # '+' if Sj['s_' + j_name] > 0 else '-')) - return Pj, Sj, PCj, pd.Series(I_peak), pd.Series(V_peak), \ - {'U_J_inds': U_J_inds, - 'U_J_caps': U_J_caps, - 'U_H': U_H, - 'U_E': U_E, - 'U_tot_ind': U_tot_ind, - 'U_tot_cap': U_tot_cap, - 'U_norm': U_norm, - 'U_diff': U_diff} + return ( + Pj, + Sj, + PCj, + pd.Series(I_peak), + pd.Series(V_peak), + { + "U_J_inds": U_J_inds, + "U_J_caps": U_J_caps, + "U_H": U_H, + "U_E": U_E, + "U_tot_ind": U_tot_ind, + "U_tot_cap": U_tot_cap, + "U_norm": U_norm, + "U_diff": U_diff, + }, + ) def get_previously_analyzed(self): """ @@ -1096,27 +1194,32 @@ def get_junctions_L_and_C(self, variation: str): variation (str) : label such as '0' or 'all', in which case return pandas table for all variations """ - if variation == 'all': + if variation == "all": # for all variations and concat raise NotImplementedError() # TODO else: - Ljs = pd.Series({}, dtype='float64') - Cjs = pd.Series({}, dtype='float64') + Ljs = pd.Series({}, dtype="float64") + Cjs = pd.Series({}, dtype="float64") for junc_name, val in self.pinfo.junctions.items(): # junction nickname _variables = self._hfss_variables[variation] - def _parse(name): return ureg.Quantity( - _variables['_'+val[name]]).to_base_units().magnitude - Ljs[junc_name] = _parse('Lj_variable') - Cjs[junc_name] = 2E-15 # _parse( + + def _parse(name): + return ( + ureg.Quantity(_variables["_" + val[name]]) + .to_base_units() + .magnitude + ) + + Ljs[junc_name] = _parse("Lj_variable") + Cjs[junc_name] = 2e-15 # _parse( # 'Cj_variable') if 'Cj_variable' in val else 0 return Ljs, Cjs - def do_EPR_analysis(self, - variations: list = None, - modes=None, - append_analysis=True): + def do_EPR_analysis( + self, variations: list = None, modes=None, append_analysis=True + ): """ Main analysis routine @@ -1134,7 +1237,7 @@ def do_EPR_analysis(self, Modes to analyze for example modes = [0, 2, 3] - append_analysis (bool) : + append_analysis (bool) : When we run the Ansys analysis, should we redo any variations that we have already done? Ansys Notes: @@ -1161,17 +1264,21 @@ def do_EPR_analysis(self, eprd = epr.DistributedAnalysis(pinfo) eprd.do_EPR_analysis(append_analysis=False) """ - + if not modes is None: - assert max(modes) < self.n_modes, 'Non-existing mode selected. \n'\ - f'The possible modes are between 0 and {self.n_modes-1}.' + assert max(modes) < self.n_modes, ( + "Non-existing mode selected. \n" + f"The possible modes are between 0 and {self.n_modes-1}." + ) if len(modes) != len(set(modes)): - logger.warn(f'Select each mode only once! Fixing...\n'\ - 'modes: {modes} --> {list(set(modes))}') + logger.warn( + f"Select each mode only once! 
Fixing...\n" + f"modes: {modes} --> {list(set(modes))}" + ) modes = list(set(modes)) # Track the total timing - self._run_time = time.strftime('%Y%m%d_%H%M%S', time.localtime()) + self._run_time = time.strftime("%Y%m%d_%H%M%S", time.localtime()) # Update the latest hfss variation information self.update_ansys_info() @@ -1184,11 +1291,11 @@ # Main loop - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - # TODO: Move inside of loop to function call self.analyze_variation for ii, variation in enumerate(variations): - print(f'\nVariation {variation} [{ii+1}/{len(variations)}]') + print(f"\nVariation {variation} [{ii+1}/{len(variations)}]") # Previously analyzed and we should re-analyze if append_analysis and variation in self.get_previously_analyzed(): - print_NoNewLine(' previously analyzed ...\n') + print_NoNewLine(" previously analyzed ...\n") continue # QUESTION! should we set the current variation, can this save time, set the variables @@ -1199,8 +1306,10 @@ time.sleep(0.4) if self.has_fields() == False: - logger.error(f" Error: HFSS does not have field solution for variation={ii}.\ - Skipping this mode in the analysis") + logger.error( + f" Error: HFSS does not have field solution for variation={ii}.\ + Skipping this variation in the analysis" + ) continue try: @@ -1209,30 +1318,32 @@ # This could fail if more variables are added after the simulation is completed. self.set_variation(variation) except Exception as e: - print('\tERROR: Could not set the variation string.' - '\nPossible causes: Did you add a variable after the simulation was already solved? ' - '\nAttempting to proceed nonetheless, should be just slower ...') + print( + "\tERROR: Could not set the variation string." + "\nPossible causes: Did you add a variable after the simulation was already solved? " + "\nAttempting to proceed nonetheless, should be just slower ..." + ) # use nonframe because old style - freqs_bare_GHz, Qs_bare = self.get_freqs_bare_pd( - variation, frame=False) + freqs_bare_GHz, Qs_bare = self.get_freqs_bare_pd(variation, frame=False) # update to the latest self._hfss_variables[variation] = pd.Series( - self.get_variables(variation=variation)) + self.get_variables(variation=variation) + ) # Create Ljs and Cjs series for a variation Ljs, Cjs = self.get_junctions_L_and_C(variation) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - # This is crummy now. 
use dict - #result = dict() + # result = dict() Om = OrderedDict() # Matrix of angular frequency (of analyzed modes) Pm = OrderedDict() # Participation P matrix Sm = OrderedDict() # Sign S matrix Qm_coupling = OrderedDict() # Quality factor matrix - SOL = OrderedDict() # Other results + SOL = OrderedDict() # Other results Pm_cap = OrderedDict() I_peak = OrderedDict() V_peak = OrderedDict() @@ -1244,133 +1355,199 @@ self.set_mode(mode) # Get HFSS solved frequencies - _Om = pd.Series({}, dtype='float64') + _Om = pd.Series({}, dtype="float64") temp_freq = freqs_bare_GHz[mode] - _Om['freq_GHz'] = temp_freq # freq + _Om["freq_GHz"] = temp_freq # freq Om[mode] = _Om print( - '\n'f' \033[1mMode {mode} at {"%.2f" % temp_freq} GHz [{mode+1}/{self.n_modes}]\033[0m') + "\n" + f' \033[1mMode {mode} at {"%.2f" % temp_freq} GHz [{mode+1}/{self.n_modes}]\033[0m' + ) # EPR Hamiltonian calculations # Calculate global energies and report # Magnetic - print(' Calculating ℰ_magnetic', end=',') + print(" Calculating ℰ_magnetic", end=",") try: self.U_H = self.calc_energy_magnetic(variation) except Exception as e: tb = sys.exc_info()[2] print("\n\nError:\n", e) - raise(Exception(' Did you save the field solutions?\n\ + raise ( + Exception( + " Did you save the field solutions?\n\ Failed during calculation of the total magnetic energy.\ This is the first calculation step, and is indicative that there are \ - no field solutions saved. ').with_traceback(tb)) + no field solutions saved. " + ).with_traceback(tb) + ) # Electric - print('ℰ_electric') + print("ℰ_electric") self.U_E = self.calc_energy_electric(variation) # the unnormed - sol = pd.Series({'U_H': self.U_H, 'U_E': self.U_E}) + sol = pd.Series({"U_H": self.U_H, "U_E": self.U_E}) # Fraction - report the peak energy, properly normalized # the 2 is from the calculation methods - print(f""" {'(ℰ_E-ℰ_H)/ℰ_E':>15s} {'ℰ_E':>9s} {'ℰ_H':>9s} - {100*(self.U_E - self.U_H)/self.U_E:>15.1f}% {self.U_E/2:>9.4g} {self.U_H/2:>9.4g}\n""") + print( + f""" {'(ℰ_E-ℰ_H)/ℰ_E':>15s} {'ℰ_E':>9s} {'ℰ_H':>9s} + {100*(self.U_E - self.U_H)/self.U_E:>15.1f}% {self.U_E/2:>9.4g} {self.U_H/2:>9.4g}\n""" + ) # Calculate EPR for each of the junctions print( - f' Calculating junction energy participation ration (EPR)\n\tmethod=`{self.pinfo.options.method_calc_P_mj}`. First estimates:') + f" Calculating junction energy participation ratio (EPR)\n\tmethod=`{self.pinfo.options.method_calc_P_mj}`. 
First estimates:" + ) print( - f"\t{'junction':<15s} EPR p_{mode}j sign s_{mode}j (p_capacitive)") - - Pm[mode], Sm[mode], Pm_cap[mode], I_peak[mode], V_peak[mode], ansys_energies[mode] = self.calc_p_junction( - variation, self.U_H/2., self.U_E/2., Ljs, Cjs) + f"\t{'junction':<15s} EPR p_{mode}j sign s_{mode}j (p_capacitive)" + ) + + ( + Pm[mode], + Sm[mode], + Pm_cap[mode], + I_peak[mode], + V_peak[mode], + ansys_energies[mode], + ) = self.calc_p_junction( + variation, self.U_H / 2.0, self.U_E / 2.0, Ljs, Cjs + ) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - # EPR Dissipative calculations -- should be a function block below # TODO: this should really be passed as argument to the functions rather than a # property of the calss I would say - self.omega = 2*np.pi*freqs_bare_GHz[mode] + self.omega = 2 * np.pi * freqs_bare_GHz[mode] - Qm_coupling[mode] = self.calc_Q_external(variation, - freqs_bare_GHz[mode], - self.U_E) + Qm_coupling[mode] = self.calc_Q_external( + variation, freqs_bare_GHz[mode], self.U_E + ) # get seam Q - if self.pinfo.dissipative['seams']: - for seam in self.pinfo.dissipative['seams']: - sol = pd.concat([sol, self.get_Qseam(seam, mode, variation, self.U_H)]) + if self.pinfo.dissipative["seams"]: + for seam in self.pinfo.dissipative["seams"]: + sol = pd.concat( + [sol, self.get_Qseam(seam, mode, variation, self.U_H)] + ) # get Q dielectric - if self.pinfo.dissipative['dielectrics_bulk']: - for dielectric in self.pinfo.dissipative['dielectrics_bulk']: - sol = pd.concat([sol, self.get_Qdielectric(dielectric, mode, variation, self.U_E)]) + if self.pinfo.dissipative["dielectrics_bulk"]: + for dielectric in self.pinfo.dissipative["dielectrics_bulk"]: + sol = pd.concat( + [ + sol, + self.get_Qdielectric( + dielectric, mode, variation, self.U_E + ), + ] + ) # get Q surface - if self.pinfo.dissipative['dielectric_surfaces']: - if self.pinfo.dissipative['dielectric_surfaces'] == 'all': - sol = pd.concat([sol, self.get_Qsurface_all(mode, variation, self.U_E)]) + if self.pinfo.dissipative["dielectric_surfaces"]: + if self.pinfo.dissipative["dielectric_surfaces"] == "all": + sol = pd.concat( + [sol, self.get_Qsurface_all(mode, variation, self.U_E)] + ) else: - for surface, properties in self.pinfo.dissipative['dielectric_surfaces'].items(): - sol = pd.concat([sol, self.get_Qsurface(mode, variation, surface, self.U_E, properties)]) + for surface, properties in self.pinfo.dissipative[ + "dielectric_surfaces" + ].items(): + sol = pd.concat( + [ + sol, + self.get_Qsurface( + mode, variation, surface, self.U_E, properties + ), + ] + ) SOL[mode] = sol # Save - self._update_results(variation, Om, Pm, Sm, Qm_coupling, SOL, - freqs_bare_GHz, Qs_bare, Ljs, Cjs, - Pm_cap, I_peak, V_peak, - ansys_energies, - self._hfss_variables[variation]) + self._update_results( + variation, + Om, + Pm, + Sm, + Qm_coupling, + SOL, + freqs_bare_GHz, + Qs_bare, + Ljs, + Cjs, + Pm_cap, + I_peak, + V_peak, + ansys_energies, + self._hfss_variables[variation], + ) self.save() self._previously_analyzed.add(variation) - print('\nANALYSIS DONE. Data saved to:\n\n' + - str(self.data_filename)+'\n\n') + print("\nANALYSIS DONE. 
Data saved to:\n\n" + str(self.data_filename) + "\n\n") return self.data_filename, variations - def _update_results(self, variation: str, Om, Pm, Sm, Qm_coupling, sols, - freqs_bare_GHz, Qs_bare, Ljs, Cjs, Pm_cap, I_peak, V_peak, - ansys_energies, _hfss_variables): - ''' + def _update_results( + self, + variation: str, + Om, + Pm, + Sm, + Qm_coupling, + sols, + freqs_bare_GHz, + Qs_bare, + Ljs, + Cjs, + Pm_cap, + I_peak, + V_peak, + ansys_energies, + _hfss_variables, + ): + """ Save variation - ''' + """ # raw, not normalized - DataFrames - self.results[variation]['Pm'] = pd.DataFrame(Pm).transpose() - self.results[variation]['Pm_cap'] = pd.DataFrame(Pm_cap).transpose() - self.results[variation]['Sm'] = pd.DataFrame(Sm).transpose() - self.results[variation]['Om'] = pd.DataFrame(Om) - self.results[variation]['sols'] = pd.DataFrame(sols).transpose() - self.results[variation]['Qm_coupling'] = pd.DataFrame( - Qm_coupling).transpose() - - self.results[variation]['Ljs'] = Ljs # pd.Series - self.results[variation]['Cjs'] = Cjs # pd.Series - self.results[variation]['Qs'] = Qs_bare - self.results[variation]['freqs_hfss_GHz'] = freqs_bare_GHz - self.results[variation]['hfss_variables'] = _hfss_variables - self.results[variation]['modes'] = self.modes + self.results[variation]["Pm"] = pd.DataFrame(Pm).transpose() + self.results[variation]["Pm_cap"] = pd.DataFrame(Pm_cap).transpose() + self.results[variation]["Sm"] = pd.DataFrame(Sm).transpose() + self.results[variation]["Om"] = pd.DataFrame(Om) + self.results[variation]["sols"] = pd.DataFrame(sols).transpose() + self.results[variation]["Qm_coupling"] = pd.DataFrame(Qm_coupling).transpose() + + self.results[variation]["Ljs"] = Ljs # pd.Series + self.results[variation]["Cjs"] = Cjs # pd.Series + self.results[variation]["Qs"] = Qs_bare + self.results[variation]["freqs_hfss_GHz"] = freqs_bare_GHz + self.results[variation]["hfss_variables"] = _hfss_variables + self.results[variation]["modes"] = self.modes # mostly for debug info - self.results[variation]['I_peak'] = pd.Series(I_peak) - self.results[variation]['V_peak'] = pd.Series(V_peak) - self.results[variation]['ansys_energies'] = ansys_energies # dict + self.results[variation]["I_peak"] = pd.Series(I_peak) + self.results[variation]["V_peak"] = pd.Series(V_peak) + self.results[variation]["ansys_energies"] = ansys_energies # dict - self.results[variation]['mesh'] = None - self.results[variation]['convergence'] = None - self.results[variation]['convergence_f_pass'] = None + self.results[variation]["mesh"] = None + self.results[variation]["convergence"] = None + self.results[variation]["convergence_f_pass"] = None if self.options.save_mesh_stats: - self.results[variation]['mesh'] = self.get_mesh_statistics( - variation) # dataframe - self.results[variation]['convergence'] = self.get_convergence( - variation) - self.results[variation]['convergence_f_pass'] = self.hfss_report_f_convergence( - variation, save_csv=False) # dataframe + self.results[variation]["mesh"] = self.get_mesh_statistics( + variation + ) # dataframe + self.results[variation]["convergence"] = self.get_convergence(variation) + self.results[variation][ + "convergence_f_pass" + ] = self.hfss_report_f_convergence( + variation, save_csv=False + ) # dataframe @staticmethod def results_variations_on_inside(results: dict): @@ -1390,15 +1567,19 @@ def results_variations_on_inside(results: dict): new_res = dict() for key in keys: - new_res[key] = {variation: results[variation].get(key, None) - for variation in variations} + new_res[key] = { + 
variation: results[variation].get(key, None) for variation in variations + } # Convert to pandas DataFrame if all are pd.Series - if all(isinstance(new_res[key][variation], pd.Series) for variation in variations): + if all( + isinstance(new_res[key][variation], pd.Series) + for variation in variations + ): # print(key) # Convert these to a DataFrame # Variations will become columns new_res[key] = pd.DataFrame(new_res[key]) - new_res[key].columns.name = 'variation' + new_res[key].columns.name = "variation" # sort_df_col : maybe sort return new_res # dict of keys now @@ -1418,7 +1599,7 @@ def save(self, project_info: dict = None): results=self.results, ) - with open(str(self.data_filename), 'wb') as handle: + with open(str(self.data_filename), "wb") as handle: pickle.dump(to_save, handle) # , protocol=pickle.HIGHEST_PROTOCOL) def load(self, filepath=None): @@ -1429,13 +1610,13 @@ """ filepath = filepath or self.data_filename - with open(str(filepath), 'rb') as handle: + with open(str(filepath), "rb") as handle: loaded = pickle.load(handle) return loaded - def get_mesh_statistics(self, variation='0'): - ''' + def get_mesh_statistics(self, variation="0"): + """ Args: variation (str): A string identifier of the variation, such as '0', '1', ... @@ -1450,12 +1631,12 @@ 0 Region 909451 0.000243 0.860488 0.037048 6.006260e-13 0.037352 0.000029 6.268190e-04 1 substrate 1490356 0.000270 0.893770 0.023639 1.160090e-12 0.031253 0.000007 2.309920e-04 - ''' + """ variation = self._list_variations[ureg(variation)] return self.setup.get_mesh_stats(variation) - def get_convergence(self, variation='0'): - ''' + def get_convergence(self, variation="0"): + """ Args: variation (str): A string identifier of the variation, such as '0', '1', ... @@ -1472,13 +1653,13 @@ 3 192746 3.208600 4 199244 1.524000 - ''' + """ variation = self._list_variations[ureg(variation)] df, _ = self.setup.get_convergence(variation) return df - def get_convergence_vs_pass(self, variation='0'): - ''' + def get_convergence_vs_pass(self, variation="0"): + """ Makes a plot in HFSS and returns a pandas DataFrame Args: @@ -1497,44 +1678,48 @@ 2 5.114490 5.505828 6.242423 3 5.278594 5.604426 6.296777 - ''' + """ return self.hfss_report_f_convergence(variation) def set_mode(self, mode_num, phase=0): - ''' + """ Set source excitations; should be used for fields post-processing. Counting modes from 0 onward - ''' + """ assert ( + self.setup + ), "ERROR: There is no 'setup' connected. \N{face with medical mask}" if mode_num < 0: - logger.error('Too small a mode number') + logger.error("Too small a mode number") self.solutions.set_mode(mode_num + 1, phase) if self.has_fields() == False: - logger.warning(f" Error: HFSS does not have field solution for variation={mode_num}.\ - Skipping this mode in the analysis \N{face with medical mask}") + logger.warning( + f" Error: HFSS does not have field solution for mode={mode_num}.\ + Skipping this mode in the analysis \N{face with medical mask}" + ) self.fields = self.setup.get_fields() def has_fields(self, variation: str = None): - ''' + """ Determine if fields exist for a particular solution. Just calls `self.solutions.has_fields(variation_string)` Args: variation (str): String of variation label, such as '0' or '1'. 
If None, gets the nominal variation - ''' + """ if self.solutions: - #print('variation=', variation) + # print('variation=', variation) variation_string = self.get_variation_string(variation) return self.solutions.has_fields(variation_string) else: return False - def hfss_report_f_convergence(self, variation='0', save_csv=True): - ''' + def hfss_report_f_convergence(self, variation="0", save_csv=True): + """ Create a report inside HFSS to plot the convergence of the frequencies and style it. Saves report to csv file. @@ -1550,13 +1735,13 @@ 2 5.114490 5.505828 6.242423 3 5.278594 5.604426 6.296777 - ''' + """ # TODO: Move to class for reporter ? if not self.setup: - logger.error('NO SETUP PRESENT - hfss_report_f_convergence.') + logger.error("NO SETUP PRESENT - hfss_report_f_convergence.") return None - if not self.design.solution_type == 'Eigenmode': + if not self.design.solution_type == "Eigenmode": return None oDesign = self.design @@ -1564,34 +1749,38 @@ report = oDesign._reporter # Create report - ycomp = [f"re(Mode({i}))" for i in range(1, 1+self.n_modes)] - params = ["Pass:=", ["All"]]+variation + ycomp = [f"re(Mode({i}))" for i in range(1, 1 + self.n_modes)] + params = ["Pass:=", ["All"]] + variation report_name = "Freq. vs. pass" if report_name in report.GetAllReportNames(): report.DeleteReports([report_name]) self.solutions.create_report( - report_name, "Pass", ycomp, params, pass_name='AdaptivePass') + report_name, "Pass", ycomp, params, pass_name="AdaptivePass" + ) # Properties of lines - curves = [f"{report_name}:re(Mode({i})):Curve1" for i in range( - 1, 1+self.n_modes)] + curves = [ + f"{report_name}:re(Mode({i})):Curve1" for i in range(1, 1 + self.n_modes) + ] # set_property(report, 'Attributes', curves, 'Line Width', 3) - set_property(report, 'Scaling', - f"{report_name}:AxisY1", 'Auto Units', False) - set_property(report, 'Scaling', f"{report_name}:AxisY1", 'Units', 'g') - set_property(report, 'Legend', - f"{report_name}:Legend", 'Show Solution Name', False) + set_property(report, "Scaling", f"{report_name}:AxisY1", "Auto Units", False) + set_property(report, "Scaling", f"{report_name}:AxisY1", "Units", "g") + set_property( + report, "Legend", f"{report_name}:Legend", "Show Solution Name", False + ) if save_csv: # Save try: - path = Path(self.data_dir)/'hfss_eig_f_convergence.csv' + path = Path(self.data_dir) / "hfss_eig_f_convergence.csv" report.ExportToFile(report_name, path) - logger.info(f'Saved convergences to {path}') + logger.info(f"Saved convergences to {path}") return pd.read_csv(path, index_col=0) except Exception as e: - logger.error(f"Error could not save and export hfss plot to {path}.\ + logger.error( + f"Error could not save and export hfss plot to {path}.\ Was the plot made in HFSS with the correct name?\ - Check the HFSS error window. 
\t Error = {e}" + ) return None @@ -1608,7 +1797,7 @@ def hfss_report_full_convergence(self, fig=None, _display=True): """ if fig is None: - fig = plt.figure(figsize=(11, 3.)) + fig = plt.figure(figsize=(11, 3.0)) for variation, variation_labels in self.get_variations().items(): fig.clf() @@ -1617,34 +1806,38 @@ gs = mpl.gridspec.GridSpec(1, 3, width_ratios=[1.2, 1.5, 1]) axs = [fig.add_subplot(gs[i]) for i in range(3)] - logger.info(f'Creating report for variation {variation}') + logger.info(f"Creating report for variation {variation}") convergence_t = self.get_convergence(variation=variation) convergence_f = self.hfss_report_f_convergence(variation=variation) - axs[0].set_ylabel(variation_labels.replace(' ', '\n')) # add variation labels to y-axis of first plot + axs[0].set_ylabel( + variation_labels.replace(" ", "\n") + ) # add variation labels to y-axis of first plot ax0t = axs[1].twinx() plot_convergence_f_vspass(axs[0], convergence_f) plot_convergence_max_df(axs[1], convergence_t.iloc[:, 1]) plot_convergence_solved_elem(ax0t, convergence_t.iloc[:, 0]) - plot_convergence_maxdf_vs_sol(axs[2], convergence_t.iloc[:, 1], - convergence_t.iloc[:, 0]) + plot_convergence_maxdf_vs_sol( + axs[2], convergence_t.iloc[:, 1], convergence_t.iloc[:, 0] + ) fig.tight_layout(w_pad=0.1) # pad=0.0, w_pad=0.1, h_pad=1.0) if _display: from IPython.display import display + display(fig) return fig - def quick_plot_frequencies(self, swp_variable='variations', ax=None): + def quick_plot_frequencies(self, swp_variable="variations", ax=None): """ Quick plot of frequencies from HFSS """ fs = self.get_ansys_frequencies_all(swp_variable) ax = ax or plt.gca() - fs['Freq. (GHz)'].unstack(0).transpose().plot(marker='o', ax=ax) - ax.set_ylabel('Ansys frequencies (MHz)') + fs["Freq. (GHz)"].unstack(0).transpose().plot(marker="o", ax=ax) + ax.set_ylabel("Ansys frequencies (GHz)") ax.grid(alpha=0.2) return fs diff --git a/pyEPR/core_quantum_analysis.py b/pyEPR/core_quantum_analysis.py index 1b936c5..cdff996 100644 --- a/pyEPR/core_quantum_analysis.py +++ b/pyEPR/core_quantum_analysis.py @@ -33,11 +33,19 @@ from .calcs.constants import Planck, fluxQ from .core_distributed_analysis import DistributedAnalysis from .toolbox.plotting import cmap_discrete, legend_translucent -from .toolbox.pythonic import (DataFrame_col_diff, divide_diagonal_by_2, - print_color, print_matrix, sort_df_col, - sort_Series_idx, df_find_index, series_of_1D_dict_to_multi_df) +from .toolbox.pythonic import ( + DataFrame_col_diff, + divide_diagonal_by_2, + print_color, + print_matrix, + sort_df_col, + sort_Series_idx, + df_find_index, + series_of_1D_dict_to_multi_df, +) + +from .reports import plot_convergence_max_df, plot_convergence_solved_elem -from .reports import (plot_convergence_max_df, plot_convergence_solved_elem) class HamiltonianResultsContainer(OrderedDict): """ @@ -48,22 +56,22 @@ It is a dictionary based class to contain the results stored. """ - file_name_extra = ' HamiltonianResultsContainer.npz' + file_name_extra = " HamiltonianResultsContainer.npz" def __init__(self, dict_file=None, data_dir=None): - """ input: - dict file - 1. either None to create an empty results hamiltonian as - as was done in the original code + """input: + dict file - 1. either None to create an empty results hamiltonian, + as was done in the original code - 2. 
or a string with the name of the file where the file of the - previously saved HamiltonianResultsContainer instance we wish - to load + 2. or a string with the name of the file of a + previously saved HamiltonianResultsContainer instance we wish + to load - 3. or an existing instance of a dict class which will be - upgraded to the HamiltonianResultsContainer class + 3. or an existing instance of a dict class which will be + upgraded to the HamiltonianResultsContainer class - data_dir - the directory in which the file is to be saved or loaded - from, defaults to the config.root_dir + data_dir - the directory in which the file is to be saved or loaded + from, defaults to the config.root_dir """ super().__init__() @@ -71,8 +79,11 @@ self.sort_index = True # for retrieval if data_dir is None: - data_dir = Path(config.root_dir) / 'temp' / \ - time.strftime('%Y-%m-%d %H-%M-%S', time.localtime()) + data_dir = ( + Path(config.root_dir) + / "temp" + / time.strftime("%Y-%m-%d %H-%M-%S", time.localtime()) + ) data_dir = Path(data_dir).resolve() file_name = data_dir.stem directory.mkdir(parents=True, exist_ok=True) if dict_file is None: - self.file_name = str( - directory/(str(file_name)+self.file_name_extra)) - #logger.info(f'Filename hamiltonian params to {self.file_name }') + self.file_name = str(directory / (str(file_name) + self.file_name_extra)) + # logger.info(f'Filename hamiltonian params to {self.file_name }') elif isinstance(dict_file, str): try: - self.file_name = str(data_dir)+'\\' + dict_file + self.file_name = str(data_dir) + "\\" + dict_file self.load() except: self.file_name = dict_file self.load() elif isinstance(dict_file, dict): # Deprecated self._inject_dic(dict_file) - self.file_name = str(data_dir)+self.file_name_extra + self.file_name = str(data_dir) + self.file_name_extra else: - raise ValueError( - 'type dict_file is of type {}'.format(type(dict_file))) + raise ValueError("type dict_file is of type {}".format(type(dict_file))) # load file def save(self, filename: str = None): @@ -129,8 +138,8 @@ for key, val in add_dic.items(): # TODO remove all copies of same data # if key in self.keys(): - #raise ValueError('trying to overwrite an existing variation') + # raise ValueError('trying to overwrite an existing variation') self[str(int(key) + Init_number_of_keys)] = val return 1 @staticmethod def _do_sort_index(z: pd.DataFrame): else: return z - def vs_variations(self, - quantity: str, - variations: list = None, - vs='variation', - to_dataframe=False): + def vs_variations( + self, quantity: str, variations: list = None, vs="variation", to_dataframe=False + ): """ QUANTITIES: res = OrderedDict() for key in variations: - if vs == 'variation': + if vs == "variation": res[key] = self[key][quantity] else: # convert the key to numeric if possible - key_new = ureg.Quantity( - self[key]['hfss_variables']['_'+vs]).magnitude + key_new = ureg.Quantity(self[key]["hfss_variables"]["_" + vs]).magnitude res[key_new] = self[key][quantity] # Convert to dataframe # Quick lookup function - def get_frequencies_HFSS(self, variations: list = None, vs='variation'): - '''See help for `vs_variations`''' - return 
self.vs_variations('f_0', variations=variations, vs=vs, to_dataframe=True) + def get_frequencies_HFSS(self, variations: list = None, vs="variation"): + """See help for `vs_variations`""" + return self.vs_variations( + "f_0", variations=variations, vs=vs, to_dataframe=True + ) - def get_frequencies_O1(self, variations: list = None, vs='variation'): - '''See help for `vs_variations`''' - return self.vs_variations('f_1', variations=variations, vs=vs, to_dataframe=True) + def get_frequencies_O1(self, variations: list = None, vs="variation"): + """See help for `vs_variations`""" + return self.vs_variations( + "f_1", variations=variations, vs=vs, to_dataframe=True + ) - def get_frequencies_ND(self, variations: list = None, vs='variation'): - '''See help for `vs_variations`''' - return self.vs_variations('f_ND', variations=variations, vs=vs, to_dataframe=True) + def get_frequencies_ND(self, variations: list = None, vs="variation"): + """See help for `vs_variations`""" + return self.vs_variations( + "f_ND", variations=variations, vs=vs, to_dataframe=True + ) - def get_chi_O1(self, variations: list = None, vs='variation'): - return self.vs_variations('chi_O1', variations=variations, vs=vs) + def get_chi_O1(self, variations: list = None, vs="variation"): + return self.vs_variations("chi_O1", variations=variations, vs=vs) - def get_chi_ND(self, variations: list = None, vs='variation'): - return self.vs_variations('chi_ND', variations=variations, vs=vs) + def get_chi_ND(self, variations: list = None, vs="variation"): + return self.vs_variations("chi_ND", variations=variations, vs=vs) class QuantumAnalysis(object): - ''' + """ Defines an analysis object which loads and plots data from a h5 file This data is obtained using DistributedAnalysis - ''' + """ - def __init__(self, data_filename, - variations: list = None, - do_print_info=True, - Res_hamil_filename=None): + def __init__( + self, + data_filename, + variations: list = None, + do_print_info=True, + Res_hamil_filename=None, + ): self.data_filename = data_filename - self.results = HamiltonianResultsContainer(dict_file=Res_hamil_filename, - data_dir=data_filename) + self.results = HamiltonianResultsContainer( + dict_file=Res_hamil_filename, data_dir=data_filename + ) - with open(str(data_filename), 'rb') as handle: + with open(str(data_filename), "rb") as handle: # Contain everything: project_info and results self.data = Dict(pickle.load(handle)) # Reverse from variations on outside to on inside - results = DistributedAnalysis.results_variations_on_inside( - self.data.results) + results = DistributedAnalysis.results_variations_on_inside(self.data.results) # Convenience functions self.variations = variations or list(self.data.results.keys()) - self._hfss_variables = results['hfss_variables'] - self.freqs_hfss = results['freqs_hfss_GHz'] - self.Qs = results['Qs'] - self.Qm_coupling = results['Qm_coupling'] - self.Ljs = results['Ljs'] # DataFrame - self.Cjs = results['Cjs'] # DataFrame - self.OM = results['Om'] # dict of dataframes - self.PM = results['Pm'] # participation matrices - raw, unnormed here + self._hfss_variables = results["hfss_variables"] + self.freqs_hfss = results["freqs_hfss_GHz"] + self.Qs = results["Qs"] + self.Qm_coupling = results["Qm_coupling"] + self.Ljs = results["Ljs"] # DataFrame + self.Cjs = results["Cjs"] # DataFrame + self.OM = results["Om"] # dict of dataframes + self.PM = results["Pm"] # participation matrices - raw, unnormed here # participation matrices for capacitive elements - self.PM_cap = results['Pm_cap'] - self.SM 
= results['Sm'] # sign matrices - self.I_peak = results['I_peak'] - self.V_peak = results['V_peak'] - self.modes = results['modes'] + self.PM_cap = results["Pm_cap"] + self.SM = results["Sm"] # sign matrices + self.I_peak = results["I_peak"] + self.V_peak = results["V_peak"] + self.modes = results["modes"] - self.sols = results['sols'] - self.ansys_energies = results.get('ansys_energies', {}) + self.sols = results["sols"] + self.ansys_energies = results.get("ansys_energies", {}) - self.mesh_stats = results['mesh'] - self.convergence = results['convergence'] - self.convergence_f_pass = results['convergence_f_pass'] + self.mesh_stats = results["mesh"] + self.convergence = results["convergence"] + self.convergence_f_pass = results["convergence_f_pass"] self.n_modes = len(self.modes[self.variations[0]]) self._renorm_pj = config.epr.renorm_pj @@ -273,7 +288,8 @@ self.hfss_vars_diff_idx = dum if not (dum.any() == False) else [] try: self.Num_hfss_vars_diff_idx = len( - self.hfss_vars_diff_idx[self.hfss_vars_diff_idx == True]) + self.hfss_vars_diff_idx[self.hfss_vars_diff_idx == True] + ) except: e = sys.exc_info()[0] logger.warning("\n\nError: %s\n\n" % e) @@ -290,7 +306,7 @@ def print_info(self): print("\t Differences in variations:") if len(self.hfss_vars_diff_idx) > 0: display(self._hfss_variables[self.hfss_vars_diff_idx]) - print('\n') + print("\n") def get_vs_variable(self, swp_var, attr: str): """ @@ -301,30 +317,35 @@ swp_var (str) : name of sweep variable in ansys attr: name of local attribute, e.g., 'ansys_energies' """ - #from collections import OrderedDict + # from collections import OrderedDict variable = self.get_variable_vs(swp_var) - return OrderedDict([(variable[variation], val) - for variation, val in getattr(self, attr).items()]) + return OrderedDict( + [ + (variable[variation], val) + for variation, val in getattr(self, attr).items() + ] + ) def get_variable_vs(self, swpvar, lv=None): - """ lv is list of variations (example ['0', '1']), if None it takes all variations - swpvar is the variable by which to organize + """lv is list of variations (example ['0', '1']), if None it takes all variations + swpvar is the variable by which to organize - return: - ordered dictionary of key which is the variation number and the magnitude - of swaver as the item + return: + ordered dictionary of key which is the variation number and the magnitude + of swpvar as the item """ ret = OrderedDict() if lv is None: for key, varz in self._hfss_variables.items(): - ret[key] = ureg.Quantity(varz['_'+swpvar]).magnitude + ret[key] = ureg.Quantity(varz["_" + swpvar]).magnitude else: try: for key in lv: ret[key] = ureg.Quantity( - self._hfss_variables[key]['_'+swpvar]).magnitude + self._hfss_variables[key]["_" + swpvar] + ).magnitude except: - print(' No such variation as ' + key) + print(" No such variation as " + key) return ret def get_variable_value(self, swpvar, lv=None): @@ -334,13 +355,13 @@ def get_variations_of_variable_value(self, swpvar, value, lv=None): """A function to return all the variations in which one of the variables - has a specific value lv is list of variations (example ['0', '1']), - if None it takes all variations - swpvar is a string and the name of the variable we wish to filter - value is the value of swapvr in which we are interested + has a specific value lv is list of variations (example ['0', '1']), + if None it takes all variations + swpvar is a string and the name of the variable we wish to filter + value is the value of swpvar in which we are interested - returns lv - a list of the variations for which swavr==value - """ + returns lv - a list of the variations for which swpvar==value + """ if lv is None: lv = self.variations ret = self.get_variable_vs(swpvar, lv) lv = np.array(list(ret.keys()))[np.array(list(ret.values())) == value] - #lv = lv_temp if not len(lv_temp) else lv + # lv = lv_temp if not len(lv_temp) else lv if not (len(lv)): - raise ValueError('No variations have the variable-' + swpvar + - '= {}'.format(value)) + raise ValueError( + "No variations have the variable-" + swpvar + "= {}".format(value) + ) return list(lv) def get_variation_of_multiple_variables_value(self, Var_dic, lv=None): """ - SEE get_variations_of_variable_value - A function to return 
all the variations in which one of the variables has a specific value + lv is list of variations (example ['0', '1']), if None it takes all variations + Var_dic is a dict with the name of the variable as key and the value to filter as item + """ if lv is None: lv = self.variations @@ -371,42 +393,42 @@ for key, var in Var_dic.items(): lv = self.get_variations_of_variable_value(key, var, lv) if var_str is None: - var_str = key + '= {}'.format(var) + var_str = key + "= {}".format(var) else: - var_str = var_str + ' & ' + key + '= {}'.format(var) + var_str = var_str + " & " + key + "= {}".format(var) return lv, var_str def get_convergences_max_tets(self): - ''' Index([u'Pass Number', u'Solved Elements', u'Max Delta Freq. %' ]) ''' + """Index([u'Pass Number', u'Solved Elements', u'Max Delta Freq. %' ])""" ret = OrderedDict() for key, df in self.convergence.items(): - ret[key] = df['Solved Elements'].iloc[-1] + ret[key] = df["Solved Elements"].iloc[-1] return ret def get_convergences_tets_vs_pass(self, as_dataframe=True): - ''' Index([u'Pass Number', u'Solved Elements', u'Max Delta Freq. %' ]) ''' + """Index([u'Pass Number', u'Solved Elements', u'Max Delta Freq. %' ])""" ret = OrderedDict() for key, df in self.convergence.items(): - s = df['Solved Elements'] - s = s.reset_index().dropna().set_index('Pass Number') - #s.index = df['Pass Number'] + s = df["Solved Elements"] + s = s.reset_index().dropna().set_index("Pass Number") + # s.index = df['Pass Number'] ret[key] = s if as_dataframe: ret = pd.concat(ret) - ret = ret.unstack(0)['Solved Elements'] + ret = ret.unstack(0)["Solved Elements"] return ret def get_convergences_max_delta_freq_vs_pass(self, as_dataframe=True): - ''' Index([u'Pass Number', u'Solved Elements', u'Max Delta Freq. %' ]) ''' - KEY = 'Max Delta Freq. %' + """Index([u'Pass Number', u'Solved Elements', u'Max Delta Freq. %' ])""" + KEY = "Max Delta Freq. %" ret = OrderedDict() for key, df in self.convergence.items(): s = df[KEY] - s = s.reset_index().dropna().set_index('Pass Number') - #s.index = df['Pass Number'] + s = s.reset_index().dropna().set_index("Pass Number") + # s.index = df['Pass Number'] ret[key] = s if as_dataframe: @@ -418,36 +440,35 @@ def get_mesh_tot(self): ret = OrderedDict() for key, m in self.mesh_stats.items(): - ret[key] = m['Num Tets '].sum() + ret[key] = m["Num Tets "].sum() return ret def get_Ejs(self, variation): - ''' EJs in GHz + """EJs in GHz See calcs.convert - ''' + """ Ljs = self.Ljs[variation] - Ejs = fluxQ**2/Ljs/Planck*10**-9 + Ejs = fluxQ**2 / Ljs / Planck * 10**-9 return Ejs def get_Ecs(self, variation): - ''' ECs in GHz + """ECs in GHz Returns as pandas series - ''' + """ Cs = self.Cjs[variation] - return Convert.Ec_from_Cs(Cs, units_in='F', units_out='GHz') + return Convert.Ec_from_Cs(Cs, units_in="F", units_out="GHz") - def analyze_all_variations(self, - variations: List[str] = None, - analyze_previous=False, - **kwargs): - ''' + def analyze_all_variations( + self, variations: List[str] = None, analyze_previous=False, **kwargs + ): + """ See analyze_variation for full documentation Args: variations: None analyzes all variations; otherwise, a list of variation numbers as strings ['0', '1'] analyze_previous: set to true if you wish to overwrite previous analysis **kwargs: Keyword arguments passed to :func:`~pyEPR.QuantumAnalysis.analyze_variation`. 
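
The `get_Ejs` conversion above implements E_J = φ₀²/(L_J·h), reported in GHz, where `fluxQ` is assumed to be the reduced flux quantum φ₀ = ħ/2e (an assumption consistent with the formula; the constant itself is not shown in this diff). A quick sanity check with an invented 10 nH junction:

    from scipy.constants import e, h, hbar

    Lj = 10e-9                       # hypothetical junction inductance: 10 nH
    fluxQ = hbar / (2 * e)           # reduced flux quantum, assumed to match pyEPR's constant
    Ej_GHz = fluxQ**2 / Lj / h * 1e-9
    print(f"Ej = {Ej_GHz:.1f} GHz")  # ~16.3 GHz, a typical transmon scale
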
- ''' + """ result = OrderedDict() @@ -460,31 +481,34 @@ def analyze_all_variations(self, else: result[variation] = self.analyze_variation(variation, **kwargs) - self.results.save() return result def _get_ansys_total_energies(self, variation): res = {} - for getkey in ['U_tot_cap', 'U_tot_ind', 'U_H', 'U_E', 'U_norm']: - res[getkey] = pd.Series({mode: self.ansys_energies[variation][mode][getkey] - for mode in self.ansys_energies[variation]}) + for getkey in ["U_tot_cap", "U_tot_ind", "U_H", "U_E", "U_norm"]: + res[getkey] = pd.Series( + { + mode: self.ansys_energies[variation][mode][getkey] + for mode in self.ansys_energies[variation] + } + ) df = pd.DataFrame(res) - df.index.name = 'modes' + df.index.name = "modes" return df def _get_participation_normalized(self, variation, _renorm_pj=None, print_=False): - ''' - Get normalized Pmj Matrix + """ + Get normalized Pmj Matrix - Return DataFrame object for PJ - ''' + Return DataFrame object for PJ + """ if _renorm_pj is None: _renorm_pj = self._renorm_pj # Columns are junctions; rows are modes - Pm = self.PM[variation].copy() # EPR matrix DataFrame + Pm = self.PM[variation].copy() # EPR matrix DataFrame # EPR matrix for capacitor DataFrame Pm_cap = self.PM_cap[variation].copy() @@ -493,49 +517,51 @@ def _get_participation_normalized(self, variation, _renorm_pj=None, print_=False # Renormalize # Should we still do this when Pm_glb_sum is very small - #s = self.sols[variation] + # s = self.sols[variation] # sum of participation energies as calculated by global UH and UE # U_mode = s['U_E'] # peak mode energy; or U bar as i denote it sometimes # We need to add the capacitor here, and maybe take the mean of that energies = self._get_ansys_total_energies(variation) - U_mode = (energies['U_tot_cap'] + energies['U_tot_ind'])/2. - U_diff = abs(energies['U_tot_cap'] - energies['U_tot_ind'])/U_mode + U_mode = (energies["U_tot_cap"] + energies["U_tot_ind"]) / 2.0 + U_diff = abs(energies["U_tot_cap"] - energies["U_tot_ind"]) / U_mode if np.any(U_diff > 0.15): - logger.error(f"WARNING: U_tot_cap-U_tot_ind / mean = {np.max(np.abs(U_diff))*100:.1f}% is > 15%. \ - \nIs the simulation converged? Proceed with caution") + logger.error( + f"WARNING: U_tot_cap-U_tot_ind / mean = {np.max(np.abs(U_diff))*100:.1f}% is > 15%. \ + \nIs the simulation converged? Proceed with caution" + ) # global sums of participations - Pm_glb_sum = abs((U_mode-energies['U_H'])/U_mode) - Pm_cap_glb_sum = abs((U_mode-energies['U_E'])/U_mode) + Pm_glb_sum = abs((U_mode - energies["U_H"]) / U_mode) + Pm_cap_glb_sum = abs((U_mode - energies["U_E"]) / U_mode) # norms - Pm_norm = Pm_glb_sum/Pm.sum(axis=1) - Pm_cap_norm = Pm_cap_glb_sum/Pm_cap.sum(axis=1) + Pm_norm = Pm_glb_sum / Pm.sum(axis=1) + Pm_cap_norm = Pm_cap_glb_sum / Pm_cap.sum(axis=1) # this is not the correct scaling yet! WARNING. Factors of 2 laying around too # these numbers are a bit all over the place for now. very small if _renorm_pj == True or _renorm_pj == 1: - idx = Pm > -1E6 # everywhere scale - idx_cap = Pm_cap > -1E6 + idx = Pm > -1e6 # everywhere scale + idx_cap = Pm_cap > -1e6 elif _renorm_pj == 2: idx = Pm > 0.15 # Mask for where to scale idx_cap = Pm_cap > 0.15 else: raise NotImplementedError( - "Unknown _renorm_pj argument or config values!") + "Unknown _renorm_pj argument or config values!" 
+ ) if print_: # \nPm_cap_norm=\n{Pm_cap_norm}") print(f"Pm_norm=\n{Pm_norm}\n") print(f"Pm_norm idx =\n{idx}") - Pm[idx] = Pm[idx].mul(Pm_norm, axis=0) Pm_cap[idx_cap] = Pm_cap[idx_cap].mul(Pm_cap_norm, axis=0) - #Pm = Pm.mul(Pm_norm, axis=0) - #Pm_cap = Pm_cap.mul(Pm_cap_norm, axis=0) + # Pm = Pm.mul(Pm_norm, axis=0) + # Pm_cap = Pm_cap.mul(Pm_cap_norm, axis=0) else: Pm_norm = 1 @@ -543,22 +569,28 @@ def _get_participation_normalized(self, variation, _renorm_pj=None, print_=False idx = None idx_cap = None if print_: - print('NO renorm!') + print("NO renorm!") if np.any(Pm < 0.0): - print_color(" ! Warning: Some p_mj was found <= 0. This is probably a numerical error,'\ + print_color( + " ! Warning: Some p_mj was found <= 0. This is probably a numerical error,'\ 'or a super low-Q mode. We will take the abs value. Otherwise, rerun with more precision,'\ - 'inspect, and do due diligence.)") - print(Pm, '\n') + 'inspect, and do due diligence.)" + ) + print(Pm, "\n") Pm = np.abs(Pm) - return {'PJ': Pm, 'Pm_norm': Pm_norm, 'PJ_cap': Pm_cap, - 'Pm_cap_norm': Pm_cap_norm, - 'idx': idx, - 'idx_cap': idx_cap} + return { + "PJ": Pm, + "Pm_norm": Pm_norm, + "PJ_cap": Pm_cap, + "Pm_cap_norm": Pm_cap_norm, + "idx": idx, + "idx_cap": idx_cap, + } def get_epr_base_matrices(self, variation, _renorm_pj=None, print_=False): - r''' + r""" Return the key matrices used in the EPR method for analytic calculations. All as matrices @@ -571,20 +603,21 @@ def get_epr_base_matrices(self, variation, _renorm_pj=None, print_=False): Return all as *np.array* PM, SIGN, Om, EJ, Phi_ZPF - ''' + """ # TODO: supersede by Convert.ZPF_from_EPR res = self._get_participation_normalized( - variation, _renorm_pj=_renorm_pj, print_=print_) + variation, _renorm_pj=_renorm_pj, print_=print_ + ) - PJ = np.array(res['PJ']) - PJ_cap = np.array(res['PJ_cap']) + PJ = np.array(res["PJ"]) + PJ_cap = np.array(res["PJ_cap"]) # Sign bits - SJ = np.array(self.SM[variation]) # DataFrame + SJ = np.array(self.SM[variation]) # DataFrame # Frequencies of HFSS linear modes. # Input in dataframe but of one line. Output nd array - Om = np.diagflat(self.OM[variation].values) # GHz + Om = np.diagflat(self.OM[variation].values) # GHz # Junction energies EJ = np.diagflat(self.get_Ejs(variation).values) # GHz Ec = np.diagflat(self.get_Ecs(variation).values) # GHz @@ -596,17 +629,19 @@ def get_epr_base_matrices(self, variation, _renorm_pj=None, print_=False): PHI_zpf = CalcsBasic.epr_to_zpf(PJ, SJ, Om, EJ) n_zpf = CalcsBasic.epr_cap_to_nzpf(PJ, SJ, Om, Ec) - return PJ, SJ, Om, EJ, PHI_zpf, PJ_cap, n_zpf # All as np.array - - def analyze_variation(self, - variation: str, - cos_trunc: int = None, - fock_trunc: int = None, - print_result: bool = True, - junctions: List = None, - modes: List = None): + return PJ, SJ, Om, EJ, PHI_zpf, PJ_cap, n_zpf # All as np.array + + def analyze_variation( + self, + variation: str, + cos_trunc: int = None, + fock_trunc: int = None, + print_result: bool = True, + junctions: List = None, + modes: List = None, + ): # TODO avoid analyzing a previously analyzed variation - ''' + """ Core analysis function to call! Args: @@ -626,14 +661,14 @@ def analyze_variation(self, order perturbation theory. Diag is anharmonicity, off diag is full cross-Kerr. * chi_ND [MHz]: Numerically diagonalized chi matrix. Diag is anharmonicity, off diag is full cross-Kerr. 
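
The `_renorm_pj` logic above rescales the raw, field-derived participations so that, mode by mode, the junction EPRs sum to the energy fraction that the global field integrals say is missing from the fields. Schematically, for one mode with invented numbers (a sketch of the idea, not the pyEPR API):

    # Illustrative numbers only (one mode, two junctions).
    U_mode, U_H = 1.0, 0.05    # mean total energy and magnetic field energy
    Pm_raw = [0.80, 0.10]      # raw junction participations from field solutions

    Pm_glb_sum = abs((U_mode - U_H) / U_mode)  # target sum for the EPRs: 0.95
    Pm_norm = Pm_glb_sum / sum(Pm_raw)         # per-mode scale factor
    Pm = [p * Pm_norm for p in Pm_raw]         # rescaled; now sums to 0.95
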
- ''' + """ # ensuring proper matrix dimensionality when slicing junctions = (junctions,) if type(junctions) is int else junctions if modes is None: modes = list(range(self.n_modes)) - + tmp_n_modes = self.n_modes tmp_modes = self.modes[variation] self.n_modes = len(modes) @@ -643,20 +678,21 @@ def analyze_variation(self, fock_trunc = cos_trunc = None if print_result: - print('\n', '. '*40) - print('Variation %s\n' % variation) + print("\n", ". " * 40) + print("Variation %s\n" % variation) else: - print('%s, ' % variation, end='') + print("%s, " % variation, end="") # Get matrices - PJ, SJ, Om, EJ, PHI_zpf, PJ_cap, n_zpf = self.get_epr_base_matrices( - variation) + PJ, SJ, Om, EJ, PHI_zpf, PJ_cap, n_zpf = self.get_epr_base_matrices(variation) freqs_hfss = self.freqs_hfss[variation].values[(modes)] Ljs = self.Ljs[variation].values # reduce matrices to only include certain modes/junctions if junctions is not None: - Ljs = Ljs[junctions, ] + Ljs = Ljs[ + junctions, + ] PJ = PJ[:, junctions] SJ = SJ[:, junctions] EJ = EJ[:, junctions][junctions, :] @@ -671,75 +707,85 @@ def analyze_variation(self, PJ_cap = PJ_cap[:, junctions] # Analytic 4-th order - CHI_O1 = 0.25 * Om @ PJ @ inv(EJ) @ PJ.T @ Om * 1000. # MHz - f1s = np.diag(Om) - 0.5*np.ndarray.flatten(np.array(CHI_O1.sum(1))) / \ - 1000. # 1st order PT expect freq to be dressed down by alpha - CHI_O1 = divide_diagonal_by_2(CHI_O1) # Make the diagonals alpha + CHI_O1 = 0.25 * Om @ PJ @ inv(EJ) @ PJ.T @ Om * 1000.0 # MHz + f1s = ( + np.diag(Om) - 0.5 * np.ndarray.flatten(np.array(CHI_O1.sum(1))) / 1000.0 + ) # 1st order PT expect freq to be dressed down by alpha + CHI_O1 = divide_diagonal_by_2(CHI_O1) # Make the diagonals alpha # Numerical diag if cos_trunc is not None: - f1_ND, CHI_ND = epr_numerical_diagonalization(freqs_hfss, - Ljs, - PHI_zpf, - cos_trunc=cos_trunc, - fock_trunc=fock_trunc) + f1_ND, CHI_ND = epr_numerical_diagonalization( + freqs_hfss, Ljs, PHI_zpf, cos_trunc=cos_trunc, fock_trunc=fock_trunc + ) else: f1_ND, CHI_ND = None, None result = OrderedDict() - result['f_0'] = self.freqs_hfss[variation][modes] * 1E3 # MHz - obtained directly from HFSS - result['f_1'] = pd.Series(f1s)*1E3 # MHz - result['f_ND'] = pd.Series(f1_ND)*1E-6 # MHz - result['chi_O1'] = pd.DataFrame(CHI_O1) - result['chi_ND'] = pd.DataFrame(CHI_ND) # why dataframe? - result['ZPF'] = PHI_zpf - result['Pm_normed'] = PJ + result["f_0"] = ( + self.freqs_hfss[variation][modes] * 1e3 + ) # MHz - obtained directly from HFSS + result["f_1"] = pd.Series(f1s) * 1e3 # MHz + result["f_ND"] = pd.Series(f1_ND) * 1e-6 # MHz + result["chi_O1"] = pd.DataFrame(CHI_O1) + result["chi_ND"] = pd.DataFrame(CHI_ND) # why dataframe? 
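
The analytic fourth-order block above is compact enough to verify numerically: χ = ¼ Ω P E_J⁻¹ Pᵀ Ω (converted to MHz by the factor of 1000), with the first-order frequencies dressed down by half the row sums and the diagonal then halved by `divide_diagonal_by_2` to report anharmonicities. A self-contained sketch with invented values:

    import numpy as np
    from numpy.linalg import inv

    Om = np.diagflat([5.2, 7.1])     # mode frequencies (GHz), invented
    EJ = np.diagflat([22.0])         # junction Josephson energy (GHz), invented
    PJ = np.array([[0.92], [0.05]])  # inductive EPRs, modes x junctions

    CHI_O1 = 0.25 * Om @ PJ @ inv(EJ) @ PJ.T @ Om * 1000.0             # full chi, MHz
    f1s = np.diag(Om) - 0.5 * np.asarray(CHI_O1).sum(axis=1) / 1000.0  # dressed freqs, GHz
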
+ result["ZPF"] = PHI_zpf + result["Pm_normed"] = PJ try: - result['Pm_raw'] = self.PM[variation][self.PM[variation].columns[0]][modes]#TODO change the columns to junctions + result["Pm_raw"] = self.PM[variation][self.PM[variation].columns[0]][ + modes + ] # TODO change the columns to junctions except: - result['Pm_raw'] = self.PM[variation] + result["Pm_raw"] = self.PM[variation] _temp = self._get_participation_normalized( - variation, _renorm_pj=self._renorm_pj, print_=print_result) - result['_Pm_norm'] = _temp['Pm_norm'][modes] - result['_Pm_cap_norm'] = _temp['Pm_cap_norm'][modes] + variation, _renorm_pj=self._renorm_pj, print_=print_result + ) + result["_Pm_norm"] = _temp["Pm_norm"][modes] + result["_Pm_cap_norm"] = _temp["Pm_cap_norm"][modes] # just propagate - result['hfss_variables'] = self._hfss_variables[variation] - result['Ljs'] = self.Ljs[variation] - result['Cjs'] = self.Cjs[variation] + result["hfss_variables"] = self._hfss_variables[variation] + result["Ljs"] = self.Ljs[variation] + result["Cjs"] = self.Cjs[variation] try: - result['Q_coupling'] = self.Qm_coupling[variation][self.Qm_coupling[variation].columns[junctions]][modes]#TODO change the columns to junctions + result["Q_coupling"] = self.Qm_coupling[variation][ + self.Qm_coupling[variation].columns[junctions] + ][ + modes + ] # TODO change the columns to junctions except: - result['Q_coupling'] = self.Qm_coupling[variation] - + result["Q_coupling"] = self.Qm_coupling[variation] + try: - result['Qs'] = self.Qs[variation][self.PM[variation].columns[junctions]][modes] #TODO change the columns to junctions + result["Qs"] = self.Qs[variation][self.PM[variation].columns[junctions]][ + modes + ] # TODO change the columns to junctions except: - result['Qs'] = self.Qs[variation][modes] + result["Qs"] = self.Qs[variation][modes] - result['sol'] = self.sols[variation] + result["sol"] = self.sols[variation] - result['fock_trunc'] = fock_trunc - result['cos_trunc'] = cos_trunc + result["fock_trunc"] = fock_trunc + result["cos_trunc"] = cos_trunc self.results[variation] = result self.results.save() - + if print_result: self.print_variation(variation) self.print_result(result) - - self.n_modes = tmp_n_modes # TODO is this smart should consider defining the modes of interest in the initialisation of the quantum object - self.modes[variation]=tmp_modes + + self.n_modes = tmp_n_modes # TODO is this smart should consider defining the modes of interest in the initialisation of the quantum object + self.modes[variation] = tmp_modes return result - def full_report_variations(self, var_list: list=None): + def full_report_variations(self, var_list: list = None): """see full_variation_report""" if var_list is None: var_list = self.variations - for variation in var_list: + for variation in var_list: self.full_variation_report(variation) - + def full_variation_report(self, variation): """ prints the results and parameters of a specific variation @@ -755,64 +801,67 @@ def full_variation_report(self, variation): """ self.print_variation(variation) - + self.print_result(variation) def print_variation(self, variation): """ Utility reporting function """ - if variation is int: variation = str(variation) + if variation is int: + variation = str(variation) if len(self.hfss_vars_diff_idx) > 0: - print('\n*** Different parameters') + print("\n*** Different parameters") display(self._hfss_variables[self.hfss_vars_diff_idx][variation]) - print('\n') + print("\n") - print('*** P (participation matrix, not normlz.)') + print("*** P (participation matrix, 
not normalized.)") print(self.PM[variation]) - print('\n*** S (sign-bit matrix)') + print("\n*** S (sign-bit matrix)") print(self.SM[variation]) def print_result(self, result): """ Utility reporting function """ - if type(result) is str or type(result) is int: result = self.results[str(result)] + if type(result) is str or type(result) is int: + result = self.results[str(result)] # TODO: actually make into dataframe with mode labels and junction labels pritm = lambda x, frmt="{:9.2g}": print_matrix(x, frmt=frmt) - print('*** P (participation matrix, normalized.)') - pritm(result['Pm_normed']) + print("*** P (participation matrix, normalized.)") + pritm(result["Pm_normed"]) - print('\n*** Chi matrix O1 PT (MHz)\n Diag is anharmonicity, off diag is full cross-Kerr.') - pritm(result['chi_O1'], "{:9.3g}") + print( + "\n*** Chi matrix O1 PT (MHz)\n Diag is anharmonicity, off diag is full cross-Kerr." + ) + pritm(result["chi_O1"], "{:9.3g}") - print('\n*** Chi matrix ND (MHz) ') - pritm(result['chi_ND'], "{:9.3g}") + print("\n*** Chi matrix ND (MHz) ") + pritm(result["chi_ND"], "{:9.3g}") - print('\n*** Frequencies O1 PT (MHz)') - print(result['f_1']) + print("\n*** Frequencies O1 PT (MHz)") + print(result["f_1"]) - print('\n*** Frequencies ND (MHz)') - print(result['f_ND']) + print("\n*** Frequencies ND (MHz)") + print(result["f_ND"]) - print('\n*** Q_coupling') - print(result['Q_coupling']) + print("\n*** Q_coupling") + print(result["Q_coupling"]) def plotting_dic_x(self, Var_dic, var_name): dic = {} - if (len(Var_dic.keys())+1) == self.Num_hfss_vars_diff_idx: - lv, lv_str = self.get_variation_of_multiple_variables_value( - Var_dic) - dic['label'] = lv_str - dic['x_label'] = var_name - dic['x'] = self.get_variable_value(var_name, lv=lv) + if (len(Var_dic.keys()) + 1) == self.Num_hfss_vars_diff_idx: + lv, lv_str = self.get_variation_of_multiple_variables_value(Var_dic) + dic["label"] = lv_str + dic["x_label"] = var_name + dic["x"] = self.get_variable_value(var_name, lv=lv) else: - raise ValueError('more than one hfss variable changes each time') + raise ValueError("more than one hfss variable changes each time") return lv, dic @@ -825,11 +874,13 @@ def plot_results(self, result, Y_label, variable, X_label, variations: list = No # TODO? 
pass - def plot_hamiltonian_results(self, - swp_variable: str = 'variation', - variations: list = None, - fig=None, - x_label: str = None): + def plot_hamiltonian_results( + self, + swp_variable: str = "variation", + variations: list = None, + fig=None, + x_label: str = None, + ): """Plot results versus variation Keyword Arguments: @@ -851,18 +902,27 @@ def plot_hamiltonian_results(self, ############################################################################ # Axis: Frequencies - f0 = self.results.get_frequencies_HFSS( - variations=variations, vs=swp_variable).transpose().sort_index(key=lambda x : x.astype(int)) - f1 = self.results.get_frequencies_O1( - variations=variations, vs=swp_variable).transpose().sort_index(key=lambda x : x.astype(int)) - f_ND = self.results.get_frequencies_ND( - variations=variations, vs=swp_variable).transpose().sort_index(key=lambda x : x.astype(int)) + f0 = ( + self.results.get_frequencies_HFSS(variations=variations, vs=swp_variable) + .transpose() + .sort_index(key=lambda x: x.astype(int)) + ) + f1 = ( + self.results.get_frequencies_O1(variations=variations, vs=swp_variable) + .transpose() + .sort_index(key=lambda x: x.astype(int)) + ) + f_ND = ( + self.results.get_frequencies_ND(variations=variations, vs=swp_variable) + .transpose() + .sort_index(key=lambda x: x.astype(int)) + ) # changed by Asaf from f0 as not all modes are always analyzed mode_idx = list(f1.columns) n_modes = len(mode_idx) ax = axs[0, 0] - ax.set_title('Modal frequencies (MHz)') + ax.set_title("Modal frequencies (MHz)") # TODO: should move these kwargs to the config cmap = cmap_discrete(n_modes) @@ -871,51 +931,50 @@ def plot_hamiltonian_results(self, # Choose which freq should have the solid line drawn with it. ND if present, else f1 if f_ND.empty: plt_me_line = f1 - markerf1 = 'o' + markerf1 = "o" else: plt_me_line = f_ND - markerf1 = '.' + markerf1 = "." 
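# For reference in the plotting below: f0 holds the bare HFSS eigenfrequencies,
# f1 the first-order (O1) perturbation-theory frequencies, and f_ND the
# numerically diagonalized frequencies, when available.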
# plot the ND as points if present - f_ND.plot(**{**kw, **dict(marker='o', ms=4, zorder=30)}) + f_ND.plot(**{**kw, **dict(marker="o", ms=4, zorder=30)}) - f0.plot(**{**kw, **dict(marker='x', ms=2, zorder=10)}) + f0.plot(**{**kw, **dict(marker="x", ms=2, zorder=10)}) f1.plot(**{**kw, **dict(marker=markerf1, ms=4, zorder=20)}) - plt_me_line.plot(**{**kw, **dict(lw=1, alpha=0.6, color='grey')}) + plt_me_line.plot(**{**kw, **dict(lw=1, alpha=0.6, color="grey")}) ############################################################################ # Axis: Quality factors Qs = self.get_quality_factors(swp_variable=swp_variable) Qs = Qs if variations is None else Qs[variations] - Qs = Qs.transpose().sort_index(key=lambda x : x.astype(int)) + Qs = Qs.transpose().sort_index(key=lambda x: x.astype(int)) ax = axs[1, 0] - ax.set_title('Quality factors') - Qs.plot(ax=ax, lw=0, marker=markerf1, ms=4, - legend=True, zorder=20, color=cmap) - Qs.plot(ax=ax, lw=1, alpha=0.2, color='grey', legend=False) - + ax.set_title("Quality factors") + Qs.plot(ax=ax, lw=0, marker=markerf1, ms=4, legend=True, zorder=20, color=cmap) + Qs.plot(ax=ax, lw=1, alpha=0.2, color="grey", legend=False) + df_Qs = np.isinf(Qs) - # pylint: disable=E1101 + # pylint: disable=E1101 # Instance of 'ndarray' has no 'values' member (no-member) Qs_val = df_Qs.values Qs_inf = Qs_val.sum() - if not (len(Qs) == 0 or Qs_inf > 0): - ax.set_yscale('log') + if not (len(Qs) == 0 or Qs_inf > 0): + ax.set_yscale("log") ############################################################################ # Axis: Alpha and chi - axs[0][1].set_title('Anharmonicities (MHz)') - axs[1][1].set_title('Cross-Kerr frequencies (MHz)') + axs[0][1].set_title("Anharmonicities (MHz)") + axs[1][1].set_title("Cross-Kerr frequencies (MHz)") def plot_chi_alpha(chi, primary): """ Internal function to plot chi and then also to plot alpha """ idx = pd.IndexSlice - kw1 = dict(lw=0, ms=4, marker='o' if primary else 'x') - kw2 = dict(lw=1, alpha=0.2, color='grey', label='_nolegend_') + kw1 = dict(lw=0, ms=4, marker="o" if primary else "x") + kw2 = dict(lw=1, alpha=0.2, color="grey", label="_nolegend_") # ------------------------ # Plot anharmonicity ax = axs[0, 1] @@ -934,21 +993,24 @@ def plot_chi_alpha(chi, primary): for i, mode2 in enumerate(mode_idx): if int(mode2) > int(mode): chi_element = chi.loc[idx[:, mode], mode2].unstack(1) - chi_element.plot(ax=ax, label=f"{mode},{mode2}", color=cmap[i], **kw1) + chi_element.plot( + ax=ax, label=f"{mode},{mode2}", color=cmap[i], **kw1 + ) if primary: chi_element.plot(ax=ax, **kw2) def do_legends(): - legend_translucent(axs[0][1], leg_kw=dict(fontsize=7, title='Mode')) - legend_translucent(axs[1][1], leg_kw=dict(fontsize=7)) + legend_translucent(axs[0][1], leg_kw=dict(fontsize=7, title="Mode")) + legend_translucent(axs[1][1], leg_kw=dict(fontsize=7)) - chiO1 = self.get_chis(variations=variations, - swp_variable=swp_variable, numeric=False) - chiND = self.get_chis(variations=variations, - swp_variable=swp_variable, numeric=True) + chiO1 = self.get_chis( + variations=variations, swp_variable=swp_variable, numeric=False + ) + chiND = self.get_chis( + variations=variations, swp_variable=swp_variable, numeric=True + ) - use_ND = not np.any( - [r['fock_trunc'] == None for k, r in self.results.items()]) + use_ND = not np.any([r["fock_trunc"] == None for k, r in self.results.items()]) if use_ND: plot_chi_alpha(chiND, True) do_legends() @@ -968,33 +1030,39 @@ def do_legends(): # Below are functions introduced in v0.8 and newer - def report_results(self, 
swp_variable='variation', numeric=True): + def report_results(self, swp_variable="variation", numeric=True): """ Report in table form the results in a markdown friendly way in Jupyter notebook using the pandas interface. """ - with pd.option_context('display.precision', 2): + with pd.option_context("display.precision", 2): display(Markdown(("#### Mode frequencies (MHz)"))) display(Markdown(("###### Numerical diagonalization"))) - display(self.get_frequencies( - swp_variable=swp_variable, numeric=numeric)) + display(self.get_frequencies(swp_variable=swp_variable, numeric=numeric)) display(Markdown(("#### Kerr Non-linear coefficient table (MHz)"))) display(Markdown(("###### Numerical diagonalization"))) display(self.get_chis(swp_variable=swp_variable, numeric=numeric)) - def get_chis(self, swp_variable='variation', numeric=True, variations: list = None, - m=None, n=None): + def get_chis( + self, + swp_variable="variation", + numeric=True, + variations: list = None, + m=None, + n=None, + ): """return as multiindex data table If you provide m and n as integers or mode labels, then the chi between these modes will be returned as a pandas Series. """ - label = 'chi_ND' if numeric else 'chi_O1' - df = pd.concat(self.results.vs_variations( - label, vs=swp_variable, variations=variations), - names=[swp_variable]) + label = "chi_ND" if numeric else "chi_O1" + df = pd.concat( + self.results.vs_variations(label, vs=swp_variable, variations=variations), + names=[swp_variable], + ) if m is None and n is None: return df @@ -1002,25 +1070,34 @@ def get_chis(self, swp_variable='variation', numeric=True, variations: list = No s = df.loc[pd.IndexSlice[:, m], n].unstack(1)[m] return s - def get_frequencies(self, swp_variable='variation', numeric=True, variations: list = None): + def get_frequencies( + self, swp_variable="variation", numeric=True, variations: list = None + ): """return as multiindex data table - index: eigenmode label - columns: variation label + index: eigenmode label + columns: variation label """ - label = 'f_ND' if numeric else 'f_1' - return self.results.vs_variations(label, vs=swp_variable, to_dataframe=True, variations=variations) + label = "f_ND" if numeric else "f_1" + return self.results.vs_variations( + label, vs=swp_variable, to_dataframe=True, variations=variations + ) - def get_quality_factors(self, swp_variable='variation', variations: list = None): + def get_quality_factors(self, swp_variable="variation", variations: list = None): """return as pd.Series - index: eigenmode label - columns: variation label + index: eigenmode label + columns: variation label """ - return self.results.vs_variations('Qs', vs=swp_variable, to_dataframe=True, variations=variations) - - def get_participations(self, swp_variable='variation', - variations: list = None, - inductive=True, - _normed=True): + return self.results.vs_variations( + "Qs", vs=swp_variable, to_dataframe=True, variations=variations + ) + + def get_participations( + self, + swp_variable="variation", + variations: list = None, + inductive=True, + _normed=True, + ): """ inductive (bool): EPR for junction inductance when True, else for capacitors @@ -1044,23 +1121,24 @@ def get_participations(self, swp_variable='variation', if inductive: if _normed: - getme = 'Pm_normed' + getme = "Pm_normed" else: - getme = 'Pm_raw' + getme = "Pm_raw" else: if _normed: - getme = 'Pm_cap' + getme = "Pm_cap" else: raise NotImplementedError( - 'not inductive and not _normed not implemented') + "not inductive and not _normed not implemented" + ) 
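# vs_variations returns a mapping from sweep value to a participation matrix;
# the loop below stacks these into a single multi-index DataFrame indexed by
# (swp_variable, mode) with one column per junc_idx.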
participations = self.results.vs_variations(getme, vs=swp_variable) p2 = OrderedDict() for key, val in participations.items(): df = pd.DataFrame(val) - df.index.name = 'mode' - df.columns.name = 'junc_idx' + df.index.name = "mode" + df.columns.name = "junc_idx" p2[key] = df participations = pd.concat(p2, names=[swp_variable]) @@ -1073,55 +1151,62 @@ def _get_PM_as_DataFrame(self): Pm.unstack(1).groupby(axis=1,level=1).plot() """ Pm = pd.concat(self.PM) - Pm.index.set_names(['variation', 'mode'], inplace=True) - Pm.columns.set_names(['junction'], inplace=True) + Pm.index.set_names(["variation", "mode"], inplace=True) + Pm.columns.set_names(["junction"], inplace=True) return Pm - def get_ansys_energies(self, swp_var='variation'): + def get_ansys_energies(self, swp_var="variation"): """ Return a multi-index dataframe of ansys energies vs swep_variable Args: swp_var (str) : """ - if swp_var == 'variation': + if swp_var == "variation": energies = self.ansys_energies else: - energies = self.get_vs_variable(swp_var, 'ansys_energies') + energies = self.get_vs_variable(swp_var, "ansys_energies") - df = pd.concat({k: pd.DataFrame(v).transpose() - for k, v in energies.items()}) - df.index.set_names([swp_var, 'mode'], inplace=True) + df = pd.concat({k: pd.DataFrame(v).transpose() for k, v in energies.items()}) + df.index.set_names([swp_var, "mode"], inplace=True) return df - def quick_plot_participation(self, mode, junction, swp_variable='variation', ax=None, kw=None): + def quick_plot_participation( + self, mode, junction, swp_variable="variation", ax=None, kw=None + ): """Quick plot participation for one mode - kw : extra plot arguments + kw : extra plot arguments """ df = self.get_participations(swp_variable=swp_variable) kw = kw or {} ax = ax or plt.gca() - df.loc[pd.IndexSlice[:, mode], junction].unstack( - 1).plot(marker='o', ax=ax, **kw) - ax.set_ylabel(f'p_({mode},{junction})') - - def quick_plot_frequencies(self, mode, swp_variable='variation', ax=None, kw=None, numeric=False): + df.loc[pd.IndexSlice[:, mode], junction].unstack(1).plot( + marker="o", ax=ax, **kw + ) + ax.set_ylabel(f"p_({mode},{junction})") + + def quick_plot_frequencies( + self, mode, swp_variable="variation", ax=None, kw=None, numeric=False + ): """Quick plot freq for one mode - kw : extra plot arguments + kw : extra plot arguments """ kw = kw or {} ax = ax or plt.gca() s = self.get_frequencies( - numeric=numeric, swp_variable=swp_variable).transpose()[mode] - s.plot(marker='o', ax=ax, **kw) + numeric=numeric, swp_variable=swp_variable + ).transpose()[mode] + s.plot(marker="o", ax=ax, **kw) - ax.set_ylabel(f'$\\omega_{mode}$ (MHz)') + ax.set_ylabel(f"$\\omega_{mode}$ (MHz)") - def quick_plot_chi_alpha(self, mode1, mode2, swp_variable='variation', ax=None, kw=None, numeric=False): + def quick_plot_chi_alpha( + self, mode1, mode2, swp_variable="variation", ax=None, kw=None, numeric=False + ): """Quick plot chi between mode 1 and mode 2. 
If you select mode1=mode2, then you will plot the alpha @@ -1131,16 +1216,27 @@ def quick_plot_chi_alpha(self, mode1, mode2, swp_variable='variation', ax=None, kw = kw or {} ax = ax or plt.gca() - s = self.get_chis(swp_variable=swp_variable, - numeric=numeric).loc[pd.IndexSlice[:, mode1], mode2].unstack(1) - s.plot(marker='o', ax=ax, **kw) + s = ( + self.get_chis(swp_variable=swp_variable, numeric=numeric) + .loc[pd.IndexSlice[:, mode1], mode2] + .unstack(1) + ) + s.plot(marker="o", ax=ax, **kw) if mode1 == mode2: - ax.set_ylabel(f'$\\alpha$({mode1}) (MHz) [anharmonicity]') + ax.set_ylabel(f"$\\alpha$({mode1}) (MHz) [anharmonicity]") else: - ax.set_ylabel(f'$\\chi$({mode1,mode2}) (MHz) [total split]') - - def quick_plot_mode(self, mode, junction, mode1=None, swp_variable='variation', numeric=False, sharex=True): + ax.set_ylabel(f"$\\chi$({mode1,mode2}) (MHz) [total split]") + + def quick_plot_mode( + self, + mode, + junction, + mode1=None, + swp_variable="variation", + numeric=False, + sharex=True, + ): r"""Create a quick report to see mode parameters for only a single mode and a cross-Kerr coupling to another mode. Plots the participation and cross participation @@ -1150,32 +1246,46 @@ def quick_plot_mode(self, mode, junction, mode1=None, swp_variable='variation', The values are either for the numeric or the non-numeric results, set by `numeric` """ - fig, axs = plt.subplots(2, 2, figsize=(12*0.9, 7*0.9)) + fig, axs = plt.subplots(2, 2, figsize=(12 * 0.9, 7 * 0.9)) self.quick_plot_frequencies( - mode, swp_variable=swp_variable, numeric=numeric, ax=axs[0, 1]) + mode, swp_variable=swp_variable, numeric=numeric, ax=axs[0, 1] + ) self.quick_plot_participation( - mode, junction, swp_variable=swp_variable, ax=axs[0, 0]) - self.quick_plot_chi_alpha(mode, mode, numeric=numeric, swp_variable=swp_variable, ax=axs[1, 0], - kw=dict(sharex=sharex)) + mode, junction, swp_variable=swp_variable, ax=axs[0, 0] + ) + self.quick_plot_chi_alpha( + mode, + mode, + numeric=numeric, + swp_variable=swp_variable, + ax=axs[1, 0], + kw=dict(sharex=sharex), + ) if mode1: self.quick_plot_chi_alpha( - mode, mode1, numeric=numeric, swp_variable=swp_variable, ax=axs[1, 1]) + mode, mode1, numeric=numeric, swp_variable=swp_variable, ax=axs[1, 1] + ) twinax = axs[0, 0].twinx() - self.quick_plot_participation(mode1, junction, swp_variable=swp_variable, ax=twinax, - kw=dict(alpha=0.7, color='maroon', sharex=sharex)) + self.quick_plot_participation( + mode1, + junction, + swp_variable=swp_variable, + ax=twinax, + kw=dict(alpha=0.7, color="maroon", sharex=sharex), + ) for ax in np.ndarray.flatten(axs): ax.grid(alpha=0.2) - axs[0, 1].set_title('Frequency (MHz)') - axs[0, 0].set_title('Self- and cross-EPR') - axs[1, 0].set_title('Anharmonicity') - axs[1, 1].set_title('Cross-Kerr') + axs[0, 1].set_title("Frequency (MHz)") + axs[0, 0].set_title("Self- and cross-EPR") + axs[1, 0].set_title("Anharmonicity") + axs[1, 1].set_title("Cross-Kerr") - fig.suptitle(f'Mode {mode}', y=1.025) + fig.suptitle(f"Mode {mode}", y=1.025) fig.tight_layout() - def quick_plot_convergence(self, ax = None): + def quick_plot_convergence(self, ax=None): """ Plot a report of the Ansys convergence vs pass number on a twin axis for the number of tets and the max delta frequency of the eigenmode. 
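# Usage sketch for the quick-plot reporting API above; the results filename,
# the design variable "Lj", and the mode/junction labels are hypothetical, and
# a completed DistributedAnalysis run is assumed:
#
#     from pyEPR import QuantumAnalysis
#
#     epr = QuantumAnalysis("two_qubit_results.npz")  # hypothetical saved results
#     epr.analyze_all_variations(cos_trunc=8, fock_trunc=7)
#     # 2x2 summary for mode 0: frequency, EPR, anharmonicity, cross-Kerr to mode 1
#     epr.quick_plot_mode(mode=0, junction=0, mode1=1, swp_variable="Lj", numeric=True)
#     epr.quick_plot_convergence()  # tets and max delta-f vs pass, on twin axes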
@@ -1186,7 +1296,7 @@ def quick_plot_convergence(self, ax = None): convergence_tets = self.get_convergences_tets_vs_pass() convergence_freq = self.get_convergences_max_delta_freq_vs_pass() - convergence_freq.name = 'Δf' + convergence_freq.name = "Δf" plot_convergence_max_df(ax, convergence_freq) plot_convergence_solved_elem(ax_t, convergence_tets) diff --git a/pyEPR/project_info.py b/pyEPR/project_info.py index 412e2ff..42b649f 100644 --- a/pyEPR/project_info.py +++ b/pyEPR/project_info.py @@ -21,9 +21,7 @@ from . import Dict, ansys, config, logger from .toolbox.pythonic import get_instance_vars -diss_opt = [ - 'dielectrics_bulk', 'dielectric_surfaces', 'resistive_surfaces', 'seams' -] +diss_opt = ["dielectrics_bulk", "dielectric_surfaces", "resistive_surfaces", "seams"] class ProjectInfo(object): @@ -106,37 +104,47 @@ class ProjectInfo(object): http://google.github.io/styleguide/pyguide.html """ + class _Dissipative: """ Deprecating the _Dissipative class and turning it into a dictionary. This is used to message people on the deprecation so they could change their scripts. """ + def __init__(self): - self['pinfo'] = None + self["pinfo"] = None for opt in diss_opt: self[opt] = None def __setitem__(self, key, value): # --- check valid inputs --- - if not (key in diss_opt or key == 'pinfo'): + if not (key in diss_opt or key == "pinfo"): raise ValueError(f"No such parameter {key}") - if key != 'pinfo' and (not isinstance(value, (list, dict)) or \ - not all(isinstance(x, str) for x in value)) and (value != None): - raise ValueError(f'dissipative[\'{key}\'] must be a list of strings ' \ - 'containing names of models in the project or dictionary of strings of models containing ' \ - 'material loss properties!' + if ( + key != "pinfo" + and ( + not isinstance(value, (list, dict)) + or not all(isinstance(x, str) for x in value) + ) + and (value != None) + ): + raise ValueError( + f"dissipative['{key}'] must be a list of strings " + "containing names of models in the project or dictionary of strings of models containing " + "material loss properties!" ) - if key != 'pinfo' and hasattr(self['pinfo'], 'design'): + if key != "pinfo" and hasattr(self["pinfo"], "design"): for x in value: - if x not in self['pinfo'].get_all_object_names(): - raise ValueError( - f'\'{x}\' is not an object in the HFSS project') + if x not in self["pinfo"].get_all_object_names(): + raise ValueError(f"'{x}' is not an object in the HFSS project") super().__setattr__(key, value) def __getitem__(self, attr): - if not (attr in diss_opt or attr == 'pinfo'): - raise AttributeError(f'dissipative has no attribute "{attr}". '\ - f'The possible attributes are:\n {str(diss_opt)}') + if not (attr in diss_opt or attr == "pinfo"): + raise AttributeError( + f'dissipative has no attribute "{attr}". ' + f"The possible attributes are:\n {str(diss_opt)}" + ) return super().__getattribute__(attr) def __setattr__(self, attr, value): @@ -146,13 +154,14 @@ def __setattr__(self, attr, value): self[attr] = value def __getattr__(self, attr): - raise AttributeError(f'dissipative has no attribute "{attr}". '\ - f'The possible attributes are:\n {str(diss_opt)}') + raise AttributeError( + f'dissipative has no attribute "{attr}". ' + f"The possible attributes are:\n {str(diss_opt)}" + ) def __getattribute__(self, attr): if attr in diss_opt: - logger.warning( - f"DEPRECATED!! use pinfo.dissipative['{attr}'] instead!") + logger.warning(f"DEPRECATED!! 
use pinfo.dissipative['{attr}'] instead!") return super().__getattribute__(attr) def __repr__(self): @@ -162,16 +171,18 @@ def data(self): """Return dissipative as dictionary""" return {str(opt): self[opt] for opt in diss_opt} - def __init__(self, - project_path: str = None, - project_name: str = None, - design_name: str = None, - setup_name: str = None, - dielectrics_bulk: list =None, - dielectric_surfaces: list = None, - resistive_surfaces: list= None, - seams: list= None, - do_connect: bool = True): + def __init__( + self, + project_path: str = None, + project_name: str = None, + design_name: str = None, + setup_name: str = None, + dielectrics_bulk: list = None, + dielectric_surfaces: list = None, + resistive_surfaces: list = None, + seams: list = None, + do_connect: bool = True, + ): """ Keyword Arguments: @@ -194,12 +205,13 @@ def __init__(self, seams (list(str)) : List of names of seams. Defaults to ``None``. do_connect (bool) [additional]: Do create connection to Ansys or not? Defaults to ``True``. - + """ # Path: format path correctly to system convention - self.project_path = str(Path(project_path)) \ - if not (project_path is None) else None + self.project_path = ( + str(Path(project_path)) if not (project_path is None) else None + ) self.project_name = project_name self.design_name = design_name self.setup_name = setup_name @@ -224,37 +236,44 @@ def __init__(self, if do_connect: self.connect() - self.dissipative['pinfo'] = self + self.dissipative["pinfo"] = self _Forbidden = [ - 'app', 'design', 'desktop', 'project', 'dissipative', 'setup', - '_Forbidden', 'junctions' + "app", + "design", + "desktop", + "project", + "dissipative", + "setup", + "_Forbidden", + "junctions", ] def save(self): - ''' + """ Return all the data in a dictionary form that can be used to be saved - ''' + """ return dict( pinfo=pd.Series(get_instance_vars(self, self._Forbidden)), dissip=pd.Series(self.dissipative.data()), - options=pd.Series(get_instance_vars(self.options), dtype='object'), + options=pd.Series(get_instance_vars(self.options), dtype="object"), junctions=pd.DataFrame(self.junctions), ports=pd.DataFrame(self.ports), ) def connect_project(self): - """Sets + """Sets self.app self.desktop self.project self.project_name - self.project_path + self.project_path """ - logger.info('Connecting to Ansys Desktop API...') + logger.info("Connecting to Ansys Desktop API...") self.app, self.desktop, self.project = ansys.load_ansys_project( - self.project_name, self.project_path) + self.project_name, self.project_path + ) if self.project: # TODO: should be property? @@ -272,8 +291,7 @@ def connect_design(self, design_name: str = None): designs_in_project = self.project.get_designs() if not designs_in_project: self.design = None - logger.info( - f'No active design found (or error getting active design).') + logger.info(f"No active design found (or error getting active design).") return if self.design_name is None: @@ -282,31 +300,34 @@ def connect_design(self, design_name: str = None): self.design = self.project.get_active_design() self.design_name = self.design.name logger.info( - '\tOpened active design\n' - f'\tDesign: {self.design_name} [Solution type: {self.design.solution_type}]' + "\tOpened active design\n" + f"\tDesign: {self.design_name} [Solution type: {self.design.solution_type}]" ) except Exception as e: # No active design self.design = None self.design_name = None logger.info( - f'No active design found (or error getting active design). 
Note: {e}' + f"No active design found (or error getting active design). Note: {e}" ) else: try: self.design = self.project.get_design(self.design_name) logger.info( - '\tOpened active design\n' - f'\tDesign: {self.design_name} [Solution type: {self.design.solution_type}]' + "\tOpened active design\n" + f"\tDesign: {self.design_name} [Solution type: {self.design.solution_type}]" ) except Exception as e: _traceback = sys.exc_info()[2] logger.error(f"Original error \N{loudly crying face}: {e}\n") - raise (Exception(' Did you provide the correct design name?\ - Failed to pull up design. \N{loudly crying face}'). - with_traceback(_traceback)) + raise ( + Exception( + " Did you provide the correct design name?\ + Failed to pull up design. \N{loudly crying face}" + ).with_traceback(_traceback) + ) def connect_setup(self): """Connect to the first available setup or create a new in eigenmode and driven modal @@ -320,19 +341,19 @@ def connect_setup(self): setup_names = self.design.get_setup_names() if len(setup_names) == 0: - logger.warning('\tNo design setup detected.') + logger.warning("\tNo design setup detected.") setup = None - if self.design.solution_type == 'Eigenmode': - logger.warning('\tCreating eigenmode default setup.') + if self.design.solution_type == "Eigenmode": + logger.warning("\tCreating eigenmode default setup.") setup = self.design.create_em_setup() - elif self.design.solution_type == 'DrivenModal': - logger.warning('\tCreating driven modal default setup.') + elif self.design.solution_type == "DrivenModal": + logger.warning("\tCreating driven modal default setup.") setup = self.design.create_dm_setup() - elif self.design.solution_type == 'DrivenTerminal': - logger.warning('\tCreating driven terminal default setup.') + elif self.design.solution_type == "DrivenTerminal": + logger.warning("\tCreating driven terminal default setup.") setup = self.design.create_dt_setup() - elif self.design.solution_type == 'Q3D': - logger.warning('\tCreating Q3D default setup.') + elif self.design.solution_type == "Q3D": + logger.warning("\tCreating Q3D default setup.") setup = self.design.create_q3d_setup() self.setup_name = setup.name else: @@ -345,9 +366,10 @@ def connect_setup(self): _traceback = sys.exc_info()[2] logger.error(f"Original error \N{loudly crying face}: {e}\n") - raise Exception(' Did you provide the correct setup name?\ - Failed to pull up setup. \N{loudly crying face}' - ).with_traceback(_traceback) + raise Exception( + " Did you provide the correct setup name?\ + Failed to pull up setup. \N{loudly crying face}" + ).with_traceback(_traceback) else: self.setup = None @@ -361,7 +383,7 @@ def connect(self): self.connect_project() if not self.project: - logger.info('\tConnection to Ansys NOT established. \n') + logger.info("\tConnection to Ansys NOT established. \n") if self.project: self.connect_design() self.connect_setup() @@ -374,17 +396,17 @@ def connect(self): if self.project and self.design: logger.info( - f'\tConnected to project \"{self.project_name}\" and design \"{self.design_name}\" \N{grinning face} \n' + f'\tConnected to project "{self.project_name}" and design "{self.design_name}" \N{grinning face} \n' ) if not self.project: logger.info( - '\t Project not detected in Ansys. Is there a project in your desktop app? \N{thinking face} \n' + "\t Project not detected in Ansys. Is there a project in your desktop app? \N{thinking face} \n" ) if not self.design: logger.info( - f'\t Connected to project \"{self.project_name}\". 
No design detected' + f'\t Connected to project "{self.project_name}". No design detected' ) return self @@ -404,31 +426,34 @@ def get_setup(self, name: str): return None self.setup = self.design.get_setup(name=name) if self.setup is None: - logger.error(f"Could not retrieve setup: {name}\n \ - Did you give the right name? Does it exist?") + logger.error( + f"Could not retrieve setup: {name}\n \ + Did you give the right name? Does it exist?" + ) self.setup_name = self.setup.name - logger.info( - f'\tOpened setup `{self.setup_name}` ({type(self.setup)})') + logger.info(f"\tOpened setup `{self.setup_name}` ({type(self.setup)})") return self.setup def check_connected(self): """ Checks if fully connected including setup. """ - return\ - (self.setup is not None) and\ - (self.design is not None) and\ - (self.project is not None) and\ - (self.desktop is not None) and\ - (self.app is not None) + return ( + (self.setup is not None) + and (self.design is not None) + and (self.project is not None) + and (self.desktop is not None) + and (self.app is not None) + ) def disconnect(self): - ''' + """ Disconnect from existing Ansys Desktop API. - ''' - assert self.check_connected() is True,\ - "It does not appear that you have connected to HFSS yet.\ + """ + assert ( + self.check_connected() is True + ), "It does not appear that you have connected to HFSS yet.\ Use the connect() method. \N{nauseated face}" self.project.release() @@ -439,20 +464,19 @@ def disconnect(self): # UTILITY FUNCTIONS def get_dm(self): - ''' + """ Utility shortcut function to get the design and modeler. .. code-block:: python oDesign, oModeler = pinfo.get_dm() - ''' + """ return self.design, self.design.modeler def get_all_variables_names(self): """Returns array of all project and local design names.""" - return self.project.get_variable_names( - ) + self.design.get_variable_names() + return self.project.get_variable_names() + self.design.get_variable_names() def get_all_object_names(self): """Returns array of strings""" @@ -472,19 +496,28 @@ def validate_junction_info(self): for jjnm, jj in self.junctions.items(): - assert jj['Lj_variable'] in all_variables_names,\ - """pyEPR ProjectInfo user error found \N{face with medical mask}: + assert ( + jj["Lj_variable"] in all_variables_names + ), """pyEPR ProjectInfo user error found \N{face with medical mask}: Seems like for junction `%s` you specified a design or project variable for `Lj_variable` that does not exist in HFSS by the name: - `%s` """ % (jjnm, jj['Lj_variable']) + `%s` """ % ( + jjnm, + jj["Lj_variable"], + ) - for name in ['rect', 'line']: + for name in ["rect", "line"]: - assert jj[name] in all_object_names, \ - """pyEPR ProjectInfo user error found \N{face with medical mask}: + assert ( + jj[name] in all_object_names + ), """pyEPR ProjectInfo user error found \N{face with medical mask}: Seems like for junction `%s` you specified a %s that does not exist - in HFSS by the name: `%s` """ % (jjnm, name, jj[name]) + in HFSS by the name: `%s` """ % ( + jjnm, + name, + jj[name], + ) def __del__(self): - logger.info('Disconnected from Ansys HFSS') + logger.info("Disconnected from Ansys HFSS") # self.disconnect() diff --git a/pyEPR/reports.py b/pyEPR/reports.py index 6ed925d..d21e05f 100644 --- a/pyEPR/reports.py +++ b/pyEPR/reports.py @@ -9,7 +9,9 @@ from .toolbox.plotting import legend_translucent, plt -def _style_plot_convergence(ax, ylabel=None, xlabel='Pass number', ylabel_col='k', y_title=False): +def _style_plot_convergence( + ax, ylabel=None, xlabel="Pass number", 
ylabel_col="k", y_title=False +): ax.set_xlabel(xlabel) if ylabel: if y_title: @@ -20,64 +22,68 @@ def _style_plot_convergence(ax, ylabel=None, xlabel='Pass number', ylabel_col='k ax.autoscale(tight=False) ax.set_axisbelow(True) # Don't allow the axis to be on top of your data ax.minorticks_on() - ax.grid(which='minor', linestyle=':', - linewidth='0.5', color='black', alpha=0.2) - ax.grid(which='major', alpha=0.5) + ax.grid(which="minor", linestyle=":", linewidth="0.5", color="black", alpha=0.2) + ax.grid(which="major", alpha=0.5) -_style_plot_conv_kw = dict(marker='o', ms=4) +_style_plot_conv_kw = dict(marker="o", ms=4) -def plot_convergence_max_df(ax, s, kw={}, color='r'): - '''For a single pass''' - s.plot(ax=ax, **{**dict(c='r'), **_style_plot_conv_kw, **kw}) +def plot_convergence_max_df(ax, s, kw={}, color="r"): + """For a single pass""" + s.plot(ax=ax, **{**dict(c="r"), **_style_plot_conv_kw, **kw}) ax.set_yscale("log") _style_plot_convergence(ax) fig = ax.figure - fig.text(0.45, 0.95, s.name, ha="center", - va="bottom", size="medium", color=color) - ax.tick_params(axis='y', labelcolor=color) - #ax.axhline(1.0, color='k', lw=1.5,alpha= 0.35) - #ax.axhline(0.1, color='k', lw=1.5,alpha= 0.35) - ax.grid(which='minor', linestyle=':', - linewidth='0.5', color=color, alpha=0.25) - ax.grid(which='major', color='#c4abab', alpha=0.5) - ax.spines['left'].set_color(color) - - -def plot_convergence_solved_elem(ax, s, kw={}, color='b'): - '''For a single pass''' - (s/1000).plot(ax=ax, **{**dict(c='b'), **_style_plot_conv_kw, **kw}) + fig.text(0.45, 0.95, s.name, ha="center", va="bottom", size="medium", color=color) + ax.tick_params(axis="y", labelcolor=color) + # ax.axhline(1.0, color='k', lw=1.5,alpha= 0.35) + # ax.axhline(0.1, color='k', lw=1.5,alpha= 0.35) + ax.grid(which="minor", linestyle=":", linewidth="0.5", color=color, alpha=0.25) + ax.grid(which="major", color="#c4abab", alpha=0.5) + ax.spines["left"].set_color(color) + + +def plot_convergence_solved_elem(ax, s, kw={}, color="b"): + """For a single pass""" + (s / 1000).plot(ax=ax, **{**dict(c="b"), **_style_plot_conv_kw, **kw}) _style_plot_convergence(ax) # ax.set_ylim([100,None]) # ax.set_yscale("log") ax.minorticks_off() ax.grid(False) - ax.tick_params(axis='y', labelcolor=color) + ax.tick_params(axis="y", labelcolor=color) # ax.ticklabel_format(style='sci',scilimits=(0,0)) fig = ax.figure - fig.text(0.6, 0.95, 'Solved elements (1000s)', ha="center", - va="bottom", size="medium", color=color) - ax.spines['left'].set_color('r') - ax.spines['right'].set_color(color) + fig.text( + 0.6, + 0.95, + "Solved elements (1000s)", + ha="center", + va="bottom", + size="medium", + color=color, + ) + ax.spines["left"].set_color("r") + ax.spines["right"].set_color(color) def plot_convergence_f_vspass(ax, s, kw={}): - '''For a single pass''' + """For a single pass""" if s is not None: (s).plot(ax=ax, **{**_style_plot_conv_kw, **kw}) - _style_plot_convergence(ax, 'Eigenmode f vs. pass [GHz]', y_title=True) + _style_plot_convergence(ax, "Eigenmode f vs. 
pass [GHz]", y_title=True) legend_translucent(ax, leg_kw=dict(fontsize=6)) def plot_convergence_maxdf_vs_sol(ax, s, s2, kw={}): - ''' + """ ax, 'Max Δf %', 'Solved elements', kw for plot - ''' + """ s = s.copy() s.index = s2 (s).plot(ax=ax, **{**_style_plot_conv_kw, **kw}) - _style_plot_convergence(ax, s.name, xlabel='Solved elements', y_title=True) + _style_plot_convergence(ax, s.name, xlabel="Solved elements", y_title=True) ax.set_yscale("log") ax.set_xscale("log") @@ -90,36 +96,36 @@ def _plot_q3d_convergence_main(epr, RES): ax2 = ax.twinx() ax.cla() ax2.cla() - RES['alpha'].plot(ax=ax, c='b') - (RES['fQ']*1000).plot(ax=ax2, c='red') + RES["alpha"].plot(ax=ax, c="b") + (RES["fQ"] * 1000).plot(ax=ax2, c="red") from matplotlib import pyplot as plt - _style_plot_convergence( - ax, 'Alpha (blue), Freq (red) [MHz]', y_title=True) - ax2.set_ylabel('Frequency (MHz)', color='r') - ax.set_ylabel('Alpha(MHz)', color='b') - ax2.spines['right'].set_color('r') - ax2.tick_params(axis='y', labelcolor='r') - ax.tick_params(axis='y', labelcolor='b') + + _style_plot_convergence(ax, "Alpha (blue), Freq (red) [MHz]", y_title=True) + ax2.set_ylabel("Frequency (MHz)", color="r") + ax.set_ylabel("Alpha(MHz)", color="b") + ax2.spines["right"].set_color("r") + ax2.tick_params(axis="y", labelcolor="r") + ax.tick_params(axis="y", labelcolor="b") # legend_translucent(ax) # legend_translucent(ax2) - ax.set_xlabel('Pass') + ax.set_xlabel("Pass") fig.tight_layout() return fig def _plot_q3d_convergence_chi_f(RES): - df_chi = pd.DataFrame(RES['chi_in_MHz'].values.tolist()) - df_chi.index.name = 'Pass' - df_g = pd.DataFrame(RES['gbus'].values.tolist()) - df_g.index.name = 'Pass' + df_chi = pd.DataFrame(RES["chi_in_MHz"].values.tolist()) + df_chi.index.name = "Pass" + df_g = pd.DataFrame(RES["gbus"].values.tolist()) + df_g.index.name = "Pass" fig, axs = plt.subplots(1, 2, figsize=(9, 3.5)) df_chi.plot(lw=2, ax=axs[0]) df_g.plot(lw=2, ax=axs[1]) _style_plot_convergence(axs[0]) _style_plot_convergence(axs[1]) - axs[0].set_title(r'$\chi$ convergence (MHz)') - axs[1].set_title(r'$g$ convergence (MHz)') + axs[0].set_title(r"$\chi$ convergence (MHz)") + axs[1].set_title(r"$g$ convergence (MHz)") return fig diff --git a/pyEPR/toolbox/_logging.py b/pyEPR/toolbox/_logging.py index 4fee6ea..b359537 100644 --- a/pyEPR/toolbox/_logging.py +++ b/pyEPR/toolbox/_logging.py @@ -2,6 +2,7 @@ from .. 
import config + def set_up_logger(logger): # add custom stream handler logger.c_handler = logging.StreamHandler() @@ -15,5 +16,3 @@ def set_up_logger(logger): logger.c_handler.setFormatter(logger.c_format) logger.addHandler(logger.c_handler) logger.setLevel(getattr(logging, config.log.level)) - - diff --git a/pyEPR/toolbox/plotting.py b/pyEPR/toolbox/plotting.py index 5cc9a79..7479301 100644 --- a/pyEPR/toolbox/plotting.py +++ b/pyEPR/toolbox/plotting.py @@ -25,19 +25,19 @@ def mpl_dpi(dpi=200): - ''' + """ Set the matplotlib resolution for images dots per inch - ''' - mpl.rcParams['figure.dpi'] = dpi - mpl.rcParams['savefig.dpi'] = dpi + """ + mpl.rcParams["figure.dpi"] = dpi + mpl.rcParams["savefig.dpi"] = dpi def plt_cla(ax: Axes): - ''' + """ Clear all plotted objects on an axis ax : matplotlib axis - ''' + """ ax = ax if not ax is None else plt.gca() for artist in ax.lines + ax.collections + ax.patches + ax.images + ax.texts: artist.remove() @@ -46,14 +46,14 @@ def plt_cla(ax: Axes): def legend_translucent(ax: Axes, values=[], loc=0, alpha=0.5, leg_kw={}): - ''' + """ values = [ ["%.2f" %k for k in RES] ] Also, you can use the following: leg_kw = dict(fancybox =True, fontsize = 9, framealpha =0.5, ncol = 1) blah.plot().legend(**leg_kw ) - ''' + """ if ax.get_legend_handles_labels() == ([], []): return None @@ -65,18 +65,19 @@ def legend_translucent(ax: Axes, values=[], loc=0, alpha=0.5, leg_kw={}): ################################################################################# # Color cycles + def get_last_color(ax: Axes): - ''' + """ gets the color for the last plotted line use: datai.plot(label=name, marker='o') data.plot(label=name, marker='o', c=get_last_color(plt.gca())) - ''' + """ return ax.lines[-1].get_color() def get_next_color(ax: Axes): - ''' + """ To reset color cycle ax.set_prop_cycle(None) @@ -86,66 +87,74 @@ def get_next_color(ax: Axes): get_color_cycle(3) ['c', 'm', 'y', 'k']; # from cycler import cycler See also get_color_cycle - ''' - return next(ax._get_lines.prop_cycler)['color'] + """ + return next(ax._get_lines.prop_cycler)["color"] -def get_color_cycle(n, colormap=None, start=0., stop=1., format='hex'): - ''' +def get_color_cycle(n, colormap=None, start=0.0, stop=1.0, format="hex"): + """ See also get_next_color - ''' + """ colormap = colormap or default_colormap() pts = np.linspace(start, stop, n) colors = None - if format == 'hex': + if format == "hex": colors = [rgb2hex(colormap(pt)) for pt in pts] return colors def cmap_discrete(n, cmap_kw={}): - ''' Discrete colormap. - cmap_kw = dict(colormap = plt.cm.gist_earth, start = 0.05, stop = .95) - - cmap_kw - ----------------------- - helix = True, Allows us to instead call helix from here - ''' - if cmap_kw.pop('helix', False): + """Discrete colormap. 
+ cmap_kw = dict(colormap = plt.cm.gist_earth, start = 0.05, stop = .95) + + cmap_kw + ----------------------- + helix = True, Allows us to instead call helix from here + """ + if cmap_kw.pop("helix", False): return cmap_discrete_CubeHelix(n, helix_kw=cmap_kw) - cmap_KW = dict(colormap=default_colormap(), - start=0.05, stop=.95) + cmap_KW = dict(colormap=default_colormap(), start=0.05, stop=0.95) cmap_KW.update(cmap_kw) - return get_color_cycle(n+1, **cmap_KW) + return get_color_cycle(n + 1, **cmap_KW) def cmap_discrete_CubeHelix(n, helix_kw={}): - ''' - https://github.com/jiffyclub/palettable/blob/master/demo/Cubehelix%20Demo.ipynb - cube.show_discrete_image() - - Requires palettable - ''' - from palettable import cubehelix # pylint: disable=import-error - helix_KW = dict(start_hue=240., end_hue=-300., min_sat=1., max_sat=2.5, - min_light=0.3, max_light=0.8, gamma=.9) + """ + https://github.com/jiffyclub/palettable/blob/master/demo/Cubehelix%20Demo.ipynb + cube.show_discrete_image() + + Requires palettable + """ + from palettable import cubehelix # pylint: disable=E0401 + + helix_KW = dict( + start_hue=240.0, + end_hue=-300.0, + min_sat=1.0, + max_sat=2.5, + min_light=0.3, + max_light=0.8, + gamma=0.9, + ) helix_KW.update(helix_kw) cube = cubehelix.Cubehelix.make(n=n, **helix_KW) return cube.mpl_colors -def xarr_heatmap(fg, title=None, kwheat={}, fmt=('%.3f', '%.2f'), fig=None): - ''' +def xarr_heatmap(fg, title=None, kwheat={}, fmt=("%.3f", "%.2f"), fig=None): + """ Needs seaborn and xarray - ''' + """ fig = plt.figure() if fig == None else fig df = fg.to_pandas() # format indices df.index = [float(fmt[0] % x) for x in df.index] df.columns = [float(fmt[1] % x) for x in df.columns] import seaborn as sns + ax = sns.heatmap(df, annot=True, **kwheat) ax.invert_yaxis() ax.set_title(title) @@ -153,8 +162,7 @@ def xarr_heatmap(fg, title=None, kwheat={}, fmt=('%.3f', '%.2f'), fig=None): ax.set_ylabel(fg.dims[0]) -__all__ = ['legend_translucent', 'cmap_discrete', - 'get_color_cycle', 'xarr_heatmap'] +__all__ = ["legend_translucent", "cmap_discrete", "get_color_cycle", "xarr_heatmap"] """ Jupyter widgets: diff --git a/pyEPR/toolbox/pythonic.py b/pyEPR/toolbox/pythonic.py index d3a37ae..21f97df 100644 --- a/pyEPR/toolbox/pythonic.py +++ b/pyEPR/toolbox/pythonic.py @@ -4,8 +4,12 @@ @author: Zlatko K. Minev, pyEPR ream """ -from __future__ import division, print_function, absolute_import # Python 2.7 and 3 compatibility -import platform # Which OS we run +from __future__ import ( + division, + print_function, + absolute_import, +) # Python 2.7 and 3 compatibility +import platform # Which OS we run import numpy as np import pandas as pd import warnings @@ -20,8 +24,9 @@ # Utility functions # ============================================================================== + def combinekw(kw1, kw2): - ''' Copy kw1, update with kw2, return result ''' + """Copy kw1, update with kw2, return result""" kw = kw1.copy() kw.update(kw2) return kw @@ -44,26 +49,26 @@ def isfloat(value): def floor_10(x): - ''' round to nearest lower power of 10 c''' - return 10.**(np.floor(np.log10(x))) + """round to nearest lower power of 10 c""" + return 10.0 ** (np.floor(np.log10(x))) def fact(n): - ''' Factorial ''' + """Factorial""" if n <= 1: return 1 - return n * fact(n-1) + return n * fact(n - 1) def nck(n, k): - ''' choose ''' - return fact(n)/(fact(k)*fact(n-k)) + """choose""" + return fact(n) / (fact(k) * fact(n - k)) def get_above_diagonal(M): - ''' extract the values that are above the diagonal. 
- Assumes square matrix - ''' + """extract the values that are above the diagonal. + Assumes square matrix + """ return M[np.triu_indices(M.shape[0], k=1)] @@ -76,30 +81,30 @@ def df_find_index(s: pd.Series, find, degree=2, ax=False): min_ = min(s.index.values) if find <= max_ and find >= min_: # interpolate - z = pd.Series(list(s.index.values)+[np.NaN], index=list(s) + [find]) + z = pd.Series(list(s.index.values) + [np.NaN], index=list(s) + [find]) z = z.sort_index() z = z.interpolate() return z[find], z else: - print('extrapolating') + print("extrapolating") z = pd.Series(list(s.index.values), index=list(s)) p = df_extrapolate(z, degree=degree, ax=False) value = p(find) return value, p -def df_interpolate_value(s: pd.Series, find, ax=False, method='index'): +def df_interpolate_value(s: pd.Series, find, ax=False, method="index"): """ Given a Pandas Series such as of freq with index Lj, find the freq that would correspond to Lj given a value not in the index """ - z = pd.Series(list(s) + [np.NaN], index=list(s.index.values)+[find]) + z = pd.Series(list(s) + [np.NaN], index=list(s.index.values) + [find]) z = z.sort_index() z = z.interpolate(method=method) return z[find], z -def df_extrapolate(s, degree=2, ax=False, rng_scale=2.): +def df_extrapolate(s, degree=2, ax=False, rng_scale=2.0): """ For a pandas series @@ -115,16 +120,17 @@ def df_extrapolate(s, degree=2, ax=False, rng_scale=2.): max_ = max(s.index.values) min_ = min(s.index.values) rng = max_ - min_ - xp = np.linspace(min_-rng_scale*rng, max_+rng_scale*rng, 100) + xp = np.linspace(min_ - rng_scale * rng, max_ + rng_scale * rng, 100) ys = p(xp) ax.plot(xp, ys) - s.plot(marker='o', ax=ax) + s.plot(marker="o", ax=ax) return p -def df_regress_value(s: pd.Series, index, degree=2, ax=False, method='index', - rng_scale=2.): +def df_regress_value( + s: pd.Series, index, degree=2, ax=False, method="index", rng_scale=2.0 +): """ for pandas series. calls either df_interpolate_value or df_extrapolate @@ -147,7 +153,7 @@ def series_of_1D_dict_to_multi_df(Uj_ind: pd.Series): def sort_df_col(df): - ''' sort by numerical int order ''' + """sort by numerical int order""" return df.sort_index(axis=1) # Buggy code, doesn't handles ints as inputs or floats as inputs @@ -162,7 +168,7 @@ def sort_df_col(df): def sort_Series_idx(sr): - ''' sort by numerical int order ''' + """sort by numerical int order""" idx_names = sr.index if np.all(idx_names.map(isint)): return sr[idx_names.astype(int).sort_values().astype(str)] @@ -175,9 +181,9 @@ def sort_Series_idx(sr): def get_instance_vars(obj, Forbidden=[]): VARS = {} for v in dir(obj): - if not (v.startswith('__') or v.startswith('_')): + if not (v.startswith("__") or v.startswith("_")): if not callable(getattr(obj, v)): - # Added for using addict.Dict which is not callable. + # Added for using addict.Dict which is not callable. if not isinstance(getattr(obj, v), Dict): if not (v in Forbidden): VARS[v] = getattr(obj, v) @@ -188,12 +194,17 @@ def deprecated(func): """This is a decorator which can be used to mark functions as deprecated. It will result in a warning being emitted when the function is used. 
See StackExchange""" + def newFunc(*args, **kwargs): - warnings.simplefilter('always', DeprecationWarning) # turn off filter - warnings.warn("Call to deprecated function {}.".format( - func.__name__), category=DeprecationWarning, stacklevel=2) - warnings.simplefilter('default', DeprecationWarning) # reset filter + warnings.simplefilter("always", DeprecationWarning) # turn off filter + warnings.warn( + "Call to deprecated function {}.".format(func.__name__), + category=DeprecationWarning, + stacklevel=2, + ) + warnings.simplefilter("default", DeprecationWarning) # reset filter return func(*args, **kwargs) + newFunc.__name__ = func.__name__ newFunc.__doc__ = func.__doc__ newFunc.__dict__.update(func.__dict__) @@ -201,7 +212,7 @@ def newFunc(*args, **kwargs): def info_str_platform(): - return ''' + return """ System platform information: @@ -218,7 +229,7 @@ def info_str_platform(): version : %s (implem: %s) compiler : %s - ''' % ( + """ % ( platform.system(), platform.node(), platform.release(), @@ -226,8 +237,10 @@ def info_str_platform(): platform.processor(), platform.platform(), platform.version(), - platform.python_version(), platform.python_implementation(), - platform.python_compiler()) + platform.python_version(), + platform.python_implementation(), + platform.python_compiler(), + ) # ============================================================================== @@ -238,36 +251,36 @@ def info_str_platform(): def print_matrix(M, frmt="{:7.2f}", append_row=""): M = np.mat(M) for row in np.array(M.tolist()): - print(' ', end='') + print(" ", end="") for chi in row: - print(frmt.format(chi), end='') - print(append_row+"\n", end='') + print(frmt.format(chi), end="") + print(append_row + "\n", end="") -def divide_diagonal_by_2(CHI0, div_fact=2.): +def divide_diagonal_by_2(CHI0, div_fact=2.0): CHI = CHI0.copy() CHI[np.diag_indices_from(CHI)] /= div_fact return CHI def print_NoNewLine(text): - print((text), end='') + print((text), end="") def print_color(text, style=0, fg=24, bg=43, newline=True): - '''For newer, see pc (or Print_colors) - style 0..8; fg 30..38; bg 40..48 - ''' - format = ';'.join([str(style), str(fg), str(bg)]) - s = '\x1b[%sm %s \x1b[0m' % (format, text) + """For newer, see pc (or Print_colors) + style 0..8; fg 30..38; bg 40..48 + """ + format = ";".join([str(style), str(fg), str(bg)]) + s = "\x1b[%sm %s \x1b[0m" % (format, text) if newline: print(s) else: - print(s, end='') + print(s, end="") class Print_colors: - '''Colors class:reset all colors with colors.reset; two + """Colors class:reset all colors with colors.reset; two sub classes fg for foreground and bg for background; use as colors.subclass.colorname. i.e. 
colors.fg.red or colors.bg.green also, the generic bold, disable, @@ -279,41 +292,42 @@ class Print_colors: ..codeblock python print(colors.bg.green, "adgd", colors.fg.red, "dsgdsg") print(colors.bg.lightgrey, "dsgsd", colors.fg.red, "sdgsd") - ''' - reset = '\033[0m' - bold = '\033[01m' - disable = '\033[02m' - underline = '\033[04m' - reverse = '\033[07m' - strikethrough = '\033[09m' - invisible = '\033[08m' + """ + + reset = "\033[0m" + bold = "\033[01m" + disable = "\033[02m" + underline = "\033[04m" + reverse = "\033[07m" + strikethrough = "\033[09m" + invisible = "\033[08m" class fg: - black = '\033[30m' - red = '\033[31m' - green = '\033[32m' - orange = '\033[33m' - blue = '\033[34m' - purple = '\033[35m' - cyan = '\033[36m' - lightgrey = '\033[37m' - darkgrey = '\033[90m' - lightred = '\033[91m' - lightgreen = '\033[92m' - yellow = '\033[93m' - lightblue = '\033[94m' - pink = '\033[95m' - lightcyan = '\033[96m' + black = "\033[30m" + red = "\033[31m" + green = "\033[32m" + orange = "\033[33m" + blue = "\033[34m" + purple = "\033[35m" + cyan = "\033[36m" + lightgrey = "\033[37m" + darkgrey = "\033[90m" + lightred = "\033[91m" + lightgreen = "\033[92m" + yellow = "\033[93m" + lightblue = "\033[94m" + pink = "\033[95m" + lightcyan = "\033[96m" class bg: - black = '\033[40m' - red = '\033[41m' - green = '\033[42m' - orange = '\033[43m' - blue = '\033[44m' - purple = '\033[45m' - cyan = '\033[46m' - lightgrey = '\033[47m' + black = "\033[40m" + red = "\033[41m" + green = "\033[42m" + orange = "\033[43m" + blue = "\033[44m" + purple = "\033[45m" + cyan = "\033[46m" + lightgrey = "\033[47m" pc = Print_colors @@ -324,14 +338,14 @@ class bg: def DataFrame_col_diff(PS, indx=0): - ''' check weather the columns of a dataframe are equal, - returns a T/F series of the row index that specifies which rows are different - USE: - PS[DataFrame_col_diff(PS)] - ''' + """check whether the columns of a dataframe are equal, + returns a T/F series of the row index that specifies which rows are different + USE: + PS[DataFrame_col_diff(PS)] + """ R = [] - for i in range(PS.shape[1]-1): - R += [PS.iloc[:, i] == PS.iloc[:, i+1]] + for i in range(PS.shape[1] - 1): + R += [PS.iloc[:, i] == PS.iloc[:, i + 1]] if len(R) == 1: return np.logical_not(R[0]) else: @@ -339,15 +353,16 @@ def DataFrame_col_diff(PS, indx=0): def DataFrame_display_side_by_side(*args, do_display=True): - ''' + """ from pyEPR.toolbox.pythonic import display_dfs https://stackoverflow.com/questions/38783027/jupyter-notebook-display-two-pandas-tables-side-by-side - ''' + """ from IPython.display import display_html - html_str = '' + + html_str = "" for df in args: html_str += df.to_html() - text = html_str.replace('table', 'table style="display:inline"') + text = html_str.replace("table", 'table style="display:inline"') if do_display: display_html(text, raw=True) return text @@ -357,32 +372,45 @@ def DataFrame_display_side_by_side(*args, do_display=True): def xarray_unravel_levels(arr, names, my_convert=lambda x: x): - ''' Takes in nested dict of dict of dataframes - names : names of lists; you dont have to include the last two dataframe columns & rows, but you can to override them - requires xarray - ''' + """Takes in nested dict of dict of dataframes + names : names of lists; you don't have to include the last two dataframe columns & rows, but you can in order to override them + requires xarray + """ import xarray # pylint: disable=import-error + if type(arr) == pd.DataFrame: return xarray.DataArray(arr, dims=None if len(names) == 0 else names) elif 
type(arr) in [OrderedDict, dict]: - return xarray.concat([xarray_unravel_levels(item, names[1:]) for k, item in arr.items()], pd.Index(arr.keys(), name=names[0])) + return xarray.concat( + [xarray_unravel_levels(item, names[1:]) for k, item in arr.items()], + pd.Index(arr.keys(), name=names[0]), + ) elif type(arr) == xarray.DataArray: return arr else: return my_convert(arr) -def robust_percentile(calc_data, ROBUST_PERCENTILE=2.): - ''' - analysis helper function - ''' +def robust_percentile(calc_data, ROBUST_PERCENTILE=2.0): + """ + analysis helper function + """ vmin = np.percentile(calc_data, ROBUST_PERCENTILE) vmax = np.percentile(calc_data, 100 - ROBUST_PERCENTILE) return vmin, vmax -__all__ = ['fact', 'nck', 'combinekw', - 'divide_diagonal_by_2', 'df_find_index', - 'sort_df_col', 'sort_Series_idx', - 'print_matrix', 'print_NoNewLine', - 'DataFrame_col_diff', 'xarray_unravel_levels', 'robust_percentile'] +__all__ = [ + "fact", + "nck", + "combinekw", + "divide_diagonal_by_2", + "df_find_index", + "sort_df_col", + "sort_Series_idx", + "print_matrix", + "print_NoNewLine", + "DataFrame_col_diff", + "xarray_unravel_levels", + "robust_percentile", +] diff --git a/readthedocs.yml b/readthedocs.yml deleted file mode 100644 index 3178110..0000000 --- a/readthedocs.yml +++ /dev/null @@ -1,19 +0,0 @@ -# .readthedocs.yml - -# Required -version: 2 - -# Build documentation in the docs/source/ directory with Sphinx -sphinx: - configuration: docs/source/conf.py - -# Optionally build your docs in additional formats such as PDF and ePub -formats: - - pdf - -# Optionally set the version of Python and requirements required to build your docs -python: - version: 3.8 - install: - - requirements: requirements.txt - - path: . diff --git a/scripts/Alec/11ghz/EPR_test.py b/scripts/Alec/11ghz/EPR_test.py index 5526848..eadc534 100644 --- a/scripts/Alec/11ghz/EPR_test.py +++ b/scripts/Alec/11ghz/EPR_test.py @@ -9,30 +9,46 @@ if 0: # Specify the HFSS project to be analyzed - project_info = ProjectInfo(r"C:\Users\awe4\Documents\Backed\hfss_simulations\11ghz\\") - project_info.project_name = '11ghz_alec' # Name of the project file (string). "None" will get the current active one. - project_info.design_name = '11ghz_design1' # Name of the design file (string). "None" will get the current active one. - project_info.setup_name = None # Name of the setup(string). "None" will get the current active one. - - project_info.junctions['bot_junc'] = {'rect':'bot_junction', 'line': 'bot_junc_line', 'Lj_variable':'bot_lj', 'length':0.0001} - project_info.junctions['top_junc'] = {'rect':'top_junction', 'line': 'top_junc_line', 'Lj_variable':'top_lj', 'length':0.0001} - - project_info.dissipative['dielectric_surfaces'] = None # supply names here, there are more options in project_info.dissipative. - - epr_hfss = DistributedAnalysis(project_info) + project_info = ProjectInfo( + r"C:\Users\awe4\Documents\Backed\hfss_simulations\11ghz\\" + ) + project_info.project_name = "11ghz_alec" # Name of the project file (string). "None" will get the current active one. + project_info.design_name = "11ghz_design1" # Name of the design file (string). "None" will get the current active one. + project_info.setup_name = ( + None # Name of the setup(string). "None" will get the current active one. 
+ ) + + project_info.junctions["bot_junc"] = { + "rect": "bot_junction", + "line": "bot_junc_line", + "Lj_variable": "bot_lj", + "length": 0.0001, + } + project_info.junctions["top_junc"] = { + "rect": "top_junction", + "line": "top_junc_line", + "Lj_variable": "top_lj", + "length": 0.0001, + } + + project_info.dissipative[ + "dielectric_surfaces" + ] = None # supply names here, there are more options in project_info.dissipative. + + epr_hfss = DistributedAnalysis(project_info) epr_hfss.do_EPR_analysis() #%% if 1: - epr = QuantumAnalysis(epr_hfss.data_filename) # Analysis results - #result = epr.analyze_variation('1', cos_trunc = 8, fock_trunc = 7) - epr.analyze_all_variations(cos_trunc = 10, fock_trunc = 7); + epr = QuantumAnalysis(epr_hfss.data_filename) # Analysis results + # result = epr.analyze_variation('1', cos_trunc = 8, fock_trunc = 7) + epr.analyze_all_variations(cos_trunc=10, fock_trunc=7) #%% if 1: - PM = OrderedDict() - for n in range(3,10): - epr.analyze_all_variations(cos_trunc = 10, fock_trunc = n); - PM[n] = epr.results['0']['chi_ND'] + PM = OrderedDict() + for n in range(3, 10): + epr.analyze_all_variations(cos_trunc=10, fock_trunc=n) + PM[n] = epr.results["0"]["chi_ND"] - {k:v[0][0] for k,v in PM.items()} \ No newline at end of file + {k: v[0][0] for k, v in PM.items()} diff --git a/scripts/Alec/7ghz/7ghz_pyEPR.py b/scripts/Alec/7ghz/7ghz_pyEPR.py index 79484c5..8fc1b03 100644 --- a/scripts/Alec/7ghz/7ghz_pyEPR.py +++ b/scripts/Alec/7ghz/7ghz_pyEPR.py @@ -10,25 +10,39 @@ if 1: # Specify the HFSS project to be analyzed project_info = ProjectInfo(r"C:\Users\awe4\Documents\Simulations\HFSS\11ghz\\") - project_info.project_name = '2017_08_Zlatko_Shyam_AutStab' # Name of the project file (string). "None" will get the current active one. - project_info.design_name = 'pyEPR_2_chips' # Name of the design file (string). "None" will get the current active one. - project_info.setup_name = None # Name of the setup(string). "None" will get the current active one. + project_info.project_name = "2017_08_Zlatko_Shyam_AutStab" # Name of the project file (string). "None" will get the current active one. + project_info.design_name = "pyEPR_2_chips" # Name of the design file (string). "None" will get the current active one. + project_info.setup_name = ( + None # Name of the setup(string). "None" will get the current active one. + ) ## Describe the junctions in the HFSS design - project_info.junctions['jAlice'] = {'rect':'qubitAlice', 'line': 'alice_line', 'Lj_variable':'LJAlice', 'length':0.0001} - project_info.junctions['jBob'] = {'rect':'qubitBob', 'line': 'bob_line', 'Lj_variable':'LJBob', 'length':0.0001} + project_info.junctions["jAlice"] = { + "rect": "qubitAlice", + "line": "alice_line", + "Lj_variable": "LJAlice", + "length": 0.0001, + } + project_info.junctions["jBob"] = { + "rect": "qubitBob", + "line": "bob_line", + "Lj_variable": "LJBob", + "length": 0.0001, + } # Dissipative elements EPR - project_info.dissipative['dielectric_surfaces'] = None # supply names here, there are more options in project_info.dissipative. + project_info.dissipative[ + "dielectric_surfaces" + ] = None # supply names here, there are more options in project_info.dissipative. 
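# Per the _Dissipative validation in project_info.py above, each dissipative
# entry must be a list (or dict) of strings naming objects that exist in the
# HFSS project; for illustration (object names here are hypothetical):
#   project_info.dissipative["dielectric_surfaces"] = ["substrate_air_interface"]
#   project_info.dissipative["seams"] = ["seam_left", "seam_right"]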
# Run analysis - epr_hfss = DistributedAnalysis(project_info) + epr_hfss = DistributedAnalysis(project_info) epr_hfss.do_EPR_analysis() -if 0: # Hamiltonian analysis +if 0: # Hamiltonian analysis filename = epr_hfss.data_filename - #filename = r'C:\\Users\\rslqulab\\Desktop\\zkm\\2017_pyEPR_data\\\\/2017_08_Zlatko_Shyam_AutStab/2 pyEPR/2 pyEPR_20170825_170550.hdf5' - epr = QuantumAnalysis(filename) + # filename = r'C:\\Users\\rslqulab\\Desktop\\zkm\\2017_pyEPR_data\\\\/2017_08_Zlatko_Shyam_AutStab/2 pyEPR/2 pyEPR_20170825_170550.hdf5' + epr = QuantumAnalysis(filename) epr._renorm_pj = False - #result = epr.analyze_variation('1', cos_trunc = 8, fock_trunc = 7) - epr.analyze_all_variations(cos_trunc = 8, fock_trunc = 7) + # result = epr.analyze_variation('1', cos_trunc = 8, fock_trunc = 7) + epr.analyze_all_variations(cos_trunc=8, fock_trunc=7) epr.plot_hamiltonian_results() diff --git a/scripts/Kaicheng/import_pyEPR.py b/scripts/Kaicheng/import_pyEPR.py index 8746539..e84e326 100644 --- a/scripts/Kaicheng/import_pyEPR.py +++ b/scripts/Kaicheng/import_pyEPR.py @@ -10,29 +10,38 @@ if 0: # Specify the HFSS project to be analyzed project_info = ProjectInfo(r"X:\Simulation\\hfss\\KC\\") - project_info.project_name = '2013-12-03_9GHzCavity' # Name of the project file (string). "None" will get the current active one. - project_info.design_name = '9GHz_EM_center_SNAIL' # Name of the design file (string). "None" will get the current active one. - project_info.setup_name = None # Name of the setup(string). "None" will get the current active one. + project_info.project_name = "2013-12-03_9GHzCavity" # Name of the project file (string). "None" will get the current active one. + project_info.design_name = "9GHz_EM_center_SNAIL" # Name of the design file (string). "None" will get the current active one. + project_info.setup_name = ( + None # Name of the setup(string). "None" will get the current active one. + ) ## Describe the junctions in the HFSS design - project_info.junctions['snail'] = {'rect':'qubit', 'line': 'JunctionLine', 'Lj_variable':'LJ', 'length':0.0001} -# project_info.junctions['jBob'] = {'rect':'qubitBob', 'line': 'bob_line', 'Lj_variable':'LJBob', 'length':0.0001} + project_info.junctions["snail"] = { + "rect": "qubit", + "line": "JunctionLine", + "Lj_variable": "LJ", + "length": 0.0001, + } + # project_info.junctions['jBob'] = {'rect':'qubitBob', 'line': 'bob_line', 'Lj_variable':'LJBob', 'length':0.0001} # Dissipative elements EPR - project_info.dissipative['dielectric_surfaces'] = None # supply names here, there are more options in project_info.dissipative. + project_info.dissipative[ + "dielectric_surfaces" + ] = None # supply names here, there are more options in project_info.dissipative. 
diff --git a/scripts/hanhee/run_vs_pass.py b/scripts/hanhee/run_vs_pass.py
index 12aee7a..ecbb6a0 100644
--- a/scripts/hanhee/run_vs_pass.py
+++ b/scripts/hanhee/run_vs_pass.py
@@ -9,37 +9,44 @@ from pyEPR import ProjectInfo, DistributedAnalysis, QuantumAnalysis
 
 # 1. Project and design. Open link to HFSS controls.
-project_info = ProjectInfo('D:\LOGIQ-IBMQ\Cranes\HFSS simulation\\',
-                           project_name = '2018-12-03 Zlatko pyEPR',  # Project file name (string). "None" will get the current active one.
-                           design_name = 'L-4 bus-EPR'  # Design name (string). "None" will get the current active one.
-                           )
+project_info = ProjectInfo(
+    "D:\LOGIQ-IBMQ\Cranes\HFSS simulation\\",
+    project_name="2018-12-03 Zlatko pyEPR",  # Project file name (string). "None" will get the current active one.
+    design_name="L-4 bus-EPR",  # Design name (string). "None" will get the current active one.
+)
 
 # 2a. Junctions. Specify junctions in HFSS model
-for i in range(1,3+1):  # specify N number of junctions
-    i=str(i)
-    project_info.junctions['j'+i] = {'Lj_variable':'Lj'+i, 'rect':'Qubit'+i, 'line': 'Polyline'+i, 'length':30*10**-6}
+for i in range(1, 3 + 1):  # specify N number of junctions
+    i = str(i)
+    project_info.junctions["j" + i] = {
+        "Lj_variable": "Lj" + i,
+        "rect": "Qubit" + i,
+        "line": "Polyline" + i,
+        "length": 30 * 10**-6,
+    }
 
 # 2b. Dissipative elements.
-#project_info.dissipative['dielectrics_bulk'] = ['subs_Q1']  # supply names here, there are more options in project_info.dissipative.
-#project_info.dissipative['dielectric_surfaces'] = ['interface']
+# project_info.dissipative['dielectrics_bulk'] = ['subs_Q1']  # supply names here, there are more options in project_info.dissipative.
+# project_info.dissipative['dielectric_surfaces'] = ['interface']
 
 #%%
 # 3. Run analysis
 if 1:
-    passes = range(1,20,1)
+    passes = range(1, 20, 1)
     epr_hfss = DistributedAnalysis(project_info)
 
 # CLEAR DATA #
 if not 'RES' in locals():
     from collections import OrderedDict
+
     RES = OrderedDict()
 
 #%%%
 setup_name = None
-design = epr_hfss.design
+design = epr_hfss.design
 setup_name = setup_name if not (setup_name is None) else design.get_setup_names()[0]
-setup = design.get_setup(setup_name)
-print(' HFSS setup name: %s' % setup_name)
+setup = design.get_setup(setup_name)
+print(" HFSS setup name: %s" % setup_name)
 
 #%%
 from numpy import diag, sqrt, array
@@ -47,127 +54,187 @@ from pyEPR.toolbox import get_above_diagonal
 
-def do_analysis(pass_, variation = '0'):
+def do_analysis(pass_, variation="0"):
     epr_hfss.do_EPR_analysis(variations=[variation])
     RES[pass_] = OrderedDict()
     epr = QuantumAnalysis(epr_hfss.data_filename)
-    RES[pass_]['epr'] = epr
-    #epr = RES[pass_]['epr']
-    RES[pass_]['freq_hfss'] = epr.freqs_hfss[variation]
-    RES[pass_]['Pmj_raw'] = epr.PM[variation]
+    RES[pass_]["epr"] = epr
+    # epr = RES[pass_]['epr']
+    RES[pass_]["freq_hfss"] = epr.freqs_hfss[variation]
+    RES[pass_]["Pmj_raw"] = epr.PM[variation]
 
     ## NORMED
-    dum = epr.get_Pmj('0')
-    RES[pass_]['Pmj_normed'] = dum['PJ']      # DataFrame
-    RES[pass_]['_Pmj_norm'] = dum['Pm_norm']  # Series
+    dum = epr.get_Pmj("0")
+    RES[pass_]["Pmj_normed"] = dum["PJ"]  # DataFrame
+    RES[pass_]["_Pmj_norm"] = dum["Pm_norm"]  # Series
 
-    RES[pass_]['mats'] = epr.get_matrices(variation, print_=False)  # arrays
-    # PJ, SJ, Om, EJ, PHI_zpf
-    RES[pass_]['Hres'] =Hres = zkm_get_Hparams(*RES[pass_]['mats'])
-    RES[pass_]['alpha'] = Hres['alpha']  # easy access
-    RES[pass_]['chi'] = Hres['chi']
+    RES[pass_]["mats"] = epr.get_matrices(variation, print_=False)  # arrays
+    # PJ, SJ, Om, EJ, PHI_zpf
+    RES[pass_]["Hres"] = Hres = zkm_get_Hparams(*RES[pass_]["mats"])
+    RES[pass_]["alpha"] = Hres["alpha"]  # easy access
+    RES[pass_]["chi"] = Hres["chi"]
 
     ### Numerical diagonalization
-    #RES[pass_]['ND'] = None
+    # RES[pass_]['ND'] = None
    if 1:
-        print(' ND pass=%s variation=%s' % (pass_, variation))
+        print(" ND pass=%s variation=%s" % (pass_, variation))
         from pyEPR.core import pyEPR_ND
-        f1_ND, CHI_ND = pyEPR_ND(epr.freqs_hfss[variation], epr.Ljs[variation],
-                                 RES[pass_]['mats'][-1],  # PHI_zpf
-                                 cos_trunc=10, fock_trunc=9)
-        RES[pass_]['ND'] = {}
-        RES[pass_]['ND']['f01'] = f1_ND
-        RES[pass_]['ND']['CHI'] = CHI_ND
+
+        f1_ND, CHI_ND = pyEPR_ND(
+            epr.freqs_hfss[variation],
+            epr.Ljs[variation],
+            RES[pass_]["mats"][-1],  # PHI_zpf
+            cos_trunc=10,
+            fock_trunc=9,
+        )
+        RES[pass_]["ND"] = {}
+        RES[pass_]["ND"]["f01"] = f1_ND
+        RES[pass_]["ND"]["CHI"] = CHI_ND
 
     return Hres
 
 
 def zkm_get_Hparams(PJ, SJ, Om, EJ, PHI):
-    '''
-    Report all in MHz
-    '''
+    """
+    Report all in MHz
+    """
     M, J = PJ.shape
-    res = {'alpha_p4R2_p6R1':[],  # {p=4,RWA=2} + {p=6,RWA2}
-           'chi':{},  # {p=4,RWA=1}
-           'omega_zx':{}}
-
-    res['alpha_p4R2_p6R1'] = \
-        [1000 * sum([ \
-            PJ[m,j]**2 *(Om[m,m]**2) / (8.*EJ[j,j]) * \
-            ( 1. + PJ[m,j] * Om[m,m] / EJ[j,j] * (17./32.* PJ[m,j] - 0.25) )
-            for j in range(J)])  # MHz
-         for m in range(M) ]
-
-    res['alpha_zpf'] = \
-        [1000 * sum([ 0.5*EJ[j,j] * PHI[m,j]**4 + 2.*306./Om[m,m]*(EJ[j,j]/24.)**2 * PHI[m,j]**8 - 0.25*EJ[j,j] * PHI[m,j]**6
-            for j in range(J)])  # MHz
-         for m in range(M) ]  # fully equivalent, checked
+    res = {
+        "alpha_p4R2_p6R1": [],  # {p=4,RWA=2} + {p=6,RWA2}
+        "chi": {},  # {p=4,RWA=1}
+        "omega_zx": {},
+    }
+
+    res["alpha_p4R2_p6R1"] = [
+        1000
+        * sum(
+            [
+                PJ[m, j] ** 2
+                * (Om[m, m] ** 2)
+                / (8.0 * EJ[j, j])
+                * (
+                    1.0
+                    + PJ[m, j] * Om[m, m] / EJ[j, j] * (17.0 / 32.0 * PJ[m, j] - 0.25)
+                )
+                for j in range(J)
+            ]
+        )  # MHz
+        for m in range(M)
+    ]
+
+    res["alpha_zpf"] = [
+        1000
+        * sum(
+            [
+                0.5 * EJ[j, j] * PHI[m, j] ** 4
+                + 2.0 * 306.0 / Om[m, m] * (EJ[j, j] / 24.0) ** 2 * PHI[m, j] ** 8
+                - 0.25 * EJ[j, j] * PHI[m, j] ** 6
+                for j in range(J)
+            ]
+        )  # MHz
+        for m in range(M)
+    ]  # fully equivalent, checked
 
     ### Cross-Kerr
     for m in range(M):
         for m1 in range(m):
-            res['chi']['%d,%d'%(m1,m)] = 1000.*sum([
-                EJ[j,j] * PHI[m1,j]**2 * PHI[m,j]**2
-                for j in range(J)])
+            res["chi"]["%d,%d" % (m1, m)] = 1000.0 * sum(
+                [EJ[j, j] * PHI[m1, j] ** 2 * PHI[m, j] ** 2 for j in range(J)]
+            )
 
-    res['chi_zpf'] = {}  # debug only
+    res["chi_zpf"] = {}  # debug only
     for m in range(M):
         for m1 in range(m):
-            res['chi_zpf']['%d,%d'%(m1,m)] = 1000.*sum([
-                Om[m1,m1]*Om[m,m]*PJ[m1,j]*PJ[m,j]/(4.*EJ[j,j])
-                for j in range(J)])  # fully equivalent, checked
-
-    res['chi2'] = {}  # higher order correction
+            res["chi_zpf"]["%d,%d" % (m1, m)] = 1000.0 * sum(
+                [
+                    Om[m1, m1] * Om[m, m] * PJ[m1, j] * PJ[m, j] / (4.0 * EJ[j, j])
+                    for j in range(J)
+                ]
+            )  # fully equivalent, checked
+
+    res["chi2"] = {}  # higher order correction
     for m in range(M):
         for m1 in range(m):
-            res['chi2']['%d,%d'%(m1,m)] = 1000.*sum([  # TODO: CHECK AND SIGN
-                (EJ[j,j]/24.)**2*(\
-                    864.*PHI[m1,j]**6 *PHI[m,j]**2 / ( +Om[m1,m1] - Om[m,m])\
-                    -864.*PHI[m1,j]**2 *PHI[m,j]**6 / ( -Om[m1,m1] + Om[m,m])\
-                    -576.*PHI[m1,j]**6 *PHI[m,j]**2 / ( Om[m1,m1] )\
-                    -576.*PHI[m1,j]**2 *PHI[m,j]**6 / ( Om[m,m] )\
-                    -576.*PHI[m1,j]**4 *PHI[m,j]**4 / ( Om[m1,m1] + Om[m,m] )\
-                    +288.*PHI[m1,j]**2 *PHI[m,j]**6 / ( Om[m1,m1] -3*Om[m,m] )\
-                    +288.*PHI[m1,j]**6 *PHI[m,j]**2 / ( Om[m,m] -3*Om[m1,m1] )\
-                    )
-                for j in range(J)])
+            res["chi2"]["%d,%d" % (m1, m)] = 1000.0 * sum(
+                [  # TODO: CHECK AND SIGN
+                    (EJ[j, j] / 24.0) ** 2
+                    * (
+                        864.0
+                        * PHI[m1, j] ** 6
+                        * PHI[m, j] ** 2
+                        / (+Om[m1, m1] - Om[m, m])
+                        - 864.0
+                        * PHI[m1, j] ** 2
+                        * PHI[m, j] ** 6
+                        / (-Om[m1, m1] + Om[m, m])
+                        - 576.0 * PHI[m1, j] ** 6 * PHI[m, j] ** 2 / (Om[m1, m1])
+                        - 576.0 * PHI[m1, j] ** 2 * PHI[m, j] ** 6 / (Om[m, m])
+                        - 576.0
+                        * PHI[m1, j] ** 4
+                        * PHI[m, j] ** 4
+                        / (Om[m1, m1] + Om[m, m])
+                        + 288.0
+                        * PHI[m1, j] ** 2
+                        * PHI[m, j] ** 6
+                        / (Om[m1, m1] - 3 * Om[m, m])
+                        + 288.0
+                        * PHI[m1, j] ** 6
+                        * PHI[m, j] ** 2
+                        / (Om[m, m] - 3 * Om[m1, m1])
+                    )
+                    for j in range(J)
+                ]
+            )
 
     ### CR Gate - analytical {p=4, RWA1}
-    res['omega_zx'] = {}
+    res["omega_zx"] = {}
     for m in range(M):
         for m1 in range(m):
             if m != m1:
-                res['omega_zx']['%d,%d'%(m,m1)] = 1000.* abs( sum([
-                    EJ[j,j] * PHI[m,j]**3 * PHI[m1,j]
-                    for j in range(J)]))  # fully equivalent, checked
-
-    res['alpha'] = res['alpha_p4R2_p6R1']  # MHz
-    res['f01'] = 1000*diag(Om) - res['alpha'] - \
-            [ sum([res['chi']['%d,%d'% (min(m1,m),max(m1,m))] \
-               for m1 in range(M) if m1 != m])\
-             for m in range(M)]
-    # MHz -kerrs .. todo # f_01 frequency
+                res["omega_zx"]["%d,%d" % (m, m1)] = 1000.0 * abs(
+                    sum([EJ[j, j] * PHI[m, j] ** 3 * PHI[m1, j] for j in range(J)])
+                )  # fully equivalent, checked
+
+    res["alpha"] = res["alpha_p4R2_p6R1"]  # MHz
+    res["f01"] = (
+        1000 * diag(Om)
+        - res["alpha"]
+        - [
+            sum(
+                [
+                    res["chi"]["%d,%d" % (min(m1, m), max(m1, m))]
+                    for m1 in range(M)
+                    if m1 != m
+                ]
+            )
+            for m in range(M)
+        ]
+    )
+    # MHz -kerrs .. todo # f_01 frequency
     return res
 
-if 0:  # update all passes
+
+if 0:  # update all passes
     for pass_ in RES.keys():
         do_analysis(pass_)
 
-#res = do_analysis(pass_)
-#pd.Series(res['alpha'])
+# res = do_analysis(pass_)
+# pd.Series(res['alpha'])
 
 #%%
 def do_plot(RES):
-    '''
-    Make sure
-        %matplotlib qt
-    TODO: in future just setup once, and then update lines only
-    '''
+    """
    Make sure
        %matplotlib qt
    TODO: in future just setup once, and then update lines only
+    """
     # live plot https://stackoverflow.com/questions/11874767/how-do-i-plot-in-real-time-in-a-while-loop-using-matplotlib
     # see also pylive
     import matplotlib
-    matplotlib.use('Qt5Agg')
+
+    matplotlib.use("Qt5Agg")
     import matplotlib.pyplot as plt
     import pandas as pd
     import numpy as np
@@ -176,146 +243,156 @@ def do_plot(RES):
 
     plt.ion()
-    fig = plt.figure(1, figsize=(25,10))
+    fig = plt.figure(1, figsize=(25, 10))
     fig.clf()
-    fig, axs = plt.subplots(2, 3, subplot_kw=dict(), num=1,sharex=True)
+    fig, axs = plt.subplots(2, 3, subplot_kw=dict(), num=1, sharex=True)
 
-    kw=dict(marker='o')
+    kw = dict(marker="o")
     leg_kw = dict(fontsize=9, ncol=1)
 
     # Frequency
-    ax = axs[0,0]
-    df = pd.DataFrame({x:1000.*RES[x]['freq_hfss'] for x in RES}).transpose()
-    df.plot(ax=ax,**kw)
-    ax.set_title('Linear mode, HFSS frequency $\omega_m/2\pi$ and dressed (MHz)')
-    legend_translucent(ax, leg_kw=combinekw(leg_kw,dict(title='Mode #')))  #ax.legend(title= 'Mode #')
+    ax = axs[0, 0]
+    df = pd.DataFrame({x: 1000.0 * RES[x]["freq_hfss"] for x in RES}).transpose()
+    df.plot(ax=ax, **kw)
+    ax.set_title("Linear mode, HFSS frequency $\omega_m/2\pi$ and dressed (MHz)")
+    legend_translucent(
+        ax, leg_kw=combinekw(leg_kw, dict(title="Mode #"))
+    )  # ax.legend(title= 'Mode #')
 
     # Dressed frequency
     ax.set_prop_cycle(None)
-    df = pd.DataFrame({x:RES[x]['Hres']['f01'] for x in RES}).transpose()
-    df.plot(ax=ax,legend=False,**combinekw(kw,dict(marker=None,alpha=0.5,ls='--')))
-
+    df = pd.DataFrame({x: RES[x]["Hres"]["f01"] for x in RES}).transpose()
+    df.plot(ax=ax, legend=False, **combinekw(kw, dict(marker=None, alpha=0.5, ls="--")))
 
     # Pmj Norm
-    ax = axs[1,0]
-    df = pd.DataFrame({x:RES[x]['_Pmj_norm'] for x in RES}).transpose()
-    df.plot(ax=ax,**kw)
-    ax.set_title('HFSS $p_{mj}$ norm')
-    legend_translucent(ax, leg_kw=combinekw(leg_kw,dict(title='Mode #')))
-
-    # Frequency
-    ax = axs[0,1]
-    df = pd.DataFrame({x:RES[x]['alpha'] for x in RES}).transpose()
-    df.plot(ax=ax,**kw)
-    ax.set_title(r'Anharmonicity $\alpha_{mj}/2\pi$ (MHz)')
-    legend_translucent(ax, leg_kw=combinekw(leg_kw,dict(title='Mode #')))
-    if RES[pass_]['ND'] is not None:  # plot numerical solution
+    ax = axs[1, 0]
+    df = pd.DataFrame({x: RES[x]["_Pmj_norm"] for x in RES}).transpose()
+    df.plot(ax=ax, **kw)
+    ax.set_title("HFSS $p_{mj}$ norm")
+    legend_translucent(ax, leg_kw=combinekw(leg_kw, dict(title="Mode #")))
+
+    # Frequency
+    ax = axs[0, 1]
+    df = pd.DataFrame({x: RES[x]["alpha"] for x in RES}).transpose()
+    df.plot(ax=ax, **kw)
+    ax.set_title(r"Anharmonicity $\alpha_{mj}/2\pi$ (MHz)")
+    legend_translucent(ax, leg_kw=combinekw(leg_kw, dict(title="Mode #")))
+    if RES[pass_]["ND"] is not None:  # plot numerical solution
         ax.set_prop_cycle(None)
-        df = pd.DataFrame({x:diag(RES[x]['ND']['CHI']) for x in RES}).transpose()
-        df.plot(ax=ax,legend=False,**combinekw(kw,dict(marker=None,alpha=0.5,ls='--')))
-
-    ax = axs[0,2]
-    df = pd.DataFrame({x:RES[x]['chi'] for x in RES}).transpose()
-    df.plot(ax=ax,**kw)
-    ax.set_title(r'Cross-Kerr $\chi_{mm\prime}/2\pi$ (MHz)')
-    legend_translucent(ax, leg_kw=combinekw(leg_kw,dict(title='Mode #')))
-#    if RES[pass_]['ND'] is not None:  # plot numerical solution
-#        ax.set_prop_cycle(None)
-#        df = pd.DataFrame({x:-get_above_diagonal(RES[x]['ND']['CHI']) for x in RES}).transpose()
-##        df = pd.DataFrame({x:RES[x]['Hres']['chi2'] for x in RES}).transpose()
-#        df.plot(ax=ax,legend=False,**combinekw(kw,dict(marker=None,alpha=0.5,ls='--')))
-
-    ax = axs[1,2]
-    df = pd.DataFrame({x:RES[x]['Hres']['omega_zx'] for x in RES}).transpose()
-    df.plot(ax=ax,**kw)
-    ax.set_title(r'Cross-Resonance $\omega_{ZX}/2\pi$ (MHz)')
-    legend_translucent(ax, leg_kw=combinekw(leg_kw,dict(title='C,T')))
+        df = pd.DataFrame({x: diag(RES[x]["ND"]["CHI"]) for x in RES}).transpose()
+        df.plot(
+            ax=ax, legend=False, **combinekw(kw, dict(marker=None, alpha=0.5, ls="--"))
+        )
+
+    ax = axs[0, 2]
+    df = pd.DataFrame({x: RES[x]["chi"] for x in RES}).transpose()
+    df.plot(ax=ax, **kw)
+    ax.set_title(r"Cross-Kerr $\chi_{mm\prime}/2\pi$ (MHz)")
+    legend_translucent(ax, leg_kw=combinekw(leg_kw, dict(title="Mode #")))
+    # if RES[pass_]['ND'] is not None:  # plot numerical solution
+    #     ax.set_prop_cycle(None)
+    #     df = pd.DataFrame({x:-get_above_diagonal(RES[x]['ND']['CHI']) for x in RES}).transpose()
+    ##    df = pd.DataFrame({x:RES[x]['Hres']['chi2'] for x in RES}).transpose()
+    #     df.plot(ax=ax,legend=False,**combinekw(kw,dict(marker=None,alpha=0.5,ls='--')))
+
+    ax = axs[1, 2]
+    df = pd.DataFrame({x: RES[x]["Hres"]["omega_zx"] for x in RES}).transpose()
+    df.plot(ax=ax, **kw)
+    ax.set_title(r"Cross-Resonance $\omega_{ZX}/2\pi$ (MHz)")
+    legend_translucent(ax, leg_kw=combinekw(leg_kw, dict(title="C,T")))
 
     # Pmj normed plot
-    ax = axs[1,1]
-    da = xarray_unravel_levels({x:RES[x]['Pmj_normed'] for x in RES},
-                               names=['pass','mode', 'junction'])
-    for mode in da.coords['mode']:
-        for junc in da.coords['junction']:
+    ax = axs[1, 1]
+    da = xarray_unravel_levels(
+        {x: RES[x]["Pmj_normed"] for x in RES}, names=["pass", "mode", "junction"]
+    )
+    for mode in da.coords["mode"]:
+        for junc in da.coords["junction"]:
             junc_name = str(junc.values)[2:]
             ys = da.sel(mode=mode, junction=junc)
-            ys.plot.line(ax=ax, label='%2s,%4s'%(str(mode.values),junc_name),**kw)
+            ys.plot.line(ax=ax, label="%2s,%4s" % (str(mode.values), junc_name), **kw)
 
-    min_ = floor_10(min(abs(np.min(da.values)),abs(np.max(da.values))))  # just in case
-    ax.set_ylim(min_,1.05)
-    ax.set_yscale("log", nonposy='clip')
-    legend_translucent(ax, leg_kw=combinekw(leg_kw,dict(title='$p_{mj}$')))
-    ax.set_title('HFSS $p_{mj}$ normed')
+    min_ = floor_10(min(abs(np.min(da.values)), abs(np.max(da.values))))  # just in case
+    ax.set_ylim(min_, 1.05)
+    ax.set_yscale("log", nonposy="clip")
+    legend_translucent(ax, leg_kw=combinekw(leg_kw, dict(title="$p_{mj}$")))
+    ax.set_title("HFSS $p_{mj}$ normed")
 
     from matplotlib.widgets import Button
 
+
     class Index(object):
         ind = 0
+
         def __init__(self, button, ax):
             self.ax = ax
-            self.button = button  # so it doesnt get erased
+            self.button = button  # so it doesn't get erased
 
         def next(self, event):
-            i = self.ind = (self.ind+1) % 5
+            i = self.ind = (self.ind + 1) % 5
             ax = self.ax
-            if i==0:
-                ax.set_ylim(min_,1.05)
-                ax.set_yscale("log", nonposy='clip')
-            elif i==1:
-                ax.set_ylim(min_,1.02)
-                ax.set_yscale("linear", nonposy='clip')
-            elif i==2:
-                ax.set_ylim(0.8,1.02)
-                ax.set_yscale("linear", nonposy='clip')
-            elif i==3:
-                ax.set_ylim(10**-3,10**-1)
-                ax.set_yscale("log", nonposy='clip')
-            elif i==4:
-                ax.set_ylim(5*10**-5,2*10**-3)
-                ax.set_yscale("log", nonposy='clip')
-            self.button.label.set_text('Next %d'%(self.ind))
+            if i == 0:
+                ax.set_ylim(min_, 1.05)
+                ax.set_yscale("log", nonposy="clip")
+            elif i == 1:
+                ax.set_ylim(min_, 1.02)
+                ax.set_yscale("linear", nonposy="clip")
+            elif i == 2:
+                ax.set_ylim(0.8, 1.02)
+                ax.set_yscale("linear", nonposy="clip")
+            elif i == 3:
+                ax.set_ylim(10**-3, 10**-1)
+                ax.set_yscale("log", nonposy="clip")
+            elif i == 4:
+                ax.set_ylim(5 * 10**-5, 2 * 10**-3)
+                ax.set_yscale("log", nonposy="clip")
+            self.button.label.set_text("Next %d" % (self.ind))
             fig.canvas.draw()
             fig.canvas.flush_events()
 
-    pos1 = ax.get_position()
-    pos2 = [pos1.x0 + 0., pos1.y0 + pos1.height+0.002, 0.07, 0.04]
+    pos1 = ax.get_position()
+    pos2 = [pos1.x0 + 0.0, pos1.y0 + pos1.height + 0.002, 0.07, 0.04]
     axnext = plt.axes(pos2)
-    bnext = Button(axnext, 'Next')
+    bnext = Button(axnext, "Next")
     callback = Index(bnext, ax)
     bnext.on_clicked(callback.next)
 
     for ax in np.ndarray.flatten(axs):
-        ax.set_xlabel('Pass number')
-        #ax.autoscale(tight=True)
+        ax.set_xlabel("Pass number")
+        # ax.autoscale(tight=True)
 
-    fig.tight_layout()  #pad=0.4, w_pad=0.5, h_pad=1.0)
+    fig.tight_layout()  # pad=0.4, w_pad=0.5, h_pad=1.0)
     fig.show()
     plt.pause(0.01)
 
     return df
 
-#do_plot(RES)
-#import threading
-#t = threading.Thread(target=do_plot, args = (RES,))
-#t.start()
+# do_plot(RES)
+
+# import threading
+# t = threading.Thread(target=do_plot, args = (RES,))
+# t.start()
 
 #%%
 import time
+
 if 1:
     for pass_ in passes:
-        print(' Running pass #%s'%(pass_), end='')
+        print(" Running pass #%s" % (pass_), end="")
         setup.passes = str(pass_)
         try:
-            ret = setup.solve()  # I tried to use a worker thread but this gets complicated with COM blocking interface
-            if ret in [0, '0']:
-                print('. Normal completion.')
+            ret = (
+                setup.solve()
+            )  # I tried to use a worker thread but this gets complicated with COM blocking interface
+            if ret in [0, "0"]:
+                print(". Normal completion.")
                 time.sleep(0.5)
                 do_analysis(pass_)
                 do_plot(RES)
-            elif ret in ['-1',-1]:
-                print('. Simulation error.')
+            elif ret in ["-1", -1]:
+                print(". Simulation error.")
                 print(ret)
         except KeyboardInterrupt:
-            print('\n\n Keyboard interruption...')
+            print("\n\n Keyboard interruption...")
             break
 
 # ABORT: -2147352567, 'Exception occurred.', (0, None, None, None, 0, -2147024349), None)
 
     do_plot(RES)
@@ -325,6 +402,6 @@ def next(self, event):
     epr_hfss.do_EPR_analysis()
 
 # 4. Hamiltonian analysis
-    epr = QuantumAnalysis(epr_hfss.data_filename)
-    epr.analyze_all_variations(cos_trunc = 8, fock_trunc = 7)
-    epr.plot_hamiltonian_results()
\ No newline at end of file
+    epr = QuantumAnalysis(epr_hfss.data_filename)
+    epr.analyze_all_variations(cos_trunc=8, fock_trunc=7)
+    epr.plot_hamiltonian_results()
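The run_vs_pass.py script above couples the HFSS solver loop to the analysis: it fixes `setup.passes`, solves, then analyzes and plots that pass. Stripped of the plotting, the control flow reduces to the sketch below, which assumes `setup`, `do_analysis`, and the return-code convention (0 for success, -1 for error) exactly as used in the script:

# Sketch: solve pass-by-pass and analyze each completed pass.
# Assumes `setup` (HFSS setup handle) and `do_analysis` from the script above.
import time

for pass_ in range(1, 20):
    setup.passes = str(pass_)
    ret = setup.solve()        # blocking COM call
    if ret in (0, "0"):        # normal completion
        time.sleep(0.5)        # let HFSS flush results to disk
        do_analysis(pass_)
    elif ret in (-1, "-1"):    # simulation error; skip this pass
        print("pass %s failed" % pass_)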
diff --git a/scripts/minev/_low_level/com_browse.py b/scripts/minev/_low_level/com_browse.py
index 91986f6..bd65985 100644
--- a/scripts/minev/_low_level/com_browse.py
+++ b/scripts/minev/_low_level/com_browse.py
@@ -7,16 +7,17 @@
 import win32com.client
 import win32com.client.combrowse
-#win32com.client.combrowse.main()
+
+# win32com.client.combrowse.main()
 
 # A tree heading for registered type libraries
 c = win32com.client.combrowse.HLIHeadingRegisterdTypeLibs()
 for s in c.GetSubList():
-    #print(s)
+    # print(s)
     name = s.GetText()
-    if ('ansys' in name.lower()) or ('hfss' in name.lower()):
+    if ("ansys" in name.lower()) or ("hfss" in name.lower()):
         print(name)
 
         # HFSSAppDLL 1.0 Type Library
-        # C:\Program Files\AnsysEM\AnsysEM17.0\Win64\HfssDesktop.tlb
\ No newline at end of file
+        # C:\Program Files\AnsysEM\AnsysEM17.0\Win64\HfssDesktop.tlb
diff --git a/scripts/minev/hfss-scripts/2017_10 R3C1 resim.py b/scripts/minev/hfss-scripts/2017_10 R3C1 resim.py
index f0eb107..b99af47 100644
--- a/scripts/minev/hfss-scripts/2017_10 R3C1 resim.py
+++ b/scripts/minev/hfss-scripts/2017_10 R3C1 resim.py
@@ -6,36 +6,47 @@
 if 1:
     # Specify the HFSS project to be analyzed
     project_info = ProjectInfo(r"C:\Users\rslqulab\Desktop\zkm\2017_pyEPR_data\\")
-    project_info.project_name = '2017-10 re-sim SM22-R3C1'
-    project_info.design_name = '3. sweep both'
-    project_info.setup_name = None
+    project_info.project_name = "2017-10 re-sim SM22-R3C1"
+    project_info.design_name = "3. sweep both"
+    project_info.setup_name = None
 
     ## Describe the junctions in the HFSS design
-    project_info.junctions['jBright'] = {'rect':'juncV', 'line': 'juncH_line', 'Lj_variable':'LJ1', 'length':0.0001}
-    project_info.junctions['jDark'] = {'rect':'juncH', 'line': 'juncV_line', 'Lj_variable':'LJ2', 'length':0.0001}
+    project_info.junctions["jBright"] = {
+        "rect": "juncV",
+        "line": "juncH_line",
+        "Lj_variable": "LJ1",
+        "length": 0.0001,
+    }
+    project_info.junctions["jDark"] = {
+        "rect": "juncH",
+        "line": "juncV_line",
+        "Lj_variable": "LJ2",
+        "length": 0.0001,
+    }
 
     # Dissipative elements EPR
-    project_info.dissipative['dielectric_surfaces'] = None  # supply names here, there are more options in project_info.dissipative.
+    project_info.dissipative[
+        "dielectric_surfaces"
+    ] = None  # supply names here, there are more options in project_info.dissipative.
 
     # Run analysis
-    epr_hfss = DistributedAnalysis(project_info)
+    epr_hfss = DistributedAnalysis(project_info)
     epr_hfss.do_EPR_analysis()
 
-if 1:  # Analysis result
+if 1:  # Analysis result
     filename = epr_hfss.data_filename
-    #filename = r'C:\Users\rslqulab\Desktop\zkm\2017_pyEPR_data\\/2017-10 re-sim SM22-R3C1/1. R3C1/1. R3C1_20171016_110756.hdf5'
-    epr = QuantumAnalysis(filename)
+    # filename = r'C:\Users\rslqulab\Desktop\zkm\2017_pyEPR_data\\/2017-10 re-sim SM22-R3C1/1. R3C1/1. R3C1_20171016_110756.hdf5'
+    epr = QuantumAnalysis(filename)
 
     epr.plot_convergence_f_lin()
     epr._renorm_pj = True
-    plt.close('all')
-    epr.analyze_all_variations(cos_trunc = 10, fock_trunc = 8)
+    plt.close("all")
+    epr.analyze_all_variations(cos_trunc=10, fock_trunc=8)
     epr.plot_hamiltonian_results()
     print(epr.data_filename)
 
 #%%
 if 1:
     import numpy as np
@@ -43,36 +54,37 @@
     import seaborn as sns
     import matplotlib.pyplot as plt
 
     sns.reset_orig()
-    #epr.hfss_variables.loc['_LJ2']
+    # epr.hfss_variables.loc['_LJ2']
 
-    kw_map = dict(vmin = -20, vmax = 20, linewidths=0.5, annot=True,\
-                  cmap='seismic' )  # RdYlGn_r
+    kw_map = dict(
+        vmin=-20, vmax=20, linewidths=0.5, annot=True, cmap="seismic"
+    )  # RdYlGn_r
 
-    target_f = pd.Series([4688, 5300, 9003], index=['D','B','C'])
-    target_alpha = pd.Series([148, 174], index=['D', 'B'])
-    target_chi = pd.Series([85, 5, 0.33], index=['DB', 'BC', 'DC'])
+    target_f = pd.Series([4688, 5300, 9003], index=["D", "B", "C"])
+    target_alpha = pd.Series([148, 174], index=["D", "B"])
+    target_chi = pd.Series([85, 5, 0.33], index=["DB", "BC", "DC"])
 
-    results = epr.results
-    f_ND = results.get_frequencies_ND().rename(\
-            index ={0:'D',1:'B',2:'C'})
-    f_error = f_ND.apply(lambda x: 100*(x.values-target_f)/x, axis = 'index')
+    results = epr.results
+    f_ND = results.get_frequencies_ND().rename(index={0: "D", 1: "B", 2: "C"})
+    f_error = f_ND.apply(lambda x: 100 * (x.values - target_f) / x, axis="index")
 
-    fig, axs = plt.subplots(1, 3, figsize = (15,7.5))
-    sns.heatmap(f_error.transpose(), ax = axs[0], **kw_map)
+    fig, axs = plt.subplots(1, 3, figsize=(15, 7.5))
+    sns.heatmap(f_error.transpose(), ax=axs[0], **kw_map)
 
-    chis = results.get_chi_ND()
-    chis = xarray_unravel_levels(chis, ['variation','m', 'n'])
-    alpha_ND = sort_df_col(chis.sel_points(m = [0,1], n=[0,1]).to_pandas())
+    chis = results.get_chi_ND()
+    chis = xarray_unravel_levels(chis, ["variation", "m", "n"])
+    alpha_ND = sort_df_col(chis.sel_points(m=[0, 1], n=[0, 1]).to_pandas())
     alpha_ND.index = target_alpha.index
-    alpha_ND_err = alpha_ND.apply(lambda x: 100*(x.values-target_alpha)/x, axis = 'index')
-    sns.heatmap(alpha_ND_err.transpose(), ax = axs[1], **kw_map)
+    alpha_ND_err = alpha_ND.apply(
+        lambda x: 100 * (x.values - target_alpha) / x, axis="index"
+    )
+    sns.heatmap(alpha_ND_err.transpose(), ax=axs[1], **kw_map)
 
-    chi_ND = sort_df_col(chis.sel_points(m = [0,1,0], n=[1,2,2]).to_pandas())
+    chi_ND = sort_df_col(chis.sel_points(m=[0, 1, 0], n=[1, 2, 2]).to_pandas())
     chi_ND.index = target_chi.index
-    chi_ND_err = chi_ND.apply(lambda x: 100*(x.values-target_chi)/x, axis = 'index')
-    sns.heatmap(chi_ND_err.transpose(), ax = axs[2], **kw_map)
-    axs[0].set_title('Freq.')
-    axs[1].set_title('Anharmonicities')
-    axs[2].set_title('cross-Kerrs')
+    chi_ND_err = chi_ND.apply(lambda x: 100 * (x.values - target_chi) / x, axis="index")
+    sns.heatmap(chi_ND_err.transpose(), ax=axs[2], **kw_map)
+    axs[0].set_title("Freq.")
+    axs[1].set_title("Anharmonicities")
+    axs[2].set_title("cross-Kerrs")
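The heatmap block in the resim script above compares simulated quantities against measured targets as a signed percent error, normalized by the simulated value. The comparison itself is one line of pandas per quantity; here is a self-contained sketch with illustrative stand-in numbers (not the script's real data):

# Sketch: percent error of simulated vs. target (measured) values.
# Both series are illustrative placeholders.
import pandas as pd

target_f = pd.Series([4688, 5300, 9003], index=["D", "B", "C"])  # measured, MHz
f_sim = pd.Series([4700, 5280, 9010], index=["D", "B", "C"])     # simulated stand-in

# Signed percent error, normalized by the simulated value as in the script.
f_error = 100 * (f_sim - target_f) / f_sim
print(f_error.round(2))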
diff --git a/scripts/minev/hfss-scripts/import_pyEPR.py b/scripts/minev/hfss-scripts/import_pyEPR.py
index 6d6e1fb..99d1e70 100644
--- a/scripts/minev/hfss-scripts/import_pyEPR.py
+++ b/scripts/minev/hfss-scripts/import_pyEPR.py
@@ -9,27 +9,43 @@ if 1:
     # Specify the HFSS project to be analyzed
-    project_info = ProjectInfo(r"C:\\Users\\rslqulab\Desktop\\Lysander\participation_ratio_project\\Shyam's autonomous stabilization simulations\\")
-    project_info.project_name = '2017_08_Zlatko_Shyam_AutStab'  # Name of the project file (string). "None" will get the current active one.
-    project_info.design_name = '2 pyEPR'  # Name of the design file (string). "None" will get the current active one.
-    project_info.setup_name = None  # Name of the setup(string). "None" will get the current active one.
+    project_info = ProjectInfo(
+        r"C:\\Users\\rslqulab\Desktop\\Lysander\participation_ratio_project\\Shyam's autonomous stabilization simulations\\"
+    )
+    project_info.project_name = "2017_08_Zlatko_Shyam_AutStab"  # Name of the project file (string). "None" will get the current active one.
+    project_info.design_name = "2 pyEPR"  # Name of the design file (string). "None" will get the current active one.
+    project_info.setup_name = (
+        None  # Name of the setup (string). "None" will get the current active one.
+    )
 
     ## Describe the junctions in the HFSS design
-    project_info.junctions['jAlice'] = {'rect':'qubitAlice', 'line': 'alice_line', 'Lj_variable':'LJAlice', 'length':0.0001}
-    project_info.junctions['jBob'] = {'rect':'qubitBob', 'line': 'bob_line', 'Lj_variable':'LJBob', 'length':0.0001}
+    project_info.junctions["jAlice"] = {
+        "rect": "qubitAlice",
+        "line": "alice_line",
+        "Lj_variable": "LJAlice",
+        "length": 0.0001,
+    }
+    project_info.junctions["jBob"] = {
+        "rect": "qubitBob",
+        "line": "bob_line",
+        "Lj_variable": "LJBob",
+        "length": 0.0001,
+    }
 
     # Dissipative elements EPR
-    project_info.dissipative['dielectric_surfaces'] = None  # supply names here, there are more options in project_info.dissipative.
+    project_info.dissipative[
+        "dielectric_surfaces"
+    ] = None  # supply names here, there are more options in project_info.dissipative.
 
     # Run analysis
-    epr_hfss = DistributedAnalysis(project_info)
+    epr_hfss = DistributedAnalysis(project_info)
     epr_hfss.do_EPR_analysis()
 
-if 1:  # Hamiltonian analysis
+if 1:  # Hamiltonian analysis
     filename = epr_hfss.data_filename
-    #filename = r'C:\\Users\\rslqulab\\Desktop\\zkm\\2017_pyEPR_data\\\\/2017_08_Zlatko_Shyam_AutStab/2 pyEPR/2 pyEPR_20170825_170550.hdf5'
-    epr = QuantumAnalysis(filename)
+    # filename = r'C:\\Users\\rslqulab\\Desktop\\zkm\\2017_pyEPR_data\\\\/2017_08_Zlatko_Shyam_AutStab/2 pyEPR/2 pyEPR_20170825_170550.hdf5'
+    epr = QuantumAnalysis(filename)
 
-    #result = epr.analyze_variation('1', cos_trunc = 8, fock_trunc = 7)
-    epr.analyze_all_variations(cos_trunc = 8, fock_trunc = 7)
+    # result = epr.analyze_variation('1', cos_trunc = 8, fock_trunc = 7)
+    epr.analyze_all_variations(cos_trunc=8, fock_trunc=7)
     epr.plot_hamiltonian_results()
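Several of these scripts end by pulling tabulated results out of `epr.results`; the getters return pandas objects indexed by mode, so post-processing is ordinary DataFrame work. A sketch, assuming `epr` is a `QuantumAnalysis` whose analysis has already run (the subtraction assumes the mode indices align, as they do in these scripts):

# Sketch: tabulate first-order EPR results after analyze_all_variations().
f0 = epr.results.get_frequencies_HFSS()  # linear HFSS eigenfrequencies
f1 = epr.results.get_frequencies_O1()    # O(1)-corrected frequencies
chi = epr.results.get_chi_O1()           # first-order Kerr matrix

print(f1 - f0)  # frequency shift per mode, per variation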
diff --git a/scripts/my-name/example1.py b/scripts/my-name/example1.py
index be63d21..41363d3 100644
--- a/scripts/my-name/example1.py
+++ b/scripts/my-name/example1.py
@@ -6,29 +6,30 @@
 from pyEPR import *
 
 # 1. Project and design. Open link to HFSS controls.
-project_info = ProjectInfo(r'C:\zkm\my-first-pyEPR\\',
-                           project_name = 'HelloWorld-pyEPR',  # Project file name (string). "None" will get the current active one.
-                           design_name = 'MyFirstTest'  # Design name (string). "None" will get the current active one.
-                           )
+project_info = ProjectInfo(
+    r"C:\zkm\my-first-pyEPR\\",
+    project_name="HelloWorld-pyEPR",  # Project file name (string). "None" will get the current active one.
+    design_name="MyFirstTest",  # Design name (string). "None" will get the current active one.
+)
 project_info.connect_to_project()
 
 # ## 2a. Junctions. Specify junctions in HFSS model
-#project_info.junctions['jAlice'] = {'Lj_variable':'LJAlice', 'rect':'qubitAlice', 'line': 'alice_line', 'length':0.0001}
-#project_info.junctions['jBob'] = {'Lj_variable':'LJBob', 'rect':'qubitBob', 'line': 'bob_line', 'length':0.0001}
+# project_info.junctions['jAlice'] = {'Lj_variable':'LJAlice', 'rect':'qubitAlice', 'line': 'alice_line', 'length':0.0001}
+# project_info.junctions['jBob'] = {'Lj_variable':'LJBob', 'rect':'qubitBob', 'line': 'bob_line', 'length':0.0001}
 #
 # ## 2b. Dissipative elements.
-#project_info.dissipative['dielectrics_bulk'] = ['si_substrate']  # supply names here, there are more options in project_info.dissipative.
-#project_info.dissipative['dielectric_surfaces'] = ['interface']
+# project_info.dissipative['dielectrics_bulk'] = ['si_substrate']  # supply names here, there are more options in project_info.dissipative.
+# project_info.dissipative['dielectric_surfaces'] = ['interface']
 #
 # ## 3. Run analysis
-#epr_hfss = DistributedAnalysis(project_info)
-#epr_hfss.do_EPR_analysis()
+# epr_hfss = DistributedAnalysis(project_info)
+# epr_hfss.do_EPR_analysis()
 #
 # ## 4. Hamiltonian analysis
-#epr = QuantumAnalysis(epr_hfss.data_filename)
-#epr.analyze_all_variations(cos_trunc = 8, fock_trunc = 7)
-#epr.plot_hamiltonian_results()
+# epr = QuantumAnalysis(epr_hfss.data_filename)
+# epr.analyze_all_variations(cos_trunc = 8, fock_trunc = 7)
+# epr.plot_hamiltonian_results()
 #
diff --git a/scripts/nick/import_pyEPR.py b/scripts/nick/import_pyEPR.py
index 0564998..2d2c347 100644
--- a/scripts/nick/import_pyEPR.py
+++ b/scripts/nick/import_pyEPR.py
@@ -10,36 +10,46 @@ if 0:
     # Specify the HFSS project to be analyzed
     project_info = ProjectInfo(r"X:\Simulation\\hfss\\KC\\")
-    project_info.project_name = '2013-12-03_9GHzCavity'  # Name of the project file (string). "None" will get the current active one.
-    project_info.design_name = '9GHz_EM_center_SNAIL'  # Name of the design file (string). "None" will get the current active one.
-    project_info.setup_name = None  # Name of the setup(string). "None" will get the current active one.
+    project_info.project_name = "2013-12-03_9GHzCavity"  # Name of the project file (string). "None" will get the current active one.
+    project_info.design_name = "9GHz_EM_center_SNAIL"  # Name of the design file (string). "None" will get the current active one.
+    project_info.setup_name = (
+        None  # Name of the setup (string). "None" will get the current active one.
+    )
 
     ## Describe the junctions in the HFSS design
-    project_info.junctions['snail'] = {'rect':'qubit', 'line': 'JunctionLine', 'Lj_variable':'LJ', 'length':0.0001}
-#    project_info.junctions['jBob'] = {'rect':'qubitBob', 'line': 'bob_line', 'Lj_variable':'LJBob', 'length':0.0001}
+    project_info.junctions["snail"] = {
+        "rect": "qubit",
+        "line": "JunctionLine",
+        "Lj_variable": "LJ",
+        "length": 0.0001,
+    }
+    # project_info.junctions['jBob'] = {'rect':'qubitBob', 'line': 'bob_line', 'Lj_variable':'LJBob', 'length':0.0001}
 
     # Dissipative elements EPR
-    project_info.dissipative['dielectric_surfaces'] = None  # supply names here, there are more options in project_info.dissipative.
+    project_info.dissipative[
+        "dielectric_surfaces"
+    ] = None  # supply names here, there are more options in project_info.dissipative.
 
     # Run analysis
-    epr_hfss = DistributedAnalysis(project_info)
-    epr_hfss.do_EPR_analysis()  #variations = ['1', '70']
+    epr_hfss = DistributedAnalysis(project_info)
+    epr_hfss.do_EPR_analysis()  # variations = ['1', '70']
 
-if 1:  # Hamiltonian analysis
-#    filename = epr_hfss.data_filename
-    filename = r'X:\Simulation\hfss\KC\pyEPR_results_2018\2013-12-03_9GHzCavity\9GHz_EM_center_SNAIL\9GHz_EM_center_SNAIL_20180726_170049.hdf5'
-    #filename = r'C:\\Users\\rslqulab\\Desktop\\zkm\\2017_pyEPR_data\\\\/2017_08_Zlatko_Shyam_AutStab/2 pyEPR/2 pyEPR_20170825_170550.hdf5'
-    epr = QuantumAnalysis(filename)
+if 1:  # Hamiltonian analysis
+    # filename = epr_hfss.data_filename
+    filename = r"X:\Simulation\hfss\KC\pyEPR_results_2018\2013-12-03_9GHzCavity\9GHz_EM_center_SNAIL\9GHz_EM_center_SNAIL_20180726_170049.hdf5"
+    # filename = r'C:\\Users\\rslqulab\\Desktop\\zkm\\2017_pyEPR_data\\\\/2017_08_Zlatko_Shyam_AutStab/2 pyEPR/2 pyEPR_20170825_170550.hdf5'
+    epr = QuantumAnalysis(filename)
 
-    #result = epr.analyze_variation('1', cos_trunc = 8, fock_trunc = 7)
-    epr.analyze_all_variations(cos_trunc = None, fock_trunc = 4)  # only quadratic part
+    # result = epr.analyze_variation('1', cos_trunc = 8, fock_trunc = 7)
+    epr.analyze_all_variations(cos_trunc=None, fock_trunc=4)  # only quadratic part
     epr.plot_hamiltonian_results()
 
 if 1:
     from pyEPR.toolbox_plotting import cmap_discrete
+
     f0 = epr.results.get_frequencies_HFSS()
     f1 = epr.results.get_frequencies_O1()
     chi = epr.results.get_chi_O1()
     mode_idx = list(f0.index)
-    nmodes = len(mode_idx)
-    cmap = cmap_discrete(nmodes)
+    nmodes = len(mode_idx)
+    cmap = cmap_discrete(nmodes)
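The nick script above stops just after sizing a discrete colormap to the mode count; the natural next step, one colored trace per mode, would look roughly like this (a sketch, assuming `f0` and `cmap` as built above and that `cmap_discrete` returns a callable matplotlib colormap):

# Sketch: plot one colored trace per mode using the discrete colormap.
import matplotlib.pyplot as plt

fig, ax = plt.subplots()
for k, mode in enumerate(f0.index):
    ax.plot(f0.columns, f0.loc[mode], color=cmap(k), label="mode %s" % mode)
ax.set_xlabel("variation")
ax.set_ylabel("frequency")
ax.legend()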
diff --git a/setup.py b/setup.py
index 52361c4..42f647c 100644
--- a/setup.py
+++ b/setup.py
@@ -27,25 +27,26 @@
 with open(here / "requirements.txt", encoding="utf-8") as f:
     requirements = f.read().splitlines()
 
-doclines = __doc__.split('\n')
+doclines = __doc__.split("\n")
 
 setup(
-    name='pyEPR-quantum',
-    version='0.9.0',
+    name="pyEPR-quantum",
+    version="0.9.0",
     description=doclines[0],
     long_description=long_description,
     long_description_content_type="text/markdown",
-    author='Zlatko K. Minev',
+    author="Zlatko K. Minev",
     packages=find_packages(),
-    author_email='zlatko.minev@aya.yale.edu',
-    maintainer='Zlatko Minev, pyEPR team',
-    license='BSD-3-Clause',
-    url=r'https://github.com/zlatko-minev/pyEPR',
+    author_email="zlatko.minev@aya.yale.edu",
+    maintainer="Zlatko Minev, pyEPR team",
+    license="BSD-3-Clause",
+    url=r"https://github.com/zlatko-minev/pyEPR",
     classifiers=[
         "Intended Audience :: Developers",
         "Intended Audience :: Science/Research",
         "Operating System :: Microsoft :: Windows",
-        "Operating System :: MacOS", "Operating System :: POSIX :: Linux",
+        "Operating System :: MacOS",
+        "Operating System :: POSIX :: Linux",
         "Programming Language :: Python :: 3 :: Only",
         "Programming Language :: Python :: 3.5",
         "Programming Language :: Python :: 3.6",
@@ -53,9 +54,11 @@
         "Programming Language :: Python :: 3.8",
         "Programming Language :: Python :: 3.9",
         "Programming Language :: Python :: 3.10",
-        "Topic :: Scientific/Engineering", "Environment :: Console",
-        "License :: OSI Approved :: Apache Software License"
+        "Topic :: Scientific/Engineering",
+        "Environment :: Console",
+        "License :: OSI Approved :: Apache Software License",
     ],
     python_requires=">=3.5, <4",
     # install_requires=['numpy','pandas','pint','matplotlib','addict','sympy','IPython'],
-    install_requires=requirements)
+    install_requires=requirements,
+)
Minev", packages=find_packages(), - author_email='zlatko.minev@aya.yale.edu', - maintainer='Zlatko Minev, pyEPR team', - license='BSD-3-Clause', - url=r'https://github.com/zlatko-minev/pyEPR', + author_email="zlatko.minev@aya.yale.edu", + maintainer="Zlatko Minev, pyEPR team", + license="BSD-3-Clause", + url=r"https://github.com/zlatko-minev/pyEPR", classifiers=[ "Intended Audience :: Developers", "Intended Audience :: Science/Research", "Operating System :: Microsoft :: Windows", - "Operating System :: MacOS", "Operating System :: POSIX :: Linux", + "Operating System :: MacOS", + "Operating System :: POSIX :: Linux", "Programming Language :: Python :: 3 :: Only", "Programming Language :: Python :: 3.5", "Programming Language :: Python :: 3.6", @@ -53,9 +54,11 @@ "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: 3.10", - "Topic :: Scientific/Engineering", "Environment :: Console", - "License :: OSI Approved :: Apache Software License" + "Topic :: Scientific/Engineering", + "Environment :: Console", + "License :: OSI Approved :: Apache Software License", ], python_requires=">=3.5, <4", # install_requires=['numpy','pandas','pint','matplotlib','addict','sympy','IPython'], - install_requires=requirements) + install_requires=requirements, +) diff --git a/tests/test_project_info.py b/tests/test_project_info.py index 828462c..f9b696b 100644 --- a/tests/test_project_info.py +++ b/tests/test_project_info.py @@ -1,36 +1,80 @@ import unittest -import sys; sys.path.insert(0, '..') # noqa +import sys + +sys.path.insert(0, "..") # noqa import pyEPR as epr + class TestProjectInfo(unittest.TestCase): - '''Test pyEPR.project_info.py''' + """Test pyEPR.project_info.py""" + def setUp(self): - path_to_project = r'..\_example_files' + path_to_project = r"..\_example_files" try: - self.pinfo = epr.ProjectInfo(project_path=path_to_project, - project_name='pyEPR_tutorial1', - design_name='1. single_transmon') + self.pinfo = epr.ProjectInfo( + project_path=path_to_project, + project_name="pyEPR_tutorial1", + design_name="1. single_transmon", + ) except: - assert ConnectionError('Failed to connect to HFSS. Opening it manually') + assert ConnectionError("Failed to connect to HFSS. 
diff --git a/tests/test_quantum_analysis.py b/tests/test_quantum_analysis.py
index 27672de..65efa32 100644
--- a/tests/test_quantum_analysis.py
+++ b/tests/test_quantum_analysis.py
@@ -1,42 +1,45 @@
-'''
+"""
 Unit tests for quantum analysis.
 
 Takes in pre-made data with known results, computes the results
 from the data and checks everything is correct.
-'''
+"""
 
 import unittest
 import pickle
 import numpy as np
 import sys
-sys.path.append('..')  # noqa
+
+sys.path.append("..")  # noqa
 
 import pyEPR as epr
 
 # Files location
-save_file = './data.npz'
-correct_results = './correct_results.pkl'
+save_file = "./data.npz"
+correct_results = "./correct_results.pkl"
 
 
 class TestQuantumAnalysis(unittest.TestCase):
-
     def setUp(self):
         self.epra = epr.QuantumAnalysis(save_file)
-        with open(correct_results, 'rb') as file:
+        with open(correct_results, "rb") as file:
             self.correct_res = pickle.load(file)
 
     def test_analyze_all_variations(self):
-        '''
+        """
         Check that the calculated results match the known correct ones
-        '''
+        """
         results = self.epra.analyze_all_variations(
-            cos_trunc=8, fock_trunc=15, print_result=False)['0']  # Variation 0
+            cos_trunc=8, fock_trunc=15, print_result=False
+        )[
+            "0"
+        ]  # Variation 0
 
         # TODO: Remove start/finish diagonalization messages (back_box_numeric L:153)
         for key, value in results.items():
-            if key == 'hfss_variables':  # Skip: not a numeric-only datatype
-                return
+            if key == "hfss_variables":  # Skip: not a numeric-only datatype
+                continue
             value = np.array(value)
             corr_value = np.array(self.correct_res[key])
             self.assertTrue(np.allclose(value, corr_value))
-            epr.logger.info(key+' '+'-'*(13 - len(key))+'-> OK!')
+            epr.logger.info(key + " " + "-" * (13 - len(key)) + "-> OK!")
 
     def test_analyze_variation(self):
         pass
@@ -46,4 +49,3 @@ def test_hamiltonian(self):
         pass
 
     def test_properties(self):
         pass
-
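These regression tests compare computed values against a pickled reference, so when the physics code changes intentionally, the reference file has to be regenerated. A sketch of how that could be done, reusing the filenames from the test module above (treat the overwrite as deliberate; this is not a script shipped with the repo):

# Sketch: regenerate the pickled reference used by test_quantum_analysis.py.
# Run only when a change in the computed values is intended and verified.
import pickle
import pyEPR as epr

epra = epr.QuantumAnalysis("./data.npz")
results = epra.analyze_all_variations(
    cos_trunc=8, fock_trunc=15, print_result=False
)["0"]  # Variation 0, as in the test

with open("./correct_results.pkl", "wb") as file:
    pickle.dump(dict(results), file)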