
REF/ENH: subclass TaoCore with interface methods, add special parsers #75

Merged · 16 commits · Jun 27, 2024
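The core of the change: the autogenerated interface commands move from module-level functions that take a tao handle onto the Tao class itself (built on TaoCore), each dispatching through a shared self.__execute wrapper, and several commands gain special parsers that return structured data. A minimal usage sketch, assuming a working Bmad/Tao installation with $ACC_ROOT_DIR set; the init file and the data_d_array call come from the example notebook below, while the commented old-style call is illustrative:

from pytao import Tao

tao = Tao("-init $ACC_ROOT_DIR/regression_tests/python_test/cesr/tao.init -noplot")

# Old style (pre-PR): module-level function taking the tao handle, e.g.
#   from pytao import interface_commands
#   interface_commands.data_d_array(tao, "orbit", "x")
# New style: the generated method lives on the instance itself.
orbit_x = tao.data_d_array("orbit", "x")
print(orbit_x[7])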
39 changes: 39 additions & 0 deletions .pre-commit-config.yaml
@@ -0,0 +1,39 @@
# See https://pre-commit.com for more information
# See https://pre-commit.com/hooks.html for more hooks
repos:
- repo: https://github.com/pre-commit/pre-commit-hooks.git
rev: v4.6.0
hooks:
# Remove unnecessary whitespace at the end of lines:
# - id: trailing-whitespace
# Ensure that text files have a newline at the end:
# - id: end-of-file-fixer
# Verify that Python source code is valid:
- id: check-ast
# Ensure filenames won't have conflicts on case insensitive platforms:
- id: check-case-conflict
# Check JSON files for valid syntax:
- id: check-json
# Check XML files for valid syntax:
- id: check-xml
# Check YAML files for valid syntax:
- id: check-yaml
# Check TOML files for valid syntax:
- id: check-toml
# Check that there are no remnants of merge conflicts in files:
- id: check-merge-conflict
# Check that symlinks are valid:
- id: check-symlinks
# Check that there's no code before a docstring:
- id: check-docstring-first
# Check that excessively large files are not committed (limit ~100 MB):
- id: check-added-large-files
args: ["--maxkb=100000"]

- repo: https://github.com/astral-sh/ruff-pre-commit
rev: v0.4.8
hooks:
- id: ruff
args: ["--config", "python/pyproject.toml"]
types_or: [python]
exclude: "^(pytao/_version.py)$"
2 changes: 1 addition & 1 deletion dev-environment.yml
@@ -6,7 +6,7 @@ dependencies:
- python >=3.9
- openPMD-beamphysics
- numpydoc
- bmad
- bmad >=20240626
- bokeh
- jupyterlab>3
- ipywidgets
2 changes: 0 additions & 2 deletions docs/api/pytao.md
@@ -1,3 +1 @@
::: pytao.Tao
::: pytao.interface_commands
::: pytao.tao_ctypes.extra_commands
156 changes: 73 additions & 83 deletions docs/examples/advanced.ipynb

Large diffs are not rendered by default.

510 changes: 312 additions & 198 deletions docs/examples/basic.ipynb

Large diffs are not rendered by default.

220 changes: 110 additions & 110 deletions docs/examples/bunch.ipynb

Large diffs are not rendered by default.

399 changes: 174 additions & 225 deletions docs/examples/fodo.ipynb

Large diffs are not rendered by default.

34 changes: 17 additions & 17 deletions docs/examples/special_parsers.ipynb
@@ -27,7 +27,7 @@
"metadata": {},
"outputs": [],
"source": [
"tao=Tao('-init $ACC_ROOT_DIR/regression_tests/python_test/cesr/tao.init -noplot')"
"tao = Tao(\"-init $ACC_ROOT_DIR/regression_tests/python_test/cesr/tao.init -noplot\")"
]
},
{
@@ -69,7 +69,7 @@
}
],
"source": [
"tao.data_d_array('orbit', 'x')[7]"
"tao.data_d_array(\"orbit\", \"x\")[7]"
]
},
{
@@ -87,10 +87,10 @@
"metadata": {},
"outputs": [],
"source": [
"tao.cmd('veto var *;veto dat *;')\n",
"tao.cmd('use var quad_k1[3:5]')\n",
"tao.cmd('set dat orbit.x[1:5]|meas=0')\n",
"tao.cmd('use dat orbit.x[1:5]');"
"tao.cmd(\"veto var *;veto dat *;\")\n",
"tao.cmd(\"use var quad_k1[3:5]\")\n",
"tao.cmd(\"set dat orbit.x[1:5]|meas=0\")\n",
"tao.cmd(\"use dat orbit.x[1:5]\");"
]
},
{
@@ -173,7 +173,7 @@
}
],
"source": [
"tao.ele_control_var('H01W')"
"tao.ele_control_var(\"H01W\")"
]
},
{
@@ -246,7 +246,7 @@
}
],
"source": [
"tao.matrix('beginning', 'end')"
"tao.matrix(\"beginning\", \"end\")"
]
},
{
@@ -319,7 +319,7 @@
}
],
"source": [
"result = tao.plot_list('r')\n",
"result = tao.plot_list(\"r\")\n",
"\n",
"result[0:2]"
]
@@ -343,9 +343,9 @@
],
"source": [
"# 't' gives a mapping of template plot to index\n",
"result = tao.plot_list('t')\n",
"result = tao.plot_list(\"t\")\n",
"\n",
"result['cbar']"
"result[\"cbar\"]"
]
},
{
@@ -380,7 +380,7 @@
}
],
"source": [
"tao.spin_invariant('l0')"
"tao.spin_invariant(\"l0\")"
]
},
{
@@ -578,7 +578,7 @@
}
],
"source": [
"tt = tao.taylor_map('beginning', 'end', order=2)\n",
"tt = tao.taylor_map(\"beginning\", \"end\", order=2)\n",
"tt"
]
},
@@ -603,7 +603,7 @@
],
"source": [
"# Compare some terms with the matrix calc:\n",
"tao.matrix('beginning', 'end')['mat6'][0,0], tt[1][(1,0,0,0,0,0)]"
"tao.matrix(\"beginning\", \"end\")[\"mat6\"][0, 0], tt[1][(1, 0, 0, 0, 0, 0)]"
]
},
{
@@ -626,7 +626,7 @@
}
],
"source": [
"tao.matrix('beginning', 'end')['mat6'][1,0], tt[2][(1,0,0,0,0,0)]"
"tao.matrix(\"beginning\", \"end\")[\"mat6\"][1, 0], tt[2][(1, 0, 0, 0, 0, 0)]"
]
},
{
@@ -670,7 +670,7 @@
}
],
"source": [
"result = tao.var_v_array('quad_k1')\n",
"result = tao.var_v_array(\"quad_k1\")\n",
"result[0:2]"
]
}
@@ -691,7 +691,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.12.4"
"version": "3.12.0"
},
"vscode": {
"interpreter": {
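Taken together, the notebook cells above indicate the shape of what the special parsers return; the short sketch below summarizes it, using the same init file, with any structure not directly visible in the diff (e.g. the 6x6 shape of "mat6") stated as an assumption:

from pytao import Tao

tao = Tao("-init $ACC_ROOT_DIR/regression_tests/python_test/cesr/tao.init -noplot")

# matrix() returns a dict; "mat6" is indexed like a 6x6 array in the cells above.
mat = tao.matrix("beginning", "end")
m11 = mat["mat6"][0, 0]

# plot_list("r") returns a sequence of plot regions; plot_list("t") returns a dict
# mapping template plot names to indices.
regions = tao.plot_list("r")
cbar_index = tao.plot_list("t")["cbar"]

# taylor_map() returns a dict keyed by output coordinate, each value a dict of
# {exponent tuple: coefficient}; its first-order terms track the matrix elements.
tt = tao.taylor_map("beginning", "end", order=2)
print(tt[1][(1, 0, 0, 0, 0, 0)], m11)  # these agree, as shown in the notebook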
2 changes: 1 addition & 1 deletion environment.yml
@@ -7,7 +7,7 @@ dependencies:
- ipykernel
- ipywidgets
- jupyterlab
- bmad
- bmad >=20240626
- openPMD-beamphysics
- numpy
- h5py
117 changes: 51 additions & 66 deletions generate_interface_commands.py
@@ -7,6 +7,7 @@
import json
import keyword
import os
import shutil

CMDS_OUTPUT = "./pytao/interface_commands.py"
TEST_OUTPUT = "./pytao/tests/test_interface_commands.py"
@@ -18,6 +19,8 @@
with open(f_name, 'r') as f:
cmds_from_tao = json.load(f)

with open("interface.tpl.py", 'r') as f:
interface_tpl_py = f.read()

# ### Utility Functions
def sanitize_method_name(method):
@@ -61,7 +64,7 @@ def generate_params(params):
E.g.: tao, s, *, ix_uni="1", ix_branch="0", which="model", verbose=False, as_dict=True
"""

args = ['tao']
args = ['self']
kwargs = []
for idx, p in enumerate(params):
name = sanitize(p.name)
@@ -127,80 +130,40 @@ def generate_method_code(docs, method, command, returns):
if special_parser:
parser_docs = NumpyDocString(special_parser.__doc__)
docs['Returns'] = parser_docs['Returns']
code_list.append(f"return __execute(tao, cmd, as_dict, raises, method_name='{method}', cmd_type='{tp}')")
code_list.append(f"return self.__execute(cmd, as_dict, raises, method_name='{method}', cmd_type='{tp}')")
else:
code_list.append(f"{r.desc[0]}:\n return __execute(tao, cmd, as_dict, raises, method_name='{method}', cmd_type='{tp}')")
code_list.append(f"{r.desc[0]}:\n return self.__execute(cmd, as_dict, raises, method_name='{method}', cmd_type='{tp}')")
return '\n'.join(code_list)


# ## Parse the JSON Dictionary and Write the Python module

cmds_to_module = [f"""# ==============================================================================
cmds_to_module = [
f'''# ==============================================================================
# AUTOGENERATED FILE - DO NOT MODIFY
# This file was generated by the script `generate_interface_commands.py`.
# Any modifications may be overwritten.
# Generated on: {datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")}
# ==============================================================================
"""+"""
from pytao.tao_ctypes.util import parse_tao_python_data
from pytao.util.parameters import tao_parameter_dict
from pytao.util import parsers as __parsers

{interface_tpl_py}
'''
]

def __execute(tao, cmd, as_dict=True, raises=True, method_name=None, cmd_type="string_list"):
\"""

A wrapper to handle commonly used options when running a command through tao.
print()

Parameters
----------
tao : Tao
The handle to tao to run the command on
cmd : str
The command to run
as_dict : bool, optional
Return string data as a dict? by default True
raises : bool, optional
Raise exception on tao errors? by default True
method_name : str/None, optional
Name of the caller. Required for custom parsers for commands, by default None
cmd_type : str, optional
The type of data returned by tao in its common memory, by default "string_list"

Returns
-------
Any
Result from running tao. The type of data depends on configuration, but is generally a list of strings, a dict, or a
numpy array.
\"""
func_for_type = {
"string_list": tao.cmd,
"real_array": tao.cmd_real,
"integer_array": tao.cmd_integer
# TODO: bring these back to bmad
hotfixes = {
"var": {
"command_str": "python var {var} {slaves}",
}
func = func_for_type.get(cmd_type, tao.cmd)
ret = func(cmd, raises=raises)
special_parser = getattr(__parsers, f'parse_{method_name}', "")
if special_parser:
data = special_parser(ret)
return data
if "string" in cmd_type:
try:
if as_dict:
data = parse_tao_python_data(ret)
else:
data = tao_parameter_dict(ret)
except Exception as ex:
# TODO: use logger instead of: print('Failed to parse string data. Returning raw value. Exception was: ', ex)
return ret

return data

return ret

"""]
}

for method, metadata in cmds_from_tao.items():
if method in hotfixes:
metadata.update(hotfixes[method])

docstring = metadata['description']
command_str = sanitize(metadata['command_str'])

@@ -212,13 +175,14 @@
code = generate_method_code(np_docs, clean_method, command_str, np_docs['Returns'])
except Exception as ex:
print(f'***Error generating code for: {method}. Exception was: {ex}')
raise

method_template = f'''
def {clean_method}({params}):
{add_tabs('"""', 1)}
{add_tabs(str(np_docs), 1)}
{add_tabs('"""', 1)}
{add_tabs(code, 1)}
def {clean_method}({params}):
{add_tabs('"""', 2)}
{add_tabs(str(np_docs), 2)}
{add_tabs('"""', 2)}
{add_tabs(code, 2)}

'''
cmds_to_module.append(method_template)
@@ -263,10 +227,26 @@ def get_tests(examples):
# Generated on: {datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")}
# ==============================================================================

import contextlib
import os
import logging

import pytest

from pytao import Tao
from pytao import interface_commands

def new_tao(init):
return Tao(os.path.expandvars(f"{{init}} -noplot"))


@contextlib.contextmanager
def ensure_successful_parsing(caplog):
yield
errors = [record for record in caplog.get_records("call") if record.levelno == logging.ERROR]
for error in errors:
if "Failed to parse string data" in error.message:
pytest.fail(error.message)
"""]

for method, metadata in cmds_from_tao.items():
@@ -281,13 +261,15 @@ def get_tests(examples):
print(f'No examples found for: {method}')

for test_name, test_meta in tests.items():
args = ['tao'] + [f"{k}='{v}'" for k, v in test_meta['args'].items()]
args = [f"{k}='{v}'" for k, v in test_meta['args'].items()]
args.append("verbose=True")
test_code = f'''
tao = Tao(os.path.expandvars('{test_meta['init']} -noplot'))
ret = interface_commands.{clean_method}({', '.join(args)})
tao = new_tao('{test_meta['init']}')
with ensure_successful_parsing(caplog):
tao.{clean_method}({', '.join(args)})
'''
method_template = f'''
def test_{clean_method}_{test_name}():
def test_{clean_method}_{test_name}(caplog):
{add_tabs(test_code, 1)}
'''
cmds_to_test_module.append(method_template)
@@ -296,3 +278,6 @@ def test_{clean_method}_{test_name}():
out.writelines(cmds_to_test_module)

print(f'Generated file: {TEST_OUTPUT}')

if shutil.which("ruff"):
os.system(f'ruff format "{CMDS_OUTPUT}" "{TEST_OUTPUT}"')
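For reference, the generated pytao/interface_commands.py now takes roughly the following shape. This is reconstructed from the templates above; since interface.tpl.py itself is not part of this diff, the class layout, the __execute stub, and the data_d_array signature and command string are assumptions rather than copies of the real generated code:

class TaoCore:
    # Low-level ctypes layer: cmd, cmd_real and cmd_integer live here.
    def cmd(self, cmd, raises=True): ...
    def cmd_real(self, cmd, raises=True): ...
    def cmd_integer(self, cmd, raises=True): ...


class Tao(TaoCore):
    def __execute(self, cmd, as_dict=True, raises=True, method_name=None, cmd_type="string_list"):
        # Picks cmd/cmd_real/cmd_integer based on cmd_type, then applies a
        # parsers.parse_<method_name> special parser if one exists, otherwise
        # the generic string parsing controlled by as_dict.
        ...

    # One of the many autogenerated methods, now indented into the class body by the
    # add_tabs(..., 2) calls in the updated template:
    def data_d_array(self, d2_name, d1_name, *, verbose=False, as_dict=True, raises=True):
        """..."""
        cmd = f"python data_d_array {d2_name}.{d1_name}"  # command_str from the Tao JSON (illustrative)
        return self.__execute(
            cmd, as_dict, raises, method_name="data_d_array", cmd_type="string_list"
        )

The generated tests change accordingly: each test builds a Tao instance via new_tao() and calls the method on the instance inside ensure_successful_parsing(caplog), so a "Failed to parse string data" log from the fallback path now fails the test instead of passing silently.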