From ec10ab0837774565222c2a4a65d72c70c0a97182 Mon Sep 17 00:00:00 2001 From: Michal Habera Date: Fri, 27 Jan 2023 16:15:41 +0100 Subject: [PATCH 01/44] Raise on invalid function name (#539) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Raise on invalid function name * Fix typo * Add test for invalid name * Update invalid name test * Update invalid name test * Update invalid name test --------- Co-authored-by: Jørgen Schartum Dokken Co-authored-by: Garth N. Wells --- ffcx/codegeneration/jit.py | 39 +++++++++++++++++++++++--------------- ffcx/ir/representation.py | 4 ++++ test/test_jit_forms.py | 23 ++++++++++++++++++++++ 3 files changed, 51 insertions(+), 15 deletions(-) diff --git a/ffcx/codegeneration/jit.py b/ffcx/codegeneration/jit.py index e82cc1654..2b8af2bac 100644 --- a/ffcx/codegeneration/jit.py +++ b/ffcx/codegeneration/jit.py @@ -143,11 +143,14 @@ def compile_elements(elements, options=None, cache_dir=None, timeout=10, cffi_ex impl = _compile_objects(decl, elements, names, module_name, p, cache_dir, cffi_extra_compile_args, cffi_verbose, cffi_debug, cffi_libraries) - except Exception: - # remove c file so that it will not timeout next time - c_filename = cache_dir.joinpath(module_name + ".c") - os.replace(c_filename, c_filename.with_suffix(".c.failed")) - raise + except Exception as e: + try: + # remove c file so that it will not timeout next time + c_filename = cache_dir.joinpath(module_name + ".c") + os.replace(c_filename, c_filename.with_suffix(".c.failed")) + except Exception: + pass + raise e objects, module = _load_objects(cache_dir, module_name, names) # Pair up elements with dofmaps @@ -185,11 +188,14 @@ def compile_forms(forms, options=None, cache_dir=None, timeout=10, cffi_extra_co impl = _compile_objects(decl, forms, form_names, module_name, p, cache_dir, cffi_extra_compile_args, cffi_verbose, cffi_debug, cffi_libraries) - except Exception: - # remove c file so that it will not timeout next time - c_filename = cache_dir.joinpath(module_name + ".c") - os.replace(c_filename, c_filename.with_suffix(".c.failed")) - raise + except Exception as e: + try: + # remove c file so that it will not timeout next time + c_filename = cache_dir.joinpath(module_name + ".c") + os.replace(c_filename, c_filename.with_suffix(".c.failed")) + except Exception: + pass + raise e obj, module = _load_objects(cache_dir, module_name, form_names) return obj, module, (decl, impl) @@ -230,11 +236,14 @@ def compile_expressions(expressions, options=None, cache_dir=None, timeout=10, c impl = _compile_objects(decl, expressions, expr_names, module_name, p, cache_dir, cffi_extra_compile_args, cffi_verbose, cffi_debug, cffi_libraries) - except Exception: - # remove c file so that it will not timeout next time - c_filename = cache_dir.joinpath(module_name + ".c") - os.replace(c_filename, c_filename.with_suffix(".c.failed")) - raise + except Exception as e: + try: + # remove c file so that it will not timeout next time + c_filename = cache_dir.joinpath(module_name + ".c") + os.replace(c_filename, c_filename.with_suffix(".c.failed")) + except Exception: + pass + raise e obj, module = _load_objects(cache_dir, module_name, expr_names) return obj, module, (decl, impl) diff --git a/ffcx/ir/representation.py b/ffcx/ir/representation.py index cfcfa3f0a..4dbeeed45 100644 --- a/ffcx/ir/representation.py +++ b/ffcx/ir/representation.py @@ -533,6 +533,8 @@ def _compute_form_ir(form_data, form_id, prefix, form_names, integral_names, ele fs = {} for function in 
form_data.original_form.arguments() + tuple(form_data.reduced_coefficients): name = object_names.get(id(function), str(function)) + if not str(name).isidentifier(): + raise ValueError(f"Function name \"{name}\" must be a valid object identifier.") el = convert_element(convert_element(function.ufl_function_space().ufl_element())) cmap = function.ufl_function_space().ufl_domain().ufl_coordinate_element() # Default point spacing for CoordinateElement is equispaced @@ -642,6 +644,8 @@ def _compute_expression_ir(expression, index, prefix, analysis, options, visuali fs = {} for function in tuple(original_coefficients) + tuple(arguments): name = object_names.get(id(function), str(function)) + if not str(name).isidentifier(): + raise ValueError(f"Function name \"{name}\" must be a valid object identifier.") el = convert_element(function.ufl_function_space().ufl_element()) cmap = convert_element(function.ufl_function_space().ufl_domain().ufl_coordinate_element()) family = cmap.family() diff --git a/test/test_jit_forms.py b/test/test_jit_forms.py index 3d7b6fda7..2d15de8c0 100644 --- a/test/test_jit_forms.py +++ b/test/test_jit_forms.py @@ -721,3 +721,26 @@ def test_complex_operations(compile_args): assert np.allclose(J_2, expected_result) assert np.allclose(J_1, J_2) + + +def test_invalid_function_name(compile_args): + # Monkey patch to force invalid name + old_str = ufl.Coefficient.__str__ + ufl.Coefficient.__str__ = lambda self: "invalid function name" + + V = ufl.FiniteElement("Lagrange", ufl.triangle, 1) + u = ufl.Coefficient(V) + a = ufl.inner(u, u) * ufl.dx + + forms = [a] + + try: + compiled_forms, module, code = ffcx.codegeneration.jit.compile_forms( + forms, cffi_extra_compile_args=compile_args) + except ValueError: + pass + except Exception: + raise RuntimeError("Compilation should fail with ValueError.") + + # Revert monkey patch for other tests + ufl.Coefficient.__str__ = old_str From a55fca9abd742d3be19ee60af4f39ac84adae72d Mon Sep 17 00:00:00 2001 From: Michal Habera Date: Thu, 2 Feb 2023 12:19:53 +0100 Subject: [PATCH 02/44] Tweak root handlers (#554) --- ffcx/codegeneration/jit.py | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/ffcx/codegeneration/jit.py b/ffcx/codegeneration/jit.py index 2b8af2bac..4867b0080 100644 --- a/ffcx/codegeneration/jit.py +++ b/ffcx/codegeneration/jit.py @@ -21,6 +21,7 @@ import ffcx.naming logger = logging.getLogger("ffcx") +root_logger = logging.getLogger() # Get declarations directly from ufcx.h file_dir = os.path.dirname(os.path.abspath(__file__)) @@ -275,6 +276,10 @@ def _compile_objects(decl, ufl_objects, object_names, module_name, options, cach t0 = time.time() f = io.StringIO() + # Temporarily set root logger handlers to string buffer only + # since CFFI logs into root logger + old_handlers = root_logger.handlers.copy() + root_logger.handlers = [logging.StreamHandler(f)] with redirect_stdout(f): ffibuilder.compile(tmpdir=cache_dir, verbose=True, debug=cffi_debug) s = f.getvalue() @@ -290,6 +295,10 @@ def _compile_objects(decl, ufl_objects, object_names, module_name, options, cach fd.write(s) fd.close() + # Copy back the original handlers (in case someone is logging into + # root logger and has custom handlers) + root_logger.handlers = old_handlers + return code_body From 8784ba0589102baa1302bb8a4a06beae9d4c044e Mon Sep 17 00:00:00 2001 From: "Garth N. 
Wells" Date: Mon, 20 Feb 2023 17:45:40 +0000 Subject: [PATCH 03/44] Bump versions (#555) --- ChangeLog.rst | 9 ++++++--- cmake/CMakeLists.txt | 2 +- setup.cfg | 4 ++-- 3 files changed, 9 insertions(+), 6 deletions(-) diff --git a/ChangeLog.rst b/ChangeLog.rst index cb6bb4902..539d138d8 100644 --- a/ChangeLog.rst +++ b/ChangeLog.rst @@ -1,18 +1,21 @@ Changelog ========= +0.6.0 +----- +See https://github.com/FEniCS/ffcx/compare/v0.5.0...v0.6.0 0.5.0 ----- -See: https://github.com/FEniCS/ffcx/compare/v0.5.0...v0.4.0 for details +See: https://github.com/FEniCS/ffcx/compare/v0.5.0...v0.4.0 0.4.0 ----- -See: https://github.com/FEniCS/ffcx/compare/v0.4.0...v0.3.0 for details +See: https://github.com/FEniCS/ffcx/compare/v0.4.0...v0.3.0 0.3.0 ----- -See: https://github.com/FEniCS/ffcx/compare/v0.3.0...v0.2.0 for details +See: https://github.com/FEniCS/ffcx/compare/v0.3.0...v0.2.0 0.2.0 ----- diff --git a/cmake/CMakeLists.txt b/cmake/CMakeLists.txt index 7392614e9..16ebfce92 100644 --- a/cmake/CMakeLists.txt +++ b/cmake/CMakeLists.txt @@ -1,6 +1,6 @@ cmake_minimum_required(VERSION 3.19) -project(ufcx VERSION 0.6.0 DESCRIPTION "UFCx interface header for finite element kernels" +project(ufcx VERSION 0.7.0 DESCRIPTION "UFCx interface header for finite element kernels" LANGUAGES C HOMEPAGE_URL https://github.com/fenics/ffcx) include(GNUInstallDirs) diff --git a/setup.cfg b/setup.cfg index 4d79c8a7f..ae02f8f23 100644 --- a/setup.cfg +++ b/setup.cfg @@ -2,7 +2,7 @@ # future [metadata] name = fenics-ffcx -version = 0.6.0.dev0 +version = 0.7.0.dev0 author = FEniCS Project Contributors email = fenics-dev@googlegroups.com maintainer = FEniCS Project Steering Council @@ -46,7 +46,7 @@ install_requires = numpy cffi setuptools - fenics-basix >= 0.6.0.dev0, <0.7.0 + fenics-basix >= 0.7.0.dev0, <0.8.0 fenics-ufl >= 2022.3.0.dev0, <2022.4.0 [options.extras_require] From 82a7d120119d605245b6d3e31420225923e80c95 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=B8rgen=20Schartum=20Dokken?= Date: Wed, 22 Feb 2023 11:35:23 +0000 Subject: [PATCH 04/44] Update version number in `ufcx.h` --- ffcx/codegeneration/ufcx.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ffcx/codegeneration/ufcx.h b/ffcx/codegeneration/ufcx.h index b1b3d56f1..805998cf0 100644 --- a/ffcx/codegeneration/ufcx.h +++ b/ffcx/codegeneration/ufcx.h @@ -10,7 +10,7 @@ #pragma once #define UFCX_VERSION_MAJOR 0 -#define UFCX_VERSION_MINOR 6 +#define UFCX_VERSION_MINOR 7 #define UFCX_VERSION_MAINTENANCE 0 #define UFCX_VERSION_RELEASE 0 From 0033b8e49e74f7db1bc326b5e2056bca71b9919f Mon Sep 17 00:00:00 2001 From: "Garth N. 
Wells" Date: Fri, 24 Feb 2023 09:51:53 +0000 Subject: [PATCH 05/44] Fix ufl version in setup files (#558) * Fix UFL version in setup files * Version fix * Remove setup.py --- pyproject.toml | 2 +- setup.cfg | 4 ++-- setup.py | 15 --------------- 3 files changed, 3 insertions(+), 18 deletions(-) delete mode 100644 setup.py diff --git a/pyproject.toml b/pyproject.toml index d7133250b..b8ed95612 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,5 +1,5 @@ [build-system] -requires = ["setuptools>=58", "wheel"] +requires = ["setuptools>=62", "wheel"] build-backend = "setuptools.build_meta" diff --git a/setup.cfg b/setup.cfg index ae02f8f23..53f6a5f82 100644 --- a/setup.cfg +++ b/setup.cfg @@ -40,14 +40,14 @@ include_package_data = True zip_safe = False python_requires = >= 3.7 setup_requires = - setuptools >= 58 + setuptools >= 62 wheel install_requires = numpy cffi setuptools fenics-basix >= 0.7.0.dev0, <0.8.0 - fenics-ufl >= 2022.3.0.dev0, <2022.4.0 + fenics-ufl >= 2023.2.0.dev0, <2023.3.0 [options.extras_require] docs = sphinx; sphinx_rtd_theme diff --git a/setup.py b/setup.py deleted file mode 100644 index 2b7a7f26c..000000000 --- a/setup.py +++ /dev/null @@ -1,15 +0,0 @@ -import setuptools - -try: - import pip - from packaging import version - if version.parse(pip.__version__) < version.parse("21.3"): - # Issue with older version of pip https://github.com/pypa/pip/issues/7953 - import site - import sys - site.ENABLE_USER_SITE = "--user" in sys.argv[1:] - -except ImportError: - pass - -setuptools.setup() From 6f6e67140e840670e3f9ef48ddcc745525ccb85f Mon Sep 17 00:00:00 2001 From: "Garth N. Wells" Date: Sat, 25 Feb 2023 11:39:16 +0000 Subject: [PATCH 06/44] Remove import of private basix definitions (#559) * Remove import of private basix definitions * Consistency updates * Fixes * Improve readability * Small fix --- ffcx/analysis.py | 45 +++++++++++------------ ffcx/codegeneration/C/cnodes.py | 20 +++++----- ffcx/codegeneration/geometry.py | 12 +++--- ffcx/element_interface.py | 50 ++++++++++++------------- ffcx/ir/analysis/graph.py | 4 +- ffcx/ir/elementtables.py | 62 ++++++++++++++++--------------- ffcx/ir/integral.py | 6 +-- ffcx/ir/representation.py | 65 +++++++++++++++------------------ ffcx/ir/representationutils.py | 12 +++--- ffcx/naming.py | 10 ++--- test/test_elements.py | 10 ++--- 11 files changed, 143 insertions(+), 153 deletions(-) diff --git a/ffcx/analysis.py b/ffcx/analysis.py index ee4ddf854..de54c776c 100644 --- a/ffcx/analysis.py +++ b/ffcx/analysis.py @@ -13,14 +13,14 @@ import logging import typing +from warnings import warn -import numpy -import numpy.typing +import numpy as np +import numpy.typing as npt import basix.ufl_wrapper import ufl -from ffcx.element_interface import convert_element, QuadratureElement -from warnings import warn +from ffcx.element_interface import QuadratureElement, convert_element logger = logging.getLogger("ffcx") @@ -32,7 +32,7 @@ class UFLData(typing.NamedTuple): element_numbers: typing.Dict[basix.ufl_wrapper._BasixElementBase, int] unique_coordinate_elements: typing.List[basix.ufl_wrapper._BasixElementBase] # List of unique coordinate elements # List of ufl Expressions as tuples (expression, points, original_expression) - expressions: typing.List[typing.Tuple[ufl.core.expr.Expr, numpy.typing.NDArray[numpy.float64], ufl.core.expr.Expr]] + expressions: typing.List[typing.Tuple[ufl.core.expr.Expr, npt.NDArray[np.float64], ufl.core.expr.Expr]] def analyze_ufl_objects(ufl_objects: typing.List, options: typing.Dict) -> UFLData: @@ -55,7 
+55,9 @@ def analyze_ufl_objects(ufl_objects: typing.List, options: typing.Dict) -> UFLDa unique_coordinate_elements Unique coordinate elements across all forms and expressions expressions - List of all expressions after post-processing, with its evaluation points and the original expression + List of all expressions after post-processing, with its + evaluation points and the original expression + """ logger.info(79 * "*") logger.info("Compiler stage 1: Analyzing UFL objects") @@ -78,7 +80,7 @@ def analyze_ufl_objects(ufl_objects: typing.List, options: typing.Dict) -> UFLDa coordinate_elements.append(convert_element(ufl_object.ufl_coordinate_element())) elif isinstance(ufl_object[0], ufl.core.expr.Expr): original_expression = ufl_object[0] - points = numpy.asarray(ufl_object[1]) + points = np.asarray(ufl_object[1]) expressions.append((original_expression, points)) else: raise TypeError("UFL objects not recognised.") @@ -130,20 +132,17 @@ def _analyze_expression(expression: ufl.core.expr.Expr, options: typing.Dict): def _analyze_form(form: ufl.form.Form, options: typing.Dict) -> ufl.algorithms.formdata.FormData: """Analyzes UFL form and attaches metadata. - Options - ---------- - form - options + Args: + form: forms + options: options - Returns - ------- - form_data - Form data computed by UFL with metadata attached + Returns: + Form data computed by UFL with metadata attached - Note - ---- - The main workload of this function is extraction of unique/default metadata - from options, integral metadata or inherited from UFL - (in case of quadrature degree) + Note: + The main workload of this function is extraction of + unique/default metadata from options, integral metadata or + inherited from UFL (in case of quadrature degree). """ if form.empty(): @@ -179,8 +178,8 @@ def _analyze_form(form: ufl.form.Form, options: typing.Dict) -> ufl.algorithms.f if custom_q is None: custom_q = e._points, e._weights else: - assert numpy.allclose(e._points, custom_q[0]) - assert numpy.allclose(e._weights, custom_q[1]) + assert np.allclose(e._points, custom_q[0]) + assert np.allclose(e._weights, custom_q[1]) # Determine unique quadrature degree, quadrature scheme and # precision per each integral data @@ -203,7 +202,7 @@ def _analyze_form(form: ufl.form.Form, options: typing.Dict) -> ufl.algorithms.f p = precisions.pop() elif len(precisions) == 0: # Default precision - p = numpy.finfo("double").precision + 1 # == 16 + p = np.finfo("double").precision + 1 # == 16 else: raise RuntimeError("Only one precision allowed within integrals grouped by subdomain.") @@ -217,7 +216,7 @@ def _analyze_form(form: ufl.form.Form, options: typing.Dict) -> ufl.algorithms.f if custom_q is None: # Extract quadrature degree qd_metadata = integral.metadata().get("quadrature_degree", qd_default) - pd_estimated = numpy.max(integral.metadata()["estimated_polynomial_degree"]) + pd_estimated = np.max(integral.metadata()["estimated_polynomial_degree"]) if qd_metadata != qd_default: qd = qd_metadata else: diff --git a/ffcx/codegeneration/C/cnodes.py b/ffcx/codegeneration/C/cnodes.py index 792b0b66a..d967239e0 100644 --- a/ffcx/codegeneration/C/cnodes.py +++ b/ffcx/codegeneration/C/cnodes.py @@ -7,7 +7,7 @@ import logging import numbers -import numpy +import numpy as np from ffcx.codegeneration.C.format_lines import Indented, format_indented_lines from ffcx.codegeneration.C.format_value import (format_float, format_int, @@ -265,7 +265,7 @@ class LiteralFloat(CExprLiteral): precedence = PRECEDENCE.LITERAL def __init__(self, value): - assert 
isinstance(value, (float, complex, int, numpy.number)) + assert isinstance(value, (float, complex, int, np.number)) self.value = value def ce_format(self, precision=None): @@ -293,7 +293,7 @@ class LiteralInt(CExprLiteral): precedence = PRECEDENCE.LITERAL def __init__(self, value): - assert isinstance(value, (int, numpy.number)) + assert isinstance(value, (int, np.number)) self.value = value def ce_format(self, precision=None): @@ -915,7 +915,7 @@ def _is_zero_valued(values): elif isinstance(values, (numbers.Number, LiteralFloat)): return float(values) == 0.0 else: - return numpy.count_nonzero(values) == 0 + return np.count_nonzero(values) == 0 def as_cexpr(node): @@ -1264,7 +1264,7 @@ def formatter(x, p): return str(x) tokens = ["{ "] - if numpy.product(values.shape) > 0: + if np.product(values.shape) > 0: sep = ", " fvalues = [formatter(v, precision) for v in values] for v in fvalues[:-1]: @@ -1295,12 +1295,12 @@ def build_initializer_lists(values, sizes, level, formatter, padlen=0, precision def formatter(x, p): return str(x) - values = numpy.asarray(values) - assert numpy.product(values.shape) == numpy.product(sizes) + values = np.asarray(values) + assert np.product(values.shape) == np.product(sizes) assert len(sizes) > 0 assert len(values.shape) > 0 assert len(sizes) == len(values.shape) - assert numpy.all(values.shape == sizes) + assert np.all(values.shape == sizes) r = len(sizes) assert r > 0 @@ -1362,7 +1362,7 @@ def __init__(self, typename, symbol, sizes=None, values=None, padlen=0): # NB! No type checking, assuming nested lists of literal values. Not applying as_cexpr. if isinstance(values, (list, tuple)): - self.values = numpy.asarray(values) + self.values = np.asarray(values) else: self.values = values @@ -1397,7 +1397,7 @@ def cs_format(self, precision=None): formatter = format_float elif self.values.dtype.kind == "i": formatter = format_int - elif self.values.dtype == numpy.bool_: + elif self.values.dtype == np.bool_: def format_bool(x, precision=None): return "true" if x is True else "false" formatter = format_bool diff --git a/ffcx/codegeneration/geometry.py b/ffcx/codegeneration/geometry.py index 090f0bb81..2df2bcd93 100644 --- a/ffcx/codegeneration/geometry.py +++ b/ffcx/codegeneration/geometry.py @@ -4,7 +4,7 @@ # # SPDX-License-Identifier: LGPL-3.0-or-later -import numpy +import numpy as np import basix @@ -47,7 +47,7 @@ def facet_edge_vertices(L, tablename, cellname): else: raise ValueError("Only triangular and quadrilateral faces supported.") - out = numpy.array(edge_vertices, dtype=int) + out = np.array(edge_vertices, dtype=int) return L.ArrayDecl("static const unsigned int", f"{cellname}_{tablename}", out.shape, out) @@ -67,7 +67,7 @@ def reference_facet_volume(L, tablename, cellname, type: str): celltype = getattr(basix.CellType, cellname) volumes = basix.cell.facet_reference_volumes(celltype) for i in volumes[1:]: - if not numpy.isclose(i, volumes[0]): + if not np.isclose(i, volumes[0]): raise ValueError("Reference facet volume not supported for this cell type.") return L.VariableDecl(f"static const {type}", f"{cellname}_{tablename}", volumes[0]) @@ -77,7 +77,7 @@ def reference_edge_vectors(L, tablename, cellname, type: str): topology = basix.topology(celltype) geometry = basix.geometry(celltype) edge_vectors = [geometry[j] - geometry[i] for i, j in topology[1]] - out = numpy.array(edge_vectors[cellname]) + out = np.array(edge_vectors[cellname]) return L.ArrayDecl(f"static const {type}", f"{cellname}_{tablename}", out.shape, out) @@ -100,7 +100,7 @@ def 
facet_reference_edge_vectors(L, tablename, cellname, type: str): else: raise ValueError("Only triangular and quadrilateral faces supported.") - out = numpy.array(edge_vectors) + out = np.array(edge_vectors) return L.ArrayDecl(f"static const {type}", f"{cellname}_{tablename}", out.shape, out) @@ -113,4 +113,4 @@ def reference_facet_normals(L, tablename, cellname, type: str): def facet_orientation(L, tablename, cellname, type: str): celltype = getattr(basix.CellType, cellname) out = basix.cell.facet_orientations(celltype) - return L.ArrayDecl(f"static const {type}", f"{cellname}_{tablename}", out.shape, out) + return L.ArrayDecl(f"static const {type}", f"{cellname}_{tablename}", len(out), out) diff --git a/ffcx/element_interface.py b/ffcx/element_interface.py index ac1bd91f2..d48241264 100644 --- a/ffcx/element_interface.py +++ b/ffcx/element_interface.py @@ -11,11 +11,12 @@ import warnings from functools import lru_cache -import numpy import basix import basix.ufl_wrapper import ufl +import numpy as np +import numpy.typing as npt def convert_element(element: ufl.finiteelement.FiniteElementBase) -> basix.ufl_wrapper._BasixElementBase: @@ -65,11 +66,11 @@ def basix_index(indices: typing.Tuple[int]) -> int: return basix.index(*indices) -def create_quadrature(cellname, degree, rule) -> typing.Tuple[numpy.typing.NDArray[numpy.float64], - numpy.typing.NDArray[numpy.float64]]: +def create_quadrature(cellname, degree, rule) -> typing.Tuple[npt.NDArray[np.float64], + npt.NDArray[np.float64]]: """Create a quadrature rule.""" if cellname == "vertex": - return (numpy.ones((1, 0), dtype=numpy.float64), numpy.ones(1, dtype=numpy.float64)) + return (np.ones((1, 0), dtype=np.float64), np.ones(1, dtype=np.float64)) quadrature = basix.make_quadrature( basix.quadrature.string_to_type(rule), basix.cell.string_to_type(cellname), degree) @@ -85,33 +86,31 @@ def create_quadrature(cellname, degree, rule) -> typing.Tuple[numpy.typing.NDArr return quadrature -def reference_cell_vertices(cellname: str) -> numpy.typing.NDArray[numpy.float64]: +def reference_cell_vertices(cellname: str) -> npt.NDArray[np.float64]: """Get the vertices of a reference cell.""" return basix.geometry(basix.cell.string_to_type(cellname)) -def map_facet_points(points: numpy.typing.NDArray[numpy.float64], facet: int, - cellname: str) -> numpy.typing.NDArray[numpy.float64]: +def map_facet_points(points: npt.NDArray[np.float64], facet: int, + cellname: str) -> npt.NDArray[np.float64]: """Map points from a reference facet to a physical facet.""" geom = basix.geometry(basix.cell.string_to_type(cellname)) facet_vertices = [geom[i] for i in basix.topology(basix.cell.string_to_type(cellname))[-2][facet]] - return numpy.asarray([facet_vertices[0] + sum((i - facet_vertices[0]) * j for i, j in zip(facet_vertices[1:], p)) - for p in points], dtype=numpy.float64) + return np.asarray([facet_vertices[0] + sum((i - facet_vertices[0]) * j for i, j in zip(facet_vertices[1:], p)) + for p in points], dtype=np.float64) class QuadratureElement(basix.ufl_wrapper._BasixElementBase): """A quadrature element.""" - _points: basix.ufl_wrapper._nda_f64 - _weights: basix.ufl_wrapper._nda_f64 + _points: npt.NDArray[np.float64] + _weights: npt.NDArray[np.float64] _entity_counts: typing.List[int] _cellname: str - def __init__( - self, cellname: str, value_shape: typing.Tuple[int, ...], scheme: typing.Optional[str] = None, - degree: typing.Optional[int] = None, points: typing.Optional[basix.ufl_wrapper._nda_f64] = None, - weights: typing.Optional[basix.ufl_wrapper._nda_f64] = 
None, mapname: str = "identity" - ): + def __init__(self, cellname: str, value_shape: typing.Tuple[int, ...], scheme: typing.Optional[str] = None, + degree: typing.Optional[int] = None, points: typing.Optional[npt.NDArray[np.float64]] = None, + weights: typing.Optional[npt.NDArray[np.float64]] = None, mapname: str = "identity"): """Initialise the element.""" if scheme is not None: assert degree is not None @@ -140,15 +139,13 @@ def basix_sobolev_space(self): def __eq__(self, other) -> bool: """Check if two elements are equal.""" - return isinstance(other, QuadratureElement) and numpy.allclose(self._points, other._points) + return isinstance(other, QuadratureElement) and np.allclose(self._points, other._points) def __hash__(self) -> int: """Return a hash.""" return super().__hash__() - def tabulate( - self, nderivs: int, points: basix.ufl_wrapper._nda_f64 - ) -> basix.ufl_wrapper._nda_f64: + def tabulate(self, nderivs: int, points: npt.NDArray[np.float64]) -> npt.NDArray[np.float64]: """Tabulate the basis functions of the element. Args: @@ -163,7 +160,7 @@ def tabulate( if points.shape != self._points.shape: raise ValueError("Mismatch of tabulation points and element points.") - tables = numpy.asarray([numpy.eye(points.shape[0], points.shape[0])]) + tables = np.asarray([np.eye(points.shape[0], points.shape[0])]) return tables def get_component_element(self, flat_component: int) -> typing.Tuple[basix.ufl_wrapper._BasixElementBase, int, int]: @@ -231,7 +228,7 @@ def reference_topology(self) -> typing.List[typing.List[typing.List[int]]]: raise NotImplementedError() @property - def reference_geometry(self) -> basix.ufl_wrapper._nda_f64: + def reference_geometry(self) -> npt.NDArray[np.float64]: """Geometry of the reference element.""" raise NotImplementedError() @@ -305,9 +302,7 @@ def __hash__(self) -> int: """Return a hash.""" return super().__hash__() - def tabulate( - self, nderivs: int, points: basix.ufl_wrapper._nda_f64 - ) -> basix.ufl_wrapper._nda_f64: + def tabulate(self, nderivs: int, points: npt.NDArray[np.float64]) -> npt.NDArray[np.float64]: """Tabulate the basis functions of the element. Args: @@ -317,7 +312,7 @@ def tabulate( Returns: Tabulated basis functions """ - out = numpy.zeros((nderivs + 1, len(points), 1)) + out = np.zeros((nderivs + 1, len(points), 1)) out[0, :] = 1. 
return out @@ -329,6 +324,7 @@ def get_component_element(self, flat_component: int) -> typing.Tuple[basix.ufl_w Returns: component element, offset of the component, stride of the component + """ assert flat_component < self.value_size return self, 0, 1 @@ -387,7 +383,7 @@ def reference_topology(self) -> typing.List[typing.List[typing.List[int]]]: raise NotImplementedError() @property - def reference_geometry(self) -> basix.ufl_wrapper._nda_f64: + def reference_geometry(self) -> npt.NDArray[np.float64]: """Geometry of the reference element.""" raise NotImplementedError() diff --git a/ffcx/ir/analysis/graph.py b/ffcx/ir/analysis/graph.py index 7f7dcdac4..d643e9986 100644 --- a/ffcx/ir/analysis/graph.py +++ b/ffcx/ir/analysis/graph.py @@ -7,7 +7,7 @@ import logging -import numpy +import numpy as np import ufl from ffcx.ir.analysis.modified_terminals import is_modified_terminal @@ -126,7 +126,7 @@ def rebuild_with_scalar_subexpressions(G): total_unique_symbols = value_numberer.symbol_count # Array to store the scalar subexpression in for each symbol - W = numpy.empty(total_unique_symbols, dtype=object) + W = np.empty(total_unique_symbols, dtype=object) # Iterate over each graph node in order for i, v in G.nodes.items(): diff --git a/ffcx/ir/elementtables.py b/ffcx/ir/elementtables.py index 8f7d90126..2cac930b4 100644 --- a/ffcx/ir/elementtables.py +++ b/ffcx/ir/elementtables.py @@ -8,18 +8,20 @@ import logging import typing -import numpy +import numpy as np +import numpy.typing as npt import ufl import ufl.utils.derivativetuples -from ffcx.element_interface import basix_index, convert_element, QuadratureElement +from ffcx.element_interface import (QuadratureElement, basix_index, + convert_element) from ffcx.ir.representationutils import (create_quadrature_points_and_weights, integral_type_to_entity_dim, map_integral_points) logger = logging.getLogger("ffcx") -# Using same defaults as numpy.allclose +# Using same defaults as np.allclose default_rtol = 1e-6 default_atol = 1e-9 @@ -36,7 +38,7 @@ class ModifiedTerminalElement(typing.NamedTuple): class UniqueTableReferenceT(typing.NamedTuple): name: str - values: numpy.typing.NDArray[numpy.float64] + values: npt.NDArray[np.float64] offset: int block_size: int ttype: str @@ -46,12 +48,12 @@ class UniqueTableReferenceT(typing.NamedTuple): def equal_tables(a, b, rtol=default_rtol, atol=default_atol): - a = numpy.asarray(a) - b = numpy.asarray(b) + a = np.asarray(a) + b = np.asarray(b) if a.shape != b.shape: return False else: - return numpy.allclose(a, b, rtol=rtol, atol=atol) + return np.allclose(a, b, rtol=rtol, atol=atol) def clamp_table_small_numbers(table, @@ -60,9 +62,9 @@ def clamp_table_small_numbers(table, numbers=(-1.0, 0.0, 1.0)): """Clamp almost 0,1,-1 values to integers. 
Returns new table.""" # Get shape of table and number of columns, defined as the last axis - table = numpy.asarray(table) + table = np.asarray(table) for n in numbers: - table[numpy.where(numpy.isclose(table, n, rtol=rtol, atol=atol))] = n + table[np.where(np.isclose(table, n, rtol=rtol, atol=atol))] = n return table @@ -130,8 +132,8 @@ def get_ffcx_table_values(points, cell, integral_type, element, avg, entitytype, wsum = sum(weights) for entity, tbl in enumerate(component_tables): num_dofs = tbl.shape[1] - tbl = numpy.dot(tbl, weights) / wsum - tbl = numpy.reshape(tbl, (1, num_dofs)) + tbl = np.dot(tbl, weights) / wsum + tbl = np.reshape(tbl, (1, num_dofs)) component_tables[entity] = tbl # Loop over entities and fill table blockwise (each block = points x dofs) @@ -139,7 +141,7 @@ def get_ffcx_table_values(points, cell, integral_type, element, avg, entitytype, assert len(component_tables) == num_entities num_points, num_dofs = component_tables[0].shape shape = (1, num_entities, num_points, num_dofs) - res = numpy.zeros(shape) + res = np.zeros(shape) for entity in range(num_entities): res[:, entity, :, :] = component_tables[entity] @@ -236,8 +238,8 @@ def get_modified_terminal_element(mt) -> typing.Optional[ModifiedTerminalElement def permute_quadrature_interval(points, reflections=0): output = points.copy() for p in output: - assert len(p) < 2 or numpy.isclose(p[1], 0) - assert len(p) < 3 or numpy.isclose(p[2], 0) + assert len(p) < 2 or np.isclose(p[1], 0) + assert len(p) < 3 or np.isclose(p[2], 0) for i in range(reflections): for n, p in enumerate(output): output[n] = [1 - p[0]] @@ -247,7 +249,7 @@ def permute_quadrature_interval(points, reflections=0): def permute_quadrature_triangle(points, reflections=0, rotations=0): output = points.copy() for p in output: - assert len(p) < 3 or numpy.isclose(p[2], 0) + assert len(p) < 3 or np.isclose(p[2], 0) for i in range(rotations): for n, p in enumerate(output): output[n] = [p[1], 1 - p[0] - p[1]] @@ -260,7 +262,7 @@ def permute_quadrature_triangle(points, reflections=0, rotations=0): def permute_quadrature_quadrilateral(points, reflections=0, rotations=0): output = points.copy() for p in output: - assert len(p) < 3 or numpy.isclose(p[2], 0) + assert len(p) < 3 or np.isclose(p[2], 0) for i in range(rotations): for n, p in enumerate(output): output[n] = [p[1], 1 - p[0]] @@ -333,7 +335,7 @@ def build_optimized_tables(quadrature_rule, cell, integral_type, entitytype, integral_type, element, avg, entitytype, local_derivatives, flat_component)) t = new_table[0] - t['array'] = numpy.vstack([td['array'] for td in new_table]) + t['array'] = np.vstack([td['array'] for td in new_table]) elif tdim == 3: cell_type = cell.cellname() if cell_type == "tetrahedron": @@ -346,7 +348,7 @@ def build_optimized_tables(quadrature_rule, cell, integral_type, entitytype, cell, integral_type, element, avg, entitytype, local_derivatives, flat_component)) t = new_table[0] - t['array'] = numpy.vstack([td['array'] for td in new_table]) + t['array'] = np.vstack([td['array'] for td in new_table]) elif cell_type == "hexahedron": new_table = [] for rot in range(4): @@ -356,7 +358,7 @@ def build_optimized_tables(quadrature_rule, cell, integral_type, entitytype, quadrature_rule.points, ref, rot), cell, integral_type, element, avg, entitytype, local_derivatives, flat_component)) t = new_table[0] - t['array'] = numpy.vstack([td['array'] for td in new_table]) + t['array'] = np.vstack([td['array'] for td in new_table]) else: t = get_ffcx_table_values(quadrature_rule.points, cell, 
integral_type, element, avg, entitytype, @@ -407,39 +409,39 @@ def build_optimized_tables(quadrature_rule, cell, integral_type, entitytype, def is_zeros_table(table, rtol=default_rtol, atol=default_atol): - return (numpy.product(table.shape) == 0 - or numpy.allclose(table, numpy.zeros(table.shape), rtol=rtol, atol=atol)) + return (np.product(table.shape) == 0 + or np.allclose(table, np.zeros(table.shape), rtol=rtol, atol=atol)) def is_ones_table(table, rtol=default_rtol, atol=default_atol): - return numpy.allclose(table, numpy.ones(table.shape), rtol=rtol, atol=atol) + return np.allclose(table, np.ones(table.shape), rtol=rtol, atol=atol) def is_quadrature_table(table, rtol=default_rtol, atol=default_atol): _, num_entities, num_points, num_dofs = table.shape - Id = numpy.eye(num_points) + Id = np.eye(num_points) return (num_points == num_dofs and all( - numpy.allclose(table[0, i, :, :], Id, rtol=rtol, atol=atol) for i in range(num_entities))) + np.allclose(table[0, i, :, :], Id, rtol=rtol, atol=atol) for i in range(num_entities))) def is_permuted_table(table, rtol=default_rtol, atol=default_atol): return not all( - numpy.allclose(table[0, :, :, :], - table[i, :, :, :], rtol=rtol, atol=atol) + np.allclose(table[0, :, :, :], + table[i, :, :, :], rtol=rtol, atol=atol) for i in range(1, table.shape[0])) def is_piecewise_table(table, rtol=default_rtol, atol=default_atol): return all( - numpy.allclose(table[0, :, 0, :], - table[0, :, i, :], rtol=rtol, atol=atol) + np.allclose(table[0, :, 0, :], + table[0, :, i, :], rtol=rtol, atol=atol) for i in range(1, table.shape[2])) def is_uniform_table(table, rtol=default_rtol, atol=default_atol): return all( - numpy.allclose(table[0, 0, :, :], - table[0, i, :, :], rtol=rtol, atol=atol) + np.allclose(table[0, 0, :, :], + table[0, i, :, :], rtol=rtol, atol=atol) for i in range(1, table.shape[1])) diff --git a/ffcx/ir/integral.py b/ffcx/ir/integral.py index 7db59c4e3..b8f26cdfa 100644 --- a/ffcx/ir/integral.py +++ b/ffcx/ir/integral.py @@ -10,7 +10,7 @@ import logging import typing -import numpy +import numpy as np import ufl from ffcx.ir.analysis.factorization import compute_argument_factorization @@ -96,7 +96,7 @@ def compute_integral_ir(cell, integral_type, entitytype, integrands, argument_sh tables = {v.name: v.values for v in mt_table_reference.values()} S_targets = [i for i, v in S.nodes.items() if v.get('target', False)] - num_components = numpy.int32(numpy.prod(expression.ufl_shape)) + num_components = np.int32(np.prod(expression.ufl_shape)) if 'zeros' in table_types.values(): # If there are any 'zero' tables, replace symbolically and rebuild graph @@ -118,7 +118,7 @@ def compute_integral_ir(cell, integral_type, entitytype, integrands, argument_sh for comp in S.nodes[target]["component"]: assert expressions[comp] is None expressions[comp] = S.nodes[target]["expression"] - expression = ufl.as_tensor(numpy.reshape(expressions, expression.ufl_shape)) + expression = ufl.as_tensor(np.reshape(expressions, expression.ufl_shape)) # Rebuild scalar list-based graph representation S = build_scalar_graph(expression) diff --git a/ffcx/ir/representation.py b/ffcx/ir/representation.py index 4dbeeed45..6b595d535 100644 --- a/ffcx/ir/representation.py +++ b/ffcx/ir/representation.py @@ -22,7 +22,8 @@ import typing import warnings -import numpy +import numpy as np +import numpy.typing as npt import basix import ufl @@ -59,9 +60,9 @@ class FormIR(typing.NamedTuple): class CustomElementIR(typing.NamedTuple): cell_type: basix.CellType value_shape: typing.Tuple[int, ...] 
- wcoeffs: numpy.typing.NDArray[numpy.float64] - x: typing.List[typing.List[numpy.typing.NDArray[numpy.float64]]] - M: typing.List[typing.List[numpy.typing.NDArray[numpy.float64]]] + wcoeffs: npt.NDArray[np.float64] + x: typing.List[typing.List[npt.NDArray[np.float64]]] + M: typing.List[typing.List[npt.NDArray[np.float64]]] map_type: basix.MapType sobolev_space: basix.SobolevSpace interpolation_nderivs: int @@ -128,7 +129,7 @@ class IntegralIR(typing.NamedTuple): original_constant_offsets: typing.Dict[ufl.Constant, int] options: dict cell_shape: str - unique_tables: typing.Dict[str, numpy.typing.NDArray[numpy.float64]] + unique_tables: typing.Dict[str, npt.NDArray[np.float64]] unique_table_types: typing.Dict[str, str] integrand: typing.Dict[QuadratureRule, dict] name: str @@ -141,7 +142,7 @@ class ExpressionIR(typing.NamedTuple): name: str element_dimensions: typing.Dict[ufl.FiniteElementBase, int] options: dict - unique_tables: typing.Dict[str, numpy.typing.NDArray[numpy.float64]] + unique_tables: typing.Dict[str, npt.NDArray[np.float64]] unique_table_types: typing.Dict[str, str] integrand: typing.Dict[QuadratureRule, dict] coefficient_numbering: typing.Dict[ufl.Coefficient, int] @@ -151,7 +152,7 @@ class ExpressionIR(typing.NamedTuple): tensor_shape: typing.List[int] expression_shape: typing.List[int] original_constant_offsets: typing.Dict[ufl.Constant, int] - points: numpy.typing.NDArray[numpy.float64] + points: npt.NDArray[np.float64] coefficient_names: typing.List[str] constant_names: typing.List[str] needs_facet_permutations: bool @@ -187,28 +188,20 @@ def compute_ir(analysis: UFLData, object_names, prefix, options, visualise): integral_names[(fd_index, itg_index)] = naming.integral_name(fd.original_form, itg_data.integral_type, fd_index, itg_data.subdomain_id, prefix) - ir_elements = [ - _compute_element_ir(e, analysis.element_numbers, finite_element_names) - for e in analysis.unique_elements - ] + ir_elements = [_compute_element_ir(e, analysis.element_numbers, finite_element_names) + for e in analysis.unique_elements] - ir_dofmaps = [ - _compute_dofmap_ir(e, analysis.element_numbers, dofmap_names) - for e in analysis.unique_elements - ] + ir_dofmaps = [_compute_dofmap_ir(e, analysis.element_numbers, dofmap_names) + for e in analysis.unique_elements] - irs = [ - _compute_integral_ir(fd, i, analysis.element_numbers, integral_names, finite_element_names, - options, visualise) - for (i, fd) in enumerate(analysis.form_data) - ] + irs = [_compute_integral_ir(fd, i, analysis.element_numbers, integral_names, finite_element_names, + options, visualise) + for (i, fd) in enumerate(analysis.form_data)] ir_integrals = list(itertools.chain(*irs)) - ir_forms = [ - _compute_form_ir(fd, i, prefix, form_names, integral_names, analysis.element_numbers, finite_element_names, - dofmap_names, object_names) - for (i, fd) in enumerate(analysis.form_data) - ] + ir_forms = [_compute_form_ir(fd, i, prefix, form_names, integral_names, analysis.element_numbers, + finite_element_names, dofmap_names, object_names) + for (i, fd) in enumerate(analysis.form_data)] ir_expressions = [_compute_expression_ir(expr, i, prefix, analysis, options, visualise, object_names, finite_element_names, dofmap_names) @@ -415,22 +408,22 @@ def _compute_integral_ir(form_data, form_index, element_numbers, integral_names, "Explicitly selected vertex quadrature (degree 1), but requested degree is {}.". 
format(degree)) if cellname == "tetrahedron": - points, weights = (numpy.array([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], - [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]), - numpy.array([1.0 / 24.0, 1.0 / 24.0, 1.0 / 24.0, 1.0 / 24.0])) + points, weights = (np.array([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], + [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]), + np.array([1.0 / 24.0, 1.0 / 24.0, 1.0 / 24.0, 1.0 / 24.0])) elif cellname == "triangle": - points, weights = (numpy.array([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0]]), - numpy.array([1.0 / 6.0, 1.0 / 6.0, 1.0 / 6.0])) + points, weights = (np.array([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0]]), + np.array([1.0 / 6.0, 1.0 / 6.0, 1.0 / 6.0])) elif cellname == "interval": # Trapezoidal rule - return (numpy.array([[0.0], [1.0]]), numpy.array([1.0 / 2.0, 1.0 / 2.0])) + return (np.array([[0.0], [1.0]]), np.array([1.0 / 2.0, 1.0 / 2.0])) else: degree = md["quadrature_degree"] points, weights = create_quadrature_points_and_weights( integral_type, cell, degree, scheme) - points = numpy.asarray(points) - weights = numpy.asarray(weights) + points = np.asarray(points) + weights = np.asarray(weights) rule = QuadratureRule(points, weights) @@ -473,7 +466,7 @@ def _compute_integral_ir(form_data, form_index, element_numbers, integral_names, _offset = 0 for constant in form_data.original_form.constants(): original_constant_offsets[constant] = _offset - _offset += numpy.product(constant.ufl_shape, dtype=int) + _offset += np.product(constant.ufl_shape, dtype=int) ir["original_constant_offsets"] = original_constant_offsets ir["precision"] = itg_data.metadata["precision"] @@ -681,13 +674,13 @@ def _compute_expression_ir(expression, index, prefix, analysis, options, visuali _offset = 0 for constant in ufl.algorithms.analysis.extract_constants(expression): original_constant_offsets[constant] = _offset - _offset += numpy.product(constant.ufl_shape, dtype=int) + _offset += np.product(constant.ufl_shape, dtype=int) ir["original_constant_offsets"] = original_constant_offsets ir["points"] = points - weights = numpy.array([1.0] * points.shape[0]) + weights = np.array([1.0] * points.shape[0]) rule = QuadratureRule(points, weights) integrands = {rule: expression} diff --git a/ffcx/ir/representationutils.py b/ffcx/ir/representationutils.py index 3eda83ac0..49a99d6c4 100644 --- a/ffcx/ir/representationutils.py +++ b/ffcx/ir/representationutils.py @@ -8,7 +8,7 @@ import hashlib import logging -import numpy +import numpy as np import ufl from ffcx.element_interface import (create_quadrature, map_facet_points, @@ -19,7 +19,7 @@ class QuadratureRule: def __init__(self, points, weights): - self.points = numpy.ascontiguousarray(points) # TODO: change basix to make this unnecessary + self.points = np.ascontiguousarray(points) # TODO: change basix to make this unnecessary self.weights = weights self._hash = None @@ -30,7 +30,7 @@ def __hash__(self): return self._hash def __eq__(self, other): - return numpy.allclose(self.points, other.points) and numpy.allclose(self.weights, other.weights) + return np.allclose(self.points, other.points) and np.allclose(self.weights, other.weights) def id(self): """Return unique deterministic identifier. 
@@ -87,11 +87,11 @@ def map_integral_points(points, integral_type, cell, entity): if entity_dim == tdim: assert points.shape[1] == tdim assert entity == 0 - return numpy.asarray(points) + return np.asarray(points) elif entity_dim == tdim - 1: assert points.shape[1] == tdim - 1 - return numpy.asarray(map_facet_points(points, entity, cell.cellname())) + return np.asarray(map_facet_points(points, entity, cell.cellname())) elif entity_dim == 0: - return numpy.asarray([reference_cell_vertices(cell.cellname())[entity]]) + return np.asarray([reference_cell_vertices(cell.cellname())[entity]]) else: raise RuntimeError(f"Can't map points from entity_dim={entity_dim}") diff --git a/ffcx/naming.py b/ffcx/naming.py index 5d802f817..a1c260e0f 100644 --- a/ffcx/naming.py +++ b/ffcx/naming.py @@ -7,8 +7,8 @@ import hashlib import typing -import numpy -import numpy.typing +import numpy as np +import numpy.typing as npt import ffcx import ufl @@ -17,13 +17,13 @@ def compute_signature(ufl_objects: typing.List[ - typing.Union[ufl.Form, - ufl.FiniteElementBase, - typing.Tuple[ufl.core.expr.Expr, numpy.typing.NDArray[numpy.float64]]]], tag: str) -> str: + typing.Union[ufl.Form, ufl.FiniteElementBase, + typing.Tuple[ufl.core.expr.Expr, npt.NDArray[np.float64]]]], tag: str) -> str: """Compute the signature hash. Based on the UFL type of the objects and an additional optional 'tag'. + """ object_signature = "" for ufl_object in ufl_objects: diff --git a/test/test_elements.py b/test/test_elements.py index 02625a0ae..eb8cb460b 100644 --- a/test/test_elements.py +++ b/test/test_elements.py @@ -20,7 +20,7 @@ "Unit tests for FFCx" -import numpy +import numpy as np import pytest from ffcx.element_interface import create_element @@ -43,8 +43,8 @@ def element_coords(cell): def random_point(shape): - w = numpy.random.random(len(shape)) - return sum([numpy.array(shape[i]) * w[i] for i in range(len(shape))]) / sum(w) + w = np.random.random(len(shape)) + return sum([np.array(shape[i]) * w[i] for i in range(len(shape))]) / sum(w) @pytest.mark.parametrize("degree, expected_dim", [(1, 3), (2, 6), (3, 10)]) @@ -192,7 +192,7 @@ def test_values(self, family, cell, degree, reference): basis = table[0] if sum(element.value_shape()) == 1: for i, value in enumerate(basis[0]): - assert numpy.isclose(value, reference[i](x)) + assert np.isclose(value, reference[i](x)) else: for i, ref in enumerate(reference): - assert numpy.allclose(basis[0][i::len(reference)], ref(x)) + assert np.allclose(basis[0][i::len(reference)], ref(x)) From 2b4b8e552f4f2006339e1857628d78f8038f7588 Mon Sep 17 00:00:00 2001 From: Chris Richardson Date: Fri, 3 Mar 2023 11:51:41 +0100 Subject: [PATCH 07/44] Use simplified basix interface (#560) * Adjustment for basix interface * flake8 --- test/test_jit_expression.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/test/test_jit_expression.py b/test/test_jit_expression.py index 99d406e0f..bd93bcb2f 100644 --- a/test/test_jit_expression.py +++ b/test/test_jit_expression.py @@ -175,14 +175,18 @@ def test_elimiate_zero_tables_tensor(compile_args): # Get vertices of cell # Coords storage XYZXYZXYZ - basix_c_e = basix.create_element(basix.ElementFamily.P, basix.cell.string_to_type(cell), 1, False) + basix_c_e = basix.create_element(basix.ElementFamily.P, + basix.cell.string_to_type(cell), 1, + basix.LagrangeVariant.unset, + basix.DPCVariant.unset, False) coords = basix_c_e.points # Using same basix element for coordinate element and coefficient coeff_points = basix_c_e.points # Compile expression at 
interpolation points of second order Lagrange space - b_el = basix.create_element(basix.ElementFamily.P, basix.cell.string_to_type(cell), 0, True) + b_el = basix.create_element(basix.ElementFamily.P, basix.cell.string_to_type(cell), + 0, basix.LagrangeVariant.unset, basix.DPCVariant.unset, True) points = b_el.points obj, module, code = ffcx.codegeneration.jit.compile_expressions( [(expr, points)], cffi_extra_compile_args=compile_args) From c1dc25e7e6ec2661743449a27906f203760c0627 Mon Sep 17 00:00:00 2001 From: Chris Richardson Date: Tue, 7 Mar 2023 14:18:09 +0000 Subject: [PATCH 08/44] Interface fixes (#561) * Interface fixes --- .github/workflows/dolfin-tests.yml | 3 --- demo/ExpressionInterpolation.py | 2 +- test/test_jit_expression.py | 5 ++--- 3 files changed, 3 insertions(+), 7 deletions(-) diff --git a/.github/workflows/dolfin-tests.yml b/.github/workflows/dolfin-tests.yml index 6b040023c..ba5a90df7 100644 --- a/.github/workflows/dolfin-tests.yml +++ b/.github/workflows/dolfin-tests.yml @@ -28,9 +28,6 @@ jobs: container: fenicsproject/test-env:nightly-openmpi env: - CC: clang - CXX: clang++ - PETSC_ARCH: linux-gnu-complex-32 OMPI_ALLOW_RUN_AS_ROOT: 1 OMPI_ALLOW_RUN_AS_ROOT_CONFIRM: 1 diff --git a/demo/ExpressionInterpolation.py b/demo/ExpressionInterpolation.py index 5507969c7..ecd8d90d9 100644 --- a/demo/ExpressionInterpolation.py +++ b/demo/ExpressionInterpolation.py @@ -55,7 +55,7 @@ # Get interpolation points for output space family = basix.finite_element.string_to_family("Lagrange", cell.cellname()) -b_element = basix.create_element(family, b_cell, 4, basix.LagrangeVariant.gll_warped, True) +b_element = basix.create_element(family, b_cell, 4, basix.LagrangeVariant.gll_warped, discontinuous=True) interpolation_points = b_element.points # Create expressions that can be used for interpolation diff --git a/test/test_jit_expression.py b/test/test_jit_expression.py index bd93bcb2f..1b3ce1623 100644 --- a/test/test_jit_expression.py +++ b/test/test_jit_expression.py @@ -177,8 +177,7 @@ def test_elimiate_zero_tables_tensor(compile_args): # Coords storage XYZXYZXYZ basix_c_e = basix.create_element(basix.ElementFamily.P, basix.cell.string_to_type(cell), 1, - basix.LagrangeVariant.unset, - basix.DPCVariant.unset, False) + discontinuous=False) coords = basix_c_e.points # Using same basix element for coordinate element and coefficient @@ -186,7 +185,7 @@ def test_elimiate_zero_tables_tensor(compile_args): # Compile expression at interpolation points of second order Lagrange space b_el = basix.create_element(basix.ElementFamily.P, basix.cell.string_to_type(cell), - 0, basix.LagrangeVariant.unset, basix.DPCVariant.unset, True) + 0, discontinuous=True) points = b_el.points obj, module, code = ffcx.codegeneration.jit.compile_expressions( [(expr, points)], cffi_extra_compile_args=compile_args) From eefe0c82bfb3850a8c27bd949b77c00ac8b5f6ac Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=B8rgen=20Schartum=20Dokken?= Date: Thu, 16 Mar 2023 15:50:07 +0000 Subject: [PATCH 09/44] Fix interval integrals with vertex rules (#562) * Fix interval integrals with vertex rules * Add simple test that fails on main --- ffcx/ir/representation.py | 2 +- test/test_jit_forms.py | 35 +++++++++++++++++++++++++++++++++++ 2 files changed, 36 insertions(+), 1 deletion(-) diff --git a/ffcx/ir/representation.py b/ffcx/ir/representation.py index 6b595d535..321d6378c 100644 --- a/ffcx/ir/representation.py +++ b/ffcx/ir/representation.py @@ -416,7 +416,7 @@ def _compute_integral_ir(form_data, form_index, element_numbers, 
integral_names, np.array([1.0 / 6.0, 1.0 / 6.0, 1.0 / 6.0])) elif cellname == "interval": # Trapezoidal rule - return (np.array([[0.0], [1.0]]), np.array([1.0 / 2.0, 1.0 / 2.0])) + points, weights = (np.array([[0.0], [1.0]]), np.array([1.0 / 2.0, 1.0 / 2.0])) else: degree = md["quadrature_degree"] points, weights = create_quadrature_points_and_weights( diff --git a/test/test_jit_forms.py b/test/test_jit_forms.py index 2d15de8c0..8a1644af5 100644 --- a/test/test_jit_forms.py +++ b/test/test_jit_forms.py @@ -744,3 +744,38 @@ def test_invalid_function_name(compile_args): # Revert monkey patch for other tests ufl.Coefficient.__str__ = old_str + + +def test_interval_vertex_quadrature(compile_args): + + cell = ufl.interval + c_el = ufl.VectorElement("Lagrange", cell, 1) + mesh = ufl.Mesh(c_el) + + x = ufl.SpatialCoordinate(mesh) + dx = ufl.Measure( + "dx", metadata={"quadrature_rule": "vertex"}) + b = x[0] * dx + + forms = [b] + compiled_forms, module, code = ffcx.codegeneration.jit.compile_forms( + forms, cffi_extra_compile_args=compile_args) + + ffi = module.ffi + form0 = compiled_forms[0] + assert form0.num_integrals(module.lib.cell) == 1 + + default_integral = form0.integrals(module.lib.cell)[0] + J = np.zeros(1, dtype=np.float64) + a = np.pi + b = np.exp(1) + coords = np.array([a, 0.0, 0.0, + + b, 0.0, 0.0], dtype=np.float64) + + kernel = getattr(default_integral, "tabulate_tensor_float64") + kernel(ffi.cast('double *', J.ctypes.data), + ffi.NULL, + ffi.NULL, + ffi.cast('double *', coords.ctypes.data), ffi.NULL, ffi.NULL) + assert np.isclose(J[0], (0.5 * a + 0.5 * b) * np.abs(b - a)) From a21b30345c8615f13a810ebdcc6358336df70f64 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=B8rgen=20Schartum=20Dokken?= Date: Sat, 18 Mar 2023 13:14:45 +0100 Subject: [PATCH 10/44] Add quads and hexes for vertex scheme (#564) * Add quads and hexes for vertex scheme * Flake8 * Apply suggestions from code review Co-authored-by: Matthew Scroggs --- ffcx/ir/representation.py | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/ffcx/ir/representation.py b/ffcx/ir/representation.py index 321d6378c..922d20ebe 100644 --- a/ffcx/ir/representation.py +++ b/ffcx/ir/representation.py @@ -417,6 +417,16 @@ def _compute_integral_ir(form_data, form_index, element_numbers, integral_names, elif cellname == "interval": # Trapezoidal rule points, weights = (np.array([[0.0], [1.0]]), np.array([1.0 / 2.0, 1.0 / 2.0])) + elif cellname == "quadrilateral": + points, weights = (np.array([[0., 0], [1., 0.], [0., 1.], [1., 1]]), + np.array([1. / 4., 1. / 4., 1. / 4., 1. / 4.])) + elif cellname == "hexahedron": + points, weights = (np.array([[0., 0., 0.], [1., 0., 0.], [0., 1., 0.], [1., 1., 0.], + [0., 0., 1.], [1., 0., 1.], [0., 1., 1.], [1., 1., 1.]]), + np.array([1. / 8., 1. / 8., 1. / 8., 1. / 8., + 1. / 8., 1. / 8., 1. / 8., 1. 
/ 8.])) + else: + raise RuntimeError(f"Vertex scheme is not supported for cell: {cellname}") else: degree = md["quadrature_degree"] points, weights = create_quadrature_points_and_weights( From 879d42025f0b9bc8e61c3fdccabc067c3168bc10 Mon Sep 17 00:00:00 2001 From: Matthew Scroggs Date: Tue, 28 Mar 2023 11:52:54 +0100 Subject: [PATCH 11/44] Rename basix.ufl_wrapper -> basix.ufl (#563) * rename ufl_wrapper -> ufl * branches * deprecate directly created ufl elements * remove deprecated conversion from test * flake * enriched_element * update function name in tests * symmetry not symmetric * update tests and demos * flake * _BasixElementBase -> _ElementBase * a few more * use functions not constructors * shape= * tweaks * isinstance BlockedElement not blocked_element * flake * _ * more consistent imports * one more basix.ufl. * and another * basix main --- .github/workflows/dolfin-tests.yml | 1 + demo/BiharmonicHHJ.py | 13 +++---- demo/BiharmonicRegge.py | 15 ++++---- demo/CellGeometry.py | 6 ++-- demo/Components.py | 6 ++-- demo/Conditional.py | 7 ++-- demo/ExpressionInterpolation.py | 21 ++++++------ demo/FacetIntegrals.py | 7 ++-- demo/FacetRestrictionAD.py | 7 ++-- demo/HyperElasticity.py | 15 ++++---- demo/MassDG0.py | 6 ++-- demo/MassHcurl_2D_1.py | 5 +-- demo/MassHdiv_2D_1.py | 5 +-- demo/MathFunctions.py | 7 ++-- demo/MetaData.py | 8 ++--- demo/Mini.py | 14 ++++---- demo/MixedCoefficient.py | 14 ++++---- demo/MixedGradient.py | 10 +++--- demo/MixedPoissonDual.py | 16 ++++----- demo/Normals.py | 6 ++-- demo/Poisson1D.py | 6 ++-- demo/PoissonQuad.py | 9 ++--- demo/ProjectionManifold.py | 13 +++---- demo/ReactionDiffusion.py | 6 ++-- demo/SpatialCoordinates.py | 7 ++-- demo/StabilisedStokes.py | 11 +++--- demo/Symmetry.py | 6 ++-- demo/TraceElement.py | 5 +-- demo/VectorPoisson.py | 6 ++-- ffcx/analysis.py | 12 +++---- ffcx/codegeneration/access.py | 18 +++++----- ffcx/codegeneration/definitions.py | 4 +-- ffcx/element_interface.py | 55 ++++++++++++++++-------------- ffcx/ir/representation.py | 3 +- test/Poisson.py | 20 +++++------ test/test_add_mode.py | 7 ++-- test/test_blocked_elements.py | 8 ++--- test/test_cache.py | 4 +-- test/test_elements.py | 21 ++++++------ test/test_flops.py | 5 +-- test/test_jit_expression.py | 9 ++--- test/test_jit_forms.py | 55 ++++++++++++------------------ 42 files changed, 242 insertions(+), 237 deletions(-) diff --git a/.github/workflows/dolfin-tests.yml b/.github/workflows/dolfin-tests.yml index ba5a90df7..538490e3d 100644 --- a/.github/workflows/dolfin-tests.yml +++ b/.github/workflows/dolfin-tests.yml @@ -64,6 +64,7 @@ jobs: with: path: ./dolfinx repository: FEniCS/dolfinx + ref: main - name: Get DOLFINx source (specified branch/tag) if: github.event_name == 'workflow_dispatch' uses: actions/checkout@v3 diff --git a/demo/BiharmonicHHJ.py b/demo/BiharmonicHHJ.py index de007ec47..61ff3b164 100644 --- a/demo/BiharmonicHHJ.py +++ b/demo/BiharmonicHHJ.py @@ -3,16 +3,17 @@ # The bilinear form a(u, v) and linear form L(v) for # Biharmonic equation in Hellan-Herrmann-Johnson (HHJ) # formulation. 
-from ufl import (Coefficient, FacetNormal, FiniteElement, TestFunctions, - TrialFunctions, dot, dS, ds, dx, grad, inner, jump, triangle) +import basix.ufl +from ufl import (Coefficient, FacetNormal, TestFunctions, TrialFunctions, dot, + dS, ds, dx, grad, inner, jump, triangle) -HHJ = FiniteElement('HHJ', triangle, 2) -CG = FiniteElement('CG', triangle, 3) -mixed_element = HHJ * CG +HHJ = basix.ufl.element('HHJ', "triangle", 2) +P = basix.ufl.element('P', "triangle", 3) +mixed_element = basix.ufl.mixed_element([HHJ, P]) (sigma, u) = TrialFunctions(mixed_element) (tau, v) = TestFunctions(mixed_element) -f = Coefficient(CG) +f = Coefficient(P) def b(sigma, v): diff --git a/demo/BiharmonicRegge.py b/demo/BiharmonicRegge.py index 20f61087d..63b98cf72 100644 --- a/demo/BiharmonicRegge.py +++ b/demo/BiharmonicRegge.py @@ -2,17 +2,18 @@ # # The bilinear form a(u, v) and linear form L(v) for # Biharmonic equation in Regge formulation. -from ufl import (Coefficient, FacetNormal, FiniteElement, Identity, - TestFunctions, TrialFunctions, dot, dS, ds, dx, grad, inner, - jump, tetrahedron, tr) +import basix.ufl +from ufl import (Coefficient, FacetNormal, Identity, TestFunctions, + TrialFunctions, dot, dS, ds, dx, grad, inner, jump, + tetrahedron, tr) -REG = FiniteElement('Regge', tetrahedron, 1) -CG = FiniteElement('Lagrange', tetrahedron, 2) -mixed_element = REG * CG +REG = basix.ufl.element("Regge", "tetrahedron", 1) +P = basix.ufl.element("Lagrange", "tetrahedron", 2) +mixed_element = basix.ufl.mixed_element([REG, P]) (sigma, u) = TrialFunctions(mixed_element) (tau, v) = TestFunctions(mixed_element) -f = Coefficient(CG) +f = Coefficient(P) def S(mu): diff --git a/demo/CellGeometry.py b/demo/CellGeometry.py index 40f0abfe8..57cd9e88f 100644 --- a/demo/CellGeometry.py +++ b/demo/CellGeometry.py @@ -1,12 +1,12 @@ # Copyright (C) 2013 Martin S. Alnaes # # A functional M involving a bunch of cell geometry quantities. +import basix.ufl from ufl import (CellVolume, Circumradius, Coefficient, FacetArea, FacetNormal, - FiniteElement, SpatialCoordinate, ds, dx, tetrahedron) + SpatialCoordinate, ds, dx, tetrahedron) cell = tetrahedron - -V = FiniteElement("CG", cell, 1) +V = basix.ufl.element("P", cell.cellname(), 1) u = Coefficient(V) # TODO: Add all geometry for all cell types to this and other demo files, need for regression test. diff --git a/demo/Components.py b/demo/Components.py index 2b098a421..c4183061b 100644 --- a/demo/Components.py +++ b/demo/Components.py @@ -16,10 +16,10 @@ # along with FFCx. If not, see . # # This example demonstrates how to create vectors component-wise -from ufl import (Coefficient, TestFunction, VectorElement, as_vector, dot, dx, - tetrahedron) +import basix.ufl +from ufl import Coefficient, TestFunction, as_vector, dot, dx -element = VectorElement("Lagrange", tetrahedron, 1) +element = basix.ufl.element("Lagrange", "tetrahedron", 1, rank=1) v = TestFunction(element) f = Coefficient(element) diff --git a/demo/Conditional.py b/demo/Conditional.py index 0818d87b2..1f8416765 100644 --- a/demo/Conditional.py +++ b/demo/Conditional.py @@ -16,10 +16,11 @@ # along with FFCx. If not, see . 
# # Illustration on how to use Conditional to define a source term -from ufl import (And, Constant, FiniteElement, Not, Or, SpatialCoordinate, - TestFunction, conditional, dx, ge, gt, le, lt, triangle) +import basix.ufl +from ufl import (And, Constant, Not, Or, SpatialCoordinate, TestFunction, + conditional, dx, ge, gt, le, lt, triangle) -element = FiniteElement("Lagrange", triangle, 2) +element = basix.ufl.element("Lagrange", "triangle", 2) v = TestFunction(element) g = Constant(triangle) diff --git a/demo/ExpressionInterpolation.py b/demo/ExpressionInterpolation.py index ecd8d90d9..ebccde1de 100644 --- a/demo/ExpressionInterpolation.py +++ b/demo/ExpressionInterpolation.py @@ -19,18 +19,19 @@ # a set of interpolation points import basix -from ufl import (Coefficient, FiniteElement, FunctionSpace, Mesh, MixedElement, - VectorElement, grad, triangle) +import basix.ufl +from ffcx.element_interface import QuadratureElement +from ufl import Coefficient, FunctionSpace, Mesh, grad # Define mesh -cell = triangle -v_el = VectorElement("Lagrange", cell, 1) +cell = "triangle" +v_el = basix.ufl.element("Lagrange", cell, 1, rank=1) mesh = Mesh(v_el) # Define mixed function space -el = FiniteElement("CG", cell, 2) -el_int = VectorElement("Discontinuous Lagrange", cell, 1) -me = MixedElement([el, el_int]) +el = basix.ufl.element("P", cell, 2) +el_int = basix.ufl.element("Discontinuous Lagrange", cell, 1, rank=1) +me = basix.ufl.mixed_element([el, el_int]) V = FunctionSpace(mesh, me) u = Coefficient(V) @@ -41,20 +42,20 @@ # Define an expression using quadrature elements q_rule = "gauss_jacobi" q_degree = 3 -q_el = FiniteElement("Quadrature", cell, q_degree, quad_scheme=q_rule) +q_el = QuadratureElement(cell, (), q_rule, q_degree) Q = FunctionSpace(mesh, q_el) q = Coefficient(Q) powq = 3 * q**2 # Extract basix cell type -b_cell = basix.cell.string_to_type(cell.cellname()) +b_cell = basix.cell.string_to_type(cell) # Find quadrature points for quadrature element b_rule = basix.quadrature.string_to_type(q_rule) quadrature_points, _ = basix.make_quadrature(b_rule, b_cell, q_degree) # Get interpolation points for output space -family = basix.finite_element.string_to_family("Lagrange", cell.cellname()) +family = basix.finite_element.string_to_family("Lagrange", cell) b_element = basix.create_element(family, b_cell, 4, basix.LagrangeVariant.gll_warped, discontinuous=True) interpolation_points = b_element.points diff --git a/demo/FacetIntegrals.py b/demo/FacetIntegrals.py index f47a1fc16..2e7daf67a 100644 --- a/demo/FacetIntegrals.py +++ b/demo/FacetIntegrals.py @@ -19,10 +19,11 @@ # Last changed: 2011-03-08 # # Simple example of a form defined over exterior and interior facets. -from ufl import (FacetNormal, FiniteElement, TestFunction, TrialFunction, avg, - ds, dS, grad, inner, jump, triangle) +import basix.ufl +from ufl import (FacetNormal, TestFunction, TrialFunction, avg, ds, dS, grad, + inner, jump, triangle) -element = FiniteElement("Discontinuous Lagrange", triangle, 1) +element = basix.ufl.element("Discontinuous Lagrange", "triangle", 1) u = TrialFunction(element) v = TestFunction(element) diff --git a/demo/FacetRestrictionAD.py b/demo/FacetRestrictionAD.py index bed897294..da7fac9fe 100644 --- a/demo/FacetRestrictionAD.py +++ b/demo/FacetRestrictionAD.py @@ -14,10 +14,11 @@ # # You should have received a copy of the GNU Lesser General Public License # along with FFCx. If not, see . 
-from ufl import (Coefficient, FiniteElement, TestFunction, TrialFunction, avg, - derivative, dot, dS, dx, grad, inner, triangle) +import basix.ufl +from ufl import (Coefficient, TestFunction, TrialFunction, avg, derivative, + dot, dS, dx, grad, inner) -element = FiniteElement("Discontinuous Lagrange", triangle, 1) +element = basix.ufl.element("Discontinuous Lagrange", "triangle", 1) v = TestFunction(element) w = Coefficient(element) diff --git a/demo/HyperElasticity.py b/demo/HyperElasticity.py index 81bd49a29..4c01ea2ef 100644 --- a/demo/HyperElasticity.py +++ b/demo/HyperElasticity.py @@ -3,11 +3,12 @@ # Date: 2008-12-22 # +import basix.ufl # Modified by Garth N. Wells, 2009 -from ufl import (Coefficient, Constant, FacetNormal, FiniteElement, Identity, - SpatialCoordinate, TensorElement, TestFunction, TrialFunction, - VectorElement, derivative, det, diff, dot, ds, dx, exp, grad, - inner, inv, tetrahedron, tr, variable) +from ufl import (Coefficient, Constant, FacetNormal, Identity, + SpatialCoordinate, TestFunction, TrialFunction, derivative, + det, diff, dot, ds, dx, exp, grad, inner, inv, tetrahedron, + tr, variable) # Cell and its properties cell = tetrahedron @@ -16,9 +17,9 @@ x = SpatialCoordinate(cell) # Elements -u_element = VectorElement("CG", cell, 2) -p_element = FiniteElement("CG", cell, 1) -A_element = TensorElement("CG", cell, 1) +u_element = basix.ufl.element("P", cell.cellname(), 2, rank=1) +p_element = basix.ufl.element("P", cell.cellname(), 1) +A_element = basix.ufl.element("P", cell.cellname(), 1, rank=2) # Test and trial functions v = TestFunction(u_element) diff --git a/demo/MassDG0.py b/demo/MassDG0.py index 42ef4a5e1..fb9a2abb0 100644 --- a/demo/MassDG0.py +++ b/demo/MassDG0.py @@ -16,10 +16,10 @@ # along with FFCx. If not, see . # # The bilinear form for a mass matrix. -from ufl import (FiniteElement, TestFunction, TrialFunction, dx, inner, - tetrahedron) +import basix.ufl +from ufl import TestFunction, TrialFunction, dx, inner -element = FiniteElement("DG", tetrahedron, 0) +element = basix.ufl.element("DG", "tetrahedron", 0) v = TestFunction(element) u = TrialFunction(element) diff --git a/demo/MassHcurl_2D_1.py b/demo/MassHcurl_2D_1.py index 8212e984e..ed43e3955 100644 --- a/demo/MassHcurl_2D_1.py +++ b/demo/MassHcurl_2D_1.py @@ -14,9 +14,10 @@ # # You should have received a copy of the GNU Lesser General Public License # along with FFCx. If not, see . -from ufl import FiniteElement, TestFunction, TrialFunction, dx, inner, triangle +import basix.ufl +from ufl import TestFunction, TrialFunction, dx, inner -element = FiniteElement("N1curl", triangle, 1) +element = basix.ufl.element("N1curl", "triangle", 1) v = TestFunction(element) u = TrialFunction(element) diff --git a/demo/MassHdiv_2D_1.py b/demo/MassHdiv_2D_1.py index 81b9e67ae..beb9260f8 100644 --- a/demo/MassHdiv_2D_1.py +++ b/demo/MassHdiv_2D_1.py @@ -14,9 +14,10 @@ # # You should have received a copy of the GNU Lesser General Public License # along with FFCx. If not, see . -from ufl import FiniteElement, TestFunction, TrialFunction, dx, inner, triangle +import basix.ufl +from ufl import TestFunction, TrialFunction, dx, inner -element = FiniteElement("BDM", triangle, 1) +element = basix.ufl.element("BDM", "triangle", 1) v = TestFunction(element) u = TrialFunction(element) diff --git a/demo/MathFunctions.py b/demo/MathFunctions.py index 4eababab9..34af3d7ad 100644 --- a/demo/MathFunctions.py +++ b/demo/MathFunctions.py @@ -16,10 +16,11 @@ # along with FFCx. If not, see . 
# # Test all algebra operators on Coefficients. -from ufl import (Coefficient, FiniteElement, acos, asin, atan, bessel_J, - bessel_Y, cos, dx, erf, exp, ln, sin, sqrt, tan, triangle) +import basix.ufl +from ufl import (Coefficient, acos, asin, atan, bessel_J, bessel_Y, cos, dx, + erf, exp, ln, sin, sqrt, tan) -element = FiniteElement("Lagrange", triangle, 1) +element = basix.ufl.element("Lagrange", "triangle", 1) c0 = Coefficient(element) c1 = Coefficient(element) diff --git a/demo/MetaData.py b/demo/MetaData.py index bef33e0de..bc94b7465 100644 --- a/demo/MetaData.py +++ b/demo/MetaData.py @@ -16,11 +16,11 @@ # along with FFCx. If not, see . # # Test form for metadata. -from ufl import (Coefficient, FiniteElement, TestFunction, TrialFunction, - VectorElement, dx, grad, inner, triangle) +import basix.ufl +from ufl import Coefficient, TestFunction, TrialFunction, dx, grad, inner -element = FiniteElement("Lagrange", triangle, 1) -vector_element = VectorElement("Lagrange", triangle, 1) +element = basix.ufl.element("Lagrange", "triangle", 1) +vector_element = basix.ufl.element("Lagrange", "triangle", 1, rank=1) u = TrialFunction(element) diff --git a/demo/Mini.py b/demo/Mini.py index c1f86ac10..2c937d94a 100644 --- a/demo/Mini.py +++ b/demo/Mini.py @@ -19,15 +19,15 @@ # bilinear form a(u, v) for the Stokes equations using a mixed # formulation involving the Mini element. The velocity element is # composed of a P1 element augmented by the cubic bubble function. -from ufl import (FiniteElement, TestFunctions, TrialFunctions, VectorElement, - div, dx, grad, inner, triangle) +import basix.ufl +from ufl import TestFunctions, TrialFunctions, div, dx, grad, inner -P1 = FiniteElement("Lagrange", triangle, 1) -B = FiniteElement("Bubble", triangle, 3) -V = VectorElement(P1 + B) -Q = FiniteElement("CG", triangle, 1) +P1 = basix.ufl.element("Lagrange", "triangle", 1) +B = basix.ufl.element("Bubble", "triangle", 3) +V = basix.ufl.blocked_element(basix.ufl.enriched_element([P1, B]), shape=(2, )) +Q = basix.ufl.element("P", "triangle", 1) -Mini = V * Q +Mini = basix.ufl.mixed_element([V, Q]) (u, p) = TrialFunctions(Mini) (v, q) = TestFunctions(Mini) diff --git a/demo/MixedCoefficient.py b/demo/MixedCoefficient.py index aa96c59c0..9082fcc3f 100644 --- a/demo/MixedCoefficient.py +++ b/demo/MixedCoefficient.py @@ -18,16 +18,14 @@ # along with FFCx. If not, see . # # Mixed coefficient. 
-from ufl import (Coefficients, FiniteElement, MixedElement, VectorElement, dot, - dS, dx, triangle) +import basix.ufl +from ufl import Coefficients, dot, dS, dx -cell = triangle +DG = basix.ufl.element("DG", "triangle", 0, rank=1) +CG = basix.ufl.element("Lagrange", "triangle", 2) +RT = basix.ufl.element("RT", "triangle", 3) -DG = VectorElement("DG", cell, 0) -CG = FiniteElement("Lagrange", cell, 2) -RT = FiniteElement("RT", cell, 3) - -element = MixedElement(DG, CG, RT) +element = basix.ufl.mixed_element([DG, CG, RT]) f, g, h = Coefficients(element) diff --git a/demo/MixedGradient.py b/demo/MixedGradient.py index 2dcf28e2e..8b7d8ab5f 100644 --- a/demo/MixedGradient.py +++ b/demo/MixedGradient.py @@ -1,9 +1,9 @@ -from ufl import (FiniteElement, MixedElement, TestFunctions, TrialFunctions, - ds, grad, inner, triangle) +import basix.ufl +from ufl import TestFunctions, TrialFunctions, ds, grad, inner -element1 = FiniteElement("DG", triangle, 1) -element2 = FiniteElement("DGT", triangle, 1) -element = MixedElement(element1, element2) +element1 = basix.ufl.element("DG", "triangle", 1) +element2 = basix.ufl.element("DGT", "triangle", 1) +element = basix.ufl.mixed_element([element1, element2]) u = TrialFunctions(element)[0] v = TestFunctions(element)[0] diff --git a/demo/MixedPoissonDual.py b/demo/MixedPoissonDual.py index cbc0d7177..858f1fda5 100644 --- a/demo/MixedPoissonDual.py +++ b/demo/MixedPoissonDual.py @@ -20,19 +20,19 @@ # # The bilinear form a(u, v) and linear form L(v) for a two-field # (mixed) formulation of Poisson's equation -from ufl import (Coefficient, FiniteElement, TestFunctions, TrialFunctions, - dot, ds, dx, grad, triangle) +import basix.ufl +from ufl import Coefficient, TestFunctions, TrialFunctions, dot, ds, dx, grad -DRT = FiniteElement("DRT", triangle, 2) -CG = FiniteElement("CG", triangle, 3) -W = DRT * CG +DRT = basix.ufl.element("Discontinuous RT", "triangle", 2) +P = basix.ufl.element("P", "triangle", 3) +W = basix.ufl.mixed_element([DRT, P]) (sigma, u) = TrialFunctions(W) (tau, v) = TestFunctions(W) -CG1 = FiniteElement("CG", triangle, 1) -f = Coefficient(CG1) -g = Coefficient(CG1) +P1 = basix.ufl.element("P", "triangle", 1) +f = Coefficient(P1) +g = Coefficient(P1) a = (dot(sigma, tau) + dot(grad(u), tau) + dot(sigma, grad(v))) * dx L = - f * v * dx - g * v * ds diff --git a/demo/Normals.py b/demo/Normals.py index e5d00d864..c93a5f539 100644 --- a/demo/Normals.py +++ b/demo/Normals.py @@ -17,12 +17,12 @@ # # This example demonstrates how to use the facet normals # Merely project the normal onto a vector section. -from ufl import (FacetNormal, TestFunction, TrialFunction, VectorElement, dot, - ds, triangle) +import basix.ufl +from ufl import FacetNormal, TestFunction, TrialFunction, dot, ds, triangle cell = triangle -element = VectorElement("Lagrange", cell, 1) +element = basix.ufl.element("Lagrange", cell.cellname(), 1, rank=1) n = FacetNormal(cell) diff --git a/demo/Poisson1D.py b/demo/Poisson1D.py index de7c7ac2b..dc53dec36 100644 --- a/demo/Poisson1D.py +++ b/demo/Poisson1D.py @@ -17,10 +17,10 @@ # # The bilinear form a(u, v) and linear form L(v) for # Poisson's equation. 
-from ufl import (Coefficient, FiniteElement, TestFunction, TrialFunction, dx, - grad, inner, interval) +import basix.ufl +from ufl import Coefficient, TestFunction, TrialFunction, dx, grad, inner -element = FiniteElement("Lagrange", interval, 1) +element = basix.ufl.element("Lagrange", "interval", 1) u = TrialFunction(element) v = TestFunction(element) diff --git a/demo/PoissonQuad.py b/demo/PoissonQuad.py index 2ba9310e4..1cdc6c95a 100644 --- a/demo/PoissonQuad.py +++ b/demo/PoissonQuad.py @@ -17,14 +17,15 @@ # # The bilinear form a(u, v) and linear form L(v) for # Poisson's equation using bilinear elements on bilinear mesh geometry. -from ufl import (Coefficient, FiniteElement, FunctionSpace, Mesh, TestFunction, - TrialFunction, VectorElement, dx, grad, inner, triangle) +import basix.ufl +from ufl import (Coefficient, FunctionSpace, Mesh, TestFunction, TrialFunction, + dx, grad, inner) -coords = VectorElement("P", triangle, 2) +coords = basix.ufl.element("P", "triangle", 2, rank=1) mesh = Mesh(coords) dx = dx(mesh) -element = FiniteElement("P", mesh.ufl_cell(), 2) +element = basix.ufl.element("P", mesh.ufl_cell().cellname(), 2) space = FunctionSpace(mesh, element) u = TrialFunction(space) diff --git a/demo/ProjectionManifold.py b/demo/ProjectionManifold.py index f49ae332b..b94b2df43 100644 --- a/demo/ProjectionManifold.py +++ b/demo/ProjectionManifold.py @@ -17,16 +17,13 @@ # # This demo illustrates use of finite element spaces defined over # simplicies embedded in higher dimensions -from ufl import (Cell, FiniteElement, TestFunctions, TrialFunctions, div, dx, - inner) - -# Define interval embedded in 3D: -domain = Cell("triangle", geometric_dimension=3) +import basix.ufl +from ufl import TestFunctions, TrialFunctions, div, dx, inner # Define element over this domain -V = FiniteElement("RT", domain, 1) -Q = FiniteElement("DG", domain, 0) -element = V * Q +V = basix.ufl.element("RT", "triangle", 1, gdim=3) +Q = basix.ufl.element("DG", "triangle", 0, gdim=3) +element = basix.ufl.mixed_element([V, Q]) (u, p) = TrialFunctions(element) (v, q) = TestFunctions(element) diff --git a/demo/ReactionDiffusion.py b/demo/ReactionDiffusion.py index 2bea4369f..bd650d936 100644 --- a/demo/ReactionDiffusion.py +++ b/demo/ReactionDiffusion.py @@ -17,10 +17,10 @@ # # The bilinear form a(u, v) and linear form L(v) for a simple # reaction-diffusion equation using simplified tuple notation. -from ufl import (Coefficient, FiniteElement, TestFunction, TrialFunction, dx, - grad, inner, triangle) +import basix.ufl +from ufl import Coefficient, TestFunction, TrialFunction, dx, grad, inner -element = FiniteElement("Lagrange", triangle, 1) +element = basix.ufl.element("Lagrange", "triangle", 1) u = TrialFunction(element) v = TestFunction(element) diff --git a/demo/SpatialCoordinates.py b/demo/SpatialCoordinates.py index 8f130d1a9..3862228f3 100644 --- a/demo/SpatialCoordinates.py +++ b/demo/SpatialCoordinates.py @@ -18,10 +18,11 @@ # The bilinear form a(u, v) and linear form L(v) for # Poisson's equation where spatial coordinates are used to define the source # and boundary flux terms. 
-from ufl import (FiniteElement, SpatialCoordinate, TestFunction, TrialFunction, - ds, dx, exp, grad, inner, sin, triangle) +import basix.ufl +from ufl import (SpatialCoordinate, TestFunction, TrialFunction, ds, dx, exp, + grad, inner, sin, triangle) -element = FiniteElement("Lagrange", triangle, 2) +element = basix.ufl.element("Lagrange", "triangle", 2) u = TrialFunction(element) v = TestFunction(element) diff --git a/demo/StabilisedStokes.py b/demo/StabilisedStokes.py index e40df2633..5d1e7b55c 100644 --- a/demo/StabilisedStokes.py +++ b/demo/StabilisedStokes.py @@ -17,12 +17,13 @@ # # The bilinear form a(u, v) and Linear form L(v) for the Stokes # equations using a mixed formulation (equal-order stabilized). -from ufl import (Coefficient, FiniteElement, TestFunctions, TrialFunctions, - VectorElement, div, dot, dx, grad, inner, triangle) +import basix.ufl +from ufl import (Coefficient, TestFunctions, TrialFunctions, div, dot, dx, + grad, inner) -vector = VectorElement("Lagrange", triangle, 1) -scalar = FiniteElement("Lagrange", triangle, 1) -system = vector * scalar +vector = basix.ufl.element("Lagrange", "triangle", 1, rank=1) +scalar = basix.ufl.element("Lagrange", "triangle", 1) +system = basix.ufl.mixed_element([vector, scalar]) (u, p) = TrialFunctions(system) (v, q) = TestFunctions(system) diff --git a/demo/Symmetry.py b/demo/Symmetry.py index 124bccf5c..72d0b3866 100644 --- a/demo/Symmetry.py +++ b/demo/Symmetry.py @@ -1,7 +1,7 @@ -from ufl import (TensorElement, TestFunction, TrialFunction, dx, grad, inner, - triangle) +import basix.ufl +from ufl import TestFunction, TrialFunction, dx, grad, inner -P1 = TensorElement("CG", triangle, 1, (2, 2), symmetry={(1, 0): (0, 1)}) +P1 = basix.ufl.element("P", "triangle", 1, shape=(2, 2), symmetry=True) u = TrialFunction(P1) v = TestFunction(P1) diff --git a/demo/TraceElement.py b/demo/TraceElement.py index d6a9f703f..d20ddd3e0 100644 --- a/demo/TraceElement.py +++ b/demo/TraceElement.py @@ -14,8 +14,9 @@ # # You should have received a copy of the GNU Lesser General Public License # along with FFCx. If not, see . -from ufl import FiniteElement, TestFunction, avg, ds, dS +import basix.ufl +from ufl import TestFunction, avg, ds, dS -element = FiniteElement("HDiv Trace", "triangle", 0) +element = basix.ufl.element("HDiv Trace", "triangle", 0) v = TestFunction(element) L = v * ds + avg(v) * dS diff --git a/demo/VectorPoisson.py b/demo/VectorPoisson.py index 79c4f3c3f..a5e4064b7 100644 --- a/demo/VectorPoisson.py +++ b/demo/VectorPoisson.py @@ -17,10 +17,10 @@ # # The bilinear form a(u, v) and linear form L(v) for # the vector-valued Poisson's equation. -from ufl import (Coefficient, TestFunction, TrialFunction, VectorElement, dx, - grad, inner, triangle) +import basix.ufl +from ufl import Coefficient, TestFunction, TrialFunction, dx, grad, inner -element = VectorElement("Lagrange", triangle, 1) +element = basix.ufl.element("Lagrange", "triangle", 1, rank=1) u = TrialFunction(element) v = TestFunction(element) diff --git a/ffcx/analysis.py b/ffcx/analysis.py index de54c776c..51ea1610f 100644 --- a/ffcx/analysis.py +++ b/ffcx/analysis.py @@ -18,7 +18,7 @@ import numpy as np import numpy.typing as npt -import basix.ufl_wrapper +import basix.ufl import ufl from ffcx.element_interface import QuadratureElement, convert_element @@ -27,10 +27,10 @@ class UFLData(typing.NamedTuple): form_data: typing.Tuple[ufl.algorithms.formdata.FormData, ...] 
# Tuple of ufl form data - unique_elements: typing.List[basix.ufl_wrapper._BasixElementBase] # List of unique elements + unique_elements: typing.List[basix.ufl._ElementBase] # List of unique elements # Lookup table from each unique element to its index in `unique_elements` - element_numbers: typing.Dict[basix.ufl_wrapper._BasixElementBase, int] - unique_coordinate_elements: typing.List[basix.ufl_wrapper._BasixElementBase] # List of unique coordinate elements + element_numbers: typing.Dict[basix.ufl._ElementBase, int] + unique_coordinate_elements: typing.List[basix.ufl._ElementBase] # List of unique coordinate elements # List of ufl Expressions as tuples (expression, points, original_expression) expressions: typing.List[typing.Tuple[ufl.core.expr.Expr, npt.NDArray[np.float64], ufl.core.expr.Expr]] @@ -102,7 +102,7 @@ def analyze_ufl_objects(ufl_objects: typing.List, options: typing.Dict) -> UFLDa unique_coordinate_element_list = sorted(set(coordinate_elements), key=lambda x: repr(x)) for e in unique_elements: - assert isinstance(e, basix.ufl_wrapper._BasixElementBase) + assert isinstance(e, basix.ufl._ElementBase) # Compute dict (map) from element to index element_numbers = {element: i for i, element in enumerate(unique_elements)} @@ -153,7 +153,7 @@ def _analyze_form(form: ufl.form.Form, options: typing.Dict) -> ufl.algorithms.f # Set default spacing for coordinate elements to be equispaced for n, i in enumerate(form._integrals): element = i._ufl_domain._ufl_coordinate_element - if not isinstance(element, basix.ufl_wrapper._BasixElementBase) and element.degree() > 2: + if not isinstance(element, basix.ufl._ElementBase) and element.degree() > 2: warn("UFL coordinate elements using elements not created via Basix may not work with DOLFINx") # Check for complex mode diff --git a/ffcx/codegeneration/access.py b/ffcx/codegeneration/access.py index c30f22412..9d1dcc985 100644 --- a/ffcx/codegeneration/access.py +++ b/ffcx/codegeneration/access.py @@ -9,8 +9,8 @@ import warnings import ufl -from basix.ufl_wrapper import BlockedElement -from ffcx.element_interface import convert_element, create_element +import basix.ufl +from ffcx.element_interface import convert_element logger = logging.getLogger("ffcx") @@ -257,10 +257,10 @@ def cell_vertices(self, e, mt, tabledata, num_points): coordinate_element = convert_element(domain.ufl_coordinate_element()) # Get dimension and dofmap of scalar element - assert isinstance(coordinate_element, BlockedElement) + assert isinstance(coordinate_element, basix.ufl._BlockedElement) assert coordinate_element.value_shape() == (gdim, ) ufl_scalar_element, = set(coordinate_element.sub_elements()) - scalar_element = create_element(ufl_scalar_element) + scalar_element = convert_element(ufl_scalar_element) assert scalar_element.value_size == 1 and scalar_element.block_size == 1 vertex_scalar_dofs = scalar_element.entity_dofs[0] @@ -288,10 +288,10 @@ def cell_edge_vectors(self, e, mt, tabledata, num_points): raise RuntimeError(f"Unhandled cell types {cellname}.") # Get dimension and dofmap of scalar element - assert isinstance(coordinate_element, BlockedElement) + assert isinstance(coordinate_element, basix.ufl._BlockedElement) assert coordinate_element.value_shape() == (gdim, ) ufl_scalar_element, = set(coordinate_element.sub_elements()) - scalar_element = create_element(ufl_scalar_element) + scalar_element = convert_element(ufl_scalar_element) assert scalar_element.value_size == 1 and scalar_element.block_size == 1 vertex_scalar_dofs = scalar_element.entity_dofs[0] @@ 
-330,13 +330,13 @@ def facet_edge_vectors(self, e, mt, tabledata, num_points): raise RuntimeError(f"Unhandled cell types {cellname}.") # Get dimension and dofmap of scalar element - assert isinstance(coordinate_element, BlockedElement) + assert isinstance(coordinate_element, basix.ufl._BlockedElement) assert coordinate_element.value_shape() == (gdim, ) ufl_scalar_element, = set(coordinate_element.sub_elements()) - scalar_element = create_element(ufl_scalar_element) + scalar_element = convert_element(ufl_scalar_element) assert scalar_element.value_size == 1 and scalar_element.block_size == 1 - scalar_element = create_element(ufl_scalar_element) + scalar_element = convert_element(ufl_scalar_element) num_scalar_dofs = scalar_element.dim # Get edge vertices diff --git a/ffcx/codegeneration/definitions.py b/ffcx/codegeneration/definitions.py index 1ea67f5e8..1b26de95f 100644 --- a/ffcx/codegeneration/definitions.py +++ b/ffcx/codegeneration/definitions.py @@ -8,7 +8,7 @@ import logging import ufl -from ffcx.element_interface import create_element +from ffcx.element_interface import convert_element from ffcx.naming import scalar_to_value_type logger = logging.getLogger("ffcx") @@ -124,7 +124,7 @@ def _define_coordinate_dofs_lincomb(self, e, mt, tabledata, quadrature_rule, acc # Get properties of domain domain = ufl.domain.extract_unique_domain(mt.terminal) coordinate_element = domain.ufl_coordinate_element() - num_scalar_dofs = create_element(coordinate_element).sub_element.dim + num_scalar_dofs = convert_element(coordinate_element).sub_element.dim num_dofs = tabledata.values.shape[3] begin = tabledata.offset diff --git a/ffcx/element_interface.py b/ffcx/element_interface.py index d48241264..77ce23ff4 100644 --- a/ffcx/element_interface.py +++ b/ffcx/element_interface.py @@ -13,21 +13,21 @@ import basix -import basix.ufl_wrapper +import basix.ufl import ufl import numpy as np import numpy.typing as npt -def convert_element(element: ufl.finiteelement.FiniteElementBase) -> basix.ufl_wrapper._BasixElementBase: +def convert_element(element: ufl.finiteelement.FiniteElementBase) -> basix.ufl._ElementBase: """Convert and element to a FFCx element.""" - if isinstance(element, basix.ufl_wrapper._BasixElementBase): + if isinstance(element, basix.ufl._ElementBase): return element - return create_element(element) + return _cached_conversion(element) @lru_cache() -def create_element(element: ufl.finiteelement.FiniteElementBase) -> basix.ufl_wrapper._BasixElementBase: +def _cached_conversion(element: ufl.finiteelement.FiniteElementBase) -> basix.ufl._ElementBase: """Create an FFCx element from a UFL element. 
Args: @@ -36,29 +36,34 @@ def create_element(element: ufl.finiteelement.FiniteElementBase) -> basix.ufl_wr Returns: A Basix finite element """ - if isinstance(element, basix.ufl_wrapper._BasixElementBase): + if isinstance(element, basix.ufl._ElementBase): return element - elif isinstance(element, ufl.VectorElement): - return basix.ufl_wrapper.VectorElement(create_element(element.sub_elements()[0]), element.num_sub_elements()) - elif isinstance(element, ufl.TensorElement): - if len(element.symmetry()) == 0: - return basix.ufl_wrapper.TensorElement(create_element(element.sub_elements()[0]), element._value_shape) - else: - assert element.symmetry()[(1, 0)] == (0, 1) - return basix.ufl_wrapper.TensorElement(create_element( - element.sub_elements()[0]), element._value_shape, symmetric=True) - elif isinstance(element, ufl.MixedElement): - return basix.ufl_wrapper.MixedElement([create_element(e) for e in element.sub_elements()]) - elif isinstance(element, ufl.EnrichedElement): - return basix.ufl_wrapper._create_enriched_element([create_element(e) for e in element._elements]) elif element.family() == "Quadrature": return QuadratureElement(element.cell().cellname(), element.value_shape(), scheme=element.quadrature_scheme(), degree=element.degree()) - elif element.family() == "Real": return RealElement(element) + + warnings.warn( + "Use of elements created by UFL is deprecated. You should create elements directly using Basix.", + DeprecationWarning) + + if hasattr(ufl, "VectorElement") and isinstance(element, ufl.VectorElement): + return basix.ufl.blocked_element( + _cached_conversion(element.sub_elements()[0]), shape=(element.num_sub_elements(), )) + elif hasattr(ufl, "TensorElement") and isinstance(element, ufl.TensorElement): + if len(element.symmetry()) == 0: + return basix.ufl.blocked_element(_cached_conversion(element.sub_elements()[0]), shape=element._value_shape) + else: + assert element.symmetry()[(1, 0)] == (0, 1) + return basix.ufl.blocked_element(_cached_conversion( + element.sub_elements()[0]), element._value_shape, symmetry=True) + elif hasattr(ufl, "MixedElement") and isinstance(element, ufl.MixedElement): + return basix.ufl.mixed_element([_cached_conversion(e) for e in element.sub_elements()]) + elif hasattr(ufl, "EnrichedElement") and isinstance(element, ufl.EnrichedElement): + return basix.ufl.enriched_element([_cached_conversion(e) for e in element._elements]) else: - return basix.ufl_wrapper.convert_ufl_element(element) + return basix.ufl.convert_ufl_element(element) def basix_index(indices: typing.Tuple[int]) -> int: @@ -100,7 +105,7 @@ def map_facet_points(points: npt.NDArray[np.float64], facet: int, for p in points], dtype=np.float64) -class QuadratureElement(basix.ufl_wrapper._BasixElementBase): +class QuadratureElement(basix.ufl._ElementBase): """A quadrature element.""" _points: npt.NDArray[np.float64] @@ -163,7 +168,7 @@ def tabulate(self, nderivs: int, points: npt.NDArray[np.float64]) -> npt.NDArray tables = np.asarray([np.eye(points.shape[0], points.shape[0])]) return tables - def get_component_element(self, flat_component: int) -> typing.Tuple[basix.ufl_wrapper._BasixElementBase, int, int]: + def get_component_element(self, flat_component: int) -> typing.Tuple[basix.ufl._ElementBase, int, int]: """Get element that represents a component of the element, and the offset and stride of the component. 
Args: @@ -268,7 +273,7 @@ def map_type(self) -> basix.MapType: return basix.MapType.identity -class RealElement(basix.ufl_wrapper._BasixElementBase): +class RealElement(basix.ufl._ElementBase): """A real element.""" _family_name: str @@ -316,7 +321,7 @@ def tabulate(self, nderivs: int, points: npt.NDArray[np.float64]) -> npt.NDArray out[0, :] = 1. return out - def get_component_element(self, flat_component: int) -> typing.Tuple[basix.ufl_wrapper._BasixElementBase, int, int]: + def get_component_element(self, flat_component: int) -> typing.Tuple[basix.ufl._ElementBase, int, int]: """Get element that represents a component of the element, and the offset and stride of the component. Args: diff --git a/ffcx/ir/representation.py b/ffcx/ir/representation.py index 922d20ebe..e0d61df94 100644 --- a/ffcx/ir/representation.py +++ b/ffcx/ir/representation.py @@ -26,6 +26,7 @@ import numpy.typing as npt import basix +import basix.ufl import ufl from ffcx import naming from ffcx.analysis import UFLData @@ -541,7 +542,7 @@ def _compute_form_ir(form_data, form_id, prefix, form_names, integral_names, ele el = convert_element(convert_element(function.ufl_function_space().ufl_element())) cmap = function.ufl_function_space().ufl_domain().ufl_coordinate_element() # Default point spacing for CoordinateElement is equispaced - if not isinstance(cmap, basix.ufl_wrapper._BasixElementBase) and cmap.variant() is None: + if not isinstance(cmap, basix.ufl._ElementBase) and cmap.variant() is None: cmap._sub_element._variant = "equispaced" cmap = convert_element(cmap) family = cmap.family() diff --git a/test/Poisson.py b/test/Poisson.py index 39188923f..6dbe7e8b5 100644 --- a/test/Poisson.py +++ b/test/Poisson.py @@ -19,20 +19,20 @@ # Poisson's equation. # # Compile this form with FFCx: ffcx Poisson.ufl -from ufl import (Coefficient, Constant, FiniteElement, Mesh, TestFunction, - TrialFunction, VectorElement, dx, grad, inner, triangle) +from ufl import (Coefficient, Constant, Mesh, TestFunction, + TrialFunction, dx, grad, inner) +import basix.ufl -cell = triangle -mesh = Mesh(VectorElement('P', cell, 2)) +mesh = Mesh(basix.ufl.element('P', "triangle", 2, rank=1)) -element = FiniteElement("Lagrange", triangle, 2) +e = basix.ufl.element("Lagrange", "triangle", 2) -u = TrialFunction(element) -v = TestFunction(element) -f = Coefficient(element) +u = TrialFunction(e) +v = TestFunction(e) +f = Coefficient(e) -kappa1 = Constant(triangle, shape=(2, 2)) -kappa2 = Constant(triangle, shape=(2, 2)) +kappa1 = Constant(mesh.ufl_cell(), shape=(2, 2)) +kappa2 = Constant(mesh.ufl_cell(), shape=(2, 2)) a = inner(kappa1, kappa2) * inner(grad(u), grad(v)) * dx L = f * v * dx diff --git a/test/test_add_mode.py b/test/test_add_mode.py index c4fd5fd3b..7ee4aba31 100644 --- a/test/test_add_mode.py +++ b/test/test_add_mode.py @@ -8,6 +8,7 @@ import pytest import ffcx.codegeneration.jit +import basix.ufl import ufl from ffcx.naming import cdtype_to_numpy, scalar_to_value_type @@ -21,8 +22,7 @@ "float _Complex" ]) def test_additive_facet_integral(mode, compile_args): - cell = ufl.triangle - element = ufl.FiniteElement("Lagrange", cell, 1) + element = basix.ufl.element("Lagrange", "triangle", 1) u, v = ufl.TrialFunction(element), ufl.TestFunction(element) a = ufl.inner(u, v) * ufl.ds forms = [a] @@ -70,8 +70,7 @@ def test_additive_facet_integral(mode, compile_args): @pytest.mark.parametrize("mode", ["double", "float", "long double", "double _Complex", "float _Complex"]) def test_additive_cell_integral(mode, compile_args): - cell = ufl.triangle - 
element = ufl.FiniteElement("Lagrange", cell, 1) + element = basix.ufl.element("Lagrange", "triangle", 1) u, v = ufl.TrialFunction(element), ufl.TestFunction(element) a = ufl.inner(ufl.grad(u), ufl.grad(v)) * ufl.dx forms = [a] diff --git a/test/test_blocked_elements.py b/test/test_blocked_elements.py index b0c1aeec7..3f9e03143 100644 --- a/test/test_blocked_elements.py +++ b/test/test_blocked_elements.py @@ -8,11 +8,11 @@ import ffcx import ffcx.codegeneration.jit -import ufl +import basix.ufl def test_finite_element(compile_args): - ufl_element = ufl.FiniteElement("Lagrange", ufl.triangle, 1) + ufl_element = basix.ufl.element("Lagrange", "triangle", 1) jit_compiled_elements, module, code = ffcx.codegeneration.jit.compile_elements( [ufl_element], cffi_extra_compile_args=compile_args) ufcx_element, ufcx_dofmap = jit_compiled_elements[0] @@ -44,7 +44,7 @@ def test_finite_element(compile_args): def test_vector_element(compile_args): - ufl_element = ufl.VectorElement("Lagrange", ufl.triangle, 1) + ufl_element = basix.ufl.element("Lagrange", "triangle", 1, rank=1) jit_compiled_elements, module, code = ffcx.codegeneration.jit.compile_elements( [ufl_element], cffi_extra_compile_args=compile_args) ufcx_element, ufcx_dofmap = jit_compiled_elements[0] @@ -78,7 +78,7 @@ def test_vector_element(compile_args): def test_tensor_element(compile_args): - ufl_element = ufl.TensorElement("Lagrange", ufl.triangle, 1) + ufl_element = basix.ufl.element("Lagrange", "triangle", 1, rank=2) jit_compiled_elements, module, code = ffcx.codegeneration.jit.compile_elements( [ufl_element], cffi_extra_compile_args=compile_args) ufcx_element, ufcx_dofmap = jit_compiled_elements[0] diff --git a/test/test_cache.py b/test/test_cache.py index 4da2b4cf4..beb21b34f 100644 --- a/test/test_cache.py +++ b/test/test_cache.py @@ -8,11 +8,11 @@ import ffcx.codegeneration.jit import ufl +import basix.ufl def test_cache_modes(compile_args): - cell = ufl.triangle - element = ufl.FiniteElement("Lagrange", cell, 1) + element = basix.ufl.element("Lagrange", "triangle", 1) u, v = ufl.TrialFunction(element), ufl.TestFunction(element) a = ufl.inner(ufl.grad(u), ufl.grad(v)) * ufl.dx forms = [a] diff --git a/test/test_elements.py b/test/test_elements.py index eb8cb460b..443ae85df 100644 --- a/test/test_elements.py +++ b/test/test_elements.py @@ -23,8 +23,7 @@ import numpy as np import pytest -from ffcx.element_interface import create_element -from ufl import FiniteElement +import basix.ufl def element_coords(cell): @@ -50,28 +49,28 @@ def random_point(shape): @pytest.mark.parametrize("degree, expected_dim", [(1, 3), (2, 6), (3, 10)]) def test_continuous_lagrange(degree, expected_dim): "Test space dimensions of continuous Lagrange elements." - P = create_element(FiniteElement("Lagrange", "triangle", degree)) + P = basix.ufl.element("Lagrange", "triangle", degree) assert P.dim == expected_dim @pytest.mark.parametrize("degree, expected_dim", [(1, 4), (2, 9), (3, 16)]) def xtest_continuous_lagrange_quadrilateral(degree, expected_dim): "Test space dimensions of continuous TensorProduct elements (quadrilateral)." - P = create_element(FiniteElement("Lagrange", "quadrilateral", degree)) + P = basix.ufl.element("Lagrange", "quadrilateral", degree) assert P.dim == expected_dim @pytest.mark.parametrize("degree, expected_dim", [(1, 4), (2, 9), (3, 16)]) def xtest_continuous_lagrange_quadrilateral_spectral(degree, expected_dim): "Test space dimensions of continuous TensorProduct elements (quadrilateral)." 
- P = create_element(FiniteElement("Lagrange", "quadrilateral", degree, variant="spectral")) + P = basix.ufl.element("Lagrange", "quadrilateral", degree, variant="spectral") assert P.dim == expected_dim @pytest.mark.parametrize("degree, expected_dim", [(0, 1), (1, 3), (2, 6), (3, 10)]) def test_discontinuous_lagrange(degree, expected_dim): "Test space dimensions of discontinuous Lagrange elements." - P = create_element(FiniteElement("DG", "triangle", degree)) + P = basix.ufl.element("DG", "triangle", degree) assert P.dim == expected_dim @@ -79,7 +78,7 @@ def test_discontinuous_lagrange(degree, expected_dim): [(0, 3), (1, 9), (2, 18), (3, 30)]) def test_regge(degree, expected_dim): "Test space dimensions of generalized Regge element." - P = create_element(FiniteElement("Regge", "triangle", degree)) + P = basix.ufl.element("Regge", "triangle", degree) assert P.dim == expected_dim @@ -87,7 +86,7 @@ def test_regge(degree, expected_dim): [(0, 3), (1, 9), (2, 18), (3, 30)]) def xtest_hhj(degree, expected_dim): "Test space dimensions of Hellan-Herrmann-Johnson element." - P = create_element(FiniteElement("HHJ", "triangle", degree)) + P = basix.ufl.element("HHJ", "triangle", degree) assert P.dim == expected_dim @@ -183,14 +182,14 @@ class TestFunctionValues(): @pytest.mark.parametrize("family, cell, degree, reference", tests) def test_values(self, family, cell, degree, reference): # Create element - element = create_element(FiniteElement(family, cell, degree)) + e = basix.ufl.element(family, cell, degree) # Get some points and check basis function values at points points = [random_point(element_coords(cell)) for i in range(5)] for x in points: - table = element.tabulate(0, (x,)) + table = e.tabulate(0, (x,)) basis = table[0] - if sum(element.value_shape()) == 1: + if sum(e.value_shape()) == 1: for i, value in enumerate(basis[0]): assert np.isclose(value, reference[i](x)) else: diff --git a/test/test_flops.py b/test/test_flops.py index 833e76515..9ac99ad77 100644 --- a/test/test_flops.py +++ b/test/test_flops.py @@ -6,12 +6,13 @@ import ufl +import basix.ufl from ffcx.codegeneration.flop_count import count_flops def create_form(degree): - mesh = ufl.Mesh(ufl.VectorElement("Lagrange", "triangle", 1)) - element = ufl.FiniteElement("Lagrange", ufl.triangle, degree) + mesh = ufl.Mesh(basix.ufl.element("Lagrange", "triangle", 1, rank=1)) + element = basix.ufl.element("Lagrange", "triangle", degree) V = ufl.FunctionSpace(mesh, element) u = ufl.TrialFunction(V) diff --git a/test/test_jit_expression.py b/test/test_jit_expression.py index 1b3ce1623..5673f6160 100644 --- a/test/test_jit_expression.py +++ b/test/test_jit_expression.py @@ -9,6 +9,7 @@ import numpy as np import basix +import basix.ufl import ffcx.codegeneration.jit import ufl from ffcx.naming import cdtype_to_numpy, scalar_to_value_type @@ -38,7 +39,7 @@ def test_matvec(compile_args): of user specified vector-valued finite element function (in P1 space). """ - e = ufl.VectorElement("P", "triangle", 1) + e = basix.ufl.element("P", "triangle", 1, rank=1) mesh = ufl.Mesh(e) V = ufl.FunctionSpace(mesh, e) f = ufl.Coefficient(V) @@ -102,7 +103,7 @@ def test_rank1(compile_args): and evaluates expression [u_y, u_x] + grad(u_x) at specified points. 
""" - e = ufl.VectorElement("P", "triangle", 1) + e = basix.ufl.element("P", "triangle", 1, rank=1) mesh = ufl.Mesh(e) V = ufl.FunctionSpace(mesh, e) @@ -163,10 +164,10 @@ def test_elimiate_zero_tables_tensor(compile_args): Test elimination of tensor-valued expressions with zero tables """ cell = "tetrahedron" - c_el = ufl.VectorElement("P", cell, 1) + c_el = basix.ufl.element("P", cell, 1, rank=1) mesh = ufl.Mesh(c_el) - e = ufl.FiniteElement("CG", cell, 1) + e = basix.ufl.element("P", cell, 1) V = ufl.FunctionSpace(mesh, e) u = ufl.Coefficient(V) expr = ufl.sym(ufl.as_tensor([[u, u.dx(0).dx(0), 0], diff --git a/test/test_jit_forms.py b/test/test_jit_forms.py index 8a1644af5..a017bcf86 100644 --- a/test/test_jit_forms.py +++ b/test/test_jit_forms.py @@ -9,6 +9,7 @@ import sympy from sympy.abc import x, y, z +import basix.ufl import ffcx.codegeneration.jit import ufl from ffcx.naming import cdtype_to_numpy, scalar_to_value_type @@ -23,9 +24,8 @@ dtype=np.complex128)), ]) def test_laplace_bilinear_form_2d(mode, expected_result, compile_args): - cell = ufl.triangle - element = ufl.FiniteElement("Lagrange", cell, 1) - kappa = ufl.Constant(cell, shape=(2, 2)) + element = basix.ufl.element("Lagrange", "triangle", 1) + kappa = ufl.Constant(ufl.triangle, shape=(2, 2)) u, v = ufl.TrialFunction(element), ufl.TestFunction(element) a = ufl.tr(kappa) * ufl.inner(ufl.grad(u), ufl.grad(v)) * ufl.dx @@ -96,8 +96,7 @@ def test_laplace_bilinear_form_2d(mode, expected_result, compile_args): dtype=np.complex64)), ]) def test_mass_bilinear_form_2d(mode, expected_result, compile_args): - cell = ufl.triangle - element = ufl.FiniteElement("Lagrange", cell, 1) + element = basix.ufl.element("Lagrange", "triangle", 1) u, v = ufl.TrialFunction(element), ufl.TestFunction(element) a = ufl.inner(u, v) * ufl.dx L = ufl.conj(v) * ufl.dx @@ -149,8 +148,7 @@ def test_mass_bilinear_form_2d(mode, expected_result, compile_args): - (1.0j / 24.0) * np.array([[2, 1, 1], [1, 2, 1], [1, 1, 2]], dtype=np.complex128)), ]) def test_helmholtz_form_2d(mode, expected_result, compile_args): - cell = ufl.triangle - element = ufl.FiniteElement("Lagrange", cell, 1) + element = basix.ufl.element("Lagrange", "triangle", 1) u, v = ufl.TrialFunction(element), ufl.TestFunction(element) if mode == "double": k = 1.0 @@ -205,8 +203,7 @@ def test_helmholtz_form_2d(mode, expected_result, compile_args): dtype=np.complex128)), ]) def test_laplace_bilinear_form_3d(mode, expected_result, compile_args): - cell = ufl.tetrahedron - element = ufl.FiniteElement("Lagrange", cell, 1) + element = basix.ufl.element("Lagrange", "tetrahedron", 1) u, v = ufl.TrialFunction(element), ufl.TestFunction(element) a = ufl.inner(ufl.grad(u), ufl.grad(v)) * ufl.dx forms = [a] @@ -242,8 +239,7 @@ def test_laplace_bilinear_form_3d(mode, expected_result, compile_args): def test_form_coefficient(compile_args): - cell = ufl.triangle - element = ufl.FiniteElement("Lagrange", cell, 1) + element = basix.ufl.element("Lagrange", "triangle", 1) u, v = ufl.TestFunction(element), ufl.TrialFunction(element) g = ufl.Coefficient(element) a = g * ufl.inner(u, v) * ufl.dx @@ -278,8 +274,7 @@ def test_form_coefficient(compile_args): def test_subdomains(compile_args): - cell = ufl.triangle - element = ufl.FiniteElement("Lagrange", cell, 1) + element = basix.ufl.element("Lagrange", "triangle", 1) u, v = ufl.TrialFunction(element), ufl.TestFunction(element) a0 = ufl.inner(u, v) * ufl.dx + ufl.inner(u, v) * ufl.dx(2) a1 = ufl.inner(u, v) * ufl.dx(2) + ufl.inner(u, v) * ufl.dx @@ -313,8 +308,7 @@ 
def test_subdomains(compile_args): @pytest.mark.parametrize("mode", ["double", "double _Complex"]) def test_interior_facet_integral(mode, compile_args): - cell = ufl.triangle - element = ufl.FiniteElement("Lagrange", cell, 1) + element = basix.ufl.element("Lagrange", "triangle", 1) u, v = ufl.TrialFunction(element), ufl.TestFunction(element) a0 = ufl.inner(ufl.jump(ufl.grad(u)), ufl.jump(ufl.grad(v))) * ufl.dS forms = [a0] @@ -359,10 +353,9 @@ def test_interior_facet_integral(mode, compile_args): @pytest.mark.parametrize("mode", ["double", "double _Complex"]) def test_conditional(mode, compile_args): - cell = ufl.triangle - element = ufl.FiniteElement("Lagrange", cell, 1) + element = basix.ufl.element("Lagrange", "triangle", 1) u, v = ufl.TrialFunction(element), ufl.TestFunction(element) - x = ufl.SpatialCoordinate(cell) + x = ufl.SpatialCoordinate(ufl.triangle) condition = ufl.Or(ufl.ge(ufl.real(x[0] + x[1]), 0.1), ufl.ge(ufl.real(x[1] + x[1]**2), 0.1)) c1 = ufl.conditional(condition, 2.0, 1.0) @@ -417,10 +410,10 @@ def test_conditional(mode, compile_args): def test_custom_quadrature(compile_args): - ve = ufl.VectorElement("P", "triangle", 1) + ve = basix.ufl.element("P", "triangle", 1, rank=1) mesh = ufl.Mesh(ve) - e = ufl.FiniteElement("P", mesh.ufl_cell(), 2) + e = basix.ufl.element("P", mesh.ufl_cell().cellname(), 2) V = ufl.FunctionSpace(mesh, e) u, v = ufl.TrialFunction(V), ufl.TestFunction(V) @@ -455,7 +448,7 @@ def test_custom_quadrature(compile_args): def test_curl_curl(compile_args): - V = ufl.FiniteElement("N1curl", "triangle", 2) + V = basix.ufl.element("N1curl", "triangle", 2) u, v = ufl.TrialFunction(V), ufl.TestFunction(V) a = ufl.inner(ufl.curl(u), ufl.curl(v)) * ufl.dx @@ -508,8 +501,7 @@ def lagrange_triangle_symbolic(order, corners=[(1, 0), (2, 0), (0, 1)], fun=lamb @pytest.mark.parametrize("order", [1, 2, 3]) def test_lagrange_triangle(compile_args, order, mode, sym_fun, ufl_fun): sym = lagrange_triangle_symbolic(order, fun=sym_fun) - cell = ufl.triangle - element = ufl.FiniteElement("Lagrange", cell, order) + element = basix.ufl.element("Lagrange", "triangle", order) v = ufl.TestFunction(element) a = ufl_fun(v) * ufl.dx @@ -600,8 +592,7 @@ def lagrange_tetrahedron_symbolic(order, corners=[(1, 0, 0), (2, 0, 0), (0, 1, 0 @pytest.mark.parametrize("order", [1, 2, 3]) def test_lagrange_tetrahedron(compile_args, order, mode, sym_fun, ufl_fun): sym = lagrange_tetrahedron_symbolic(order, fun=sym_fun) - cell = ufl.tetrahedron - element = ufl.FiniteElement("Lagrange", cell, order) + element = basix.ufl.element("Lagrange", "tetrahedron", order) v = ufl.TestFunction(element) a = ufl_fun(v) * ufl.dx @@ -639,8 +630,7 @@ def test_lagrange_tetrahedron(compile_args, order, mode, sym_fun, ufl_fun): def test_prism(compile_args): - cell = ufl.prism - element = ufl.FiniteElement("Lagrange", cell, 1) + element = basix.ufl.element("Lagrange", "prism", 1) v = ufl.TestFunction(element) L = v * ufl.dx @@ -672,10 +662,10 @@ def test_prism(compile_args): def test_complex_operations(compile_args): mode = "double _Complex" - cell = ufl.triangle - c_element = ufl.VectorElement("Lagrange", cell, 1) + cell = "triangle" + c_element = basix.ufl.element("Lagrange", cell, 1, rank=1) mesh = ufl.Mesh(c_element) - element = ufl.VectorElement("DG", cell, 0) + element = basix.ufl.element("DG", cell, 0, rank=1) V = ufl.FunctionSpace(mesh, element) u = ufl.Coefficient(V) J1 = ufl.real(u)[0] * ufl.imag(u)[1] * ufl.conj(u)[0] * ufl.dx @@ -728,7 +718,7 @@ def test_invalid_function_name(compile_args): old_str = 
ufl.Coefficient.__str__ ufl.Coefficient.__str__ = lambda self: "invalid function name" - V = ufl.FiniteElement("Lagrange", ufl.triangle, 1) + V = basix.ufl.element("Lagrange", "triangle", 1) u = ufl.Coefficient(V) a = ufl.inner(u, u) * ufl.dx @@ -748,8 +738,7 @@ def test_invalid_function_name(compile_args): def test_interval_vertex_quadrature(compile_args): - cell = ufl.interval - c_el = ufl.VectorElement("Lagrange", cell, 1) + c_el = basix.ufl.element("Lagrange", "interval", 1, rank=1) mesh = ufl.Mesh(c_el) x = ufl.SpatialCoordinate(mesh) From a7a8a98fbd0b548036f42bf3552bd4aba5271a8a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=B8rgen=20Schartum=20Dokken?= Date: Wed, 26 Apr 2023 16:05:15 +0200 Subject: [PATCH 12/44] Remove `pkg_resources` and remove Python 3.7 support (#568) * same issue as https://github.com/FEniCS/ufl/issues/157 * Remove 3.7 support --- .github/workflows/pythonapp.yml | 2 +- ffcx/__init__.py | 5 ++--- setup.cfg | 4 ++-- 3 files changed, 5 insertions(+), 6 deletions(-) diff --git a/.github/workflows/pythonapp.yml b/.github/workflows/pythonapp.yml index 9bc05fddb..e9bc4b5b3 100644 --- a/.github/workflows/pythonapp.yml +++ b/.github/workflows/pythonapp.yml @@ -22,7 +22,7 @@ jobs: strategy: matrix: os: [ubuntu-latest] - python-version: ['3.7', '3.8', '3.9', '3.10', "3.11"] + python-version: ['3.8', '3.9', '3.10', "3.11"] env: CC: gcc-10 diff --git a/ffcx/__init__.py b/ffcx/__init__.py index 918c89c4e..5f6f87aa0 100644 --- a/ffcx/__init__.py +++ b/ffcx/__init__.py @@ -10,14 +10,13 @@ """ +import importlib.metadata import logging -import pkg_resources - # Import default options from ffcx.options import get_options # noqa: F401 -__version__ = pkg_resources.get_distribution("fenics-ffcx").version +__version__ = importlib.metadata.version("fenics-ffcx") logging.basicConfig() logger = logging.getLogger("ffcx") diff --git a/setup.cfg b/setup.cfg index 53f6a5f82..62687b460 100644 --- a/setup.cfg +++ b/setup.cfg @@ -26,10 +26,10 @@ classifiers = Operating System :: MacOS :: MacOS X Programming Language :: Python Programming Language :: Python :: 3 - Programming Language :: Python :: 3.7 Programming Language :: Python :: 3.8 Programming Language :: Python :: 3.9 Programming Language :: Python :: 3.10 + Programming Language :: Python :: 3.11 Topic :: Scientific/Engineering :: Mathematics Topic :: Software Development :: Libraries :: Python Modules Topic :: Software Development :: Code Generators @@ -38,7 +38,7 @@ classifiers = packages = find: include_package_data = True zip_safe = False -python_requires = >= 3.7 +python_requires = >= 3.8 setup_requires = setuptools >= 62 wheel From cf4edb480d8127d3696d844f990430f0a2b45db7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=B8rgen=20Schartum=20Dokken?= Date: Mon, 15 May 2023 11:11:34 +0200 Subject: [PATCH 13/44] Support facet integrals with vertex scheme (#566) * Support facet integrals with vertex scheme * Fix exterior face tests --- ffcx/ir/representation.py | 5 ++++ test/test_jit_forms.py | 57 +++++++++++++++++++++++++++++++++++++++ 2 files changed, 62 insertions(+) diff --git a/ffcx/ir/representation.py b/ffcx/ir/representation.py index e0d61df94..1ac6ff3c6 100644 --- a/ffcx/ir/representation.py +++ b/ffcx/ir/representation.py @@ -403,7 +403,12 @@ def _compute_integral_ir(form_data, form_index, element_numbers, integral_names, # scheme have some properties that other schemes lack, e.g., the # mass matrix is a simple diagonal matrix. This may be # prescribed in certain cases. 
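# (A sketch of the intended usage, mirroring the test added later in this patch:
#  the vertex scheme on a facet integral is requested through form metadata, e.g.
#      ds_v = ufl.Measure("ds", metadata={"quadrature_rule": "vertex"})
#  so for facet integrals the rule has to be built on the facet cell rather than
#  on the cell itself.)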
+ degree = md["quadrature_degree"] + if integral_type != "cell": + facet_types = cell.facet_types() + assert len(facet_types) == 1 + cellname = facet_types[0].cellname() if degree > 1: warnings.warn( "Explicitly selected vertex quadrature (degree 1), but requested degree is {}.". diff --git a/test/test_jit_forms.py b/test/test_jit_forms.py index a017bcf86..9241b6372 100644 --- a/test/test_jit_forms.py +++ b/test/test_jit_forms.py @@ -768,3 +768,60 @@ def test_interval_vertex_quadrature(compile_args): ffi.NULL, ffi.cast('double *', coords.ctypes.data), ffi.NULL, ffi.NULL) assert np.isclose(J[0], (0.5 * a + 0.5 * b) * np.abs(b - a)) + + +def test_facet_vertex_quadrature(compile_args): + """ + Test facet vertex quadrature + """ + c_el = basix.ufl.element("Lagrange", "quadrilateral", 1, shape=(2,)) + mesh = ufl.Mesh(c_el) + + x = ufl.SpatialCoordinate(mesh) + ds = ufl.Measure( + "ds", metadata={"quadrature_rule": "vertex"}) + expr = (x[0] + ufl.cos(x[1])) + b1 = expr * ds + ds_c = ufl.Measure( + "ds", + metadata={ + "quadrature_rule": "custom", + "quadrature_points": np.array([[0.0], [1.0]]), + "quadrature_weights": np.array([1.0 / 2.0, 1.0 / 2.0]), + } + ) + b2 = expr * ds_c + forms = [b1, b2] + compiled_forms, module, _ = ffcx.codegeneration.jit.compile_forms( + forms, cffi_extra_compile_args=compile_args) + + ffi = module.ffi + assert len(compiled_forms) == 2 + solutions = [] + for form in compiled_forms: + assert form.num_integrals(module.lib.exterior_facet) == 1 + + default_integral = form.integrals(module.lib.exterior_facet)[0] + J = np.zeros(1, dtype=np.float64) + a = np.pi + b = np.exp(1) + coords = np.array([a, 0.1, 0.0, + a + b, 0.0, 0.0, + a, a, 0., + a + 2 * b, a, 0.], dtype=np.float64) + # First facet is between vertex 0 and 1 in coords + facets = np.array([0], dtype=np.intc) + + kernel = getattr(default_integral, "tabulate_tensor_float64") + kernel(ffi.cast('double *', J.ctypes.data), + ffi.NULL, + ffi.NULL, + ffi.cast('double *', coords.ctypes.data), + ffi.cast('int *', facets.ctypes.data), + ffi.NULL) + solutions.append(J[0]) + # Test against exact result + assert np.isclose(J[0], (0.5 * (a + np.cos(0.1)) + 0.5 * (a + b + np.cos(0))) * np.sqrt(b**2 + 0.1**2)) + + # Compare custom quadrature with vertex quadrature + assert np.isclose(solutions[0], solutions[1]) From c7129e4bcc6932ed23e6137525cbca31a82f9265 Mon Sep 17 00:00:00 2001 From: Matthew Scroggs Date: Tue, 6 Jun 2023 11:23:12 +0100 Subject: [PATCH 14/44] Remove use of derivative_listing_to_counts (#570) * derivative_listing_to_counts function is simple, no need to import * flake * typing --- ffcx/codegeneration/symbols.py | 5 ++--- ffcx/ir/elementtables.py | 6 ++---- 2 files changed, 4 insertions(+), 7 deletions(-) diff --git a/ffcx/codegeneration/symbols.py b/ffcx/codegeneration/symbols.py index 5140d7c2d..7630a0d68 100644 --- a/ffcx/codegeneration/symbols.py +++ b/ffcx/codegeneration/symbols.py @@ -6,8 +6,7 @@ """FFCx/UFC specific symbol naming.""" import logging - -import ufl.utils.derivativetuples +import ufl logger = logging.getLogger("ffcx") @@ -46,7 +45,7 @@ def format_mt_name(basename, mt): if mt.local_derivatives: # Convert "listing" derivative multindex into "counting" representation gdim = ufl.domain.extract_unique_domain(mt.terminal).geometric_dimension() - ld_counting = ufl.utils.derivativetuples.derivative_listing_to_counts(mt.local_derivatives, gdim) + ld_counting = tuple(mt.local_derivatives.count(i) for i in range(gdim)) der = f"_d{''.join(map(str, ld_counting))}" access += der diff --git 
a/ffcx/ir/elementtables.py b/ffcx/ir/elementtables.py index 2cac930b4..a163e8a4c 100644 --- a/ffcx/ir/elementtables.py +++ b/ffcx/ir/elementtables.py @@ -12,7 +12,6 @@ import numpy.typing as npt import ufl -import ufl.utils.derivativetuples from ffcx.element_interface import (QuadratureElement, basix_index, convert_element) from ffcx.ir.representationutils import (create_quadrature_points_and_weights, @@ -32,7 +31,7 @@ class ModifiedTerminalElement(typing.NamedTuple): element: ufl.FiniteElementBase averaged: str - local_derivatives: typing.Tuple[int] + local_derivatives: typing.Tuple[int, ...] fc: int @@ -229,8 +228,7 @@ def get_modified_terminal_element(mt) -> typing.Optional[ModifiedTerminalElement assert (mt.averaged is None) or not (ld or gd) # Change derivatives format for table lookup gdim = domain.geometric_dimension() - local_derivatives = ufl.utils.derivativetuples.derivative_listing_to_counts( - ld, gdim) + local_derivatives: typing.Tuple[int, ...] = tuple(ld.count(i) for i in range(gdim)) return ModifiedTerminalElement(element, mt.averaged, local_derivatives, fc) From 2fb6a24c6a85f842fd26a06520dbccfeac120f8c Mon Sep 17 00:00:00 2001 From: Matthew Scroggs Date: Wed, 7 Jun 2023 17:12:42 +0100 Subject: [PATCH 15/44] Fix mypy (#572) * allow Nones in return values * type hint for the dict? --- ffcx/element_interface.py | 12 ++++++------ ffcx/ir/representation.py | 2 +- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/ffcx/element_interface.py b/ffcx/element_interface.py index 77ce23ff4..4e0555fc1 100644 --- a/ffcx/element_interface.py +++ b/ffcx/element_interface.py @@ -243,17 +243,17 @@ def family_name(self) -> str: return "quadrature" @property - def lagrange_variant(self) -> basix.LagrangeVariant: + def lagrange_variant(self) -> typing.Union[basix.LagrangeVariant, None]: """Basix Lagrange variant used to initialise the element.""" return None @property - def dpc_variant(self) -> basix.DPCVariant: + def dpc_variant(self) -> typing.Union[basix.DPCVariant, None]: """Basix DPC variant used to initialise the element.""" return None @property - def element_family(self) -> basix.ElementFamily: + def element_family(self) -> typing.Union[basix.ElementFamily, None]: """Basix element family used to initialise the element.""" return None @@ -398,17 +398,17 @@ def family_name(self) -> str: return self._family_name @property - def lagrange_variant(self) -> basix.LagrangeVariant: + def lagrange_variant(self) -> typing.Union[basix.LagrangeVariant, None]: """Basix Lagrange variant used to initialise the element.""" return None @property - def dpc_variant(self) -> basix.DPCVariant: + def dpc_variant(self) -> typing.Union[basix.DPCVariant, None]: """Basix DPC variant used to initialise the element.""" return None @property - def element_family(self) -> basix.ElementFamily: + def element_family(self) -> typing.Union[basix.ElementFamily, None]: """Basix element family used to initialise the element.""" return None diff --git a/ffcx/ir/representation.py b/ffcx/ir/representation.py index 1ac6ff3c6..6ae85cbf3 100644 --- a/ffcx/ir/representation.py +++ b/ffcx/ir/representation.py @@ -262,7 +262,7 @@ def _compute_element_ir(element, element_numbers, finite_element_names): def _compute_custom_element_ir(basix_element: basix.finite_element.FiniteElement): """Compute intermediate representation of a custom Basix element.""" - ir = {} + ir: typing.Dict[str, typing.Any] = {} ir["cell_type"] = basix_element.cell_type ir["value_shape"] = basix_element.value_shape ir["wcoeffs"] = basix_element.wcoeffs 
From 26d5556d9b40314fbe0096aa494faa35199e8e61 Mon Sep 17 00:00:00 2001 From: Chris Richardson Date: Wed, 14 Jun 2023 09:18:37 +0100 Subject: [PATCH 16/44] Change arg in test to np.array (#574) --- test/test_elements.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/test_elements.py b/test/test_elements.py index 443ae85df..6fc372b63 100644 --- a/test/test_elements.py +++ b/test/test_elements.py @@ -187,7 +187,7 @@ def test_values(self, family, cell, degree, reference): # Get some points and check basis function values at points points = [random_point(element_coords(cell)) for i in range(5)] for x in points: - table = e.tabulate(0, (x,)) + table = e.tabulate(0, np.array([x], dtype=np.float64)) basis = table[0] if sum(e.value_shape()) == 1: for i, value in enumerate(basis[0]): From 237b6135912ba3a7f87c7d2c7face87245cd8a76 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=B8rgen=20Schartum=20Dokken?= Date: Tue, 20 Jun 2023 17:20:10 +0200 Subject: [PATCH 17/44] Resolve double derivatives for manifolds (#576) * Add test to check that it currently fails * Use topological dimension for local derivatives * Flake8 * Check manifold direction for derivative to make the code slightly more robust --- ffcx/ir/elementtables.py | 4 ++-- test/test_jit_forms.py | 45 ++++++++++++++++++++++++++++++++++++++++ 2 files changed, 47 insertions(+), 2 deletions(-) diff --git a/ffcx/ir/elementtables.py b/ffcx/ir/elementtables.py index a163e8a4c..d1367513b 100644 --- a/ffcx/ir/elementtables.py +++ b/ffcx/ir/elementtables.py @@ -227,8 +227,8 @@ def get_modified_terminal_element(mt) -> typing.Optional[ModifiedTerminalElement assert (mt.averaged is None) or not (ld or gd) # Change derivatives format for table lookup - gdim = domain.geometric_dimension() - local_derivatives: typing.Tuple[int, ...] = tuple(ld.count(i) for i in range(gdim)) + tdim = domain.topological_dimension() + local_derivatives: typing.Tuple[int, ...] 
= tuple(ld.count(i) for i in range(tdim)) return ModifiedTerminalElement(element, mt.averaged, local_derivatives, fc) diff --git a/test/test_jit_forms.py b/test/test_jit_forms.py index 9241b6372..b6fe9518b 100644 --- a/test/test_jit_forms.py +++ b/test/test_jit_forms.py @@ -825,3 +825,48 @@ def test_facet_vertex_quadrature(compile_args): # Compare custom quadrature with vertex quadrature assert np.isclose(solutions[0], solutions[1]) + + +def test_manifold_derivatives(compile_args): + """ + Test higher order derivatives on manifolds + """ + + c_el = basix.ufl.element("Lagrange", "interval", 1, shape=(2,), gdim=2) + mesh = ufl.Mesh(c_el) + + x = ufl.SpatialCoordinate(mesh) + dx = ufl.Measure("dx", domain=mesh) + order = 4 + el = basix.ufl.element("Lagrange", "interval", order, gdim=2) + V = ufl.FunctionSpace(mesh, el) + + u = ufl.Coefficient(V) + d = 5.3 + f_ex = d * order * (order - 1) * x[1]**(order - 2) + expr = u.dx(1).dx(1) - f_ex + J = expr * expr * dx + + compiled_forms, module, _ = ffcx.codegeneration.jit.compile_forms( + [J], cffi_extra_compile_args=compile_args) + + default_integral = compiled_forms[0].integrals(module.lib.cell)[0] + scale = 2.5 + coords = np.array([0.0, 0.0, 0.0, 0.0, scale, 0.0], dtype=np.float64) + dof_coords = el.element.points.reshape(-1) + dof_coords *= scale + + w = np.array([d * d_c**order for d_c in dof_coords], dtype=np.float64) + c = np.array([], dtype=np.float64) + perm = np.array([0], dtype=np.uint8) + + ffi = module.ffi + J = np.zeros(1, dtype=np.float64) + kernel = getattr(default_integral, "tabulate_tensor_float64") + kernel(ffi.cast('double *', J.ctypes.data), + ffi.cast('double *', w.ctypes.data), + ffi.cast('double *', c.ctypes.data), + ffi.cast('double *', coords.ctypes.data), ffi.NULL, + ffi.cast('uint8_t *', perm.ctypes.data)) + + assert np.isclose(J[0], 0.0) From 657dee8455a8ccd0331258caaa8d8d33530eac5f Mon Sep 17 00:00:00 2001 From: "Garth N. Wells" Date: Mon, 26 Jun 2023 20:28:35 +0100 Subject: [PATCH 18/44] Remove warning on number of quadrature points. (#577) Rather annoying. Basix has good simplex schemes. 
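The change below drops a warning that fired when a quadrature rule had 100 or more points per cell; the point count can still be inspected directly if needed. A hedged sketch of that check, assuming the basix.make_quadrature signature used elsewhere in this patch (rule type, cell type, degree) and an illustrative degree of 20; later patches in this series switch to the keyword-argument form of make_quadrature:

import basix

# The number of integration points is the length of the weights array,
# which is what the removed warning compared against 100.
points, weights = basix.make_quadrature(
    basix.quadrature.string_to_type("default"),
    basix.cell.string_to_type("triangle"), 20)
print(weights.size)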
--- ffcx/element_interface.py | 26 ++++++++------------------ ffcx/ir/representation.py | 5 ++--- 2 files changed, 10 insertions(+), 21 deletions(-) diff --git a/ffcx/element_interface.py b/ffcx/element_interface.py index 4e0555fc1..e91abadc0 100644 --- a/ffcx/element_interface.py +++ b/ffcx/element_interface.py @@ -23,7 +23,8 @@ def convert_element(element: ufl.finiteelement.FiniteElementBase) -> basix.ufl._ """Convert and element to a FFCx element.""" if isinstance(element, basix.ufl._ElementBase): return element - return _cached_conversion(element) + else: + return _cached_conversion(element) @lru_cache() @@ -56,8 +57,8 @@ def _cached_conversion(element: ufl.finiteelement.FiniteElementBase) -> basix.uf return basix.ufl.blocked_element(_cached_conversion(element.sub_elements()[0]), shape=element._value_shape) else: assert element.symmetry()[(1, 0)] == (0, 1) - return basix.ufl.blocked_element(_cached_conversion( - element.sub_elements()[0]), element._value_shape, symmetry=True) + return basix.ufl.blocked_element(_cached_conversion(element.sub_elements()[0]), + element._value_shape, symmetry=True) elif hasattr(ufl, "MixedElement") and isinstance(element, ufl.MixedElement): return basix.ufl.mixed_element([_cached_conversion(e) for e in element.sub_elements()]) elif hasattr(ufl, "EnrichedElement") and isinstance(element, ufl.EnrichedElement): @@ -76,19 +77,9 @@ def create_quadrature(cellname, degree, rule) -> typing.Tuple[npt.NDArray[np.flo """Create a quadrature rule.""" if cellname == "vertex": return (np.ones((1, 0), dtype=np.float64), np.ones(1, dtype=np.float64)) - - quadrature = basix.make_quadrature( - basix.quadrature.string_to_type(rule), basix.cell.string_to_type(cellname), degree) - - # The quadrature degree from UFL can be very high for some - # integrals. Print warning if number of quadrature points - # exceeds 100. - num_points = quadrature[1].size - if num_points >= 100: - warnings.warn( - f"Number of integration points per cell is: {num_points}. Consider using 'quadrature_degree' " - "to reduce number.") - return quadrature + else: + return basix.make_quadrature(basix.quadrature.string_to_type(rule), + basix.cell.string_to_type(cellname), degree) def reference_cell_vertices(cellname: str) -> npt.NDArray[np.float64]: @@ -96,8 +87,7 @@ def reference_cell_vertices(cellname: str) -> npt.NDArray[np.float64]: return basix.geometry(basix.cell.string_to_type(cellname)) -def map_facet_points(points: npt.NDArray[np.float64], facet: int, - cellname: str) -> npt.NDArray[np.float64]: +def map_facet_points(points: npt.NDArray[np.float64], facet: int, cellname: str) -> npt.NDArray[np.float64]: """Map points from a reference facet to a physical facet.""" geom = basix.geometry(basix.cell.string_to_type(cellname)) facet_vertices = [geom[i] for i in basix.topology(basix.cell.string_to_type(cellname))[-2][facet]] diff --git a/ffcx/ir/representation.py b/ffcx/ir/representation.py index 6ae85cbf3..82446bed2 100644 --- a/ffcx/ir/representation.py +++ b/ffcx/ir/representation.py @@ -410,9 +410,8 @@ def _compute_integral_ir(form_data, form_index, element_numbers, integral_names, assert len(facet_types) == 1 cellname = facet_types[0].cellname() if degree > 1: - warnings.warn( - "Explicitly selected vertex quadrature (degree 1), but requested degree is {}.". - format(degree)) + warnings.warn("Explicitly selected vertex quadrature (degree 1), but requested degree is {}.". 
+ format(degree)) if cellname == "tetrahedron": points, weights = (np.array([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]), From 3e4d76fe50f2915dc7c227c30e0f91f21465fdd2 Mon Sep 17 00:00:00 2001 From: Matthew Scroggs Date: Tue, 27 Jun 2023 11:20:15 +0200 Subject: [PATCH 19/44] Use `num_sub_entities(...)` (new function in UFL) (#571) * use new num_sub_entities function * ufl branch * ufl branches * another branch * xfail for now * restore "ref: main" --- demo/test_demos.py | 2 ++ ffcx/ir/elementtables.py | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/demo/test_demos.py b/demo/test_demos.py index d28e394e4..37579d4b5 100644 --- a/demo/test_demos.py +++ b/demo/test_demos.py @@ -13,6 +13,8 @@ @pytest.mark.parametrize("file", ufl_files) def test_demo(file): + if file == "CellGeometry": + pytest.xfail() if file in [ "MixedGradient", "TraceElement", # HDiv Trace "MixedElasticity", # VectorElement of BDM diff --git a/ffcx/ir/elementtables.py b/ffcx/ir/elementtables.py index d1367513b..b44ee8671 100644 --- a/ffcx/ir/elementtables.py +++ b/ffcx/ir/elementtables.py @@ -114,7 +114,7 @@ def get_ffcx_table_values(points, cell, integral_type, element, avg, entitytype, # Tabulate table of basis functions and derivatives in points for each entity tdim = cell.topological_dimension() entity_dim = integral_type_to_entity_dim(integral_type, tdim) - num_entities = ufl.cell.num_cell_entities[cell.cellname()][entity_dim] + num_entities = cell.num_sub_entities(entity_dim) # Extract arrays for the right scalar component component_tables = [] From 838c294d8f56328c093e64a91f9791ccf0f884da Mon Sep 17 00:00:00 2001 From: Matthew Scroggs Date: Tue, 27 Jun 2023 12:48:16 +0200 Subject: [PATCH 20/44] remove xfail (#580) --- demo/test_demos.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/demo/test_demos.py b/demo/test_demos.py index 37579d4b5..d28e394e4 100644 --- a/demo/test_demos.py +++ b/demo/test_demos.py @@ -13,8 +13,6 @@ @pytest.mark.parametrize("file", ufl_files) def test_demo(file): - if file == "CellGeometry": - pytest.xfail() if file in [ "MixedGradient", "TraceElement", # HDiv Trace "MixedElasticity", # VectorElement of BDM From 5741516d7a695f9f11d11bd14c6507b6de00cacb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Delaporte-Mathurin?= <40028739+RemDelaporteMathurin@users.noreply.github.com> Date: Fri, 30 Jun 2023 09:04:25 -0400 Subject: [PATCH 21/44] Replace deprecated np.product function by np.prod (#582) * product to prod * product to prod in elementtables * product to prod in cnodes.py --- ffcx/codegeneration/C/cnodes.py | 4 ++-- ffcx/ir/elementtables.py | 2 +- ffcx/ir/representation.py | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/ffcx/codegeneration/C/cnodes.py b/ffcx/codegeneration/C/cnodes.py index d967239e0..860945923 100644 --- a/ffcx/codegeneration/C/cnodes.py +++ b/ffcx/codegeneration/C/cnodes.py @@ -1264,7 +1264,7 @@ def formatter(x, p): return str(x) tokens = ["{ "] - if np.product(values.shape) > 0: + if np.prod(values.shape) > 0: sep = ", " fvalues = [formatter(v, precision) for v in values] for v in fvalues[:-1]: @@ -1296,7 +1296,7 @@ def formatter(x, p): return str(x) values = np.asarray(values) - assert np.product(values.shape) == np.product(sizes) + assert np.prod(values.shape) == np.prod(sizes) assert len(sizes) > 0 assert len(values.shape) > 0 assert len(sizes) == len(values.shape) diff --git a/ffcx/ir/elementtables.py b/ffcx/ir/elementtables.py index b44ee8671..f658cf8d3 100644 --- a/ffcx/ir/elementtables.py +++ 
b/ffcx/ir/elementtables.py @@ -407,7 +407,7 @@ def build_optimized_tables(quadrature_rule, cell, integral_type, entitytype, def is_zeros_table(table, rtol=default_rtol, atol=default_atol): - return (np.product(table.shape) == 0 + return (np.prod(table.shape) == 0 or np.allclose(table, np.zeros(table.shape), rtol=rtol, atol=atol)) diff --git a/ffcx/ir/representation.py b/ffcx/ir/representation.py index 82446bed2..3bdf746ff 100644 --- a/ffcx/ir/representation.py +++ b/ffcx/ir/representation.py @@ -481,7 +481,7 @@ def _compute_integral_ir(form_data, form_index, element_numbers, integral_names, _offset = 0 for constant in form_data.original_form.constants(): original_constant_offsets[constant] = _offset - _offset += np.product(constant.ufl_shape, dtype=int) + _offset += np.prod(constant.ufl_shape, dtype=int) ir["original_constant_offsets"] = original_constant_offsets ir["precision"] = itg_data.metadata["precision"] From c9831436449eeae702acf3855efc90a5327fd4d9 Mon Sep 17 00:00:00 2001 From: "Jack S. Hale" Date: Sat, 15 Jul 2023 21:33:03 +0200 Subject: [PATCH 22/44] Fix bug that led to Quadrature Elements not being blocked. (#583) --- ffcx/element_interface.py | 18 +++++++++--------- test/test_blocked_elements.py | 36 +++++++++++++++++++++++++++++++++++ 2 files changed, 45 insertions(+), 9 deletions(-) diff --git a/ffcx/element_interface.py b/ffcx/element_interface.py index e91abadc0..87722b798 100644 --- a/ffcx/element_interface.py +++ b/ffcx/element_interface.py @@ -37,18 +37,12 @@ def _cached_conversion(element: ufl.finiteelement.FiniteElementBase) -> basix.uf Returns: A Basix finite element """ - if isinstance(element, basix.ufl._ElementBase): - return element - elif element.family() == "Quadrature": - return QuadratureElement(element.cell().cellname(), element.value_shape(), scheme=element.quadrature_scheme(), - degree=element.degree()) - elif element.family() == "Real": - return RealElement(element) - warnings.warn( "Use of elements created by UFL is deprecated. You should create elements directly using Basix.", DeprecationWarning) + # Tackle compositional elements, e.g. VectorElement first, then elements + # implemented by FFCx, then finally elements convertible by Basix. 
if hasattr(ufl, "VectorElement") and isinstance(element, ufl.VectorElement): return basix.ufl.blocked_element( _cached_conversion(element.sub_elements()[0]), shape=(element.num_sub_elements(), )) @@ -63,6 +57,11 @@ def _cached_conversion(element: ufl.finiteelement.FiniteElementBase) -> basix.uf return basix.ufl.mixed_element([_cached_conversion(e) for e in element.sub_elements()]) elif hasattr(ufl, "EnrichedElement") and isinstance(element, ufl.EnrichedElement): return basix.ufl.enriched_element([_cached_conversion(e) for e in element._elements]) + elif element.family() == "Quadrature": + return QuadratureElement(element.cell().cellname(), element.value_shape(), scheme=element.quadrature_scheme(), + degree=element.degree()) + elif element.family() == "Real": + return RealElement(element) else: return basix.ufl.convert_ufl_element(element) @@ -134,7 +133,8 @@ def basix_sobolev_space(self): def __eq__(self, other) -> bool: """Check if two elements are equal.""" - return isinstance(other, QuadratureElement) and np.allclose(self._points, other._points) + return isinstance(other, QuadratureElement) and np.allclose(self._points, other._points) and \ + np.allclose(self._weights, other._weights) def __hash__(self) -> int: """Return a hash.""" diff --git a/test/test_blocked_elements.py b/test/test_blocked_elements.py index 3f9e03143..fd4db7b48 100644 --- a/test/test_blocked_elements.py +++ b/test/test_blocked_elements.py @@ -9,6 +9,7 @@ import ffcx import ffcx.codegeneration.jit import basix.ufl +import ufl def test_finite_element(compile_args): @@ -111,3 +112,38 @@ def test_tensor_element(compile_args): ufcx_dofmap.tabulate_entity_dofs(vals_ptr, 0, v) assert vals[0] == v assert ufcx_dofmap.num_sub_dofmaps == 4 + + +def test_vector_quadrature_element(compile_args): + ufl_element = ufl.VectorElement(ufl.FiniteElement("Quadrature", "tetrahedron", degree=2, quad_scheme="default")) + jit_compiled_elements, module, code = ffcx.codegeneration.jit.compile_elements( + [ufl_element], cffi_extra_compile_args=compile_args) + ufcx_element, ufcx_dofmap = jit_compiled_elements[0] + + assert ufcx_element.topological_dimension == 3 + assert ufcx_element.geometric_dimension == 3 + assert ufcx_element.space_dimension == 12 + assert ufcx_element.value_rank == 1 + assert ufcx_element.value_shape[0] == 3 + assert ufcx_element.value_size == 3 + assert ufcx_element.reference_value_rank == 1 + assert ufcx_element.reference_value_shape[0] == 3 + assert ufcx_element.reference_value_size == 3 + assert ufcx_element.block_size == 3 + assert ufcx_element.num_sub_elements == 3 + + assert ufcx_dofmap.block_size == 3 + assert ufcx_dofmap.num_global_support_dofs == 0 + assert ufcx_dofmap.num_global_support_dofs == 0 + assert ufcx_dofmap.num_element_support_dofs == 4 + assert ufcx_dofmap.num_entity_dofs[0] == 0 + assert ufcx_dofmap.num_entity_dofs[1] == 0 + assert ufcx_dofmap.num_entity_dofs[2] == 0 + assert ufcx_dofmap.num_entity_dofs[3] == 4 + + vals = np.zeros(4, dtype=np.int32) + vals_ptr = module.ffi.cast("int *", module.ffi.from_buffer(vals)) + ufcx_dofmap.tabulate_entity_dofs(vals_ptr, 3, 0) + assert (vals == [0, 1, 2, 3]).all() + + assert ufcx_dofmap.num_sub_dofmaps == 3 From 51ba7802cc8780ff5936a78405f316d80366e642 Mon Sep 17 00:00:00 2001 From: Chris Richardson Date: Thu, 10 Aug 2023 10:13:26 +0100 Subject: [PATCH 23/44] Add flattened entity dofs to ufcx (#586) * Update ufcx.h * Reorder in header file * Formatting --- ffcx/codegeneration/dofmap.py | 95 +++++++++++++++++++++----- ffcx/codegeneration/dofmap_template.py | 12 
++++ ffcx/codegeneration/ufcx.h | 12 ++++ 3 files changed, 103 insertions(+), 16 deletions(-) diff --git a/ffcx/codegeneration/dofmap.py b/ffcx/codegeneration/dofmap.py index d8bfc9dc9..3ca6e3174 100644 --- a/ffcx/codegeneration/dofmap.py +++ b/ffcx/codegeneration/dofmap.py @@ -15,8 +15,11 @@ logger = logging.getLogger("ffcx") -def tabulate_entity_dofs(L, entity_dofs: typing.List[typing.List[typing.List[int]]], - num_dofs_per_entity: typing.List[int]): +def tabulate_entity_dofs( + L, + entity_dofs: typing.List[typing.List[typing.List[int]]], + num_dofs_per_entity: typing.List[int], +): # Output argument array dofs = L.Symbol("dofs") @@ -30,7 +33,6 @@ def tabulate_entity_dofs(L, entity_dofs: typing.List[typing.List[typing.List[int # Generate cases for each dimension: all_cases = [] for dim in range(tdim + 1): - # Ignore if no entities for this dimension if num_dofs_per_entity[dim] == 0: continue @@ -39,7 +41,7 @@ def tabulate_entity_dofs(L, entity_dofs: typing.List[typing.List[typing.List[int cases = [] for entity in range(len(entity_dofs[dim])): casebody = [] - for (j, dof) in enumerate(entity_dofs[dim][entity]): + for j, dof in enumerate(entity_dofs[dim][entity]): casebody += [L.Assign(dofs[j], dof)] cases.append((entity, L.StatementList(casebody))) @@ -64,7 +66,7 @@ def generator(ir, options): # Attributes d["factory_name"] = ir.name - d["signature"] = f"\"{ir.signature}\"" + d["signature"] = f'"{ir.signature}"' d["num_global_support_dofs"] = ir.num_global_support_dofs d["num_element_support_dofs"] = ir.num_element_support_dofs d["num_sub_dofmaps"] = ir.num_sub_dofmaps @@ -72,27 +74,84 @@ def generator(ir, options): import ffcx.codegeneration.C.cnodes as L num_entity_dofs = ir.num_entity_dofs + [0, 0, 0, 0] - num_entity_dofs = num_entity_dofs[: 4] + num_entity_dofs = num_entity_dofs[:4] d["num_entity_dofs"] = f"num_entity_dofs_{ir.name}" - d["num_entity_dofs_init"] = L.ArrayDecl("int", f"num_entity_dofs_{ir.name}", - values=num_entity_dofs, sizes=4) + d["num_entity_dofs_init"] = L.ArrayDecl( + "int", f"num_entity_dofs_{ir.name}", values=num_entity_dofs, sizes=4 + ) num_entity_closure_dofs = ir.num_entity_closure_dofs + [0, 0, 0, 0] num_entity_closure_dofs = num_entity_closure_dofs[:4] d["num_entity_closure_dofs"] = f"num_entity_closure_dofs_{ir.name}" - d["num_entity_closure_dofs_init"] = L.ArrayDecl("int", f"num_entity_closure_dofs_{ir.name}", - values=num_entity_closure_dofs, sizes=4) + d["num_entity_closure_dofs_init"] = L.ArrayDecl( + "int", + f"num_entity_closure_dofs_{ir.name}", + values=num_entity_closure_dofs, + sizes=4, + ) + + flattened_entity_dofs = [] + entity_dof_offsets = [0] + for dim in ir.entity_dofs: + for ent in dim: + for v in ent: + flattened_entity_dofs.append(v) + entity_dof_offsets.append(len(flattened_entity_dofs)) + d["entity_dofs"] = f"entity_dofs_{ir.name}" + d["entity_dofs_init"] = L.ArrayDecl( + "int", + f"entity_dofs_{ir.name}", + values=flattened_entity_dofs, + sizes=len(flattened_entity_dofs), + ) + d["entity_dof_offsets"] = f"entity_dof_offsets_{ir.name}" + d["entity_dof_offsets_init"] = L.ArrayDecl( + "int", + f"entity_dof_offsets_{ir.name}", + values=entity_dof_offsets, + sizes=len(entity_dof_offsets), + ) + + # Closure + flattened_entity_closure_dofs = [] + entity_closure_dof_offsets = [0] + for dim in ir.entity_closure_dofs: + for ent in dim: + for v in ent: + flattened_entity_closure_dofs.append(v) + entity_closure_dof_offsets.append(len(flattened_entity_closure_dofs)) + d["entity_closure_dofs"] = f"entity_closure_dofs_{ir.name}" + 
d["entity_closure_dofs_init"] = L.ArrayDecl( + "int", + f"entity_closure_dofs_{ir.name}", + values=flattened_entity_closure_dofs, + sizes=len(flattened_entity_closure_dofs), + ) + d["entity_closure_dof_offsets"] = f"entity_closure_dof_offsets_{ir.name}" + d["entity_closure_dof_offsets_init"] = L.ArrayDecl( + "int", + f"entity_closure_dof_offsets_{ir.name}", + values=entity_closure_dof_offsets, + sizes=len(entity_closure_dof_offsets), + ) d["block_size"] = ir.block_size # Functions - d["tabulate_entity_dofs"] = tabulate_entity_dofs(L, ir.entity_dofs, ir.num_entity_dofs) - d["tabulate_entity_closure_dofs"] = tabulate_entity_dofs(L, ir.entity_closure_dofs, ir.num_entity_closure_dofs) + d["tabulate_entity_dofs"] = tabulate_entity_dofs( + L, ir.entity_dofs, ir.num_entity_dofs + ) + d["tabulate_entity_closure_dofs"] = tabulate_entity_dofs( + L, ir.entity_closure_dofs, ir.num_entity_closure_dofs + ) if len(ir.sub_dofmaps) > 0: d["sub_dofmaps_initialization"] = L.ArrayDecl( - "ufcx_dofmap*", f"sub_dofmaps_{ir.name}", - values=[L.AddressOf(L.Symbol(dofmap)) for dofmap in ir.sub_dofmaps], sizes=len(ir.sub_dofmaps)) + "ufcx_dofmap*", + f"sub_dofmaps_{ir.name}", + values=[L.AddressOf(L.Symbol(dofmap)) for dofmap in ir.sub_dofmaps], + sizes=len(ir.sub_dofmaps), + ) d["sub_dofmaps"] = f"sub_dofmaps_{ir.name}" else: d["sub_dofmaps_initialization"] = "" @@ -100,11 +159,15 @@ def generator(ir, options): # Check that no keys are redundant or have been missed from string import Formatter - fields = [fname for _, fname, _, _ in Formatter().parse(ufcx_dofmap.factory) if fname] + + fields = [ + fname for _, fname, _, _ in Formatter().parse(ufcx_dofmap.factory) if fname + ] # Remove square brackets from any field names fields = [f.split("[")[0] for f in fields] assert set(fields) == set( - d.keys()), "Mismatch between keys in template and in formatting dict." + d.keys() + ), "Mismatch between keys in template and in formatting dict." 
# Format implementation code implementation = ufcx_dofmap.factory.format_map(d) diff --git a/ffcx/codegeneration/dofmap_template.py b/ffcx/codegeneration/dofmap_template.py index 0bfae7f97..0088d9a5f 100644 --- a/ffcx/codegeneration/dofmap_template.py +++ b/ffcx/codegeneration/dofmap_template.py @@ -26,12 +26,24 @@ {num_entity_closure_dofs_init} +{entity_dofs_init} + +{entity_dof_offsets_init} + +{entity_closure_dofs_init} + +{entity_closure_dof_offsets_init} + ufcx_dofmap {factory_name} = {{ .signature = {signature}, .num_global_support_dofs = {num_global_support_dofs}, .num_element_support_dofs = {num_element_support_dofs}, .block_size = {block_size}, + .entity_dofs = {entity_dofs}, + .entity_dof_offsets = {entity_dof_offsets}, + .entity_closure_dofs = {entity_closure_dofs}, + .entity_closure_dof_offsets = {entity_closure_dof_offsets}, .num_entity_dofs = {num_entity_dofs}, .tabulate_entity_dofs = tabulate_entity_dofs_{factory_name}, .num_entity_closure_dofs = {num_entity_closure_dofs}, diff --git a/ffcx/codegeneration/ufcx.h b/ffcx/codegeneration/ufcx.h index 805998cf0..8e84112d7 100644 --- a/ffcx/codegeneration/ufcx.h +++ b/ffcx/codegeneration/ufcx.h @@ -218,6 +218,18 @@ extern "C" /// Return the block size for a VectorElement or TensorElement int block_size; + /// Flattened list of dofs associated with each entity + int *entity_dofs; + + /// Offset for dofs of each entity in entity_dofs + int *entity_dof_offsets; + + /// Flattened list of closure dofs associated with each entity + int *entity_closure_dofs; + + /// Offset for closure dofs of each entity in entity_closure_dofs + int *entity_closure_dof_offsets; + /// Number of dofs associated with each cell entity of dimension d int *num_entity_dofs; From 5cc94579f9344650d300f331e00d06b0fb04ab72 Mon Sep 17 00:00:00 2001 From: Chris Richardson Date: Fri, 11 Aug 2023 07:24:42 +0100 Subject: [PATCH 24/44] Work on removing switch/case from integrals/ids (#587) * Work on removing switch/case from integrals/ids * Fix typos * Fix for zero integrals case * Typos * Improve documentation --- ffcx/codegeneration/form.py | 131 ++++++++++++++++++++++----- ffcx/codegeneration/form_template.py | 9 +- ffcx/codegeneration/ufcx.h | 9 ++ 3 files changed, 123 insertions(+), 26 deletions(-) diff --git a/ffcx/codegeneration/form.py b/ffcx/codegeneration/form.py index a2bac06e5..6de4c46b9 100644 --- a/ffcx/codegeneration/form.py +++ b/ffcx/codegeneration/form.py @@ -25,7 +25,7 @@ def generator(ir, options): d = {} d["factory_name"] = ir.name d["name_from_uflfile"] = ir.name_from_uflfile - d["signature"] = f"\"{ir.signature}\"" + d["signature"] = f'"{ir.signature}"' d["rank"] = ir.rank d["num_coefficients"] = ir.num_coefficients d["num_constants"] = ir.num_constants @@ -39,8 +39,11 @@ def generator(ir, options): if len(ir.original_coefficient_position) > 0: d["original_coefficient_position_init"] = L.ArrayDecl( - "int", f"original_coefficient_position_{ir.name}", - values=ir.original_coefficient_position, sizes=len(ir.original_coefficient_position)) + "int", + f"original_coefficient_position_{ir.name}", + values=ir.original_coefficient_position, + sizes=len(ir.original_coefficient_position), + ) d["original_coefficient_position"] = f"original_coefficient_position_{ir.name}" else: d["original_coefficient_position_init"] = "" @@ -49,7 +52,7 @@ def generator(ir, options): cnames = ir.coefficient_names assert ir.num_coefficients == len(cnames) names = L.Symbol("names") - if (len(cnames) == 0): + if len(cnames) == 0: code = [L.Return(L.Null())] else: code = 
[L.ArrayDecl("static const char*", names, len(cnames), cnames)] @@ -67,37 +70,102 @@ def generator(ir, options): if len(ir.finite_elements) > 0: d["finite_elements"] = f"finite_elements_{ir.name}" - d["finite_elements_init"] = L.ArrayDecl("ufcx_finite_element*", f"finite_elements_{ir.name}", values=[ - L.AddressOf(L.Symbol(el)) for el in ir.finite_elements], - sizes=len(ir.finite_elements)) + d["finite_elements_init"] = L.ArrayDecl( + "ufcx_finite_element*", + f"finite_elements_{ir.name}", + values=[L.AddressOf(L.Symbol(el)) for el in ir.finite_elements], + sizes=len(ir.finite_elements), + ) else: d["finite_elements"] = L.Null() d["finite_elements_init"] = "" if len(ir.dofmaps) > 0: d["dofmaps"] = f"dofmaps_{ir.name}" - d["dofmaps_init"] = L.ArrayDecl("ufcx_dofmap*", f"dofmaps_{ir.name}", values=[ - L.AddressOf(L.Symbol(dofmap)) for dofmap in ir.dofmaps], sizes=len(ir.dofmaps)) + d["dofmaps_init"] = L.ArrayDecl( + "ufcx_dofmap*", + f"dofmaps_{ir.name}", + values=[L.AddressOf(L.Symbol(dofmap)) for dofmap in ir.dofmaps], + sizes=len(ir.dofmaps), + ) else: d["dofmaps"] = L.Null() d["dofmaps_init"] = "" + integrals = [] + integral_ids = [] + integral_offsets = [0] + for itg_type in ("cell", "interior_facet", "exterior_facet"): + integrals += [L.AddressOf(L.Symbol(itg)) for itg in ir.integral_names[itg_type]] + integral_ids += ir.subdomain_ids[itg_type] + integral_offsets.append(len(integrals)) + + if len(integrals) > 0: + d["form_integrals_init"] = L.ArrayDecl( + "static ufcx_integral*", + f"form_integrals_{ir.name}", + values=integrals, + sizes=len(integrals), + ) + d["form_integrals"] = f"form_integrals_{ir.name}" + d["form_integral_ids_init"] = L.ArrayDecl( + "int", + f"form_integral_ids_{ir.name}", + values=integral_ids, + sizes=len(integral_ids), + ) + d["form_integral_ids"] = f"form_integral_ids_{ir.name}" + else: + d["form_integrals_init"] = "" + d["form_integrals"] = "NULL" + d["form_integral_ids_init"] = "" + d["form_integral_ids"] = "NULL" + + d["form_integral_offsets_init"] = L.ArrayDecl( + "int", + f"form_integral_offsets_{ir.name}", + values=integral_offsets, + sizes=len(integral_offsets), + ) + code = [] cases = [] code_ids = [] cases_ids = [] for itg_type in ("cell", "interior_facet", "exterior_facet"): if len(ir.integral_names[itg_type]) > 0: - code += [L.ArrayDecl( - "static ufcx_integral*", f"integrals_{itg_type}_{ir.name}", - values=[L.AddressOf(L.Symbol(itg)) for itg in ir.integral_names[itg_type]], - sizes=len(ir.integral_names[itg_type]))] - cases.append((L.Symbol(itg_type), L.Return(L.Symbol(f"integrals_{itg_type}_{ir.name}")))) - - code_ids += [L.ArrayDecl( - "static int", f"integral_ids_{itg_type}_{ir.name}", - values=ir.subdomain_ids[itg_type], sizes=len(ir.subdomain_ids[itg_type]))] - cases_ids.append((L.Symbol(itg_type), L.Return(L.Symbol(f"integral_ids_{itg_type}_{ir.name}")))) + code += [ + L.ArrayDecl( + "static ufcx_integral*", + f"integrals_{itg_type}_{ir.name}", + values=[ + L.AddressOf(L.Symbol(itg)) + for itg in ir.integral_names[itg_type] + ], + sizes=len(ir.integral_names[itg_type]), + ) + ] + cases.append( + ( + L.Symbol(itg_type), + L.Return(L.Symbol(f"integrals_{itg_type}_{ir.name}")), + ) + ) + + code_ids += [ + L.ArrayDecl( + "static int", + f"integral_ids_{itg_type}_{ir.name}", + values=ir.subdomain_ids[itg_type], + sizes=len(ir.subdomain_ids[itg_type]), + ) + ] + cases_ids.append( + ( + L.Symbol(itg_type), + L.Return(L.Symbol(f"integral_ids_{itg_type}_{ir.name}")), + ) + ) code += [L.Switch("integral_type", cases, default=L.Return(L.Null()))] 
code_ids += [L.Switch("integral_type", cases_ids, default=L.Return(L.Null()))] @@ -110,12 +178,19 @@ def generator(ir, options): # FIXME: Should be handled differently, revise how # ufcx_function_space is generated - for (name, (element, dofmap, cmap_family, cmap_degree, cmap_celltype, cmap_variant)) in ir.function_spaces.items(): + for name, ( + element, + dofmap, + cmap_family, + cmap_degree, + cmap_celltype, + cmap_variant, + ) in ir.function_spaces.items(): code += [f"static ufcx_function_space functionspace_{name} ="] code += ["{"] code += [f".finite_element = &{element},"] code += [f".dofmap = &{dofmap},"] - code += [f".geometry_family = \"{cmap_family}\","] + code += [f'.geometry_family = "{cmap_family}",'] code += [f".geometry_degree = {cmap_degree},"] code += [f".geometry_basix_cell = {int(cmap_celltype)},"] code += [f".geometry_basix_variant = {int(cmap_variant)}"] @@ -133,14 +208,20 @@ def generator(ir, options): # Check that no keys are redundant or have been missed from string import Formatter - fields = [fname for _, fname, _, _ in Formatter().parse(form_template.factory) if fname] - assert set(fields) == set(d.keys()), "Mismatch between keys in template and in formatting dict" + + fields = [ + fname for _, fname, _, _ in Formatter().parse(form_template.factory) if fname + ] + assert set(fields) == set( + d.keys() + ), "Mismatch between keys in template and in formatting dict" # Format implementation code implementation = form_template.factory.format_map(d) # Format declaration - declaration = form_template.declaration.format(factory_name=d["factory_name"], - name_from_uflfile=d["name_from_uflfile"]) + declaration = form_template.declaration.format( + factory_name=d["factory_name"], name_from_uflfile=d["name_from_uflfile"] + ) return declaration, implementation diff --git a/ffcx/codegeneration/form_template.py b/ffcx/codegeneration/form_template.py index 749d3a812..5c0b1d22c 100644 --- a/ffcx/codegeneration/form_template.py +++ b/ffcx/codegeneration/form_template.py @@ -24,6 +24,9 @@ {original_coefficient_position_init} {dofmaps_init} {finite_elements_init} +{form_integral_offsets_init} +{form_integrals_init} +{form_integral_ids_init} // Return a list of the coefficient names. 
const char** coefficient_name_{factory_name}(void) @@ -70,7 +73,11 @@ .integral_ids = integral_ids_{factory_name}, .num_integrals = num_integrals_{factory_name}, - .integrals = integrals_{factory_name} + .integrals = integrals_{factory_name}, + + .form_integrals = {form_integrals}, + .form_integral_ids = {form_integral_ids}, + .form_integral_offsets = form_integral_offsets_{factory_name} }}; // Alias name diff --git a/ffcx/codegeneration/ufcx.h b/ffcx/codegeneration/ufcx.h index 8e84112d7..170961b33 100644 --- a/ffcx/codegeneration/ufcx.h +++ b/ffcx/codegeneration/ufcx.h @@ -462,6 +462,15 @@ extern "C" /// Get an integral on sub domain subdomain_id ufcx_integral** (*integrals)(ufcx_integral_type); + /// List of cell, interior facet and exterior facet integrals + ufcx_integral** form_integrals; + + /// IDs for each integral in form_integrals list + int* form_integral_ids; + + /// Offsets for cell, interior facet and exterior facet integrals in form_integrals list + int* form_integral_offsets; + } ufcx_form; // FIXME: Formalise a UFCX 'function space' From 57e11c84a7e7dd5cea46cbb47341725985b3c232 Mon Sep 17 00:00:00 2001 From: Matthew Scroggs Date: Fri, 11 Aug 2023 08:03:28 +0100 Subject: [PATCH 25/44] Update Basix quadrature interface (#584) * update Basix quadrature interface * update another make_quadrature * dolfinx branch * polyset type in custom element * pass elements into create_quadrature * polyset_type * flake8 * convert elements * remove quadrature. * basix branch * dolfinx branch --- demo/ExpressionInterpolation.py | 2 +- .../basix_custom_element_template.py | 3 ++- ffcx/codegeneration/finite_element.py | 1 + ffcx/codegeneration/ufcx.h | 3 +++ ffcx/element_interface.py | 20 ++++++++++++++----- ffcx/ir/elementtables.py | 2 +- ffcx/ir/representation.py | 4 +++- ffcx/ir/representationutils.py | 8 ++++---- 8 files changed, 30 insertions(+), 13 deletions(-) diff --git a/demo/ExpressionInterpolation.py b/demo/ExpressionInterpolation.py index ebccde1de..f16a4c9b8 100644 --- a/demo/ExpressionInterpolation.py +++ b/demo/ExpressionInterpolation.py @@ -52,7 +52,7 @@ # Find quadrature points for quadrature element b_rule = basix.quadrature.string_to_type(q_rule) -quadrature_points, _ = basix.make_quadrature(b_rule, b_cell, q_degree) +quadrature_points, _ = basix.quadrature.make_quadrature(b_cell, q_degree, rule=b_rule) # Get interpolation points for output space family = basix.finite_element.string_to_family("Lagrange", cell) diff --git a/ffcx/codegeneration/basix_custom_element_template.py b/ffcx/codegeneration/basix_custom_element_template.py index f5fb55ca9..a2370329e 100644 --- a/ffcx/codegeneration/basix_custom_element_template.py +++ b/ffcx/codegeneration/basix_custom_element_template.py @@ -30,7 +30,8 @@ .discontinuous = {discontinuous}, .highest_complete_degree = {highest_complete_degree}, .interpolation_nderivs = {interpolation_nderivs}, - .highest_degree = {highest_degree} + .highest_degree = {highest_degree}, + .polyset_type = {polyset_type} }}; // End of code for custom element {factory_name} diff --git a/ffcx/codegeneration/finite_element.py b/ffcx/codegeneration/finite_element.py index 1a2fb7114..a5d51c835 100644 --- a/ffcx/codegeneration/finite_element.py +++ b/ffcx/codegeneration/finite_element.py @@ -119,6 +119,7 @@ def generate_custom_element(name, ir): d = {} d["factory_name"] = name d["cell_type"] = int(ir.cell_type) + d["polyset_type"] = int(ir.polyset_type) d["map_type"] = int(ir.map_type) d["sobolev_space"] = int(ir.sobolev_space) d["highest_complete_degree"] = 
ir.highest_complete_degree diff --git a/ffcx/codegeneration/ufcx.h b/ffcx/codegeneration/ufcx.h index 170961b33..63567e305 100644 --- a/ffcx/codegeneration/ufcx.h +++ b/ffcx/codegeneration/ufcx.h @@ -200,6 +200,9 @@ extern "C" /// The highest degree of a polynomial in the element int highest_degree; + + /// The polyset type of the element + int polyset_type; } ufcx_basix_custom_finite_element; typedef struct ufcx_dofmap diff --git a/ffcx/element_interface.py b/ffcx/element_interface.py index 87722b798..140a296b5 100644 --- a/ffcx/element_interface.py +++ b/ffcx/element_interface.py @@ -71,14 +71,19 @@ def basix_index(indices: typing.Tuple[int]) -> int: return basix.index(*indices) -def create_quadrature(cellname, degree, rule) -> typing.Tuple[npt.NDArray[np.float64], - npt.NDArray[np.float64]]: +def create_quadrature( + cellname: str, degree: int, rule: str, elements: typing.List[basix.ufl._ElementBase] +) -> typing.Tuple[npt.NDArray[np.float64], npt.NDArray[np.float64]]: """Create a quadrature rule.""" if cellname == "vertex": return (np.ones((1, 0), dtype=np.float64), np.ones(1, dtype=np.float64)) else: - return basix.make_quadrature(basix.quadrature.string_to_type(rule), - basix.cell.string_to_type(cellname), degree) + celltype = basix.cell.string_to_type(cellname) + polyset_type = basix.PolysetType.standard + for e in elements: + polyset_type = basix.polyset_superset(celltype, polyset_type, e.polyset_type) + return basix.make_quadrature( + celltype, degree, rule=basix.quadrature.string_to_type(rule), polyset_type=polyset_type) def reference_cell_vertices(cellname: str) -> npt.NDArray[np.float64]: @@ -111,7 +116,7 @@ def __init__(self, cellname: str, value_shape: typing.Tuple[int, ...], scheme: t assert points is None assert weights is None repr = f"QuadratureElement({cellname}, {scheme}, {degree})" - self._points, self._weights = create_quadrature(cellname, degree, scheme) + self._points, self._weights = create_quadrature(cellname, degree, scheme, []) else: assert degree is None assert points is not None @@ -262,6 +267,11 @@ def map_type(self) -> basix.MapType: """The Basix map type.""" return basix.MapType.identity + @property + def polyset_type(self) -> basix.PolysetType: + """The polyset type of the element.""" + raise NotImplementedError() + class RealElement(basix.ufl._ElementBase): """A real element.""" diff --git a/ffcx/ir/elementtables.py b/ffcx/ir/elementtables.py index f658cf8d3..0e9071154 100644 --- a/ffcx/ir/elementtables.py +++ b/ffcx/ir/elementtables.py @@ -109,7 +109,7 @@ def get_ffcx_table_values(points, cell, integral_type, element, avg, entitytype, else: # Make quadrature rule and get points and weights points, weights = create_quadrature_points_and_weights( - integral_type, cell, element.highest_degree(), "default") + integral_type, cell, element.highest_degree(), "default", [element]) # Tabulate table of basis functions and derivatives in points for each entity tdim = cell.topological_dimension() diff --git a/ffcx/ir/representation.py b/ffcx/ir/representation.py index 3bdf746ff..5513bdc3e 100644 --- a/ffcx/ir/representation.py +++ b/ffcx/ir/representation.py @@ -70,6 +70,7 @@ class CustomElementIR(typing.NamedTuple): discontinuous: bool highest_complete_degree: int highest_degree: int + polyset_type: basix.PolysetType class ElementIR(typing.NamedTuple): @@ -274,6 +275,7 @@ def _compute_custom_element_ir(basix_element: basix.finite_element.FiniteElement ir["interpolation_nderivs"] = basix_element.interpolation_nderivs ir["highest_complete_degree"] = 
basix_element.highest_complete_degree ir["highest_degree"] = basix_element.highest_degree + ir["polyset_type"] = basix_element.polyset_type return CustomElementIR(**ir) @@ -435,7 +437,7 @@ def _compute_integral_ir(form_data, form_index, element_numbers, integral_names, else: degree = md["quadrature_degree"] points, weights = create_quadrature_points_and_weights( - integral_type, cell, degree, scheme) + integral_type, cell, degree, scheme, [convert_element(e) for e in form_data.argument_elements]) points = np.asarray(points) weights = np.asarray(weights) diff --git a/ffcx/ir/representationutils.py b/ffcx/ir/representationutils.py index 49a99d6c4..4a2d1e3b7 100644 --- a/ffcx/ir/representationutils.py +++ b/ffcx/ir/representationutils.py @@ -44,18 +44,18 @@ def id(self): return self.hash_obj.hexdigest()[-3:] -def create_quadrature_points_and_weights(integral_type, cell, degree, rule): +def create_quadrature_points_and_weights(integral_type, cell, degree, rule, elements): """Create quadrature rule and return points and weights.""" if integral_type == "cell": - return create_quadrature(cell.cellname(), degree, rule) + return create_quadrature(cell.cellname(), degree, rule, elements) elif integral_type in ufl.measure.facet_integral_types: facet_types = cell.facet_types() # Raise exception for cells with more than one facet type e.g. prisms if len(facet_types) > 1: raise Exception(f"Cell type {cell} not supported for integral type {integral_type}.") - return create_quadrature(facet_types[0].cellname(), degree, rule) + return create_quadrature(facet_types[0].cellname(), degree, rule, elements) elif integral_type in ufl.measure.point_integral_types: - return create_quadrature("vertex", degree, rule) + return create_quadrature("vertex", degree, rule, elements) elif integral_type == "expression": return (None, None) From a6c75720a4b53b61a25646e4e71b2a61b4b7e618 Mon Sep 17 00:00:00 2001 From: Chris Richardson Date: Fri, 11 Aug 2023 09:38:39 +0100 Subject: [PATCH 26/44] Minor fix (#588) * Fix ordering --- ffcx/codegeneration/form.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ffcx/codegeneration/form.py b/ffcx/codegeneration/form.py index 6de4c46b9..14a806de1 100644 --- a/ffcx/codegeneration/form.py +++ b/ffcx/codegeneration/form.py @@ -95,7 +95,7 @@ def generator(ir, options): integrals = [] integral_ids = [] integral_offsets = [0] - for itg_type in ("cell", "interior_facet", "exterior_facet"): + for itg_type in ("cell", "exterior_facet", "interior_facet"): integrals += [L.AddressOf(L.Symbol(itg)) for itg in ir.integral_names[itg_type]] integral_ids += ir.subdomain_ids[itg_type] integral_offsets.append(len(integrals)) From 5b3dbeccd08f4ed85095024ead1c0712a7788dcd Mon Sep 17 00:00:00 2001 From: Chris Richardson Date: Tue, 15 Aug 2023 15:35:18 +0100 Subject: [PATCH 27/44] Remove switch/case from generated code (#591) * Work on removing switch/case from integrals/ids * Fix typos * Fix for zero integrals case * Typos * Improve documentation * Remove from form.py * Fix order * Remove for integrals * Remove all remaining cases * Remove from Cnodes * Fix tests * Remove break/continue * fix * Reset files in tests --- ffcx/codegeneration/C/cnodes.py | 304 ++++++++++++------------- ffcx/codegeneration/dofmap.py | 67 ------ ffcx/codegeneration/dofmap_template.py | 18 -- ffcx/codegeneration/form.py | 53 +---- ffcx/codegeneration/form_template.py | 20 -- ffcx/codegeneration/ufcx.h | 9 - test/test_add_mode.py | 21 +- test/test_blocked_elements.py | 50 ++-- test/test_jit_forms.py | 73 +++--- 9 
files changed, 214 insertions(+), 401 deletions(-) diff --git a/ffcx/codegeneration/C/cnodes.py b/ffcx/codegeneration/C/cnodes.py index 860945923..3d6eb7666 100644 --- a/ffcx/codegeneration/C/cnodes.py +++ b/ffcx/codegeneration/C/cnodes.py @@ -10,8 +10,7 @@ import numpy as np from ffcx.codegeneration.C.format_lines import Indented, format_indented_lines -from ffcx.codegeneration.C.format_value import (format_float, format_int, - format_value) +from ffcx.codegeneration.C.format_value import format_float, format_int, format_value from ffcx.codegeneration.C.precedence import PRECEDENCE logger = logging.getLogger("ffcx") @@ -32,18 +31,21 @@ def is_zero_cexpr(cexpr): - return ((isinstance(cexpr, LiteralFloat) and cexpr.value == 0.0) - or (isinstance(cexpr, LiteralInt) and cexpr.value == 0)) + return (isinstance(cexpr, LiteralFloat) and cexpr.value == 0.0) or ( + isinstance(cexpr, LiteralInt) and cexpr.value == 0 + ) def is_one_cexpr(cexpr): - return ((isinstance(cexpr, LiteralFloat) and cexpr.value == 1.0) - or (isinstance(cexpr, LiteralInt) and cexpr.value == 1)) + return (isinstance(cexpr, LiteralFloat) and cexpr.value == 1.0) or ( + isinstance(cexpr, LiteralInt) and cexpr.value == 1 + ) def is_negative_one_cexpr(cexpr): - return ((isinstance(cexpr, LiteralFloat) and cexpr.value == -1.0) - or (isinstance(cexpr, LiteralInt) and cexpr.value == -1)) + return (isinstance(cexpr, LiteralFloat) and cexpr.value == -1.0) or ( + isinstance(cexpr, LiteralInt) and cexpr.value == -1 + ) def float_product(factors): @@ -58,6 +60,8 @@ def float_product(factors): if is_zero_cexpr(f): return f return Product(factors) + + # CNode core @@ -77,6 +81,7 @@ def __eq__(self, other): def __ne__(self, other): return not self.__eq__(other) + # CExpr base classes @@ -261,7 +266,7 @@ def __eq__(self, other): class LiteralFloat(CExprLiteral): """A floating point literal value.""" - __slots__ = ("value", ) + __slots__ = ("value",) precedence = PRECEDENCE.LITERAL def __init__(self, value): @@ -289,7 +294,7 @@ def flops(self): class LiteralInt(CExprLiteral): """An integer literal value.""" - __slots__ = ("value", ) + __slots__ = ("value",) precedence = PRECEDENCE.LITERAL def __init__(self, value): @@ -323,11 +328,11 @@ def __hash__(self): class LiteralBool(CExprLiteral): """A boolean literal value.""" - __slots__ = ("value", ) + __slots__ = ("value",) precedence = PRECEDENCE.LITERAL def __init__(self, value): - assert isinstance(value, (bool, )) + assert isinstance(value, (bool,)) self.value = value def ce_format(self, precision=None): @@ -345,16 +350,16 @@ def __bool__(self): class LiteralString(CExprLiteral): """A boolean literal value.""" - __slots__ = ("value", ) + __slots__ = ("value",) precedence = PRECEDENCE.LITERAL def __init__(self, value): - assert isinstance(value, (str, )) + assert isinstance(value, (str,)) assert '"' not in value self.value = value def ce_format(self, precision=None): - return '"%s"' % (self.value, ) + return '"%s"' % (self.value,) def __eq__(self, other): return isinstance(other, LiteralString) and self.value == other.value @@ -363,7 +368,7 @@ def __eq__(self, other): class Symbol(CExprTerminal): """A named symbol.""" - __slots__ = ("name", ) + __slots__ = ("name",) precedence = PRECEDENCE.SYMBOL def __init__(self, name): @@ -385,10 +390,11 @@ def __hash__(self): # CExprOperator base classes + class UnaryOp(CExprOperator): """Base class for unary operators.""" - __slots__ = ("arg", ) + __slots__ = ("arg",) def __init__(self, arg): self.arg = as_cexpr(arg) @@ -408,7 +414,7 @@ class 
PrefixUnaryOp(UnaryOp): def ce_format(self, precision=None): arg = self.arg.ce_format(precision) if self.arg.precedence >= self.precedence: - arg = '(' + arg + ')' + arg = "(" + arg + ")" return self.op + arg def __eq__(self, other): @@ -423,7 +429,7 @@ class PostfixUnaryOp(UnaryOp): def ce_format(self, precision=None): arg = self.arg.ce_format(precision) if self.arg.precedence >= self.precedence: - arg = '(' + arg + ')' + arg = "(" + arg + ")" return arg + self.op def __eq__(self, other): @@ -444,15 +450,19 @@ def ce_format(self, precision=None): # Apply parentheses if self.lhs.precedence >= self.precedence: - lhs = '(' + lhs + ')' + lhs = "(" + lhs + ")" if self.rhs.precedence >= self.precedence: - rhs = '(' + rhs + ')' + rhs = "(" + rhs + ")" # Return combined string return lhs + (" " + self.op + " ") + rhs def __eq__(self, other): - return (isinstance(other, type(self)) and self.lhs == other.lhs and self.rhs == other.rhs) + return ( + isinstance(other, type(self)) + and self.lhs == other.lhs + and self.rhs == other.rhs + ) def __hash__(self): return hash(self.ce_format()) @@ -464,7 +474,7 @@ def flops(self): class NaryOp(CExprOperator): """Base class for special n-ary operators.""" - __slots__ = ("args", ) + __slots__ = ("args",) def __init__(self, args): self.args = [as_cexpr(arg) for arg in args] @@ -476,7 +486,7 @@ def ce_format(self, precision=None): # Apply parentheses for i in range(len(args)): if self.args[i].precedence >= self.precedence: - args[i] = '(' + args[i] + ')' + args[i] = "(" + args[i] + ")" # Return combined string op = " " + self.op + " " @@ -486,8 +496,11 @@ def ce_format(self, precision=None): return s def __eq__(self, other): - return (isinstance(other, type(self)) and len(self.args) == len(other.args) - and all(a == b for a, b in zip(self.args, other.args))) + return ( + isinstance(other, type(self)) + and len(self.args) == len(other.args) + and all(a == b for a, b in zip(self.args, other.args)) + ) def flops(self): flops = len(self.args) - 1 @@ -716,6 +729,7 @@ class AssignDiv(AssignOp): __slots__ = () op = "/=" + # CExpr operators @@ -763,7 +777,7 @@ def __init__(self, array, dummy=None, dims=None, strides=None, offset=None): def __getitem__(self, indices): if not isinstance(indices, (list, tuple)): - indices = (indices, ) + indices = (indices,) n = len(indices) if n == 0: # Handle scalar case, allowing dims=() and indices=() for A[0] @@ -773,7 +787,7 @@ def __getitem__(self, indices): else: i, s = (indices[0], self.strides[0]) literal_one = LiteralInt(1) - flat = (i if s == literal_one else s * i) + flat = i if s == literal_one else s * i if self.offset is not None: flat = self.offset + flat for i, s in zip(indices[1:n], self.strides[1:n]): @@ -796,11 +810,11 @@ def __init__(self, array, indices): elif isinstance(array, ArrayDecl): self.array = array.symbol else: - raise ValueError("Unexpected array type %s." % (type(array).__name__, )) + raise ValueError("Unexpected array type %s." 
% (type(array).__name__,)) # Allow expressions or literals as indices if not isinstance(indices, (list, tuple)): - indices = (indices, ) + indices = (indices,) self.indices = tuple(as_cexpr_or_string_symbol(i) for i in indices) # Early error checking for negative array dimensions @@ -812,8 +826,10 @@ def __init__(self, array, indices): if len(self.indices) != len(array.sizes): raise ValueError("Invalid number of indices.") ints = (int, LiteralInt) - if any((isinstance(i, ints) and isinstance(d, ints) and int(i) >= int(d)) - for i, d in zip(self.indices, array.sizes)): + if any( + (isinstance(i, ints) and isinstance(d, ints) and int(i) >= int(d)) + for i, d in zip(self.indices, array.sizes) + ): raise ValueError("Index value >= array dimension.") def __getitem__(self, indices): @@ -821,7 +837,7 @@ def __getitem__(self, indices): if isinstance(indices, list): indices = tuple(indices) elif not isinstance(indices, tuple): - indices = (indices, ) + indices = (indices,) return ArrayAccess(self.array, self.indices + indices) def ce_format(self, precision=None): @@ -831,8 +847,11 @@ def ce_format(self, precision=None): return s def __eq__(self, other): - return (isinstance(other, type(self)) and self.array == other.array - and self.indices == other.indices) + return ( + isinstance(other, type(self)) + and self.array == other.array + and self.indices == other.indices + ) def __hash__(self): return hash(self.ce_format()) @@ -858,18 +877,22 @@ def ce_format(self, precision=None): # Apply parentheses if self.condition.precedence >= self.precedence: - c = '(' + c + ')' + c = "(" + c + ")" if self.true.precedence >= self.precedence: - t = '(' + t + ')' + t = "(" + t + ")" if self.false.precedence >= self.precedence: - f = '(' + f + ')' + f = "(" + f + ")" # Return combined string return c + " ? 
" + t + " : " + f def __eq__(self, other): - return (isinstance(other, type(self)) and self.condition == other.condition - and self.true == other.true and self.false == other.false) + return ( + isinstance(other, type(self)) + and self.condition == other.condition + and self.true == other.true + and self.false == other.false + ) def flops(self): raise NotImplementedError("Flop count is not implemented for conditionals") @@ -887,7 +910,7 @@ def __init__(self, function, arguments=None): if arguments is None: arguments = () elif not isinstance(arguments, (tuple, list)): - arguments = (arguments, ) + arguments = (arguments,) self.arguments = [as_cexpr(arg) for arg in arguments] def ce_format(self, precision=None): @@ -895,8 +918,11 @@ def ce_format(self, precision=None): return self.function.ce_format(precision) + "(" + args + ")" def __eq__(self, other): - return (isinstance(other, type(self)) and self.function == other.function - and self.arguments == other.arguments) + return ( + isinstance(other, type(self)) + and self.function == other.function + and self.arguments == other.arguments + ) def flops(self): return 1 @@ -934,7 +960,7 @@ def as_cexpr(node): elif isinstance(node, numbers.Real): return LiteralFloat(node) elif isinstance(node, str): - raise RuntimeError("Got string for CExpr, this is ambiguous: %s" % (node, )) + raise RuntimeError("Got string for CExpr, this is ambiguous: %s" % (node,)) else: raise RuntimeError("Unexpected CExpr type %s:\n%s" % (type(node), str(node))) @@ -1005,7 +1031,9 @@ class CStatement(CNode): def cs_format(self, precision=None): """Return S: string | list(S) | Indented(S).""" - raise NotImplementedError("Missing implementation of cs_format() in CStatement.") + raise NotImplementedError( + "Missing implementation of cs_format() in CStatement." + ) def __str__(self): try: @@ -1025,7 +1053,7 @@ def flops(self): class VerbatimStatement(CStatement): """Wraps a source code string to be pasted verbatim into the source code.""" - __slots__ = ("codestring", ) + __slots__ = ("codestring",) is_scoped = False def __init__(self, codestring): @@ -1036,13 +1064,13 @@ def cs_format(self, precision=None): return self.codestring def __eq__(self, other): - return (isinstance(other, type(self)) and self.codestring == other.codestring) + return isinstance(other, type(self)) and self.codestring == other.codestring class Statement(CStatement): """Make an expression into a statement.""" - __slots__ = ("expr", ) + __slots__ = ("expr",) is_scoped = False def __init__(self, expr): @@ -1052,7 +1080,7 @@ def cs_format(self, precision=None): return self.expr.ce_format(precision) + ";" def __eq__(self, other): - return (isinstance(other, type(self)) and self.expr == other.expr) + return isinstance(other, type(self)) and self.expr == other.expr def flops(self): # print(self.expr.rhs.flops()) @@ -1062,7 +1090,7 @@ def flops(self): class StatementList(CStatement): """A simple sequence of statements. 
No new scopes are introduced.""" - __slots__ = ("statements", ) + __slots__ = ("statements",) def __init__(self, statements): self.statements = [as_cstatement(st) for st in statements] @@ -1075,7 +1103,7 @@ def cs_format(self, precision=None): return [st.cs_format(precision) for st in self.statements] def __eq__(self, other): - return (isinstance(other, type(self)) and self.statements == other.statements) + return isinstance(other, type(self)) and self.statements == other.statements def flops(self): flops = 0 @@ -1087,36 +1115,8 @@ def flops(self): # Simple statements -class Break(CStatement): - __slots__ = () - is_scoped = True - - def cs_format(self, precision=None): - return "break;" - - def __eq__(self, other): - return isinstance(other, type(self)) - - def flops(self): - return 0 - - -class Continue(CStatement): - __slots__ = () - is_scoped = True - - def cs_format(self, precision=None): - return "continue;" - - def __eq__(self, other): - return isinstance(other, type(self)) - - def flops(self): - return 0 - - class Return(CStatement): - __slots__ = ("value", ) + __slots__ = ("value",) is_scoped = True def __init__(self, value=None): @@ -1129,10 +1129,10 @@ def cs_format(self, precision=None): if self.value is None: return "return;" else: - return "return %s;" % (self.value.ce_format(precision), ) + return "return %s;" % (self.value.ce_format(precision),) def __eq__(self, other): - return (isinstance(other, type(self)) and self.value == other.value) + return isinstance(other, type(self)) and self.value == other.value def flops(self): return 0 @@ -1141,7 +1141,7 @@ def flops(self): class Comment(CStatement): """Line comment(s) used for annotating the generated code with human readable remarks.""" - __slots__ = ("comment", ) + __slots__ = ("comment",) is_scoped = True def __init__(self, comment): @@ -1153,7 +1153,7 @@ def cs_format(self, precision=None): return ["// " + line.strip() for line in lines] def __eq__(self, other): - return (isinstance(other, type(self)) and self.comment == other.comment) + return isinstance(other, type(self)) and self.comment == other.comment def flops(self): return 0 @@ -1179,7 +1179,7 @@ def commented_code_list(code, comments): class Pragma(CStatement): """Pragma comments used for compiler-specific annotations.""" - __slots__ = ("comment", ) + __slots__ = ("comment",) is_scoped = True def __init__(self, comment): @@ -1191,7 +1191,7 @@ def cs_format(self, precision=None): return "#pragma " + self.comment def __eq__(self, other): - return (isinstance(other, type(self)) and self.comment == other.comment) + return isinstance(other, type(self)) and self.comment == other.comment def flops(self): return 0 @@ -1207,7 +1207,6 @@ class VariableDecl(CStatement): is_scoped = False def __init__(self, typename, symbol, value=None): - # No type system yet, just using strings assert isinstance(typename, str) self.typename = typename @@ -1226,8 +1225,12 @@ def cs_format(self, precision=None): return code + ";" def __eq__(self, other): - return (isinstance(other, type(self)) and self.typename == other.typename - and self.symbol == other.symbol and self.value == other.value) + return ( + isinstance(other, type(self)) + and self.typename == other.typename + and self.symbol == other.symbol + and self.value == other.value + ) def flops(self): if self.value is not None: @@ -1305,13 +1308,18 @@ def formatter(x, p): r = len(sizes) assert r > 0 if r == 1: - return [build_1d_initializer_list(values, formatter, padlen=padlen, precision=precision)] + return [ + 
build_1d_initializer_list( + values, formatter, padlen=padlen, precision=precision + ) + ] else: # Render all sublists parts = [] for val in values: sublist = build_initializer_lists( - val, sizes[1:], level + 1, formatter, padlen=padlen, precision=precision) + val, sizes[1:], level + 1, formatter, padlen=padlen, precision=precision + ) parts.append(sublist) # Add comma after last line in each part except the last one for part in parts[:-1]: @@ -1357,7 +1365,7 @@ def __init__(self, typename, symbol, sizes=None, values=None, padlen=0): self.symbol = as_symbol(symbol) if isinstance(sizes, int): - sizes = (sizes, ) + sizes = (sizes,) self.sizes = tuple(sizes) # NB! No type checking, assuming nested lists of literal values. Not applying as_cexpr. @@ -1370,13 +1378,15 @@ def __init__(self, typename, symbol, sizes=None, values=None, padlen=0): def cs_format(self, precision=None): if not all(self.sizes): - raise RuntimeError(f"Detected an array {self.symbol} dimension of zero. This is not valid in C.") + raise RuntimeError( + f"Detected an array {self.symbol} dimension of zero. This is not valid in C." + ) # Pad innermost array dimension sizes = pad_innermost_dim(self.sizes, self.padlen) # Add brackets - brackets = ''.join("[%d]" % n for n in sizes) + brackets = "".join("[%d]" % n for n in sizes) # Join declaration decl = self.typename + " " + self.symbol.name + brackets @@ -1398,13 +1408,21 @@ def cs_format(self, precision=None): elif self.values.dtype.kind == "i": formatter = format_int elif self.values.dtype == np.bool_: + def format_bool(x, precision=None): return "true" if x is True else "false" + formatter = format_bool else: formatter = format_value initializer_lists = build_initializer_lists( - self.values, self.sizes, 0, formatter, padlen=self.padlen, precision=precision) + self.values, + self.sizes, + 0, + formatter, + padlen=self.padlen, + precision=precision, + ) if len(initializer_lists) == 1: return decl + " = " + initializer_lists[0] + ";" else: @@ -1413,8 +1431,9 @@ def format_bool(x, precision=None): def __eq__(self, other): attributes = ("typename", "symbol", "sizes", "padlen", "values") - return (isinstance(other, type(self)) - and all(getattr(self, name) == getattr(self, name) for name in attributes)) + return isinstance(other, type(self)) and all( + getattr(self, name) == getattr(self, name) for name in attributes + ) def flops(self): return 0 @@ -1424,7 +1443,7 @@ def flops(self): class Scope(CStatement): - __slots__ = ("body", ) + __slots__ = ("body",) is_scoped = True def __init__(self, body): @@ -1434,7 +1453,7 @@ def cs_format(self, precision=None): return ("{", Indented(self.body.cs_format(precision)), "}") def __eq__(self, other): - return (isinstance(other, type(self)) and self.body == other.body) + return isinstance(other, type(self)) and self.body == other.body def flops(self): return 0 @@ -1444,8 +1463,8 @@ def _is_simple_if_body(body): if isinstance(body, StatementList): if len(body.statements) > 1: return False - body, = body.statements - return isinstance(body, (Return, AssignOp, Break, Continue)) + (body,) = body.statements + return isinstance(body, (Return, AssignOp)) class If(CStatement): @@ -1465,8 +1484,11 @@ def cs_format(self, precision=None): return (statement, "{", body_fmt, "}") def __eq__(self, other): - return (isinstance(other, type(self)) and self.condition == other.condition - and self.body == other.body) + return ( + isinstance(other, type(self)) + and self.condition == other.condition + and self.body == other.body + ) class ElseIf(CStatement): 
@@ -1486,12 +1508,15 @@ def cs_format(self, precision=None): return (statement, "{", body_fmt, "}") def __eq__(self, other): - return (isinstance(other, type(self)) and self.condition == other.condition - and self.body == other.body) + return ( + isinstance(other, type(self)) + and self.condition == other.condition + and self.body == other.body + ) class Else(CStatement): - __slots__ = ("body", ) + __slots__ = ("body",) is_scoped = True def __init__(self, body): @@ -1506,7 +1531,7 @@ def cs_format(self, precision=None): return (statement, "{", body_fmt, "}") def __eq__(self, other): - return (isinstance(other, type(self)) and self.body == other.body) + return isinstance(other, type(self)) and self.body == other.body def is_simple_inner_loop(code): @@ -1517,56 +1542,6 @@ def is_simple_inner_loop(code): return False -class Switch(CStatement): - __slots__ = ("arg", "cases", "default", "autobreak", "autoscope") - is_scoped = True - - def __init__(self, arg, cases, default=None, autobreak=True, autoscope=True): - self.arg = as_cexpr_or_string_symbol(arg) - self.cases = [(as_cexpr(value), as_cstatement(body)) for value, body in cases] - if default is not None: - default = as_cstatement(default) - defcase = [(None, default)] - else: - defcase = [] - self.default = default - # If this is a switch where every case returns, scopes or breaks are never needed - if all(isinstance(case[1], Return) for case in self.cases + defcase): - autobreak = False - autoscope = False - if all(case[1].is_scoped for case in self.cases + defcase): - autoscope = False - assert autobreak in (True, False) - assert autoscope in (True, False) - self.autobreak = autobreak - self.autoscope = autoscope - - def cs_format(self, precision=None): - cases = [] - for case in self.cases: - caseheader = "case " + case[0].ce_format(precision) + ":" - casebody = case[1].cs_format(precision) - if self.autoscope: - casebody = ("{", Indented(casebody), "}") - if self.autobreak: - casebody = (casebody, "break;") - cases.extend([caseheader, Indented(casebody)]) - - if self.default is not None: - caseheader = "default:" - casebody = self.default.cs_format(precision) - if self.autoscope: - casebody = ("{", Indented(casebody), "}") - cases.extend([caseheader, Indented(casebody)]) - - return ("switch (" + self.arg.ce_format(precision) + ")", "{", cases, "}") - - def __eq__(self, other): - attributes = ("arg", "cases", "default", "autobreak", "autoscope") - return (isinstance(other, type(self)) - and all(getattr(self, name) == getattr(self, name) for name in attributes)) - - class ForRange(CStatement): """Slightly higher-level for loop assuming incrementing an index over a range.""" @@ -1603,8 +1578,9 @@ def cs_format(self, precision=None): def __eq__(self, other): attributes = ("index", "begin", "end", "body", "index_type") - return (isinstance(other, type(self)) - and all(getattr(self, name) == getattr(self, name) for name in attributes)) + return isinstance(other, type(self)) and all( + getattr(self, name) == getattr(self, name) for name in attributes + ) def flops(self): return (self.end.value - self.begin.value) * self.body.flops() @@ -1626,8 +1602,10 @@ def as_cstatement(node): # Special case for using assignment expressions as statements return Statement(node) else: - raise RuntimeError("Trying to create a statement of CExprOperator type %s:\n%s" % - (type(node), str(node))) + raise RuntimeError( + "Trying to create a statement of CExprOperator type %s:\n%s" + % (type(node), str(node)) + ) elif isinstance(node, list): # Convenience case 
for list of statements if len(node) == 1: @@ -1639,4 +1617,6 @@ def as_cstatement(node): # Backdoor for flexibility in code generation to allow verbatim pasted statements return VerbatimStatement(node) else: - raise RuntimeError("Unexpected CStatement type %s:\n%s" % (type(node), str(node))) + raise RuntimeError( + "Unexpected CStatement type %s:\n%s" % (type(node), str(node)) + ) diff --git a/ffcx/codegeneration/dofmap.py b/ffcx/codegeneration/dofmap.py index 3ca6e3174..de3d21dc4 100644 --- a/ffcx/codegeneration/dofmap.py +++ b/ffcx/codegeneration/dofmap.py @@ -8,54 +8,12 @@ # old implementation in FFC import logging -import typing import ffcx.codegeneration.dofmap_template as ufcx_dofmap logger = logging.getLogger("ffcx") -def tabulate_entity_dofs( - L, - entity_dofs: typing.List[typing.List[typing.List[int]]], - num_dofs_per_entity: typing.List[int], -): - # Output argument array - dofs = L.Symbol("dofs") - - # Input arguments - d = L.Symbol("d") - i = L.Symbol("i") - - # TODO: Removed check for (d <= tdim + 1) - tdim = len(num_dofs_per_entity) - 1 - - # Generate cases for each dimension: - all_cases = [] - for dim in range(tdim + 1): - # Ignore if no entities for this dimension - if num_dofs_per_entity[dim] == 0: - continue - - # Generate cases for each mesh entity - cases = [] - for entity in range(len(entity_dofs[dim])): - casebody = [] - for j, dof in enumerate(entity_dofs[dim][entity]): - casebody += [L.Assign(dofs[j], dof)] - cases.append((entity, L.StatementList(casebody))) - - # Generate inner switch - # TODO: Removed check for (i <= num_entities-1) - inner_switch = L.Switch(i, cases, autoscope=False) - all_cases.append((dim, inner_switch)) - - if all_cases: - return L.Switch(d, all_cases, autoscope=False) - else: - return L.NoOp() - - def generator(ir, options): """Generate UFC code for a dofmap.""" logger.info("Generating code for dofmap:") @@ -73,23 +31,6 @@ def generator(ir, options): import ffcx.codegeneration.C.cnodes as L - num_entity_dofs = ir.num_entity_dofs + [0, 0, 0, 0] - num_entity_dofs = num_entity_dofs[:4] - d["num_entity_dofs"] = f"num_entity_dofs_{ir.name}" - d["num_entity_dofs_init"] = L.ArrayDecl( - "int", f"num_entity_dofs_{ir.name}", values=num_entity_dofs, sizes=4 - ) - - num_entity_closure_dofs = ir.num_entity_closure_dofs + [0, 0, 0, 0] - num_entity_closure_dofs = num_entity_closure_dofs[:4] - d["num_entity_closure_dofs"] = f"num_entity_closure_dofs_{ir.name}" - d["num_entity_closure_dofs_init"] = L.ArrayDecl( - "int", - f"num_entity_closure_dofs_{ir.name}", - values=num_entity_closure_dofs, - sizes=4, - ) - flattened_entity_dofs = [] entity_dof_offsets = [0] for dim in ir.entity_dofs: @@ -137,14 +78,6 @@ def generator(ir, options): d["block_size"] = ir.block_size - # Functions - d["tabulate_entity_dofs"] = tabulate_entity_dofs( - L, ir.entity_dofs, ir.num_entity_dofs - ) - d["tabulate_entity_closure_dofs"] = tabulate_entity_dofs( - L, ir.entity_closure_dofs, ir.num_entity_closure_dofs - ) - if len(ir.sub_dofmaps) > 0: d["sub_dofmaps_initialization"] = L.ArrayDecl( "ufcx_dofmap*", diff --git a/ffcx/codegeneration/dofmap_template.py b/ffcx/codegeneration/dofmap_template.py index 0088d9a5f..abe5563f7 100644 --- a/ffcx/codegeneration/dofmap_template.py +++ b/ffcx/codegeneration/dofmap_template.py @@ -12,20 +12,6 @@ {sub_dofmaps_initialization} -void tabulate_entity_dofs_{factory_name}(int* restrict dofs, int d, int i) -{{ -{tabulate_entity_dofs} -}} - -void tabulate_entity_closure_dofs_{factory_name}(int* restrict dofs, int d, int i) -{{ 
-{tabulate_entity_closure_dofs} -}} - -{num_entity_dofs_init} - -{num_entity_closure_dofs_init} - {entity_dofs_init} {entity_dof_offsets_init} @@ -44,10 +30,6 @@ .entity_dof_offsets = {entity_dof_offsets}, .entity_closure_dofs = {entity_closure_dofs}, .entity_closure_dof_offsets = {entity_closure_dof_offsets}, - .num_entity_dofs = {num_entity_dofs}, - .tabulate_entity_dofs = tabulate_entity_dofs_{factory_name}, - .num_entity_closure_dofs = {num_entity_closure_dofs}, - .tabulate_entity_closure_dofs = tabulate_entity_closure_dofs_{factory_name}, .num_sub_dofmaps = {num_sub_dofmaps}, .sub_dofmaps = {sub_dofmaps} }}; diff --git a/ffcx/codegeneration/form.py b/ffcx/codegeneration/form.py index 14a806de1..2e95edadd 100644 --- a/ffcx/codegeneration/form.py +++ b/ffcx/codegeneration/form.py @@ -30,13 +30,6 @@ def generator(ir, options): d["num_coefficients"] = ir.num_coefficients d["num_constants"] = ir.num_constants - code = [] - cases = [] - for itg_type in ("cell", "interior_facet", "exterior_facet"): - cases += [(L.Symbol(itg_type), L.Return(len(ir.subdomain_ids[itg_type])))] - code += [L.Switch("integral_type", cases, default=L.Return(0))] - d["num_integrals"] = L.StatementList(code) - if len(ir.original_coefficient_position) > 0: d["original_coefficient_position_init"] = L.ArrayDecl( "int", @@ -95,6 +88,7 @@ def generator(ir, options): integrals = [] integral_ids = [] integral_offsets = [0] + # Note: the order of this list is defined by the enum ufcx_integral_type in ufcx.h for itg_type in ("cell", "exterior_facet", "interior_facet"): integrals += [L.AddressOf(L.Symbol(itg)) for itg in ir.integral_names[itg_type]] integral_ids += ir.subdomain_ids[itg_type] @@ -128,51 +122,6 @@ def generator(ir, options): sizes=len(integral_offsets), ) - code = [] - cases = [] - code_ids = [] - cases_ids = [] - for itg_type in ("cell", "interior_facet", "exterior_facet"): - if len(ir.integral_names[itg_type]) > 0: - code += [ - L.ArrayDecl( - "static ufcx_integral*", - f"integrals_{itg_type}_{ir.name}", - values=[ - L.AddressOf(L.Symbol(itg)) - for itg in ir.integral_names[itg_type] - ], - sizes=len(ir.integral_names[itg_type]), - ) - ] - cases.append( - ( - L.Symbol(itg_type), - L.Return(L.Symbol(f"integrals_{itg_type}_{ir.name}")), - ) - ) - - code_ids += [ - L.ArrayDecl( - "static int", - f"integral_ids_{itg_type}_{ir.name}", - values=ir.subdomain_ids[itg_type], - sizes=len(ir.subdomain_ids[itg_type]), - ) - ] - cases_ids.append( - ( - L.Symbol(itg_type), - L.Return(L.Symbol(f"integral_ids_{itg_type}_{ir.name}")), - ) - ) - - code += [L.Switch("integral_type", cases, default=L.Return(L.Null()))] - code_ids += [L.Switch("integral_type", cases_ids, default=L.Return(L.Null()))] - d["integrals"] = L.StatementList(code) - - d["integral_ids"] = L.StatementList(code_ids) - code = [] function_name = L.Symbol("function_name") diff --git a/ffcx/codegeneration/form_template.py b/ffcx/codegeneration/form_template.py index 5c0b1d22c..02ad31d6f 100644 --- a/ffcx/codegeneration/form_template.py +++ b/ffcx/codegeneration/form_template.py @@ -40,21 +40,6 @@ {constant_name_map} }} -int* integral_ids_{factory_name}(ufcx_integral_type integral_type) -{{ -{integral_ids} -}} - -int num_integrals_{factory_name}(ufcx_integral_type integral_type) -{{ -{num_integrals} -}} - -ufcx_integral** integrals_{factory_name}(ufcx_integral_type integral_type) -{{ -{integrals} -}} - ufcx_form {factory_name} = {{ @@ -70,11 +55,6 @@ .finite_elements = {finite_elements}, .dofmaps = {dofmaps}, - .integral_ids = integral_ids_{factory_name}, - 
.num_integrals = num_integrals_{factory_name}, - - .integrals = integrals_{factory_name}, - .form_integrals = {form_integrals}, .form_integral_ids = {form_integral_ids}, .form_integral_offsets = form_integral_offsets_{factory_name} diff --git a/ffcx/codegeneration/ufcx.h b/ffcx/codegeneration/ufcx.h index 63567e305..f3e3d1e00 100644 --- a/ffcx/codegeneration/ufcx.h +++ b/ffcx/codegeneration/ufcx.h @@ -456,15 +456,6 @@ extern "C" /// Coefficient number j=i-r if r+j <= i < r+n ufcx_dofmap** dofmaps; - /// All ids for integrals - int* (*integral_ids)(ufcx_integral_type); - - /// Number of integrals - int (*num_integrals)(ufcx_integral_type); - - /// Get an integral on sub domain subdomain_id - ufcx_integral** (*integrals)(ufcx_integral_type); - /// List of cell, interior facet and exterior facet integrals ufcx_integral** form_integrals; diff --git a/test/test_add_mode.py b/test/test_add_mode.py index 7ee4aba31..87d159a78 100644 --- a/test/test_add_mode.py +++ b/test/test_add_mode.py @@ -35,11 +35,13 @@ def test_additive_facet_integral(mode, compile_args): ffi = module.ffi form0 = compiled_forms[0] - assert form0.num_integrals(module.lib.exterior_facet) == 1 - ids = form0.integral_ids(module.lib.exterior_facet) - assert ids[0] == -1 + integral_offsets = form0.form_integral_offsets + ex = module.lib.exterior_facet + assert integral_offsets[ex + 1] - integral_offsets[ex] == 1 + integral_id = form0.form_integral_ids[integral_offsets[ex]] + assert integral_id == -1 - default_integral = form0.integrals(module.lib.exterior_facet)[0] + default_integral = form0.form_integrals[integral_offsets[ex]] np_type = cdtype_to_numpy(mode) A = np.zeros((3, 3), dtype=np_type) @@ -83,11 +85,14 @@ def test_additive_cell_integral(mode, compile_args): ffi = module.ffi form0 = compiled_forms[0] - assert form0.num_integrals(module.lib.cell) == 1 - ids = form0.integral_ids(module.lib.cell) - assert ids[0] == -1 + cell = module.lib.cell + offsets = form0.form_integral_offsets + num_integrals = offsets[cell + 1] - offsets[cell] + assert num_integrals == 1 + integral_id = form0.form_integral_ids[offsets[cell]] + assert integral_id == -1 - default_integral = form0.integrals(0)[0] + default_integral = form0.form_integrals[offsets[cell]] np_type = cdtype_to_numpy(mode) A = np.zeros((3, 3), dtype=np_type) diff --git a/test/test_blocked_elements.py b/test/test_blocked_elements.py index fd4db7b48..c67660a54 100644 --- a/test/test_blocked_elements.py +++ b/test/test_blocked_elements.py @@ -32,15 +32,11 @@ def test_finite_element(compile_args): assert ufcx_dofmap.num_global_support_dofs == 0 assert ufcx_dofmap.num_global_support_dofs == 0 assert ufcx_dofmap.num_element_support_dofs == 3 - assert ufcx_dofmap.num_entity_dofs[0] == 1 - assert ufcx_dofmap.num_entity_dofs[1] == 0 - assert ufcx_dofmap.num_entity_dofs[2] == 0 - assert ufcx_dofmap.num_entity_dofs[3] == 0 + off = np.array([ufcx_dofmap.entity_dof_offsets[i] for i in range(8)]) + assert np.all(np.diff(off) == [1, 1, 1, 0, 0, 0, 0]) + for v in range(3): - vals = np.zeros(1, dtype=np.int32) - vals_ptr = module.ffi.cast("int *", module.ffi.from_buffer(vals)) - ufcx_dofmap.tabulate_entity_dofs(vals_ptr, 0, v) - assert vals[0] == v + assert ufcx_dofmap.entity_dofs[v] == v assert ufcx_dofmap.num_sub_dofmaps == 0 @@ -66,15 +62,11 @@ def test_vector_element(compile_args): assert ufcx_dofmap.num_global_support_dofs == 0 assert ufcx_dofmap.num_global_support_dofs == 0 assert ufcx_dofmap.num_element_support_dofs == 3 - assert ufcx_dofmap.num_entity_dofs[0] == 1 - assert 
ufcx_dofmap.num_entity_dofs[1] == 0 - assert ufcx_dofmap.num_entity_dofs[2] == 0 - assert ufcx_dofmap.num_entity_dofs[3] == 0 + off = np.array([ufcx_dofmap.entity_dof_offsets[i] for i in range(8)]) + assert np.all(np.diff(off) == [1, 1, 1, 0, 0, 0, 0]) + for v in range(3): - vals = np.zeros(1, dtype=np.int32) - vals_ptr = module.ffi.cast("int *", module.ffi.from_buffer(vals)) - ufcx_dofmap.tabulate_entity_dofs(vals_ptr, 0, v) - assert vals[0] == v + assert ufcx_dofmap.entity_dofs[v] == v assert ufcx_dofmap.num_sub_dofmaps == 2 @@ -102,15 +94,11 @@ def test_tensor_element(compile_args): assert ufcx_dofmap.num_global_support_dofs == 0 assert ufcx_dofmap.num_global_support_dofs == 0 assert ufcx_dofmap.num_element_support_dofs == 3 - assert ufcx_dofmap.num_entity_dofs[0] == 1 - assert ufcx_dofmap.num_entity_dofs[1] == 0 - assert ufcx_dofmap.num_entity_dofs[2] == 0 - assert ufcx_dofmap.num_entity_dofs[3] == 0 + off = np.array([ufcx_dofmap.entity_dof_offsets[i] for i in range(8)]) + assert np.all(np.diff(off) == [1, 1, 1, 0, 0, 0, 0]) + for v in range(3): - vals = np.zeros(1, dtype=np.int32) - vals_ptr = module.ffi.cast("int *", module.ffi.from_buffer(vals)) - ufcx_dofmap.tabulate_entity_dofs(vals_ptr, 0, v) - assert vals[0] == v + assert ufcx_dofmap.entity_dofs[v] == v assert ufcx_dofmap.num_sub_dofmaps == 4 @@ -136,14 +124,10 @@ def test_vector_quadrature_element(compile_args): assert ufcx_dofmap.num_global_support_dofs == 0 assert ufcx_dofmap.num_global_support_dofs == 0 assert ufcx_dofmap.num_element_support_dofs == 4 - assert ufcx_dofmap.num_entity_dofs[0] == 0 - assert ufcx_dofmap.num_entity_dofs[1] == 0 - assert ufcx_dofmap.num_entity_dofs[2] == 0 - assert ufcx_dofmap.num_entity_dofs[3] == 4 - - vals = np.zeros(4, dtype=np.int32) - vals_ptr = module.ffi.cast("int *", module.ffi.from_buffer(vals)) - ufcx_dofmap.tabulate_entity_dofs(vals_ptr, 3, 0) - assert (vals == [0, 1, 2, 3]).all() + off = np.array([ufcx_dofmap.entity_dof_offsets[i] for i in range(16)]) + assert np.all(np.diff(off) == [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4]) + + for i in range(4): + assert ufcx_dofmap.entity_dofs[i] == i assert ufcx_dofmap.num_sub_dofmaps == 3 diff --git a/test/test_jit_forms.py b/test/test_jit_forms.py index b6fe9518b..caa085d35 100644 --- a/test/test_jit_forms.py +++ b/test/test_jit_forms.py @@ -39,11 +39,13 @@ def test_laplace_bilinear_form_2d(mode, expected_result, compile_args): ffi = module.ffi form0 = compiled_forms[0] - assert form0.num_integrals(module.lib.cell) == 1 - ids = form0.integral_ids(module.lib.cell) - assert ids[0] == -1 + offsets = form0.form_integral_offsets + cell = module.lib.cell + assert offsets[cell + 1] - offsets[cell] == 1 + integral_id = form0.form_integral_ids[offsets[cell]] + assert integral_id == -1 - default_integral = form0.integrals(module.lib.cell)[0] + default_integral = form0.form_integrals[offsets[cell]] np_type = cdtype_to_numpy(mode) A = np.zeros((3, 3), dtype=np_type) @@ -107,8 +109,8 @@ def test_mass_bilinear_form_2d(mode, expected_result, compile_args): for f, compiled_f in zip(forms, compiled_forms): assert compiled_f.rank == len(f.arguments()) - form0 = compiled_forms[0].integrals(module.lib.cell)[0] - form1 = compiled_forms[1].integrals(module.lib.cell)[0] + form0 = compiled_forms[0].form_integrals[0] + form1 = compiled_forms[1].form_integrals[0] np_type = cdtype_to_numpy(mode) A = np.zeros((3, 3), dtype=np_type) @@ -165,7 +167,7 @@ def test_helmholtz_form_2d(mode, expected_result, compile_args): for f, compiled_f in zip(forms, compiled_forms): 
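# Editor's note, not part of this patch: every test in this series drops the removed
# integrals()/integral_ids()/num_integrals() accessors in favour of the three flat arrays
# on ufcx_form. The lookup pattern used in the updated tests, written once as a helper
# sketch ("form" and "itg_type" stand for a compiled ufcx_form and an integral-type
# constant such as module.lib.cell):
#
#     def integrals_of_type(form, itg_type):
#         off = form.form_integral_offsets
#         return [(form.form_integral_ids[j], form.form_integrals[j])
#                 for j in range(off[itg_type], off[itg_type + 1])]
#
#     ids_and_integrals = integrals_of_type(form0, module.lib.cell)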
assert compiled_f.rank == len(f.arguments()) - form0 = compiled_forms[0].integrals(module.lib.cell)[0] + form0 = compiled_forms[0].form_integrals[0] np_type = cdtype_to_numpy(mode) A = np.zeros((3, 3), dtype=np_type) @@ -213,7 +215,7 @@ def test_laplace_bilinear_form_3d(mode, expected_result, compile_args): for f, compiled_f in zip(forms, compiled_forms): assert compiled_f.rank == len(f.arguments()) - form0 = compiled_forms[0].integrals(module.lib.cell)[0] + form0 = compiled_forms[0].form_integrals[0] np_type = cdtype_to_numpy(mode) A = np.zeros((4, 4), dtype=np_type) @@ -249,7 +251,7 @@ def test_form_coefficient(compile_args): for f, compiled_f in zip(forms, compiled_forms): assert compiled_f.rank == len(f.arguments()) - form0 = compiled_forms[0].integrals(module.lib.cell)[0] + form0 = compiled_forms[0].form_integrals[0] A = np.zeros((3, 3), dtype=np.float64) w = np.array([1.0, 1.0, 1.0], dtype=np.float64) c = np.array([], dtype=np.float64) @@ -288,21 +290,26 @@ def test_subdomains(compile_args): assert compiled_f.rank == len(f.arguments()) form0 = compiled_forms[0] - ids = form0.integral_ids(module.lib.cell) + offsets = form0.form_integral_offsets + cell = module.lib.cell + ids = [form0.form_integral_ids[j] for j in range(offsets[cell], offsets[cell + 1])] assert ids[0] == -1 and ids[1] == 2 form1 = compiled_forms[1] - ids = form1.integral_ids(module.lib.cell) + offsets = form1.form_integral_offsets + ids = [form1.form_integral_ids[j] for j in range(offsets[cell], offsets[cell + 1])] assert ids[0] == -1 and ids[1] == 2 form2 = compiled_forms[2] - ids = form2.integral_ids(module.lib.cell) + offsets = form2.form_integral_offsets + ids = [form2.form_integral_ids[j] for j in range(offsets[cell], offsets[cell + 1])] assert ids[0] == 1 and ids[1] == 2 form3 = compiled_forms[3] - assert form3.num_integrals(module.lib.cell) == 0 - - ids = form3.integral_ids(module.lib.exterior_facet) + offsets = form3.form_integral_offsets + assert offsets[cell + 1] - offsets[cell] == 0 + exf = module.lib.exterior_facet + ids = [form3.form_integral_ids[j] for j in range(offsets[exf], offsets[exf + 1])] assert ids[0] == 0 and ids[1] == 210 @@ -325,7 +332,7 @@ def test_interior_facet_integral(mode, compile_args): ffi = module.ffi np_type = cdtype_to_numpy(mode) - integral0 = form0.integrals(module.lib.interior_facet)[0] + integral0 = form0.form_integrals[0] A = np.zeros((6, 6), dtype=np_type) w = np.array([], dtype=np_type) c = np.array([], dtype=np.float64) @@ -370,8 +377,8 @@ def test_conditional(mode, compile_args): compiled_forms, module, code = ffcx.codegeneration.jit.compile_forms( forms, options={'scalar_type': mode}, cffi_extra_compile_args=compile_args) - form0 = compiled_forms[0].integrals(module.lib.cell)[0] - form1 = compiled_forms[1].integrals(module.lib.cell)[0] + form0 = compiled_forms[0].form_integrals[0] + form1 = compiled_forms[1].form_integrals[0] ffi = module.ffi np_type = cdtype_to_numpy(mode) @@ -427,7 +434,7 @@ def test_custom_quadrature(compile_args): ffi = module.ffi form = compiled_forms[0] - default_integral = form.integrals(module.lib.cell)[0] + default_integral = form.form_integrals[0] A = np.zeros((6, 6), dtype=np.float64) w = np.array([], dtype=np.float64) @@ -512,8 +519,8 @@ def test_lagrange_triangle(compile_args, order, mode, sym_fun, ufl_fun): ffi = module.ffi form0 = compiled_forms[0] - assert form0.num_integrals(module.lib.cell) == 1 - default_integral = form0.integrals(module.lib.cell)[0] + assert form0.form_integral_offsets[module.lib.cell + 1] == 1 + default_integral = 
form0.form_integrals[0] np_type = cdtype_to_numpy(mode) b = np.zeros((order + 2) * (order + 1) // 2, dtype=np_type) @@ -603,9 +610,9 @@ def test_lagrange_tetrahedron(compile_args, order, mode, sym_fun, ufl_fun): ffi = module.ffi form0 = compiled_forms[0] - assert form0.num_integrals(module.lib.cell) == 1 + assert form0.form_integral_offsets[module.lib.cell + 1] == 1 - default_integral = form0.integrals(module.lib.cell)[0] + default_integral = form0.form_integrals[0] np_type = cdtype_to_numpy(mode) b = np.zeros((order + 3) * (order + 2) * (order + 1) // 6, dtype=np_type) @@ -640,9 +647,9 @@ def test_prism(compile_args): ffi = module.ffi form0 = compiled_forms[0] - assert form0.num_integrals(module.lib.cell) == 1 + assert form0.form_integral_offsets[module.lib.cell + 1] == 1 - default_integral = form0.integrals(module.lib.cell)[0] + default_integral = form0.form_integrals[0] b = np.zeros(6, dtype=np.float64) coords = np.array([1.0, 0.0, 0.0, 0.0, 1.0, 0.0, @@ -675,8 +682,8 @@ def test_complex_operations(compile_args): compiled_forms, module, code = ffcx.codegeneration.jit.compile_forms( forms, options={'scalar_type': mode}, cffi_extra_compile_args=compile_args) - form0 = compiled_forms[0].integrals(module.lib.cell)[0] - form1 = compiled_forms[1].integrals(module.lib.cell)[0] + form0 = compiled_forms[0].form_integrals[0] + form1 = compiled_forms[1].form_integrals[0] ffi = module.ffi np_type = cdtype_to_numpy(mode) @@ -752,9 +759,9 @@ def test_interval_vertex_quadrature(compile_args): ffi = module.ffi form0 = compiled_forms[0] - assert form0.num_integrals(module.lib.cell) == 1 + assert form0.form_integral_offsets[module.lib.cell + 1] == 1 - default_integral = form0.integrals(module.lib.cell)[0] + default_integral = form0.form_integrals[0] J = np.zeros(1, dtype=np.float64) a = np.pi b = np.exp(1) @@ -799,9 +806,11 @@ def test_facet_vertex_quadrature(compile_args): assert len(compiled_forms) == 2 solutions = [] for form in compiled_forms: - assert form.num_integrals(module.lib.exterior_facet) == 1 + offsets = form.form_integral_offsets + exf = module.lib.exterior_facet + assert offsets[exf + 1] - offsets[exf] == 1 - default_integral = form.integrals(module.lib.exterior_facet)[0] + default_integral = form.form_integrals[offsets[exf]] J = np.zeros(1, dtype=np.float64) a = np.pi b = np.exp(1) @@ -850,7 +859,7 @@ def test_manifold_derivatives(compile_args): compiled_forms, module, _ = ffcx.codegeneration.jit.compile_forms( [J], cffi_extra_compile_args=compile_args) - default_integral = compiled_forms[0].integrals(module.lib.cell)[0] + default_integral = compiled_forms[0].form_integrals[0] scale = 2.5 coords = np.array([0.0, 0.0, 0.0, 0.0, scale, 0.0], dtype=np.float64) dof_coords = el.element.points.reshape(-1) From 4572bec1d9d67bbaa711ec4d09e80935e0d84f99 Mon Sep 17 00:00:00 2001 From: Chris Richardson Date: Thu, 24 Aug 2023 09:53:49 +0100 Subject: [PATCH 28/44] Move templates for static C code to the "C" folder (#593) * Move C related files to C folder * Add files * Reduce use of CNodes * Fix for mypy * Reduce CNodes usage * Remove more CNodes * Remove remaining CNodes from templates * Remove unused * fix typos --- .../{ => C}/basix_custom_element_template.py | 0 ffcx/codegeneration/C/cnodes.py | 75 ---------- ffcx/codegeneration/{ => C}/dofmap.py | 49 +++---- .../codegeneration/{ => C}/dofmap_template.py | 0 ffcx/codegeneration/C/expressions.py | 129 ++++++++++++++++++ .../{ => C}/expressions_template.py | 0 ffcx/codegeneration/{ => C}/finite_element.py | 31 ++--- .../{ => 
C}/finite_element_template.py | 0 ffcx/codegeneration/{ => C}/form.py | 96 +++++-------- ffcx/codegeneration/{ => C}/form_template.py | 0 ffcx/codegeneration/C/integrals.py | 77 +++++++++++ .../{ => C}/integrals_template.py | 0 ffcx/codegeneration/codegeneration.py | 10 +- ...expressions.py => expression_generator.py} | 117 +--------------- ffcx/codegeneration/flop_count.py | 2 +- .../{integrals.py => integral_generator.py} | 68 +-------- 16 files changed, 282 insertions(+), 372 deletions(-) rename ffcx/codegeneration/{ => C}/basix_custom_element_template.py (100%) rename ffcx/codegeneration/{ => C}/dofmap.py (68%) rename ffcx/codegeneration/{ => C}/dofmap_template.py (100%) create mode 100644 ffcx/codegeneration/C/expressions.py rename ffcx/codegeneration/{ => C}/expressions_template.py (100%) rename ffcx/codegeneration/{ => C}/finite_element.py (86%) rename ffcx/codegeneration/{ => C}/finite_element_template.py (100%) rename ffcx/codegeneration/{ => C}/form.py (59%) rename ffcx/codegeneration/{ => C}/form_template.py (100%) create mode 100644 ffcx/codegeneration/C/integrals.py rename ffcx/codegeneration/{ => C}/integrals_template.py (100%) rename ffcx/codegeneration/{expressions.py => expression_generator.py} (76%) rename ffcx/codegeneration/{integrals.py => integral_generator.py} (91%) diff --git a/ffcx/codegeneration/basix_custom_element_template.py b/ffcx/codegeneration/C/basix_custom_element_template.py similarity index 100% rename from ffcx/codegeneration/basix_custom_element_template.py rename to ffcx/codegeneration/C/basix_custom_element_template.py diff --git a/ffcx/codegeneration/C/cnodes.py b/ffcx/codegeneration/C/cnodes.py index 3d6eb7666..c648d3284 100644 --- a/ffcx/codegeneration/C/cnodes.py +++ b/ffcx/codegeneration/C/cnodes.py @@ -1459,81 +1459,6 @@ def flops(self): return 0 -def _is_simple_if_body(body): - if isinstance(body, StatementList): - if len(body.statements) > 1: - return False - (body,) = body.statements - return isinstance(body, (Return, AssignOp)) - - -class If(CStatement): - __slots__ = ("condition", "body") - is_scoped = True - - def __init__(self, condition, body): - self.condition = as_cexpr(condition) - self.body = as_cstatement(body) - - def cs_format(self, precision=None): - statement = "if (" + self.condition.ce_format(precision) + ")" - body_fmt = Indented(self.body.cs_format(precision)) - if _is_simple_if_body(self.body): - return (statement, body_fmt) - else: - return (statement, "{", body_fmt, "}") - - def __eq__(self, other): - return ( - isinstance(other, type(self)) - and self.condition == other.condition - and self.body == other.body - ) - - -class ElseIf(CStatement): - __slots__ = ("condition", "body") - is_scoped = True - - def __init__(self, condition, body): - self.condition = as_cexpr(condition) - self.body = as_cstatement(body) - - def cs_format(self, precision=None): - statement = "else if (" + self.condition.ce_format(precision) + ")" - body_fmt = Indented(self.body.cs_format(precision)) - if _is_simple_if_body(self.body): - return (statement, body_fmt) - else: - return (statement, "{", body_fmt, "}") - - def __eq__(self, other): - return ( - isinstance(other, type(self)) - and self.condition == other.condition - and self.body == other.body - ) - - -class Else(CStatement): - __slots__ = ("body",) - is_scoped = True - - def __init__(self, body): - self.body = as_cstatement(body) - - def cs_format(self, precision=None): - statement = "else" - body_fmt = Indented(self.body.cs_format(precision)) - if _is_simple_if_body(self.body): - return 
(statement, body_fmt) - else: - return (statement, "{", body_fmt, "}") - - def __eq__(self, other): - return isinstance(other, type(self)) and self.body == other.body - - def is_simple_inner_loop(code): if isinstance(code, ForRange) and is_simple_inner_loop(code.body): return True diff --git a/ffcx/codegeneration/dofmap.py b/ffcx/codegeneration/C/dofmap.py similarity index 68% rename from ffcx/codegeneration/dofmap.py rename to ffcx/codegeneration/C/dofmap.py index de3d21dc4..5f5d4cdf9 100644 --- a/ffcx/codegeneration/dofmap.py +++ b/ffcx/codegeneration/C/dofmap.py @@ -9,7 +9,7 @@ import logging -import ffcx.codegeneration.dofmap_template as ufcx_dofmap +import ffcx.codegeneration.C.dofmap_template as ufcx_dofmap logger = logging.getLogger("ffcx") @@ -29,8 +29,6 @@ def generator(ir, options): d["num_element_support_dofs"] = ir.num_element_support_dofs d["num_sub_dofmaps"] = ir.num_sub_dofmaps - import ffcx.codegeneration.C.cnodes as L - flattened_entity_dofs = [] entity_dof_offsets = [0] for dim in ir.entity_dofs: @@ -39,19 +37,13 @@ def generator(ir, options): flattened_entity_dofs.append(v) entity_dof_offsets.append(len(flattened_entity_dofs)) d["entity_dofs"] = f"entity_dofs_{ir.name}" - d["entity_dofs_init"] = L.ArrayDecl( - "int", - f"entity_dofs_{ir.name}", - values=flattened_entity_dofs, - sizes=len(flattened_entity_dofs), - ) + values = ", ".join(str(i) for i in flattened_entity_dofs) + sizes = len(flattened_entity_dofs) + d["entity_dofs_init"] = f"int entity_dofs_{ir.name}[{sizes}] = {{{values}}};" d["entity_dof_offsets"] = f"entity_dof_offsets_{ir.name}" - d["entity_dof_offsets_init"] = L.ArrayDecl( - "int", - f"entity_dof_offsets_{ir.name}", - values=entity_dof_offsets, - sizes=len(entity_dof_offsets), - ) + values = ", ".join(str(i) for i in entity_dof_offsets) + sizes = len(entity_dof_offsets) + d["entity_dof_offsets_init"] = f"int entity_dof_offsets_{ir.name}[{sizes}] = {{{values}}};" # Closure flattened_entity_closure_dofs = [] @@ -62,29 +54,20 @@ def generator(ir, options): flattened_entity_closure_dofs.append(v) entity_closure_dof_offsets.append(len(flattened_entity_closure_dofs)) d["entity_closure_dofs"] = f"entity_closure_dofs_{ir.name}" - d["entity_closure_dofs_init"] = L.ArrayDecl( - "int", - f"entity_closure_dofs_{ir.name}", - values=flattened_entity_closure_dofs, - sizes=len(flattened_entity_closure_dofs), - ) + values = ", ".join(str(i) for i in flattened_entity_closure_dofs) + sizes = len(flattened_entity_closure_dofs) + d["entity_closure_dofs_init"] = f"int entity_closure_dofs_{ir.name}[{sizes}] = {{{values}}};" d["entity_closure_dof_offsets"] = f"entity_closure_dof_offsets_{ir.name}" - d["entity_closure_dof_offsets_init"] = L.ArrayDecl( - "int", - f"entity_closure_dof_offsets_{ir.name}", - values=entity_closure_dof_offsets, - sizes=len(entity_closure_dof_offsets), - ) + values = ", ".join(str(i) for i in entity_closure_dof_offsets) + sizes = len(entity_dof_offsets) + d["entity_closure_dof_offsets_init"] = f"int entity_closure_dof_offsets_{ir.name}[{sizes}] = {{{values}}};" d["block_size"] = ir.block_size if len(ir.sub_dofmaps) > 0: - d["sub_dofmaps_initialization"] = L.ArrayDecl( - "ufcx_dofmap*", - f"sub_dofmaps_{ir.name}", - values=[L.AddressOf(L.Symbol(dofmap)) for dofmap in ir.sub_dofmaps], - sizes=len(ir.sub_dofmaps), - ) + values = ", ".join(f"&{dofmap}" for dofmap in ir.sub_dofmaps) + sizes = len(ir.sub_dofmaps) + d["sub_dofmaps_initialization"] = f"ufcx_dofmap* sub_dofmaps_{ir.name}[{sizes}] = {{{values}}};" d["sub_dofmaps"] = f"sub_dofmaps_{ir.name}" 
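# Editor's note, not part of this patch: entity_dofs_init / entity_dof_offsets_init above
# emit a flattened, CSR-like encoding of the old per-dimension dof lists. A sketch of how
# a consumer recovers the per-entity lists, where "dofmap" stands for a compiled
# ufcx_dofmap (accessed exactly this way in test_blocked_elements.py) and "num_entities"
# for the total number of cell entities over all dimensions:
#
#     def unpack_entity_dofs(dofmap, num_entities):
#         off = [dofmap.entity_dof_offsets[i] for i in range(num_entities + 1)]
#         return [[dofmap.entity_dofs[j] for j in range(off[e], off[e + 1])]
#                 for e in range(num_entities)]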
else: d["sub_dofmaps_initialization"] = "" diff --git a/ffcx/codegeneration/dofmap_template.py b/ffcx/codegeneration/C/dofmap_template.py similarity index 100% rename from ffcx/codegeneration/dofmap_template.py rename to ffcx/codegeneration/C/dofmap_template.py diff --git a/ffcx/codegeneration/C/expressions.py b/ffcx/codegeneration/C/expressions.py new file mode 100644 index 000000000..a994b8c5c --- /dev/null +++ b/ffcx/codegeneration/C/expressions.py @@ -0,0 +1,129 @@ +# Copyright (C) 2019 Michal Habera +# +# This file is part of FFCx.(https://www.fenicsproject.org) +# +# SPDX-License-Identifier: LGPL-3.0-or-later + +import logging + +from ffcx.codegeneration.C import expressions_template +from ffcx.codegeneration.expression_generator import ExpressionGenerator +from ffcx.codegeneration.backend import FFCXBackend +from ffcx.codegeneration.C.format_lines import format_indented_lines +from ffcx.naming import cdtype_to_numpy, scalar_to_value_type + +logger = logging.getLogger("ffcx") + + +def generator(ir, options): + """Generate UFC code for an expression.""" + logger.info("Generating code for expression:") + logger.info(f"--- points: {ir.points}") + logger.info(f"--- name: {ir.name}") + + factory_name = ir.name + + # Format declaration + declaration = expressions_template.declaration.format( + factory_name=factory_name, name_from_uflfile=ir.name_from_uflfile) + + backend = FFCXBackend(ir, options) + eg = ExpressionGenerator(ir, backend) + + d = {} + d["name_from_uflfile"] = ir.name_from_uflfile + d["factory_name"] = ir.name + + parts = eg.generate() + + body = format_indented_lines(parts.cs_format(), 1) + d["tabulate_expression"] = body + + if len(ir.original_coefficient_positions) > 0: + d["original_coefficient_positions"] = f"original_coefficient_positions_{ir.name}" + values = ", ".join(str(i) for i in ir.original_coefficient_positions) + sizes = len(ir.original_coefficient_positions) + d["original_coefficient_positions_init"] = \ + f"static int original_coefficient_positions_{ir.name}[{sizes}] = {{{values}}};" + else: + d["original_coefficient_positions"] = "NULL" + d["original_coefficient_positions_init"] = "" + + values = ", ".join(str(p) for p in ir.points.flatten()) + sizes = ir.points.size + d["points_init"] = f"static double points_{ir.name}[{sizes}] = {{{values}}};" + d["points"] = f"points_{ir.name}" + + if len(ir.expression_shape) > 0: + values = ", ".join(str(i) for i in ir.expression_shape) + sizes = len(ir.expression_shape) + d["value_shape_init"] = f"static int value_shape_{ir.name}[{sizes}] = {{{values}}};" + d["value_shape"] = f"value_shape_{ir.name}" + else: + d["value_shape_init"] = "" + d["value_shape"] = "NULL" + + d["num_components"] = len(ir.expression_shape) + d["num_coefficients"] = len(ir.coefficient_numbering) + d["num_constants"] = len(ir.constant_names) + d["num_points"] = ir.points.shape[0] + d["topological_dimension"] = ir.points.shape[1] + d["scalar_type"] = options["scalar_type"] + d["geom_type"] = scalar_to_value_type(options["scalar_type"]) + d["np_scalar_type"] = cdtype_to_numpy(options["scalar_type"]) + + d["rank"] = len(ir.tensor_shape) + + if len(ir.coefficient_names) > 0: + values = ", ".join(f'"{name}"' for name in ir.coefficient_names) + sizes = len(ir.coefficient_names) + d["coefficient_names_init"] = f"static const char* coefficient_names_{ir.name}[{sizes}] = {{{values}}};" + d["coefficient_names"] = f"coefficient_names_{ir.name}" + else: + d["coefficient_names_init"] = "" + d["coefficient_names"] = "NULL" + + if len(ir.constant_names) > 0: + 
values = ", ".join(f'"{name}"' for name in ir.constant_names) + sizes = len(ir.constant_names) + d["constant_names_init"] = f"static const char* constant_names_{ir.name}[{sizes}] = {{{values}}};" + d["constant_names"] = f"constant_names_{ir.name}" + else: + d["constant_names_init"] = "" + d["constant_names"] = "NULL" + + code = [] + + # FIXME: Should be handled differently, revise how + # ufcx_function_space is generated (also for ufcx_form) + for (name, (element, dofmap, cmap_family, cmap_degree)) in ir.function_spaces.items(): + code += [f"static ufcx_function_space function_space_{name}_{ir.name_from_uflfile} ="] + code += ["{"] + code += [f".finite_element = &{element},"] + code += [f".dofmap = &{dofmap},"] + code += [f".geometry_family = \"{cmap_family}\","] + code += [f".geometry_degree = {cmap_degree}"] + code += ["};"] + + d["function_spaces_alloc"] = "\n".join(code) + d["function_spaces"] = "" + + if len(ir.function_spaces) > 0: + d["function_spaces"] = f"function_spaces_{ir.name}" + values = ", ".join(f"&function_space_{name}_{ir.name_from_uflfile}" + for (name, _) in ir.function_spaces.items()) + sizes = len(ir.function_spaces) + d["function_spaces_init"] = f"ufcx_function_space* function_spaces_{ir.name}[{sizes}] = {{{values}}};" + else: + d["function_spaces"] = "NULL" + d["function_spaces_init"] = "" + + # Check that no keys are redundant or have been missed + from string import Formatter + fields = [fname for _, fname, _, _ in Formatter().parse(expressions_template.factory) if fname] + assert set(fields) == set(d.keys()), "Mismatch between keys in template and in formatting dict" + + # Format implementation code + implementation = expressions_template.factory.format_map(d) + + return declaration, implementation diff --git a/ffcx/codegeneration/expressions_template.py b/ffcx/codegeneration/C/expressions_template.py similarity index 100% rename from ffcx/codegeneration/expressions_template.py rename to ffcx/codegeneration/C/expressions_template.py diff --git a/ffcx/codegeneration/finite_element.py b/ffcx/codegeneration/C/finite_element.py similarity index 86% rename from ffcx/codegeneration/finite_element.py rename to ffcx/codegeneration/C/finite_element.py index a5d51c835..674cea64d 100644 --- a/ffcx/codegeneration/finite_element.py +++ b/ffcx/codegeneration/C/finite_element.py @@ -10,8 +10,8 @@ import logging -import ffcx.codegeneration.basix_custom_element_template as ufcx_basix_custom_finite_element -import ffcx.codegeneration.finite_element_template as ufcx_finite_element +import ffcx.codegeneration.C.basix_custom_element_template as ufcx_basix_custom_finite_element +import ffcx.codegeneration.C.finite_element_template as ufcx_finite_element import ufl logger = logging.getLogger("ffcx") @@ -63,30 +63,29 @@ def generator(ir, options): else: d["basix_cell"] = int(ir.basix_cell) - import ffcx.codegeneration.C.cnodes as L - if len(ir.value_shape) > 0: d["value_shape"] = f"value_shape_{ir.name}" - d["value_shape_init"] = L.ArrayDecl( - "int", f"value_shape_{ir.name}", values=ir.value_shape, sizes=len(ir.value_shape)) + values = ", ".join(str(i) for i in ir.value_shape) + sizes = len(ir.value_shape) + d["value_shape_init"] = f"int value_shape_{ir.name}[{sizes}] = {{{values}}};" else: d["value_shape"] = "NULL" d["value_shape_init"] = "" if len(ir.reference_value_shape) > 0: d["reference_value_shape"] = f"reference_value_shape_{ir.name}" - d["reference_value_shape_init"] = L.ArrayDecl( - "int", f"reference_value_shape_{ir.name}", - values=ir.reference_value_shape, 
sizes=len(ir.reference_value_shape)) + values = ", ".join(str(i) for i in ir.reference_value_shape) + sizes = len(ir.reference_value_shape) + d["reference_value_shape_init"] = f"int reference_value_shape_{ir.name}[{sizes}] = {{{values}}};" else: d["reference_value_shape"] = "NULL" d["reference_value_shape_init"] = "" if len(ir.sub_elements) > 0: d["sub_elements"] = f"sub_elements_{ir.name}" - d["sub_elements_init"] = L.ArrayDecl( - "ufcx_finite_element*", f"sub_elements_{ir.name}", - values=[L.AddressOf(L.Symbol(el)) for el in ir.sub_elements], sizes=len(ir.sub_elements)) + values = ", ".join(f"&{el}" for el in ir.sub_elements) + sizes = len(ir.sub_elements) + d["sub_elements_init"] = f"ufcx_finite_element* sub_elements_{ir.name}[{sizes}] = {{{values}}};" else: d["sub_elements"] = "NULL" d["sub_elements_init"] = "" @@ -126,14 +125,12 @@ def generate_custom_element(name, ir): d["highest_degree"] = ir.highest_degree d["discontinuous"] = "true" if ir.discontinuous else "false" d["interpolation_nderivs"] = ir.interpolation_nderivs - - import ffcx.codegeneration.C.cnodes as L - d["value_shape_length"] = len(ir.value_shape) if len(ir.value_shape) > 0: d["value_shape"] = f"value_shape_{name}" - d["value_shape_init"] = L.ArrayDecl( - "int", f"value_shape_{name}", values=ir.value_shape, sizes=len(ir.value_shape)) + values = ", ".join(str(i) for i in ir.value_shape) + sizes = len(ir.value_shape) + d["value_shape_init"] = f"int value_shape_{name}[{sizes}] = {{{values}}};" else: d["value_shape"] = "NULL" d["value_shape_init"] = "" diff --git a/ffcx/codegeneration/finite_element_template.py b/ffcx/codegeneration/C/finite_element_template.py similarity index 100% rename from ffcx/codegeneration/finite_element_template.py rename to ffcx/codegeneration/C/finite_element_template.py diff --git a/ffcx/codegeneration/form.py b/ffcx/codegeneration/C/form.py similarity index 59% rename from ffcx/codegeneration/form.py rename to ffcx/codegeneration/C/form.py index 2e95edadd..275fe0d8a 100644 --- a/ffcx/codegeneration/form.py +++ b/ffcx/codegeneration/C/form.py @@ -9,7 +9,7 @@ import logging -from ffcx.codegeneration import form_template +from ffcx.codegeneration.C import form_template logger = logging.getLogger("ffcx") @@ -20,8 +20,6 @@ def generator(ir, options): logger.info(f"--- rank: {ir.rank}") logger.info(f"--- name: {ir.name}") - import ffcx.codegeneration.C.cnodes as L - d = {} d["factory_name"] = ir.name d["name_from_uflfile"] = ir.name_from_uflfile @@ -31,58 +29,51 @@ def generator(ir, options): d["num_constants"] = ir.num_constants if len(ir.original_coefficient_position) > 0: - d["original_coefficient_position_init"] = L.ArrayDecl( - "int", - f"original_coefficient_position_{ir.name}", - values=ir.original_coefficient_position, - sizes=len(ir.original_coefficient_position), - ) + values = ", ".join(str(i) for i in ir.original_coefficient_position) + sizes = len(ir.original_coefficient_position) + + d["original_coefficient_position_init"] = \ + f"int original_coefficient_position_{ir.name}[{sizes}] = {{{values}}};" d["original_coefficient_position"] = f"original_coefficient_position_{ir.name}" else: d["original_coefficient_position_init"] = "" - d["original_coefficient_position"] = L.Null() + d["original_coefficient_position"] = "NULL" cnames = ir.coefficient_names assert ir.num_coefficients == len(cnames) - names = L.Symbol("names") if len(cnames) == 0: - code = [L.Return(L.Null())] + code = ["return NULL;"] else: - code = [L.ArrayDecl("static const char*", names, len(cnames), cnames)] - code += 
[L.Return(names)] - d["coefficient_name_map"] = L.StatementList(code) + values = ", ".join(f'"{name}"' for name in cnames) + code = [f"static const char* names[{len(cnames)}] = {{{values}}};", + "return names;"] + d["coefficient_name_map"] = "\n".join(code) cstnames = ir.constant_names - names = L.Symbol("names") if len(cstnames) == 0: - code = [L.Return(L.Null())] + code = ["return NULL;"] else: - code = [L.ArrayDecl("static const char*", names, len(cstnames), cstnames)] - code += [L.Return(names)] - d["constant_name_map"] = L.StatementList(code) + values = ", ".join(f'"{name}"' for name in cstnames) + code = [f"static const char* names[{len(cstnames)}] = {{{values}}};", + "return names;"] + d["constant_name_map"] = "\n".join(code) if len(ir.finite_elements) > 0: d["finite_elements"] = f"finite_elements_{ir.name}" - d["finite_elements_init"] = L.ArrayDecl( - "ufcx_finite_element*", - f"finite_elements_{ir.name}", - values=[L.AddressOf(L.Symbol(el)) for el in ir.finite_elements], - sizes=len(ir.finite_elements), - ) + values = ", ".join(f"&{el}" for el in ir.finite_elements) + sizes = len(ir.finite_elements) + d["finite_elements_init"] = f"ufcx_finite_element* finite_elements_{ir.name}[{sizes}] = {{{values}}};" else: - d["finite_elements"] = L.Null() + d["finite_elements"] = "NULL" d["finite_elements_init"] = "" if len(ir.dofmaps) > 0: d["dofmaps"] = f"dofmaps_{ir.name}" - d["dofmaps_init"] = L.ArrayDecl( - "ufcx_dofmap*", - f"dofmaps_{ir.name}", - values=[L.AddressOf(L.Symbol(dofmap)) for dofmap in ir.dofmaps], - sizes=len(ir.dofmaps), - ) + values = ", ".join(f"&{dofmap}" for dofmap in ir.dofmaps) + sizes = len(ir.dofmaps) + d["dofmaps_init"] = f"ufcx_dofmap* dofmaps_{ir.name}[{sizes}] = {{{values}}};" else: - d["dofmaps"] = L.Null() + d["dofmaps"] = "NULL" d["dofmaps_init"] = "" integrals = [] @@ -90,24 +81,18 @@ def generator(ir, options): integral_offsets = [0] # Note: the order of this list is defined by the enum ufcx_integral_type in ufcx.h for itg_type in ("cell", "exterior_facet", "interior_facet"): - integrals += [L.AddressOf(L.Symbol(itg)) for itg in ir.integral_names[itg_type]] + integrals += [f"&{itg}" for itg in ir.integral_names[itg_type]] integral_ids += ir.subdomain_ids[itg_type] integral_offsets.append(len(integrals)) if len(integrals) > 0: - d["form_integrals_init"] = L.ArrayDecl( - "static ufcx_integral*", - f"form_integrals_{ir.name}", - values=integrals, - sizes=len(integrals), - ) + sizes = len(integrals) + values = ", ".join(integrals) + d["form_integrals_init"] = f"static ufcx_integral* form_integrals_{ir.name}[{sizes}] = {{{values}}};" d["form_integrals"] = f"form_integrals_{ir.name}" - d["form_integral_ids_init"] = L.ArrayDecl( - "int", - f"form_integral_ids_{ir.name}", - values=integral_ids, - sizes=len(integral_ids), - ) + sizes = len(integral_ids) + values = ", ".join(str(i) for i in integral_ids) + d["form_integral_ids_init"] = f"int form_integral_ids_{ir.name}[{sizes}] = {{{values}}};" d["form_integral_ids"] = f"form_integral_ids_{ir.name}" else: d["form_integrals_init"] = "" @@ -115,15 +100,11 @@ def generator(ir, options): d["form_integral_ids_init"] = "" d["form_integral_ids"] = "NULL" - d["form_integral_offsets_init"] = L.ArrayDecl( - "int", - f"form_integral_offsets_{ir.name}", - values=integral_offsets, - sizes=len(integral_offsets), - ) + sizes = len(integral_offsets) + values = ", ".join(str(i) for i in integral_offsets) + d["form_integral_offsets_init"] = f"int form_integral_offsets_{ir.name}[{sizes}] = {{{values}}};" code = [] - function_name = 
L.Symbol("function_name") # FIXME: Should be handled differently, revise how # ufcx_function_space is generated @@ -145,15 +126,12 @@ def generator(ir, options): code += [f".geometry_basix_variant = {int(cmap_variant)}"] code += ["};"] - _if = L.If for name in ir.function_spaces.keys(): - condition = L.EQ(L.Call("strcmp", (function_name, L.LiteralString(name))), 0) - code += [_if(condition, L.Return(L.Symbol(f"&functionspace_{name}")))] - _if = L.ElseIf + code += [f'if (strcmp(function_name, "{name}") == 0) return &functionspace_{name};'] code += ["return NULL;\n"] - d["functionspace"] = L.StatementList(code) + d["functionspace"] = "\n".join(code) # Check that no keys are redundant or have been missed from string import Formatter diff --git a/ffcx/codegeneration/form_template.py b/ffcx/codegeneration/C/form_template.py similarity index 100% rename from ffcx/codegeneration/form_template.py rename to ffcx/codegeneration/C/form_template.py diff --git a/ffcx/codegeneration/C/integrals.py b/ffcx/codegeneration/C/integrals.py new file mode 100644 index 000000000..b1b73c525 --- /dev/null +++ b/ffcx/codegeneration/C/integrals.py @@ -0,0 +1,77 @@ +# Copyright (C) 2015-2021 Martin Sandve Alnæs, Michal Habera, Igor Baratta +# +# This file is part of FFCx. (https://www.fenicsproject.org) +# +# SPDX-License-Identifier: LGPL-3.0-or-later + +import logging + +from ffcx.codegeneration.integral_generator import IntegralGenerator +from ffcx.codegeneration.C import integrals_template as ufcx_integrals +from ffcx.codegeneration.backend import FFCXBackend +from ffcx.codegeneration.C.format_lines import format_indented_lines +from ffcx.naming import cdtype_to_numpy, scalar_to_value_type + +logger = logging.getLogger("ffcx") + + +def generator(ir, options): + logger.info("Generating code for integral:") + logger.info(f"--- type: {ir.integral_type}") + logger.info(f"--- name: {ir.name}") + + """Generate code for an integral.""" + factory_name = ir.name + + # Format declaration + declaration = ufcx_integrals.declaration.format(factory_name=factory_name) + + # Create FFCx C backend + backend = FFCXBackend(ir, options) + + # Configure kernel generator + ig = IntegralGenerator(ir, backend) + + # Generate code ast for the tabulate_tensor body + parts = ig.generate() + + # Format code as string + body = format_indented_lines(parts.cs_format(ir.precision), 1) + + # Generate generic FFCx code snippets and add specific parts + code = {} + code["class_type"] = ir.integral_type + "_integral" + code["name"] = ir.name + code["members"] = "" + code["constructor"] = "" + code["constructor_arguments"] = "" + code["initializer_list"] = "" + code["destructor"] = "" + + if len(ir.enabled_coefficients) > 0: + values = ", ".join("1" if i else "0" for i in ir.enabled_coefficients) + sizes = len(ir.enabled_coefficients) + code["enabled_coefficients_init"] = f"bool enabled_coefficients_{ir.name}[{sizes}] = {{{values}}};" + code["enabled_coefficients"] = f"enabled_coefficients_{ir.name}" + else: + code["enabled_coefficients_init"] = "" + code["enabled_coefficients"] = "NULL" + + code["additional_includes_set"] = set() # FIXME: Get this out of code[] + code["tabulate_tensor"] = body + + if options["tabulate_tensor_void"]: + code["tabulate_tensor"] = "" + + implementation = ufcx_integrals.factory.format( + factory_name=factory_name, + enabled_coefficients=code["enabled_coefficients"], + enabled_coefficients_init=code["enabled_coefficients_init"], + tabulate_tensor=code["tabulate_tensor"], + needs_facet_permutations="true" if 
ir.needs_facet_permutations else "false", + scalar_type=options["scalar_type"], + geom_type=scalar_to_value_type(options["scalar_type"]), + np_scalar_type=cdtype_to_numpy(options["scalar_type"]), + coordinate_element=f"&{ir.coordinate_element}") + + return declaration, implementation diff --git a/ffcx/codegeneration/integrals_template.py b/ffcx/codegeneration/C/integrals_template.py similarity index 100% rename from ffcx/codegeneration/integrals_template.py rename to ffcx/codegeneration/C/integrals_template.py diff --git a/ffcx/codegeneration/codegeneration.py b/ffcx/codegeneration/codegeneration.py index fb5bbd2e6..ece9f2d23 100644 --- a/ffcx/codegeneration/codegeneration.py +++ b/ffcx/codegeneration/codegeneration.py @@ -14,12 +14,12 @@ import logging import typing -from ffcx.codegeneration.dofmap import generator as dofmap_generator -from ffcx.codegeneration.expressions import generator as expression_generator -from ffcx.codegeneration.finite_element import \ +from ffcx.codegeneration.C.dofmap import generator as dofmap_generator +from ffcx.codegeneration.C.expressions import generator as expression_generator +from ffcx.codegeneration.C.finite_element import \ generator as finite_element_generator -from ffcx.codegeneration.form import generator as form_generator -from ffcx.codegeneration.integrals import generator as integral_generator +from ffcx.codegeneration.C.form import generator as form_generator +from ffcx.codegeneration.C.integrals import generator as integral_generator logger = logging.getLogger("ffcx") diff --git a/ffcx/codegeneration/expressions.py b/ffcx/codegeneration/expression_generator.py similarity index 76% rename from ffcx/codegeneration/expressions.py rename to ffcx/codegeneration/expression_generator.py index 330b65d8b..553e8b315 100644 --- a/ffcx/codegeneration/expressions.py +++ b/ffcx/codegeneration/expression_generator.py @@ -10,128 +10,15 @@ from typing import Any, DefaultDict, Dict, Set import ufl -from ffcx.codegeneration import expressions_template, geometry +from ffcx.codegeneration import geometry from ffcx.codegeneration.backend import FFCXBackend from ffcx.codegeneration.C.cnodes import CNode -from ffcx.codegeneration.C.format_lines import format_indented_lines from ffcx.ir.representation import ExpressionIR -from ffcx.naming import cdtype_to_numpy, scalar_to_value_type +from ffcx.naming import scalar_to_value_type logger = logging.getLogger("ffcx") -def generator(ir, options): - """Generate UFC code for an expression.""" - logger.info("Generating code for expression:") - logger.info(f"--- points: {ir.points}") - logger.info(f"--- name: {ir.name}") - - factory_name = ir.name - - # Format declaration - declaration = expressions_template.declaration.format( - factory_name=factory_name, name_from_uflfile=ir.name_from_uflfile) - - backend = FFCXBackend(ir, options) - L = backend.language - eg = ExpressionGenerator(ir, backend) - - d = {} - d["name_from_uflfile"] = ir.name_from_uflfile - d["factory_name"] = ir.name - - parts = eg.generate() - - body = format_indented_lines(parts.cs_format(), 1) - d["tabulate_expression"] = body - - if len(ir.original_coefficient_positions) > 0: - d["original_coefficient_positions"] = f"original_coefficient_positions_{ir.name}" - d["original_coefficient_positions_init"] = L.ArrayDecl( - "static int", f"original_coefficient_positions_{ir.name}", - values=ir.original_coefficient_positions, sizes=len(ir.original_coefficient_positions)) - else: - d["original_coefficient_positions"] = L.Null() - 
d["original_coefficient_positions_init"] = "" - - d["points_init"] = L.ArrayDecl( - "static double", f"points_{ir.name}", values=ir.points.flatten(), sizes=ir.points.size) - d["points"] = L.Symbol(f"points_{ir.name}") - - if len(ir.expression_shape) > 0: - d["value_shape_init"] = L.ArrayDecl( - "static int", f"value_shape_{ir.name}", values=ir.expression_shape, sizes=len(ir.expression_shape)) - d["value_shape"] = f"value_shape_{ir.name}" - else: - d["value_shape_init"] = "" - d["value_shape"] = L.Null() - - d["num_components"] = len(ir.expression_shape) - d["num_coefficients"] = len(ir.coefficient_numbering) - d["num_constants"] = len(ir.constant_names) - d["num_points"] = ir.points.shape[0] - d["topological_dimension"] = ir.points.shape[1] - d["scalar_type"] = options["scalar_type"] - d["geom_type"] = scalar_to_value_type(options["scalar_type"]) - d["np_scalar_type"] = cdtype_to_numpy(options["scalar_type"]) - - d["rank"] = len(ir.tensor_shape) - - if len(ir.coefficient_names) > 0: - d["coefficient_names_init"] = L.ArrayDecl( - "static const char*", f"coefficient_names_{ir.name}", values=ir.coefficient_names, - sizes=len(ir.coefficient_names)) - d["coefficient_names"] = f"coefficient_names_{ir.name}" - else: - d["coefficient_names_init"] = "" - d["coefficient_names"] = L.Null() - - if len(ir.constant_names) > 0: - d["constant_names_init"] = L.ArrayDecl( - "static const char*", f"constant_names_{ir.name}", values=ir.constant_names, - sizes=len(ir.constant_names)) - d["constant_names"] = f"constant_names_{ir.name}" - else: - d["constant_names_init"] = "" - d["constant_names"] = L.Null() - - code = [] - - # FIXME: Should be handled differently, revise how - # ufcx_function_space is generated (also for ufcx_form) - for (name, (element, dofmap, cmap_family, cmap_degree)) in ir.function_spaces.items(): - code += [f"static ufcx_function_space function_space_{name}_{ir.name_from_uflfile} ="] - code += ["{"] - code += [f".finite_element = &{element},"] - code += [f".dofmap = &{dofmap},"] - code += [f".geometry_family = \"{cmap_family}\","] - code += [f".geometry_degree = {cmap_degree}"] - code += ["};"] - - d["function_spaces_alloc"] = L.StatementList(code) - d["function_spaces"] = "" - - if len(ir.function_spaces) > 0: - d["function_spaces"] = f"function_spaces_{ir.name}" - d["function_spaces_init"] = L.ArrayDecl("ufcx_function_space*", f"function_spaces_{ir.name}", values=[ - L.AddressOf(L.Symbol(f"function_space_{name}_{ir.name_from_uflfile}")) - for (name, _) in ir.function_spaces.items()], - sizes=len(ir.function_spaces)) - else: - d["function_spaces"] = L.Null() - d["function_spaces_init"] = "" - - # Check that no keys are redundant or have been missed - from string import Formatter - fields = [fname for _, fname, _, _ in Formatter().parse(expressions_template.factory) if fname] - assert set(fields) == set(d.keys()), "Mismatch between keys in template and in formatting dict" - - # Format implementation code - implementation = expressions_template.factory.format_map(d) - - return declaration, implementation - - class ExpressionGenerator: def __init__(self, ir: ExpressionIR, backend: FFCXBackend): diff --git a/ffcx/codegeneration/flop_count.py b/ffcx/codegeneration/flop_count.py index dc2a1402e..656fb7428 100644 --- a/ffcx/codegeneration/flop_count.py +++ b/ffcx/codegeneration/flop_count.py @@ -9,7 +9,7 @@ import ufl from ffcx.analysis import analyze_ufl_objects from ffcx.codegeneration.backend import FFCXBackend -from ffcx.codegeneration.integrals import IntegralGenerator +from 
ffcx.codegeneration.integral_generator import IntegralGenerator from ffcx.ir.representation import compute_ir diff --git a/ffcx/codegeneration/integrals.py b/ffcx/codegeneration/integral_generator.py similarity index 91% rename from ffcx/codegeneration/integrals.py rename to ffcx/codegeneration/integral_generator.py index cd2c105ee..1ebcef70f 100644 --- a/ffcx/codegeneration/integrals.py +++ b/ffcx/codegeneration/integral_generator.py @@ -10,81 +10,15 @@ import ufl from ffcx.codegeneration import geometry -from ffcx.codegeneration import integrals_template as ufcx_integrals -from ffcx.codegeneration.backend import FFCXBackend from ffcx.codegeneration.C.cnodes import BinOp, CNode -from ffcx.codegeneration.C.format_lines import format_indented_lines from ffcx.ir.elementtables import piecewise_ttypes from ffcx.ir.integral import BlockDataT from ffcx.ir.representationutils import QuadratureRule -from ffcx.naming import cdtype_to_numpy, scalar_to_value_type +from ffcx.naming import scalar_to_value_type logger = logging.getLogger("ffcx") -def generator(ir, options): - logger.info("Generating code for integral:") - logger.info(f"--- type: {ir.integral_type}") - logger.info(f"--- name: {ir.name}") - - """Generate code for an integral.""" - factory_name = ir.name - - # Format declaration - declaration = ufcx_integrals.declaration.format(factory_name=factory_name) - - # Create FFCx C backend - backend = FFCXBackend(ir, options) - - # Configure kernel generator - ig = IntegralGenerator(ir, backend) - - # Generate code ast for the tabulate_tensor body - parts = ig.generate() - - # Format code as string - body = format_indented_lines(parts.cs_format(ir.precision), 1) - - # Generate generic FFCx code snippets and add specific parts - code = {} - code["class_type"] = ir.integral_type + "_integral" - code["name"] = ir.name - code["members"] = "" - code["constructor"] = "" - code["constructor_arguments"] = "" - code["initializer_list"] = "" - code["destructor"] = "" - - L = backend.language - if len(ir.enabled_coefficients) > 0: - code["enabled_coefficients_init"] = L.ArrayDecl( - "bool", f"enabled_coefficients_{ir.name}", values=ir.enabled_coefficients, - sizes=len(ir.enabled_coefficients)) - code["enabled_coefficients"] = f"enabled_coefficients_{ir.name}" - else: - code["enabled_coefficients_init"] = "" - code["enabled_coefficients"] = L.Null() - - code["additional_includes_set"] = set() # FIXME: Get this out of code[] - code["tabulate_tensor"] = body - - if options["tabulate_tensor_void"]: - code["tabulate_tensor"] = "" - - implementation = ufcx_integrals.factory.format( - factory_name=factory_name, - enabled_coefficients=code["enabled_coefficients"], - enabled_coefficients_init=code["enabled_coefficients_init"], - tabulate_tensor=code["tabulate_tensor"], - needs_facet_permutations="true" if ir.needs_facet_permutations else "false", - scalar_type=options["scalar_type"], - geom_type=scalar_to_value_type(options["scalar_type"]), - np_scalar_type=cdtype_to_numpy(options["scalar_type"]), - coordinate_element=L.AddressOf(L.Symbol(ir.coordinate_element))) - - return declaration, implementation - - class IntegralGenerator(object): def __init__(self, ir, backend): # Store ir From 52bcbe45ffecff7a76fc8459d95881f9a1874a05 Mon Sep 17 00:00:00 2001 From: Chris Richardson Date: Mon, 4 Sep 2023 15:33:08 +0100 Subject: [PATCH 29/44] Rename CNodes as LNodes, add typing, process AST separately from creating it. 
(#594) * Move Cnodes to Lnodes * Use new LNodes * Adjust for dtype * Work mostly on Expression * Remove flop count test * Add demo to test complex literal * Formatting tweaks for complex numbers * Fixes for obscure functionality (which is probably broken in main) * Correction to facet_edge_vertices * Make precision work * Remove unused files * Get rid of UFL2LNodes class * Fix documentation --- demo/CellGeometry.py | 8 +- demo/ComplexPoisson.py | 39 + demo/MetaData.py | 2 + demo/test_demos.py | 6 +- ffcx/analysis.py | 2 +- ffcx/codegeneration/C/c_implementation.py | 349 +++++ ffcx/codegeneration/C/cnodes.py | 1547 ------------------- ffcx/codegeneration/C/expressions.py | 6 +- ffcx/codegeneration/C/format_lines.py | 60 - ffcx/codegeneration/C/format_value.py | 59 - ffcx/codegeneration/C/integrals.py | 5 +- ffcx/codegeneration/C/precedence.py | 61 - ffcx/codegeneration/C/ufl_to_cnodes.py | 295 ---- ffcx/codegeneration/access.py | 29 +- ffcx/codegeneration/backend.py | 17 +- ffcx/codegeneration/definitions.py | 21 +- ffcx/codegeneration/expression_generator.py | 60 +- ffcx/codegeneration/geometry.py | 66 +- ffcx/codegeneration/integral_generator.py | 116 +- ffcx/codegeneration/lnodes.py | 874 +++++++++++ ffcx/codegeneration/symbols.py | 50 +- ffcx/codegeneration/utils.py | 34 + test/test_flops.py | 40 - 23 files changed, 1452 insertions(+), 2294 deletions(-) create mode 100644 demo/ComplexPoisson.py create mode 100644 ffcx/codegeneration/C/c_implementation.py delete mode 100644 ffcx/codegeneration/C/cnodes.py delete mode 100644 ffcx/codegeneration/C/format_lines.py delete mode 100644 ffcx/codegeneration/C/format_value.py delete mode 100644 ffcx/codegeneration/C/precedence.py delete mode 100644 ffcx/codegeneration/C/ufl_to_cnodes.py create mode 100644 ffcx/codegeneration/lnodes.py create mode 100644 ffcx/codegeneration/utils.py delete mode 100644 test/test_flops.py diff --git a/demo/CellGeometry.py b/demo/CellGeometry.py index 57cd9e88f..8eea59f0f 100644 --- a/demo/CellGeometry.py +++ b/demo/CellGeometry.py @@ -3,7 +3,8 @@ # A functional M involving a bunch of cell geometry quantities. import basix.ufl from ufl import (CellVolume, Circumradius, Coefficient, FacetArea, FacetNormal, - SpatialCoordinate, ds, dx, tetrahedron) + SpatialCoordinate, ds, dx, tetrahedron, TrialFunction) +from ufl.geometry import FacetEdgeVectors cell = tetrahedron V = basix.ufl.element("P", cell.cellname(), 1) @@ -17,3 +18,8 @@ area = FacetArea(cell) M = u * (x[0] * vol * rad) * dx + u * (x[0] * vol * rad * area) * ds # + u*area*avg(n[0]*x[0]*vol*rad)*dS + +# Test some obscure functionality +fev = FacetEdgeVectors(cell) +v = TrialFunction(V) +L = fev[0, 0] * v * ds diff --git a/demo/ComplexPoisson.py b/demo/ComplexPoisson.py new file mode 100644 index 000000000..731ec9c09 --- /dev/null +++ b/demo/ComplexPoisson.py @@ -0,0 +1,39 @@ +# Copyright (C) 2023 Chris Richardson +# +# This file is part of FFCx. +# +# FFCx is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# FFCx is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with FFCx. If not, see . 
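+#
+# A minimal usage sketch (assuming the CLI options exercised in
+# demo/test_demos.py later in this patch): compile this demo in
+# complex mode with
+#
+#     ffcx --scalar_type "double _Complex" ComplexPoisson.py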
+# +# The bilinear form a(u, v) and linear form L(v) for +# Poisson's equation using bilinear elements on bilinear mesh geometry. +import basix.ufl +from ufl import (Coefficient, FunctionSpace, Mesh, TestFunction, TrialFunction, + dx, grad, inner) + +coords = basix.ufl.element("P", "triangle", 2, rank=1) +mesh = Mesh(coords) +dx = dx(mesh) + +element = basix.ufl.element("P", mesh.ufl_cell().cellname(), 2) +space = FunctionSpace(mesh, element) + +u = TrialFunction(space) +v = TestFunction(space) +f = Coefficient(space) + +# Test literal complex number in form +k = 3.213 + 1.023j + +a = k * inner(grad(u), grad(v)) * dx +L = inner(k * f, v) * dx diff --git a/demo/MetaData.py b/demo/MetaData.py index bc94b7465..3de96f592 100644 --- a/demo/MetaData.py +++ b/demo/MetaData.py @@ -32,3 +32,5 @@ + inner(c, c) * inner(grad(u), grad(v)) * dx(1, degree=4)\ + inner(c, c) * inner(grad(u), grad(v)) * dx(1, degree=2)\ + inner(grad(u), grad(v)) * dx(1, degree=-1) + +L = v * dx(0, metadata={"precision": 1}) diff --git a/demo/test_demos.py b/demo/test_demos.py index d28e394e4..8cf199619 100644 --- a/demo/test_demos.py +++ b/demo/test_demos.py @@ -22,8 +22,12 @@ def test_demo(file): # Skip demos that use elements not yet implemented in Basix pytest.skip() + opts = "" + if "Complex" in file: + opts = '--scalar_type "double _Complex"' + extra_flags = "-Wunused-variable -Werror -fPIC " - assert os.system(f"cd {demo_dir} && ffcx {file}.py") == 0 + assert os.system(f"cd {demo_dir} && ffcx {opts} {file}.py") == 0 assert os.system(f"cd {demo_dir} && " "CPATH=../ffcx/codegeneration/ " f"gcc -I/usr/include/python{sys.version_info.major}.{sys.version_info.minor} {extra_flags}" diff --git a/ffcx/analysis.py b/ffcx/analysis.py index 51ea1610f..0e55a6e19 100644 --- a/ffcx/analysis.py +++ b/ffcx/analysis.py @@ -202,7 +202,7 @@ def _analyze_form(form: ufl.form.Form, options: typing.Dict) -> ufl.algorithms.f p = precisions.pop() elif len(precisions) == 0: # Default precision - p = np.finfo("double").precision + 1 # == 16 + p = None else: raise RuntimeError("Only one precision allowed within integrals grouped by subdomain.") diff --git a/ffcx/codegeneration/C/c_implementation.py b/ffcx/codegeneration/C/c_implementation.py new file mode 100644 index 000000000..f6d18bacd --- /dev/null +++ b/ffcx/codegeneration/C/c_implementation.py @@ -0,0 +1,349 @@ +# Copyright (C) 2023 Chris Richardson +# +# This file is part of FFCx. 
(https://www.fenicsproject.org) +# +# SPDX-License-Identifier: LGPL-3.0-or-later + +import warnings +import ffcx.codegeneration.lnodes as L +from ffcx.codegeneration.utils import scalar_to_value_type, cdtype_to_numpy +import numpy as np + +math_table = { + "double": { + "sqrt": "sqrt", + "abs": "fabs", + "cos": "cos", + "sin": "sin", + "tan": "tan", + "acos": "acos", + "asin": "asin", + "atan": "atan", + "cosh": "cosh", + "sinh": "sinh", + "tanh": "tanh", + "acosh": "acosh", + "asinh": "asinh", + "atanh": "atanh", + "power": "pow", + "exp": "exp", + "ln": "log", + "erf": "erf", + "atan_2": "atan2", + "min_value": "fmin", + "max_value": "fmax", + "bessel_y": "yn", + "bessel_j": "jn", + }, + "float": { + "sqrt": "sqrtf", + "abs": "fabsf", + "cos": "cosf", + "sin": "sinf", + "tan": "tanf", + "acos": "acosf", + "asin": "asinf", + "atan": "atanf", + "cosh": "coshf", + "sinh": "sinhf", + "tanh": "tanhf", + "acosh": "acoshf", + "asinh": "asinhf", + "atanh": "atanhf", + "power": "powf", + "exp": "expf", + "ln": "logf", + "erf": "erff", + "atan_2": "atan2f", + "min_value": "fminf", + "max_value": "fmaxf", + "bessel_y": "yn", + "bessel_j": "jn", + }, + "long double": { + "sqrt": "sqrtl", + "abs": "fabsl", + "cos": "cosl", + "sin": "sinl", + "tan": "tanl", + "acos": "acosl", + "asin": "asinl", + "atan": "atanl", + "cosh": "coshl", + "sinh": "sinhl", + "tanh": "tanhl", + "acosh": "acoshl", + "asinh": "asinhl", + "atanh": "atanhl", + "power": "powl", + "exp": "expl", + "ln": "logl", + "erf": "erfl", + "atan_2": "atan2l", + "min_value": "fminl", + "max_value": "fmaxl", + }, + "double _Complex": { + "sqrt": "csqrt", + "abs": "cabs", + "cos": "ccos", + "sin": "csin", + "tan": "ctan", + "acos": "cacos", + "asin": "casin", + "atan": "catan", + "cosh": "ccosh", + "sinh": "csinh", + "tanh": "ctanh", + "acosh": "cacosh", + "asinh": "casinh", + "atanh": "catanh", + "power": "cpow", + "exp": "cexp", + "ln": "clog", + "real": "creal", + "imag": "cimag", + "conj": "conj", + "max_value": "fmax", + "min_value": "fmin", + "bessel_y": "yn", + "bessel_j": "jn", + }, + "float _Complex": { + "sqrt": "csqrtf", + "abs": "cabsf", + "cos": "ccosf", + "sin": "csinf", + "tan": "ctanf", + "acos": "cacosf", + "asin": "casinf", + "atan": "catanf", + "cosh": "ccoshf", + "sinh": "csinhf", + "tanh": "ctanhf", + "acosh": "cacoshf", + "asinh": "casinhf", + "atanh": "catanhf", + "power": "cpowf", + "exp": "cexpf", + "ln": "clogf", + "real": "crealf", + "imag": "cimagf", + "conj": "conjf", + "max_value": "fmaxf", + "min_value": "fminf", + "bessel_y": "yn", + "bessel_j": "jn", + }, +} + + +class CFormatter(object): + def __init__(self, scalar, precision=None) -> None: + self.scalar_type = scalar + self.real_type = scalar_to_value_type(scalar) + if precision is None: + np_type = cdtype_to_numpy(self.real_type) + self.precision = np.finfo(np_type).precision + 1 + else: + assert isinstance(precision, int) + self.precision = precision + + def _format_number(self, x): + p = self.precision + if isinstance(x, complex): + return f"({x.real:.{p}}+I*{x.imag:.{p}})" + elif isinstance(x, float): + return f"{x:.{p}}" + return str(x) + + def _build_initializer_lists(self, values): + arr = "{" + if len(values.shape) == 1: + arr += ", ".join(self._format_number(v) for v in values) + elif len(values.shape) > 1: + arr += ",\n ".join(self._build_initializer_lists(v) for v in values) + arr += "}" + return arr + + def format_statement_list(self, slist) -> str: + return "".join(self.c_format(s) for s in slist.statements) + + def format_comment(self, c) -> 
str: + return "// " + c.comment + "\n" + + def format_array_decl(self, arr) -> str: + dtype = arr.symbol.dtype + assert dtype is not None + + if dtype == L.DataType.SCALAR: + typename = self.scalar_type + elif dtype == L.DataType.REAL: + typename = self.real_type + elif dtype == L.DataType.INT: + typename = "int" + else: + raise ValueError(f"Invalid dtype: {dtype}") + + symbol = self.c_format(arr.symbol) + dims = "".join([f"[{i}]" for i in arr.sizes]) + if arr.values is None: + assert arr.const is False + return f"{typename} {symbol}{dims};\n" + + vals = self._build_initializer_lists(arr.values) + cstr = "static const " if arr.const else "" + return f"{cstr}{typename} {symbol}{dims} = {vals};\n" + + def format_array_access(self, arr) -> str: + name = self.c_format(arr.array) + indices = f"[{']['.join(self.c_format(i) for i in arr.indices)}]" + return f"{name}{indices}" + + def format_variable_decl(self, v) -> str: + val = self.c_format(v.value) + symbol = self.c_format(v.symbol) + assert v.symbol.dtype + if v.symbol.dtype == L.DataType.SCALAR: + typename = self.scalar_type + elif v.symbol.dtype == L.DataType.REAL: + typename = self.real_type + return f"{typename} {symbol} = {val};\n" + + def format_nary_op(self, oper) -> str: + # Format children + args = [self.c_format(arg) for arg in oper.args] + + # Apply parentheses + for i in range(len(args)): + if oper.args[i].precedence >= oper.precedence: + args[i] = "(" + args[i] + ")" + + # Return combined string + return f" {oper.op} ".join(args) + + def format_binary_op(self, oper) -> str: + # Format children + lhs = self.c_format(oper.lhs) + rhs = self.c_format(oper.rhs) + + # Apply parentheses + if oper.lhs.precedence >= oper.precedence: + lhs = f"({lhs})" + if oper.rhs.precedence >= oper.precedence: + rhs = f"({rhs})" + + # Return combined string + return f"{lhs} {oper.op} {rhs}" + + def format_neg(self, val) -> str: + arg = self.c_format(val.arg) + return f"-{arg}" + + def format_not(self, val) -> str: + arg = self.c_format(val.arg) + return f"{val.op}({arg})" + + def format_literal_float(self, val) -> str: + value = self._format_number(val.value) + return f"{value}" + + def format_literal_int(self, val) -> str: + return f"{val.value}" + + def format_for_range(self, r) -> str: + begin = self.c_format(r.begin) + end = self.c_format(r.end) + index = self.c_format(r.index) + output = f"for (int {index} = {begin}; {index} < {end}; ++{index})\n" + output += "{\n" + body = self.c_format(r.body) + for line in body.split("\n"): + if len(line) > 0: + output += f" {line}\n" + output += "}\n" + return output + + def format_statement(self, s) -> str: + return self.c_format(s.expr) + + def format_assign(self, expr) -> str: + rhs = self.c_format(expr.rhs) + lhs = self.c_format(expr.lhs) + return f"{lhs} {expr.op} {rhs};\n" + + def format_conditional(self, s) -> str: + # Format children + c = self.c_format(s.condition) + t = self.c_format(s.true) + f = self.c_format(s.false) + + # Apply parentheses + if s.condition.precedence >= s.precedence: + c = "(" + c + ")" + if s.true.precedence >= s.precedence: + t = "(" + t + ")" + if s.false.precedence >= s.precedence: + f = "(" + f + ")" + + # Return combined string + return c + " ? 
" + t + " : " + f + + def format_symbol(self, s) -> str: + return f"{s.name}" + + def format_math_function(self, c) -> str: + # Get a table of functions for this type, if available + arg_type = self.scalar_type + if hasattr(c.args[0], "dtype"): + if c.args[0].dtype == L.DataType.REAL: + arg_type = self.real_type + else: + warnings.warn(f"Syntax item without dtype {c.args[0]}") + + dtype_math_table = math_table.get(arg_type, {}) + + # Get a function from the table, if available, else just use bare name + func = dtype_math_table.get(c.function, c.function) + args = ", ".join(self.c_format(arg) for arg in c.args) + return f"{func}({args})" + + c_impl = { + "StatementList": format_statement_list, + "Comment": format_comment, + "ArrayDecl": format_array_decl, + "ArrayAccess": format_array_access, + "VariableDecl": format_variable_decl, + "ForRange": format_for_range, + "Statement": format_statement, + "Assign": format_assign, + "AssignAdd": format_assign, + "Product": format_nary_op, + "Neg": format_neg, + "Sum": format_nary_op, + "Add": format_binary_op, + "Sub": format_binary_op, + "Mul": format_binary_op, + "Div": format_binary_op, + "Not": format_not, + "LiteralFloat": format_literal_float, + "LiteralInt": format_literal_int, + "Symbol": format_symbol, + "Conditional": format_conditional, + "MathFunction": format_math_function, + "And": format_binary_op, + "Or": format_binary_op, + "NE": format_binary_op, + "EQ": format_binary_op, + "GE": format_binary_op, + "LE": format_binary_op, + "GT": format_binary_op, + "LT": format_binary_op, + } + + def c_format(self, s) -> str: + name = s.__class__.__name__ + try: + return self.c_impl[name](self, s) + except KeyError: + raise RuntimeError("Unknown statement: ", name) diff --git a/ffcx/codegeneration/C/cnodes.py b/ffcx/codegeneration/C/cnodes.py deleted file mode 100644 index c648d3284..000000000 --- a/ffcx/codegeneration/C/cnodes.py +++ /dev/null @@ -1,1547 +0,0 @@ -# Copyright (C) 2013-2017 Martin Sandve Alnæs -# -# This file is part of FFCx.(https://www.fenicsproject.org) -# -# SPDX-License-Identifier: LGPL-3.0-or-later - -import logging -import numbers - -import numpy as np - -from ffcx.codegeneration.C.format_lines import Indented, format_indented_lines -from ffcx.codegeneration.C.format_value import format_float, format_int, format_value -from ffcx.codegeneration.C.precedence import PRECEDENCE - -logger = logging.getLogger("ffcx") -"""CNode TODO: -- Array copy statement -- Extend ArrayDecl and ArrayAccess with support for - flattened but conceptually multidimensional arrays, - maybe even with padding (FlattenedArray possibly covers what we need) -- Function declaration -- TypeDef -- Type -- TemplateArgumentList -- Class declaration -- Class definition -""" - -# Some helper functions - - -def is_zero_cexpr(cexpr): - return (isinstance(cexpr, LiteralFloat) and cexpr.value == 0.0) or ( - isinstance(cexpr, LiteralInt) and cexpr.value == 0 - ) - - -def is_one_cexpr(cexpr): - return (isinstance(cexpr, LiteralFloat) and cexpr.value == 1.0) or ( - isinstance(cexpr, LiteralInt) and cexpr.value == 1 - ) - - -def is_negative_one_cexpr(cexpr): - return (isinstance(cexpr, LiteralFloat) and cexpr.value == -1.0) or ( - isinstance(cexpr, LiteralInt) and cexpr.value == -1 - ) - - -def float_product(factors): - """Build product of float factors, simplifying ones and zeros and returning 1.0 if empty sequence.""" - factors = [f for f in factors if not is_one_cexpr(f)] - if len(factors) == 0: - return LiteralFloat(1.0) - elif len(factors) == 1: - return factors[0] 
- else: - for f in factors: - if is_zero_cexpr(f): - return f - return Product(factors) - - -# CNode core - - -class CNode(object): - """Base class for all C AST nodes.""" - - __slots__ = () - - def __str__(self): - name = self.__class__.__name__ - raise NotImplementedError("Missing implementation of __str__ in " + name) - - def __eq__(self, other): - name = self.__class__.__name__ - raise NotImplementedError("Missing implementation of __eq__ in " + name) - - def __ne__(self, other): - return not self.__eq__(other) - - -# CExpr base classes - - -class CExpr(CNode): - """Base class for all C expressions. - - All subtypes should define a 'precedence' class attribute. - - """ - - __slots__ = () - - def ce_format(self, precision=None): - raise NotImplementedError("Missing implementation of ce_format() in CExpr.") - - def __str__(self): - try: - s = self.ce_format() - except Exception: - raise - - return s - - def __getitem__(self, indices): - return ArrayAccess(self, indices) - - def __neg__(self): - if isinstance(self, LiteralFloat): - return LiteralFloat(-self.value) - if isinstance(self, LiteralInt): - return LiteralInt(-self.value) - return Neg(self) - - def __add__(self, other): - other = as_cexpr(other) - if is_zero_cexpr(self): - return other - if is_zero_cexpr(other): - return self - if isinstance(other, Neg): - return Sub(self, other.arg) - return Add(self, other) - - def __radd__(self, other): - other = as_cexpr(other) - if is_zero_cexpr(self): - return other - if is_zero_cexpr(other): - return self - if isinstance(self, Neg): - return Sub(other, self.arg) - return Add(other, self) - - def __sub__(self, other): - other = as_cexpr(other) - if is_zero_cexpr(self): - return -other - if is_zero_cexpr(other): - return self - if isinstance(other, Neg): - return Add(self, other.arg) - return Sub(self, other) - - def __rsub__(self, other): - other = as_cexpr(other) - if is_zero_cexpr(self): - return other - if is_zero_cexpr(other): - return -self - if isinstance(self, Neg): - return Add(other, self.arg) - return Sub(other, self) - - def __mul__(self, other): - other = as_cexpr(other) - if is_zero_cexpr(self): - return self - if is_zero_cexpr(other): - return other - if is_one_cexpr(self): - return other - if is_one_cexpr(other): - return self - if is_negative_one_cexpr(other): - return Neg(self) - if is_negative_one_cexpr(self): - return Neg(other) - return Mul(self, other) - - def __rmul__(self, other): - other = as_cexpr(other) - if is_zero_cexpr(self): - return self - if is_zero_cexpr(other): - return other - if is_one_cexpr(self): - return other - if is_one_cexpr(other): - return self - if is_negative_one_cexpr(other): - return Neg(self) - if is_negative_one_cexpr(self): - return Neg(other) - return Mul(other, self) - - def __div__(self, other): - other = as_cexpr(other) - if is_zero_cexpr(other): - raise ValueError("Division by zero!") - if is_zero_cexpr(self): - return self - return Div(self, other) - - def __rdiv__(self, other): - other = as_cexpr(other) - if is_zero_cexpr(self): - raise ValueError("Division by zero!") - if is_zero_cexpr(other): - return other - return Div(other, self) - - # TODO: Error check types? Can't do that exactly as symbols here have no type. 
- __truediv__ = __div__ - __rtruediv__ = __rdiv__ - __floordiv__ = __div__ - __rfloordiv__ = __rdiv__ - - def __mod__(self, other): - other = as_cexpr(other) - if is_zero_cexpr(other): - raise ValueError("Division by zero!") - if is_zero_cexpr(self): - return self - return Mod(self, other) - - def __rmod__(self, other): - other = as_cexpr(other) - if is_zero_cexpr(self): - raise ValueError("Division by zero!") - if is_zero_cexpr(other): - return other - return Mod(other, self) - - -class CExprOperator(CExpr): - """Base class for all C expression operator.""" - - __slots__ = () - sideeffect = False - - -class CExprTerminal(CExpr): - """Base class for all C expression terminals.""" - - __slots__ = () - sideeffect = False - - -# CExprTerminal types - - -class CExprLiteral(CExprTerminal): - """A float or int literal value.""" - - __slots__ = () - precedence = PRECEDENCE.LITERAL - - -class Null(CExprLiteral): - """A null pointer literal.""" - - __slots__ = () - precedence = PRECEDENCE.LITERAL - - def ce_format(self, precision=None): - return "NULL" - - def __eq__(self, other): - return isinstance(other, Null) - - -class LiteralFloat(CExprLiteral): - """A floating point literal value.""" - - __slots__ = ("value",) - precedence = PRECEDENCE.LITERAL - - def __init__(self, value): - assert isinstance(value, (float, complex, int, np.number)) - self.value = value - - def ce_format(self, precision=None): - return format_float(self.value, precision) - - def __eq__(self, other): - return isinstance(other, LiteralFloat) and self.value == other.value - - def __bool__(self): - return bool(self.value) - - __nonzero__ = __bool__ - - def __float__(self): - return float(self.value) - - def flops(self): - return 0 - - -class LiteralInt(CExprLiteral): - """An integer literal value.""" - - __slots__ = ("value",) - precedence = PRECEDENCE.LITERAL - - def __init__(self, value): - assert isinstance(value, (int, np.number)) - self.value = value - - def ce_format(self, precision=None): - return str(self.value) - - def flops(self): - return 0 - - def __eq__(self, other): - return isinstance(other, LiteralInt) and self.value == other.value - - def __bool__(self): - return bool(self.value) - - __nonzero__ = __bool__ - - def __int__(self): - return int(self.value) - - def __float__(self): - return float(self.value) - - def __hash__(self): - return hash(self.ce_format()) - - -class LiteralBool(CExprLiteral): - """A boolean literal value.""" - - __slots__ = ("value",) - precedence = PRECEDENCE.LITERAL - - def __init__(self, value): - assert isinstance(value, (bool,)) - self.value = value - - def ce_format(self, precision=None): - return "true" if self.value else "false" - - def __eq__(self, other): - return isinstance(other, LiteralBool) and self.value == other.value - - def __bool__(self): - return bool(self.value) - - __nonzero__ = __bool__ - - -class LiteralString(CExprLiteral): - """A boolean literal value.""" - - __slots__ = ("value",) - precedence = PRECEDENCE.LITERAL - - def __init__(self, value): - assert isinstance(value, (str,)) - assert '"' not in value - self.value = value - - def ce_format(self, precision=None): - return '"%s"' % (self.value,) - - def __eq__(self, other): - return isinstance(other, LiteralString) and self.value == other.value - - -class Symbol(CExprTerminal): - """A named symbol.""" - - __slots__ = ("name",) - precedence = PRECEDENCE.SYMBOL - - def __init__(self, name): - assert isinstance(name, str) - self.name = name - - def ce_format(self, precision=None): - return self.name - - def 
flops(self): - return 0 - - def __eq__(self, other): - return isinstance(other, Symbol) and self.name == other.name - - def __hash__(self): - return hash(self.ce_format()) - - -# CExprOperator base classes - - -class UnaryOp(CExprOperator): - """Base class for unary operators.""" - - __slots__ = ("arg",) - - def __init__(self, arg): - self.arg = as_cexpr(arg) - - def __eq__(self, other): - return isinstance(other, type(self)) and self.arg == other.arg - - def flops(self): - raise NotImplementedError() - - -class PrefixUnaryOp(UnaryOp): - """Base class for prefix unary operators.""" - - __slots__ = () - - def ce_format(self, precision=None): - arg = self.arg.ce_format(precision) - if self.arg.precedence >= self.precedence: - arg = "(" + arg + ")" - return self.op + arg - - def __eq__(self, other): - return isinstance(other, type(self)) - - -class PostfixUnaryOp(UnaryOp): - """Base class for postfix unary operators.""" - - __slots__ = () - - def ce_format(self, precision=None): - arg = self.arg.ce_format(precision) - if self.arg.precedence >= self.precedence: - arg = "(" + arg + ")" - return arg + self.op - - def __eq__(self, other): - return isinstance(other, type(self)) - - -class BinOp(CExprOperator): - __slots__ = ("lhs", "rhs") - - def __init__(self, lhs, rhs): - self.lhs = as_cexpr(lhs) - self.rhs = as_cexpr(rhs) - - def ce_format(self, precision=None): - # Format children - lhs = self.lhs.ce_format(precision) - rhs = self.rhs.ce_format(precision) - - # Apply parentheses - if self.lhs.precedence >= self.precedence: - lhs = "(" + lhs + ")" - if self.rhs.precedence >= self.precedence: - rhs = "(" + rhs + ")" - - # Return combined string - return lhs + (" " + self.op + " ") + rhs - - def __eq__(self, other): - return ( - isinstance(other, type(self)) - and self.lhs == other.lhs - and self.rhs == other.rhs - ) - - def __hash__(self): - return hash(self.ce_format()) - - def flops(self): - return 1 + self.lhs.flops() + self.rhs.flops() - - -class NaryOp(CExprOperator): - """Base class for special n-ary operators.""" - - __slots__ = ("args",) - - def __init__(self, args): - self.args = [as_cexpr(arg) for arg in args] - - def ce_format(self, precision=None): - # Format children - args = [arg.ce_format(precision) for arg in self.args] - - # Apply parentheses - for i in range(len(args)): - if self.args[i].precedence >= self.precedence: - args[i] = "(" + args[i] + ")" - - # Return combined string - op = " " + self.op + " " - s = args[0] - for i in range(1, len(args)): - s += op + args[i] - return s - - def __eq__(self, other): - return ( - isinstance(other, type(self)) - and len(self.args) == len(other.args) - and all(a == b for a, b in zip(self.args, other.args)) - ) - - def flops(self): - flops = len(self.args) - 1 - for arg in self.args: - flops += arg.flops() - return flops - - -# CExpr unary operators - - -class AddressOf(PrefixUnaryOp): - __slots__ = () - precedence = PRECEDENCE.ADDRESSOF - op = "&" - - -class SizeOf(PrefixUnaryOp): - __slots__ = () - precedence = PRECEDENCE.SIZEOF - op = "sizeof" - - -class Neg(PrefixUnaryOp): - __slots__ = () - precedence = PRECEDENCE.NEG - op = "-" - - -class Pos(PrefixUnaryOp): - __slots__ = () - precedence = PRECEDENCE.POS - op = "+" - - -class Not(PrefixUnaryOp): - __slots__ = () - precedence = PRECEDENCE.NOT - op = "!" 
- - -class BitNot(PrefixUnaryOp): - __slots__ = () - precedence = PRECEDENCE.BIT_NOT - op = "~" - - -class PreIncrement(PrefixUnaryOp): - __slots__ = () - precedence = PRECEDENCE.PRE_INC - sideeffect = True - op = "++" - - -class PreDecrement(PrefixUnaryOp): - __slots__ = () - precedence = PRECEDENCE.PRE_DEC - sideeffect = True - op = "--" - - -class PostIncrement(PostfixUnaryOp): - __slots__ = () - precedence = PRECEDENCE.POST_INC - sideeffect = True - op = "++" - - -class PostDecrement(PostfixUnaryOp): - __slots__ = () - precedence = PRECEDENCE.POST_DEC - sideeffect = True - op = "--" - - -# CExpr binary operators - - -class Add(BinOp): - __slots__ = () - precedence = PRECEDENCE.ADD - op = "+" - - -class Sub(BinOp): - __slots__ = () - precedence = PRECEDENCE.SUB - op = "-" - - -class Mul(BinOp): - __slots__ = () - precedence = PRECEDENCE.MUL - op = "*" - - -class Div(BinOp): - __slots__ = () - precedence = PRECEDENCE.DIV - op = "/" - - -class Mod(BinOp): - __slots__ = () - precedence = PRECEDENCE.MOD - op = "%" - - -class EQ(BinOp): - __slots__ = () - precedence = PRECEDENCE.EQ - op = "==" - - -class NE(BinOp): - __slots__ = () - precedence = PRECEDENCE.NE - op = "!=" - - -class LT(BinOp): - __slots__ = () - precedence = PRECEDENCE.LT - op = "<" - - -class GT(BinOp): - __slots__ = () - precedence = PRECEDENCE.GT - op = ">" - - -class LE(BinOp): - __slots__ = () - precedence = PRECEDENCE.LE - op = "<=" - - -class GE(BinOp): - __slots__ = () - precedence = PRECEDENCE.GE - op = ">=" - - -class BitwiseAnd(BinOp): - __slots__ = () - precedence = PRECEDENCE.BITAND - op = "&" - - -class BitShiftR(BinOp): - __slots__ = () - precedence = PRECEDENCE.BITSHIFT - op = ">>" - - -class BitShiftL(BinOp): - __slots__ = () - precedence = PRECEDENCE.BITSHIFT - op = "<<" - - -class And(BinOp): - __slots__ = () - precedence = PRECEDENCE.AND - op = "&&" - - -class Or(BinOp): - __slots__ = () - precedence = PRECEDENCE.OR - op = "||" - - -class Sum(NaryOp): - """Sum of any number of operands.""" - - __slots__ = () - precedence = PRECEDENCE.ADD - op = "+" - - -class Product(NaryOp): - """Product of any number of operands.""" - - __slots__ = () - precedence = PRECEDENCE.MUL - op = "*" - - -class AssignOp(BinOp): - """Base class for assignment operators.""" - - __slots__ = () - precedence = PRECEDENCE.ASSIGN - sideeffect = True - - def __init__(self, lhs, rhs): - BinOp.__init__(self, as_cexpr_or_string_symbol(lhs), rhs) - - -class Assign(AssignOp): - __slots__ = () - op = "=" - - def flops(self): - return super().flops() - 1 - - -class AssignAdd(AssignOp): - __slots__ = () - op = "+=" - - -class AssignSub(AssignOp): - __slots__ = () - op = "-=" - - -class AssignMul(AssignOp): - __slots__ = () - op = "*=" - - -class AssignDiv(AssignOp): - __slots__ = () - op = "/=" - - -# CExpr operators - - -class FlattenedArray(object): - """Syntax carrying object only, will get translated on __getitem__ to ArrayAccess.""" - - __slots__ = ("array", "strides", "offset", "dims") - - def __init__(self, array, dummy=None, dims=None, strides=None, offset=None): - assert dummy is None, "Please use keyword arguments for strides or dims." - - # Typecheck array argument - if isinstance(array, ArrayDecl): - self.array = array.symbol - elif isinstance(array, Symbol): - self.array = array - else: - assert isinstance(array, str) - self.array = Symbol(array) - - # Allow expressions or literals as strides or dims and offset - if strides is None: - assert dims is not None, "Please provide either strides or dims." 
- assert isinstance(dims, (list, tuple)) - dims = tuple(as_cexpr(i) for i in dims) - self.dims = dims - n = len(dims) - literal_one = LiteralInt(1) - strides = [literal_one] * n - for i in range(n - 2, -1, -1): - s = strides[i + 1] - d = dims[i + 1] - if d == literal_one: - strides[i] = s - elif s == literal_one: - strides[i] = d - else: - strides[i] = d * s - else: - self.dims = None - assert isinstance(strides, (list, tuple)) - strides = tuple(as_cexpr(i) for i in strides) - self.strides = strides - self.offset = None if offset is None else as_cexpr(offset) - - def __getitem__(self, indices): - if not isinstance(indices, (list, tuple)): - indices = (indices,) - n = len(indices) - if n == 0: - # Handle scalar case, allowing dims=() and indices=() for A[0] - if len(self.strides) != 0: - raise ValueError("Empty indices for nonscalar array.") - flat = LiteralInt(0) - else: - i, s = (indices[0], self.strides[0]) - literal_one = LiteralInt(1) - flat = i if s == literal_one else s * i - if self.offset is not None: - flat = self.offset + flat - for i, s in zip(indices[1:n], self.strides[1:n]): - flat = flat + (i if s == literal_one else s * i) - # Delay applying ArrayAccess until we have all indices - if n == len(self.strides): - return ArrayAccess(self.array, flat) - else: - return FlattenedArray(self.array, strides=self.strides[n:], offset=flat) - - -class ArrayAccess(CExprOperator): - __slots__ = ("array", "indices") - precedence = PRECEDENCE.SUBSCRIPT - - def __init__(self, array, indices): - # Typecheck array argument - if isinstance(array, Symbol): - self.array = array - elif isinstance(array, ArrayDecl): - self.array = array.symbol - else: - raise ValueError("Unexpected array type %s." % (type(array).__name__,)) - - # Allow expressions or literals as indices - if not isinstance(indices, (list, tuple)): - indices = (indices,) - self.indices = tuple(as_cexpr_or_string_symbol(i) for i in indices) - - # Early error checking for negative array dimensions - if any(isinstance(i, int) and i < 0 for i in self.indices): - raise ValueError("Index value < 0.") - - # Additional dimension checks possible if we get an ArrayDecl instead of just a name - if isinstance(array, ArrayDecl): - if len(self.indices) != len(array.sizes): - raise ValueError("Invalid number of indices.") - ints = (int, LiteralInt) - if any( - (isinstance(i, ints) and isinstance(d, ints) and int(i) >= int(d)) - for i, d in zip(self.indices, array.sizes) - ): - raise ValueError("Index value >= array dimension.") - - def __getitem__(self, indices): - """Handle nested expr[i][j].""" - if isinstance(indices, list): - indices = tuple(indices) - elif not isinstance(indices, tuple): - indices = (indices,) - return ArrayAccess(self.array, self.indices + indices) - - def ce_format(self, precision=None): - s = self.array.ce_format(precision) - for index in self.indices: - s += "[" + index.ce_format(precision) + "]" - return s - - def __eq__(self, other): - return ( - isinstance(other, type(self)) - and self.array == other.array - and self.indices == other.indices - ) - - def __hash__(self): - return hash(self.ce_format()) - - def flops(self): - return 0 - - -class Conditional(CExprOperator): - __slots__ = ("condition", "true", "false") - precedence = PRECEDENCE.CONDITIONAL - - def __init__(self, condition, true, false): - self.condition = as_cexpr(condition) - self.true = as_cexpr(true) - self.false = as_cexpr(false) - - def ce_format(self, precision=None): - # Format children - c = self.condition.ce_format(precision) - t = 
self.true.ce_format(precision) - f = self.false.ce_format(precision) - - # Apply parentheses - if self.condition.precedence >= self.precedence: - c = "(" + c + ")" - if self.true.precedence >= self.precedence: - t = "(" + t + ")" - if self.false.precedence >= self.precedence: - f = "(" + f + ")" - - # Return combined string - return c + " ? " + t + " : " + f - - def __eq__(self, other): - return ( - isinstance(other, type(self)) - and self.condition == other.condition - and self.true == other.true - and self.false == other.false - ) - - def flops(self): - raise NotImplementedError("Flop count is not implemented for conditionals") - - -class Call(CExprOperator): - __slots__ = ("function", "arguments") - precedence = PRECEDENCE.CALL - sideeffect = True - - def __init__(self, function, arguments=None): - self.function = as_cexpr_or_string_symbol(function) - - # Accept None, single, or multiple arguments; literals or CExprs - if arguments is None: - arguments = () - elif not isinstance(arguments, (tuple, list)): - arguments = (arguments,) - self.arguments = [as_cexpr(arg) for arg in arguments] - - def ce_format(self, precision=None): - args = ", ".join(arg.ce_format(precision) for arg in self.arguments) - return self.function.ce_format(precision) + "(" + args + ")" - - def __eq__(self, other): - return ( - isinstance(other, type(self)) - and self.function == other.function - and self.arguments == other.arguments - ) - - def flops(self): - return 1 - - -def Sqrt(x): - return Call("sqrt", x) - - -# Conversion function to expression nodes - - -def _is_zero_valued(values): - if isinstance(values, (numbers.Integral, LiteralInt)): - return int(values) == 0 - elif isinstance(values, (numbers.Number, LiteralFloat)): - return float(values) == 0.0 - else: - return np.count_nonzero(values) == 0 - - -def as_cexpr(node): - """Typechecks and wraps an object as a valid CExpr. - - Accepts CExpr nodes, treats int and float as literals, and treats a - string as a symbol. - - """ - if isinstance(node, CExpr): - return node - elif isinstance(node, bool): - return LiteralBool(node) - elif isinstance(node, numbers.Integral): - return LiteralInt(node) - elif isinstance(node, numbers.Real): - return LiteralFloat(node) - elif isinstance(node, str): - raise RuntimeError("Got string for CExpr, this is ambiguous: %s" % (node,)) - else: - raise RuntimeError("Unexpected CExpr type %s:\n%s" % (type(node), str(node))) - - -def as_cexpr_or_string_symbol(node): - if isinstance(node, str): - return Symbol(node) - return as_cexpr(node) - - -def as_cexpr_or_literal(node): - if isinstance(node, str): - return LiteralString(node) - return as_cexpr(node) - - -def as_symbol(symbol): - if isinstance(symbol, str): - symbol = Symbol(symbol) - assert isinstance(symbol, Symbol) - return symbol - - -def flattened_indices(indices, shape): - """Return a flattened indexing expression. - - Given a tuple of indices and a shape tuple, return - a CNode expression for flattened indexing into multidimensional - array. - - Indices and shape entries can be int values, str symbol names, or - CNode expressions. 
- - """ - n = len(shape) - if n == 0: - # Scalar - return as_cexpr(0) - elif n == 1: - # Simple vector - return as_cexpr(indices[0]) - else: - # 2d or higher - strides = [None] * (n - 2) + [shape[-1], 1] - for i in range(n - 3, -1, -1): - strides[i] = Mul(shape[i + 1], strides[i + 1]) - result = indices[-1] - for i in range(n - 2, -1, -1): - result = Add(Mul(strides[i], indices[i]), result) - return result - - -# Base class for all statements - - -class CStatement(CNode): - """Base class for all C statements. - - Subtypes do _not_ define a 'precedence' class attribute. - - """ - - __slots__ = () - - # True if statement contains its own scope, false by default to be - # on the safe side - is_scoped = False - - def cs_format(self, precision=None): - """Return S: string | list(S) | Indented(S).""" - raise NotImplementedError( - "Missing implementation of cs_format() in CStatement." - ) - - def __str__(self): - try: - s = self.cs_format() - except Exception: - logger.error("Error in CStatement string formatting.") - raise - return format_indented_lines(s) - - def flops(self): - raise NotImplementedError() - - -# Statements - - -class VerbatimStatement(CStatement): - """Wraps a source code string to be pasted verbatim into the source code.""" - - __slots__ = ("codestring",) - is_scoped = False - - def __init__(self, codestring): - assert isinstance(codestring, str) - self.codestring = codestring - - def cs_format(self, precision=None): - return self.codestring - - def __eq__(self, other): - return isinstance(other, type(self)) and self.codestring == other.codestring - - -class Statement(CStatement): - """Make an expression into a statement.""" - - __slots__ = ("expr",) - is_scoped = False - - def __init__(self, expr): - self.expr = as_cexpr(expr) - - def cs_format(self, precision=None): - return self.expr.ce_format(precision) + ";" - - def __eq__(self, other): - return isinstance(other, type(self)) and self.expr == other.expr - - def flops(self): - # print(self.expr.rhs.flops()) - return self.expr.flops() - - -class StatementList(CStatement): - """A simple sequence of statements. 
No new scopes are introduced.""" - - __slots__ = ("statements",) - - def __init__(self, statements): - self.statements = [as_cstatement(st) for st in statements] - - @property - def is_scoped(self): - return all(st.is_scoped for st in self.statements) - - def cs_format(self, precision=None): - return [st.cs_format(precision) for st in self.statements] - - def __eq__(self, other): - return isinstance(other, type(self)) and self.statements == other.statements - - def flops(self): - flops = 0 - for statement in self.statements: - flops += statement.flops() - return flops - - -# Simple statements - - -class Return(CStatement): - __slots__ = ("value",) - is_scoped = True - - def __init__(self, value=None): - if value is None: - self.value = None - else: - self.value = as_cexpr(value) - - def cs_format(self, precision=None): - if self.value is None: - return "return;" - else: - return "return %s;" % (self.value.ce_format(precision),) - - def __eq__(self, other): - return isinstance(other, type(self)) and self.value == other.value - - def flops(self): - return 0 - - -class Comment(CStatement): - """Line comment(s) used for annotating the generated code with human readable remarks.""" - - __slots__ = ("comment",) - is_scoped = True - - def __init__(self, comment): - assert isinstance(comment, str) - self.comment = comment - - def cs_format(self, precision=None): - lines = self.comment.strip().split("\n") - return ["// " + line.strip() for line in lines] - - def __eq__(self, other): - return isinstance(other, type(self)) and self.comment == other.comment - - def flops(self): - return 0 - - -def NoOp(): - return Comment("Do nothing") - - -def commented_code_list(code, comments): - """Add comment to code list if the list is not empty.""" - if isinstance(code, CNode): - code = [code] - assert isinstance(code, list) - if code: - if not isinstance(comments, (list, tuple)): - comments = [comments] - comments = [Comment(c) for c in comments] - code = comments + code - return code - - -class Pragma(CStatement): - """Pragma comments used for compiler-specific annotations.""" - - __slots__ = ("comment",) - is_scoped = True - - def __init__(self, comment): - assert isinstance(comment, str) - self.comment = comment - - def cs_format(self, precision=None): - assert "\n" not in self.comment - return "#pragma " + self.comment - - def __eq__(self, other): - return isinstance(other, type(self)) and self.comment == other.comment - - def flops(self): - return 0 - - -# Type and variable declarations - - -class VariableDecl(CStatement): - """Declare a variable, optionally define initial value.""" - - __slots__ = ("typename", "symbol", "value") - is_scoped = False - - def __init__(self, typename, symbol, value=None): - # No type system yet, just using strings - assert isinstance(typename, str) - self.typename = typename - - # Allow Symbol or just a string - self.symbol = as_symbol(symbol) - - if value is not None: - value = as_cexpr(value) - self.value = value - - def cs_format(self, precision=None): - code = self.typename + " " + self.symbol.name - if self.value is not None: - code += " = " + self.value.ce_format(precision) - return code + ";" - - def __eq__(self, other): - return ( - isinstance(other, type(self)) - and self.typename == other.typename - and self.symbol == other.symbol - and self.value == other.value - ) - - def flops(self): - if self.value is not None: - return self.value.flops() - else: - return 0 - - -def leftover(size, padlen): - """Return minimum integer to add to size to make it divisible by 
padlen.""" - return (padlen - (size % padlen)) % padlen - - -def pad_dim(dim, padlen): - """Make dim divisible by padlen.""" - return ((dim + padlen - 1) // padlen) * padlen - - -def pad_innermost_dim(shape, padlen): - """Make the last dimension in shape divisible by padlen.""" - if not shape: - return () - shape = list(shape) - if padlen: - shape[-1] = pad_dim(shape[-1], padlen) - return tuple(shape) - - -def build_1d_initializer_list(values, formatter, padlen=0, precision=None): - """Return a list containing a single line formatted like '{ 0.0, 1.0, 2.0 }'.""" - if formatter == str: - - def formatter(x, p): - return str(x) - - tokens = ["{ "] - if np.prod(values.shape) > 0: - sep = ", " - fvalues = [formatter(v, precision) for v in values] - for v in fvalues[:-1]: - tokens.append(v) - tokens.append(sep) - tokens.append(fvalues[-1]) - if padlen: - # Add padding - zero = formatter(values.dtype.type(0), precision) - for i in range(leftover(len(values), padlen)): - tokens.append(sep) - tokens.append(zero) - tokens += " }" - return "".join(tokens) - - -def build_initializer_lists(values, sizes, level, formatter, padlen=0, precision=None): - """Return a list of lines with initializer lists for a multidimensional array. - - Example output:: - - { { 0.0, 0.1 }, - { 1.0, 1.1 } } - - """ - if formatter == str: - - def formatter(x, p): - return str(x) - - values = np.asarray(values) - assert np.prod(values.shape) == np.prod(sizes) - assert len(sizes) > 0 - assert len(values.shape) > 0 - assert len(sizes) == len(values.shape) - assert np.all(values.shape == sizes) - - r = len(sizes) - assert r > 0 - if r == 1: - return [ - build_1d_initializer_list( - values, formatter, padlen=padlen, precision=precision - ) - ] - else: - # Render all sublists - parts = [] - for val in values: - sublist = build_initializer_lists( - val, sizes[1:], level + 1, formatter, padlen=padlen, precision=precision - ) - parts.append(sublist) - # Add comma after last line in each part except the last one - for part in parts[:-1]: - part[-1] += "," - # Collect all lines in flat list - lines = [] - for part in parts: - lines.extend(part) - # Enclose lines in '{ ' and ' }' and indent lines in between - lines[0] = "{ " + lines[0] - for i in range(1, len(lines)): - lines[i] = " " + lines[i] - lines[-1] += " }" - return lines - - -class ArrayDecl(CStatement): - """A declaration or definition of an array. - - Note that just setting values=0 is sufficient to initialize the - entire array to zero. - - Otherwise use nested lists of lists to represent multidimensional - array values to initialize to. - - """ - - __slots__ = ("typename", "symbol", "sizes", "padlen", "values") - is_scoped = False - - def __init__(self, typename, symbol, sizes=None, values=None, padlen=0): - assert isinstance(typename, str) - self.typename = typename - - if isinstance(symbol, FlattenedArray): - if sizes is None: - assert symbol.dims is not None - sizes = symbol.dims - elif symbol.dims is not None: - assert symbol.dims == sizes - self.symbol = symbol.array - else: - self.symbol = as_symbol(symbol) - - if isinstance(sizes, int): - sizes = (sizes,) - self.sizes = tuple(sizes) - - # NB! No type checking, assuming nested lists of literal values. Not applying as_cexpr. - if isinstance(values, (list, tuple)): - self.values = np.asarray(values) - else: - self.values = values - - self.padlen = padlen - - def cs_format(self, precision=None): - if not all(self.sizes): - raise RuntimeError( - f"Detected an array {self.symbol} dimension of zero. This is not valid in C." 
- ) - - # Pad innermost array dimension - sizes = pad_innermost_dim(self.sizes, self.padlen) - - # Add brackets - brackets = "".join("[%d]" % n for n in sizes) - - # Join declaration - decl = self.typename + " " + self.symbol.name + brackets - - if self.values is None: - # Undefined initial values - return decl + ";" - elif _is_zero_valued(self.values): - # Zero initial values - # (NB! C style zero initialization, not sure about other target languages) - nb = len(sizes) - lbr = "{" * nb - rbr = "}" * nb - return f"{decl} = {lbr} 0 {rbr};" - else: - # Construct initializer lists for arbitrary multidimensional array values - if self.values.dtype.kind == "f": - formatter = format_float - elif self.values.dtype.kind == "i": - formatter = format_int - elif self.values.dtype == np.bool_: - - def format_bool(x, precision=None): - return "true" if x is True else "false" - - formatter = format_bool - else: - formatter = format_value - initializer_lists = build_initializer_lists( - self.values, - self.sizes, - 0, - formatter, - padlen=self.padlen, - precision=precision, - ) - if len(initializer_lists) == 1: - return decl + " = " + initializer_lists[0] + ";" - else: - initializer_lists[-1] += ";" # Close statement on final line - return (decl + " =", Indented(initializer_lists)) - - def __eq__(self, other): - attributes = ("typename", "symbol", "sizes", "padlen", "values") - return isinstance(other, type(self)) and all( - getattr(self, name) == getattr(self, name) for name in attributes - ) - - def flops(self): - return 0 - - -# Scoped statements - - -class Scope(CStatement): - __slots__ = ("body",) - is_scoped = True - - def __init__(self, body): - self.body = as_cstatement(body) - - def cs_format(self, precision=None): - return ("{", Indented(self.body.cs_format(precision)), "}") - - def __eq__(self, other): - return isinstance(other, type(self)) and self.body == other.body - - def flops(self): - return 0 - - -def is_simple_inner_loop(code): - if isinstance(code, ForRange) and is_simple_inner_loop(code.body): - return True - if isinstance(code, Statement) and isinstance(code.expr, AssignOp): - return True - return False - - -class ForRange(CStatement): - """Slightly higher-level for loop assuming incrementing an index over a range.""" - - __slots__ = ("index", "begin", "end", "body", "index_type") - is_scoped = True - - def __init__(self, index, begin, end, body, index_type="int"): - self.index = as_cexpr_or_string_symbol(index) - self.begin = as_cexpr(begin) - self.end = as_cexpr(end) - self.body = as_cstatement(body) - self.index_type = index_type - - def cs_format(self, precision=None): - indextype = self.index_type - index = self.index.ce_format(precision) - begin = self.begin.ce_format(precision) - end = self.end.ce_format(precision) - - init = indextype + " " + index + " = " + begin - check = index + " < " + end - update = "++" + index - - prelude = "for (" + init + "; " + check + "; " + update + ")" - body = Indented(self.body.cs_format(precision)) - - # Reduce size of code with lots of simple loops by dropping {} in obviously safe cases - if is_simple_inner_loop(self.body): - code = (prelude, body) - else: - code = (prelude, "{", body, "}") - - return code - - def __eq__(self, other): - attributes = ("index", "begin", "end", "body", "index_type") - return isinstance(other, type(self)) and all( - getattr(self, name) == getattr(self, name) for name in attributes - ) - - def flops(self): - return (self.end.value - self.begin.value) * self.body.flops() - - -# Conversion function to statement 
nodes - - -def as_cstatement(node): - """Perform type checking on node and wrap in a suitable statement type if necessary.""" - if isinstance(node, StatementList) and len(node.statements) == 1: - # Cleans up the expression tree a bit - return node.statements[0] - elif isinstance(node, CStatement): - # No-op - return node - elif isinstance(node, CExprOperator): - if node.sideeffect: - # Special case for using assignment expressions as statements - return Statement(node) - else: - raise RuntimeError( - "Trying to create a statement of CExprOperator type %s:\n%s" - % (type(node), str(node)) - ) - elif isinstance(node, list): - # Convenience case for list of statements - if len(node) == 1: - # Cleans up the expression tree a bit - return as_cstatement(node[0]) - else: - return StatementList(node) - elif isinstance(node, str): - # Backdoor for flexibility in code generation to allow verbatim pasted statements - return VerbatimStatement(node) - else: - raise RuntimeError( - "Unexpected CStatement type %s:\n%s" % (type(node), str(node)) - ) diff --git a/ffcx/codegeneration/C/expressions.py b/ffcx/codegeneration/C/expressions.py index a994b8c5c..530c0ab2a 100644 --- a/ffcx/codegeneration/C/expressions.py +++ b/ffcx/codegeneration/C/expressions.py @@ -9,7 +9,7 @@ from ffcx.codegeneration.C import expressions_template from ffcx.codegeneration.expression_generator import ExpressionGenerator from ffcx.codegeneration.backend import FFCXBackend -from ffcx.codegeneration.C.format_lines import format_indented_lines +from ffcx.codegeneration.C.c_implementation import CFormatter from ffcx.naming import cdtype_to_numpy, scalar_to_value_type logger = logging.getLogger("ffcx") @@ -36,8 +36,8 @@ def generator(ir, options): parts = eg.generate() - body = format_indented_lines(parts.cs_format(), 1) - d["tabulate_expression"] = body + CF = CFormatter(options["scalar_type"]) + d["tabulate_expression"] = CF.c_format(parts) if len(ir.original_coefficient_positions) > 0: d["original_coefficient_positions"] = f"original_coefficient_positions_{ir.name}" diff --git a/ffcx/codegeneration/C/format_lines.py b/ffcx/codegeneration/C/format_lines.py deleted file mode 100644 index 1fcfaee92..000000000 --- a/ffcx/codegeneration/C/format_lines.py +++ /dev/null @@ -1,60 +0,0 @@ -# This file is part of FFCx.(https://www.fenicsproject.org) -# -# SPDX-License-Identifier: LGPL-3.0-or-later -"""Tools for indentation-aware code string stitching. - -When formatting an AST into a string, it's better to collect lists of -snippets and then join them than adding the pieces continually, which -gives O(n^2) behaviour w.r.t. AST size n. - -""" - - -class Indented(object): - """Class to mark a collection of snippets for indentation. - - This way nested indentations can be handled by adding the prefix - spaces only once to each line instead of splitting and indenting - substrings repeatedly. - - """ - - # Try to keep memory overhead low: - __slots__ = ("body", ) - - def __init__(self, body): - # Body can be any valid snippet format - self.body = body - - -def iter_indented_lines(snippets, level=0): - """Iterate over indented string lines from a snippets data structure. - - The snippets object can be built recursively using the following types: - - - str: Split and yield as one line at a time indented to the appropriate level. - - - Indented: Yield the lines within this object indented by one level. - - - tuple,list: Yield lines from recursive application of this function to list items. 
- - """ - tabsize = 2 - indentation = ' ' * (tabsize * level) - if isinstance(snippets, str): - for line in snippets.split("\n"): - yield indentation + line - elif isinstance(snippets, Indented): - for line in iter_indented_lines(snippets.body, level + 1): - yield line - elif isinstance(snippets, (tuple, list)): - for part in snippets: - for line in iter_indented_lines(part, level): - yield line - else: - raise RuntimeError("Unexpected type %s:\n%s" % (type(snippets), str(snippets))) - - -def format_indented_lines(snippets, level=0): - """Format recursive sequences of indented lines as one string.""" - return "\n".join(iter_indented_lines(snippets, level)) diff --git a/ffcx/codegeneration/C/format_value.py b/ffcx/codegeneration/C/format_value.py deleted file mode 100644 index 0f057a3df..000000000 --- a/ffcx/codegeneration/C/format_value.py +++ /dev/null @@ -1,59 +0,0 @@ -# This file is part of FFCx.(https://www.fenicsproject.org) -# -# SPDX-License-Identifier: LGPL-3.0-or-later - -import numbers -import re - -_subs = ( - # Remove 0s after e+ or e- - (re.compile(r"e[\+]0*(.)"), r"e\1"), - (re.compile(r"e[\-]0*(.)"), r"e-\1"), -) - - -def format_float(x, precision=None): - """Format a float value according to given precision.""" - global _subs - - if precision: - if isinstance(x, complex): - s = "({:.{prec}}+I*{:.{prec}})".format(x.real, x.imag, prec=precision) - elif isinstance(x, float): - s = "{:.{prec}}".format(x, prec=precision) - else: - s = "{:.{prec}}".format(float(x), prec=precision) - else: - s = repr(float(x)) - for r, v in _subs: - s = r.sub(v, s) - return s - - -def format_int(x, precision=None): - return str(x) - - -def format_value(value, precision=None): - """Format a literal value as string. - - - float: Formatted according to current precision configuration. - - - int: Formatted as regular base 10 int literal. - - - str: Wrapped in "quotes". - - """ - if isinstance(value, numbers.Real): - return format_float(float(value), precision=precision) - elif isinstance(value, numbers.Integral): - return format_int(int(value)) - elif isinstance(value, str): - # FIXME: Is this ever used? 
- assert '"' not in value - return '"' + value + '"' - elif hasattr(value, "ce_format"): - return value.ce_format() - else: - raise RuntimeError("Unexpected type %s:\n%s" % (type(value), - str(value))) diff --git a/ffcx/codegeneration/C/integrals.py b/ffcx/codegeneration/C/integrals.py index b1b73c525..386e3ed40 100644 --- a/ffcx/codegeneration/C/integrals.py +++ b/ffcx/codegeneration/C/integrals.py @@ -9,7 +9,7 @@ from ffcx.codegeneration.integral_generator import IntegralGenerator from ffcx.codegeneration.C import integrals_template as ufcx_integrals from ffcx.codegeneration.backend import FFCXBackend -from ffcx.codegeneration.C.format_lines import format_indented_lines +from ffcx.codegeneration.C.c_implementation import CFormatter from ffcx.naming import cdtype_to_numpy, scalar_to_value_type logger = logging.getLogger("ffcx") @@ -36,7 +36,8 @@ def generator(ir, options): parts = ig.generate() # Format code as string - body = format_indented_lines(parts.cs_format(ir.precision), 1) + CF = CFormatter(options["scalar_type"], ir.precision) + body = CF.c_format(parts) # Generate generic FFCx code snippets and add specific parts code = {} diff --git a/ffcx/codegeneration/C/precedence.py b/ffcx/codegeneration/C/precedence.py deleted file mode 100644 index a54c8c0f8..000000000 --- a/ffcx/codegeneration/C/precedence.py +++ /dev/null @@ -1,61 +0,0 @@ -# Copyright (C) 2011-2017 Martin Sandve Alnæs -# -# This file is part of FFCx.(https://www.fenicsproject.org) -# -# SPDX-License-Identifier: LGPL-3.0-or-later - - -class PRECEDENCE: - """An enum-like class for C operator precedence levels.""" - - HIGHEST = 0 - LITERAL = 0 - SYMBOL = 0 - - # SCOPE = 1 - - POST_INC = 2 - POST_DEC = 2 - CALL = 2 - SUBSCRIPT = 2 - # MEMBER = 2 - # PTR_MEMBER = 2 - - PRE_INC = 3 - PRE_DEC = 3 - NOT = 3 - BIT_NOT = 3 - POS = 3 - NEG = 3 - DEREFERENCE = 3 - ADDRESSOF = 3 - SIZEOF = 3 - - MUL = 4 - DIV = 4 - MOD = 4 - - ADD = 5 - SUB = 5 - - BITSHIFT = 6 - - LT = 7 - LE = 7 - GT = 7 - GE = 7 - - EQ = 8 - NE = 8 - - BITAND = 9 - - AND = 11 - OR = 12 - - CONDITIONAL = 13 - ASSIGN = 13 - - # COMMA = 14 - - LOWEST = 15 diff --git a/ffcx/codegeneration/C/ufl_to_cnodes.py b/ffcx/codegeneration/C/ufl_to_cnodes.py deleted file mode 100644 index 85d9faa94..000000000 --- a/ffcx/codegeneration/C/ufl_to_cnodes.py +++ /dev/null @@ -1,295 +0,0 @@ -# Copyright (C) 2011-2017 Martin Sandve Alnæs -# -# This file is part of FFCx.(https://www.fenicsproject.org) -# -# SPDX-License-Identifier: LGPL-3.0-or-later -"""Tools for C/C++ expression formatting.""" - -import logging - -import ufl - -logger = logging.getLogger("ffcx") - -# Table of handled math functions for different scalar types - -math_table = {'double': {'sqrt': 'sqrt', - 'abs': 'fabs', - 'cos': 'cos', - 'sin': 'sin', - 'tan': 'tan', - 'acos': 'acos', - 'asin': 'asin', - 'atan': 'atan', - 'cosh': 'cosh', - 'sinh': 'sinh', - 'tanh': 'tanh', - 'acosh': 'acosh', - 'asinh': 'asinh', - 'atanh': 'atanh', - 'power': 'pow', - 'exp': 'exp', - 'ln': 'log', - 'erf': 'erf', - 'atan_2': 'atan2', - 'min_value': 'fmin', - 'max_value': 'fmax'}, - - 'float': {'sqrt': 'sqrtf', - 'abs': 'fabsf', - 'cos': 'cosf', - 'sin': 'sinf', - 'tan': 'tanf', - 'acos': 'acosf', - 'asin': 'asinf', - 'atan': 'atanf', - 'cosh': 'coshf', - 'sinh': 'sinhf', - 'tanh': 'tanhf', - 'acosh': 'acoshf', - 'asinh': 'asinhf', - 'atanh': 'atanhf', - 'power': 'powf', - 'exp': 'expf', - 'ln': 'logf', - 'erf': 'erff', - 'atan_2': 'atan2f', - 'min_value': 'fminf', - 'max_value': 'fmaxf'}, - - 'long double': {'sqrt': 'sqrtl', - 'abs': 
'fabsl', - 'cos': 'cosl', - 'sin': 'sinl', - 'tan': 'tanl', - 'acos': 'acosl', - 'asin': 'asinl', - 'atan': 'atanl', - 'cosh': 'coshl', - 'sinh': 'sinhl', - 'tanh': 'tanhl', - 'acosh': 'acoshl', - 'asinh': 'asinhl', - 'atanh': 'atanhl', - 'power': 'powl', - 'exp': 'expl', - 'ln': 'logl', - 'erf': 'erfl', - 'atan_2': 'atan2l', - 'min_value': 'fminl', - 'max_value': 'fmaxl'}, - - 'double _Complex': {'sqrt': 'csqrt', - 'abs': 'cabs', - 'cos': 'ccos', - 'sin': 'csin', - 'tan': 'ctan', - 'acos': 'cacos', - 'asin': 'casin', - 'atan': 'catan', - 'cosh': 'ccosh', - 'sinh': 'csinh', - 'tanh': 'ctanh', - 'acosh': 'cacosh', - 'asinh': 'casinh', - 'atanh': 'catanh', - 'power': 'cpow', - 'exp': 'cexp', - 'ln': 'clog', - 'real': 'creal', - 'imag': 'cimag', - 'conj': 'conj', - 'max_value': 'fmax', - 'min_value': 'fmin'}, - - 'float _Complex': {'sqrt': 'csqrtf', - 'abs': 'cabsf', - 'cos': 'ccosf', - 'sin': 'csinf', - 'tan': 'ctanf', - 'acos': 'cacosf', - 'asin': 'casinf', - 'atan': 'catanf', - 'cosh': 'ccoshf', - 'sinh': 'csinhf', - 'tanh': 'ctanhf', - 'acosh': 'cacoshf', - 'asinh': 'casinhf', - 'atanh': 'catanhf', - 'power': 'cpowf', - 'exp': 'cexpf', - 'ln': 'clogf', - 'real': 'crealf', - 'imag': 'cimagf', - 'conj': 'conjf', - 'max_value': 'fmaxf', - 'min_value': 'fminf'}} - - -class UFL2CNodesTranslatorCpp(object): - """UFL to CNodes translator class.""" - - def __init__(self, language, scalar_type="double"): - self.L = language - self.force_floats = False - self.enable_strength_reduction = False - self.scalar_type = scalar_type - - # Lookup table for handler to call when the "get" method (below) is - # called, depending on the first argument type. - self.call_lookup = {ufl.constantvalue.IntValue: self.int_value, - ufl.constantvalue.FloatValue: self.float_value, - ufl.constantvalue.ComplexValue: self.complex_value, - ufl.constantvalue.Zero: self.zero, - ufl.algebra.Product: self.product, - ufl.algebra.Sum: self.sum, - ufl.algebra.Division: self.division, - ufl.algebra.Abs: self._cmath, - ufl.algebra.Power: self._cmath, - ufl.algebra.Real: self._cmath, - ufl.algebra.Imag: self._cmath, - ufl.algebra.Conj: self._cmath, - ufl.classes.GT: self.gt, - ufl.classes.GE: self.ge, - ufl.classes.EQ: self.eq, - ufl.classes.NE: self.ne, - ufl.classes.LT: self.lt, - ufl.classes.LE: self.le, - ufl.classes.AndCondition: self.and_condition, - ufl.classes.OrCondition: self.or_condition, - ufl.classes.NotCondition: self.not_condition, - ufl.classes.Conditional: self.conditional, - ufl.classes.MinValue: self._cmath, - ufl.classes.MaxValue: self._cmath, - ufl.mathfunctions.Sqrt: self._cmath, - ufl.mathfunctions.Ln: self._cmath, - ufl.mathfunctions.Exp: self._cmath, - ufl.mathfunctions.Cos: self._cmath, - ufl.mathfunctions.Sin: self._cmath, - ufl.mathfunctions.Tan: self._cmath, - ufl.mathfunctions.Cosh: self._cmath, - ufl.mathfunctions.Sinh: self._cmath, - ufl.mathfunctions.Tanh: self._cmath, - ufl.mathfunctions.Acos: self._cmath, - ufl.mathfunctions.Asin: self._cmath, - ufl.mathfunctions.Atan: self._cmath, - ufl.mathfunctions.Erf: self._cmath, - ufl.mathfunctions.Atan2: self._cmath, - ufl.mathfunctions.MathFunction: self.math_function, - ufl.mathfunctions.BesselJ: self.bessel_j, - ufl.mathfunctions.BesselY: self.bessel_y} - - def get(self, o, *args): - # Call appropriate handler, depending on the type of o - otype = type(o) - if otype in self.call_lookup: - return self.call_lookup[otype](o, *args) - else: - raise RuntimeError(f"Missing C formatting rule for expr type {otype}.") - - def expr(self, o, *args): - """Raise 
generic fallback with error message for missing rules.""" - raise RuntimeError(f"Missing C formatting rule for expr type {o._ufl_class_}.") - - # === Formatting rules for scalar literals === - - def zero(self, o): - return self.L.LiteralFloat(0.0) - - def float_value(self, o): - return self.L.LiteralFloat(float(o)) - - def int_value(self, o): - if self.force_floats: - return self.float_value(o) - return self.L.LiteralInt(int(o)) - - def complex_value(self, o): - return self.L.LiteralFloat(o.value()) - - # === Formatting rules for arithmetic operators === - - def sum(self, o, a, b): - return self.L.Add(a, b) - - def product(self, o, a, b): - return self.L.Mul(a, b) - - def division(self, o, a, b): - if self.enable_strength_reduction: - return self.L.Mul(a, self.L.Div(1.0, b)) - else: - return self.L.Div(a, b) - - # === Formatting rules for conditional expressions === - - def conditional(self, o, c, t, f): - return self.L.Conditional(c, t, f) - - def eq(self, o, a, b): - return self.L.EQ(a, b) - - def ne(self, o, a, b): - return self.L.NE(a, b) - - def le(self, o, a, b): - return self.L.LE(a, b) - - def ge(self, o, a, b): - return self.L.GE(a, b) - - def lt(self, o, a, b): - return self.L.LT(a, b) - - def gt(self, o, a, b): - return self.L.GT(a, b) - - def and_condition(self, o, a, b): - return self.L.And(a, b) - - def or_condition(self, o, a, b): - return self.L.Or(a, b) - - def not_condition(self, o, a): - return self.L.Not(a) - - # === Formatting rules for cmath functions === - - def math_function(self, o, op): - # Fallback for unhandled MathFunction subclass: - # attempting to just call it. - return self.L.Call(o._name, op) - - def _cmath(self, o, *args): - k = o._ufl_handler_name_ - try: - name = math_table[self.scalar_type].get(k) - except Exception as e: - raise type(e)("Math function not found:", self.scalar_type, k) - if name is None: - raise RuntimeError("Not supported in current scalar mode") - return self.L.Call(name, args) - - # === Formatting rules for bessel functions === - # Some Bessel functions exist in gcc, as XSI extensions - # but not all. 
- - def bessel_j(self, o, n, v): - assert "complex" not in self.scalar_type - n = int(float(n)) - if n == 0: - return self.L.Call("j0", v) - elif n == 1: - return self.L.Call("j1", v) - else: - return self.L.Call("jn", (n, v)) - - def bessel_y(self, o, n, v): - assert "complex" not in self.scalar_type - n = int(float(n)) - if n == 0: - return self.L.Call("y0", v) - elif n == 1: - return self.L.Call("y1", v) - else: - return self.L.Call("yn", (n, v)) diff --git a/ffcx/codegeneration/access.py b/ffcx/codegeneration/access.py index 9d1dcc985..b047251ee 100644 --- a/ffcx/codegeneration/access.py +++ b/ffcx/codegeneration/access.py @@ -11,6 +11,7 @@ import ufl import basix.ufl from ffcx.element_interface import convert_element +import ffcx.codegeneration.lnodes as L logger = logging.getLogger("ffcx") @@ -18,12 +19,11 @@ class FFCXBackendAccess(object): """FFCx specific cpp formatter class.""" - def __init__(self, ir, language, symbols, options): + def __init__(self, ir, symbols, options): # Store ir and options self.entitytype = ir.entitytype self.integral_type = ir.integral_type - self.language = language self.symbols = symbols self.options = options @@ -138,7 +138,6 @@ def cell_coordinate(self, e, mt, tabledata, num_points): raise RuntimeError("Expecting reference cell coordinate to be symbolically rewritten.") def facet_coordinate(self, e, mt, tabledata, num_points): - L = self.language if mt.global_derivatives: raise RuntimeError("Not expecting derivatives of FacetCoordinate.") if mt.local_derivatives: @@ -178,36 +177,32 @@ def jacobian(self, e, mt, tabledata, num_points): return self.symbols.J_component(mt) def reference_cell_volume(self, e, mt, tabledata, access): - L = self.language cellname = ufl.domain.extract_unique_domain(mt.terminal).ufl_cell().cellname() if cellname in ("interval", "triangle", "tetrahedron", "quadrilateral", "hexahedron"): - return L.Symbol(f"{cellname}_reference_cell_volume") + return L.Symbol(f"{cellname}_reference_cell_volume", dtype=L.DataType.REAL) else: raise RuntimeError(f"Unhandled cell types {cellname}.") def reference_facet_volume(self, e, mt, tabledata, access): - L = self.language cellname = ufl.domain.extract_unique_domain(mt.terminal).ufl_cell().cellname() if cellname in ("interval", "triangle", "tetrahedron", "quadrilateral", "hexahedron"): - return L.Symbol(f"{cellname}_reference_facet_volume") + return L.Symbol(f"{cellname}_reference_facet_volume", dtype=L.DataType.REAL) else: raise RuntimeError(f"Unhandled cell types {cellname}.") def reference_normal(self, e, mt, tabledata, access): - L = self.language cellname = ufl.domain.extract_unique_domain(mt.terminal).ufl_cell().cellname() if cellname in ("interval", "triangle", "tetrahedron", "quadrilateral", "hexahedron"): - table = L.Symbol(f"{cellname}_reference_facet_normals") + table = L.Symbol(f"{cellname}_reference_facet_normals", dtype=L.DataType.REAL) facet = self.symbols.entity("facet", mt.restriction) return table[facet][mt.component[0]] else: raise RuntimeError(f"Unhandled cell types {cellname}.") def cell_facet_jacobian(self, e, mt, tabledata, num_points): - L = self.language cellname = ufl.domain.extract_unique_domain(mt.terminal).ufl_cell().cellname() if cellname in ("triangle", "tetrahedron", "quadrilateral", "hexahedron"): - table = L.Symbol(f"{cellname}_reference_facet_jacobian") + table = L.Symbol(f"{cellname}_reference_facet_jacobian", dtype=L.DataType.REAL) facet = self.symbols.entity("facet", mt.restriction) return table[facet][mt.component[0]][mt.component[1]] elif cellname == 
"interval": @@ -216,10 +211,9 @@ def cell_facet_jacobian(self, e, mt, tabledata, num_points): raise RuntimeError(f"Unhandled cell types {cellname}.") def reference_cell_edge_vectors(self, e, mt, tabledata, num_points): - L = self.language cellname = ufl.domain.extract_unique_domain(mt.terminal).ufl_cell().cellname() if cellname in ("triangle", "tetrahedron", "quadrilateral", "hexahedron"): - table = L.Symbol(f"{cellname}_reference_edge_vectors") + table = L.Symbol(f"{cellname}_reference_edge_vectors", dtype=L.DataType.REAL) return table[mt.component[0]][mt.component[1]] elif cellname == "interval": raise RuntimeError("The reference cell edge vectors doesn't make sense for interval cell.") @@ -227,10 +221,9 @@ def reference_cell_edge_vectors(self, e, mt, tabledata, num_points): raise RuntimeError(f"Unhandled cell types {cellname}.") def reference_facet_edge_vectors(self, e, mt, tabledata, num_points): - L = self.language cellname = ufl.domain.extract_unique_domain(mt.terminal).ufl_cell().cellname() if cellname in ("tetrahedron", "hexahedron"): - table = L.Symbol(f"{cellname}_reference_edge_vectors") + table = L.Symbol(f"{cellname}_reference_edge_vectors", dtype=L.DataType.REAL) facet = self.symbols.entity("facet", mt.restriction) return table[facet][mt.component[0]][mt.component[1]] elif cellname in ("interval", "triangle", "quadrilateral"): @@ -241,12 +234,11 @@ def reference_facet_edge_vectors(self, e, mt, tabledata, num_points): raise RuntimeError(f"Unhandled cell types {cellname}.") def facet_orientation(self, e, mt, tabledata, num_points): - L = self.language cellname = ufl.domain.extract_unique_domain(mt.terminal).ufl_cell().cellname() if cellname not in ("interval", "triangle", "tetrahedron"): raise RuntimeError(f"Unhandled cell types {cellname}.") - table = L.Symbol(f"{cellname}_facet_orientations") + table = L.Symbol(f"{cellname}_facet_orientations", dtype=L.DataType.INT) facet = self.symbols.entity("facet", mt.restriction) return table[facet] @@ -313,7 +305,6 @@ def cell_edge_vectors(self, e, mt, tabledata, num_points): ) def facet_edge_vectors(self, e, mt, tabledata, num_points): - L = self.language # Get properties of domain domain = ufl.domain.extract_unique_domain(mt.terminal) @@ -342,7 +333,7 @@ def facet_edge_vectors(self, e, mt, tabledata, num_points): # Get edge vertices facet = self.symbols.entity("facet", mt.restriction) facet_edge = mt.component[0] - facet_edge_vertices = L.Symbol(f"{cellname}_facet_edge_vertices") + facet_edge_vertices = L.Symbol(f"{cellname}_facet_edge_vertices", dtype=L.DataType.INT) vertex0 = facet_edge_vertices[facet][facet_edge][0] vertex1 = facet_edge_vertices[facet][facet_edge][1] diff --git a/ffcx/codegeneration/backend.py b/ffcx/codegeneration/backend.py index 0b9c5d8d2..b874196e4 100644 --- a/ffcx/codegeneration/backend.py +++ b/ffcx/codegeneration/backend.py @@ -5,11 +5,7 @@ # SPDX-License-Identifier: LGPL-3.0-or-later """Collection of FFCx specific pieces for the code generation phase.""" -import types - -import ffcx.codegeneration.C.cnodes from ffcx.codegeneration.access import FFCXBackendAccess -from ffcx.codegeneration.C.ufl_to_cnodes import UFL2CNodesTranslatorCpp from ffcx.codegeneration.definitions import FFCXBackendDefinitions from ffcx.codegeneration.symbols import FFCXBackendSymbols @@ -19,19 +15,12 @@ class FFCXBackend(object): def __init__(self, ir, options): - # This is the seam where cnodes/C is chosen for the FFCx backend - self.language: types.ModuleType = ffcx.codegeneration.C.cnodes - scalar_type = options["scalar_type"] - 
self.ufl_to_language = UFL2CNodesTranslatorCpp(self.language, scalar_type) - coefficient_numbering = ir.coefficient_numbering coefficient_offsets = ir.coefficient_offsets original_constant_offsets = ir.original_constant_offsets - self.symbols = FFCXBackendSymbols(self.language, coefficient_numbering, + self.symbols = FFCXBackendSymbols(coefficient_numbering, coefficient_offsets, original_constant_offsets) - self.definitions = FFCXBackendDefinitions(ir, self.language, - self.symbols, options) - self.access = FFCXBackendAccess(ir, self.language, self.symbols, - options) + self.definitions = FFCXBackendDefinitions(ir, self.symbols, options) + self.access = FFCXBackendAccess(ir, self.symbols, options) diff --git a/ffcx/codegeneration/definitions.py b/ffcx/codegeneration/definitions.py index 1b26de95f..3fbbacbda 100644 --- a/ffcx/codegeneration/definitions.py +++ b/ffcx/codegeneration/definitions.py @@ -9,7 +9,7 @@ import ufl from ffcx.element_interface import convert_element -from ffcx.naming import scalar_to_value_type +import ffcx.codegeneration.lnodes as L logger = logging.getLogger("ffcx") @@ -17,11 +17,10 @@ class FFCXBackendDefinitions(object): """FFCx specific code definitions.""" - def __init__(self, ir, language, symbols, options): + def __init__(self, ir, symbols, options): # Store ir and options self.integral_type = ir.integral_type self.entitytype = ir.entitytype - self.language = language self.symbols = symbols self.options = options @@ -64,8 +63,6 @@ def get(self, t, mt, tabledata, quadrature_rule, access): def coefficient(self, t, mt, tabledata, quadrature_rule, access): """Return definition code for coefficients.""" - L = self.language - ttype = tabledata.ttype num_dofs = tabledata.values.shape[3] bs = tabledata.block_size @@ -99,14 +96,14 @@ def coefficient(self, t, mt, tabledata, quadrature_rule, access): # If a map is necessary from stride 1 to bs, the code must be added before the quadrature loop. 
if dof_access_map: - pre_code += [L.ArrayDecl(self.options["scalar_type"], dof_access.array, num_dofs)] - pre_body = L.Assign(dof_access, dof_access_map) + pre_code += [L.ArrayDecl(dof_access.array, sizes=num_dofs)] + pre_body = [L.Assign(dof_access, dof_access_map)] pre_code += [L.ForRange(ic, 0, num_dofs, pre_body)] else: dof_access = self.symbols.coefficient_dof_access(mt.terminal, ic * bs + begin) body = [L.AssignAdd(access, dof_access * FE[ic])] - code += [L.VariableDecl(self.options["scalar_type"], access, 0.0)] + code += [L.VariableDecl(access, 0.0)] code += [L.ForRange(ic, 0, num_dofs, body)] return pre_code, code @@ -119,8 +116,6 @@ def constant(self, t, mt, tabledata, quadrature_rule, access): def _define_coordinate_dofs_lincomb(self, e, mt, tabledata, quadrature_rule, access): """Define x or J as a linear combination of coordinate dofs with given table data.""" - L = self.language - # Get properties of domain domain = ufl.domain.extract_unique_domain(mt.terminal) coordinate_element = domain.ufl_coordinate_element() @@ -140,7 +135,7 @@ def _define_coordinate_dofs_lincomb(self, e, mt, tabledata, quadrature_rule, acc # Get access to element table FE = self.symbols.element_table(tabledata, self.entitytype, mt.restriction) ic = self.symbols.coefficient_dof_sum_index() - dof_access = self.symbols.S("coordinate_dofs") + dof_access = L.Symbol("coordinate_dofs", dtype=L.DataType.REAL) # coordinate dofs is always 3d dim = 3 @@ -148,11 +143,9 @@ def _define_coordinate_dofs_lincomb(self, e, mt, tabledata, quadrature_rule, acc if mt.restriction == "-": offset = num_scalar_dofs * dim - value_type = scalar_to_value_type(self.options["scalar_type"]) - code = [] body = [L.AssignAdd(access, dof_access[ic * dim + begin + offset] * FE[ic])] - code += [L.VariableDecl(f"{value_type}", access, 0.0)] + code += [L.VariableDecl(access, 0.0)] code += [L.ForRange(ic, 0, num_scalar_dofs, body)] return [], code diff --git a/ffcx/codegeneration/expression_generator.py b/ffcx/codegeneration/expression_generator.py index 553e8b315..c0a81a36e 100644 --- a/ffcx/codegeneration/expression_generator.py +++ b/ffcx/codegeneration/expression_generator.py @@ -12,9 +12,9 @@ import ufl from ffcx.codegeneration import geometry from ffcx.codegeneration.backend import FFCXBackend -from ffcx.codegeneration.C.cnodes import CNode +import ffcx.codegeneration.lnodes as L +from ffcx.codegeneration.lnodes import LNode from ffcx.ir.representation import ExpressionIR -from ffcx.naming import scalar_to_value_type logger = logging.getLogger("ffcx") @@ -27,22 +27,17 @@ def __init__(self, ir: ExpressionIR, backend: FFCXBackend): self.ir = ir self.backend = backend - self.scope: Dict[Any, CNode] = {} + self.scope: Dict[Any, LNode] = {} self._ufl_names: Set[Any] = set() self.symbol_counters: DefaultDict[Any, int] = collections.defaultdict(int) self.shared_symbols: Dict[Any, Any] = {} self.quadrature_rule = list(self.ir.integrand.keys())[0] def generate(self): - L = self.backend.language - parts = [] - scalar_type = self.backend.access.options["scalar_type"] - value_type = scalar_to_value_type(scalar_type) - - parts += self.generate_element_tables(value_type) + parts += self.generate_element_tables() # Generate the tables of geometry data that are needed - parts += self.generate_geometry_tables(value_type) + parts += self.generate_geometry_tables() parts += self.generate_piecewise_partition() all_preparts = [] @@ -58,10 +53,8 @@ def generate(self): return L.StatementList(parts) - def generate_geometry_tables(self, float_type: str): + def 
generate_geometry_tables(self): """Generate static tables of geometry data.""" - L = self.backend.language - # Currently we only support circumradius ufl_geometry = { ufl.geometry.ReferenceCellVolume: "reference_cell_volume", @@ -79,24 +72,21 @@ def generate_geometry_tables(self, float_type: str): parts = [] for i, cell_list in cells.items(): for c in cell_list: - parts.append(geometry.write_table(L, ufl_geometry[i], c, float_type)) + parts.append(geometry.write_table(ufl_geometry[i], c)) return parts - def generate_element_tables(self, float_type: str): + def generate_element_tables(self): """Generate tables of FE basis evaluated at specified points.""" - L = self.backend.language parts = [] tables = self.ir.unique_tables - - padlen = self.ir.options["padlen"] table_names = sorted(tables) for name in table_names: table = tables[name] - decl = L.ArrayDecl( - f"static const {float_type}", name, table.shape, table, padlen=padlen) + symbol = L.Symbol(name, dtype=L.DataType.REAL) + decl = L.ArrayDecl(symbol, sizes=table.shape, values=table, const=True) parts += [decl] # Add leading comment if there are any tables @@ -112,8 +102,6 @@ def generate_quadrature_loop(self): In the context of expressions quadrature loop is not accumulated. """ - L = self.backend.language - # Generate varying partition body = self.generate_varying_partition() body = L.commented_code_list( @@ -138,12 +126,10 @@ def generate_quadrature_loop(self): def generate_varying_partition(self): """Generate factors of blocks which are not cellwise constant.""" - L = self.backend.language - # Get annotated graph of factorisation F = self.ir.integrand[self.quadrature_rule]["factorization"] - arraysymbol = L.Symbol(f"sv_{self.quadrature_rule.id()}") + arraysymbol = L.Symbol(f"sv_{self.quadrature_rule.id()}", dtype=L.DataType.SCALAR) parts = self.generate_partition(arraysymbol, F, "varying") parts = L.commented_code_list( parts, f"Unstructured varying computations for quadrature rule {self.quadrature_rule.id()}") @@ -151,12 +137,10 @@ def generate_varying_partition(self): def generate_piecewise_partition(self): """Generate factors of blocks which are constant (i.e. 
do not depend on quadrature points).""" - L = self.backend.language - # Get annotated graph of factorisation F = self.ir.integrand[self.quadrature_rule]["factorization"] - arraysymbol = L.Symbol("sp") + arraysymbol = L.Symbol("sp", dtype=L.DataType.SCALAR) parts = self.generate_partition(arraysymbol, F, "piecewise") parts = L.commented_code_list(parts, "Unstructured piecewise computations") return parts @@ -188,8 +172,6 @@ def generate_dofblock_partition(self): def generate_block_parts(self, blockmap, blockdata): """Generate and return code parts for a given block.""" - L = self.backend.language - # The parts to return preparts = [] quadparts = [] @@ -288,8 +270,6 @@ def get_arg_factors(self, blockdata, block_rank, indices): Indices used to index element tables """ - L = self.backend.language - arg_factors = [] for i in range(block_rank): mad = blockdata.ma_data[i] @@ -309,21 +289,18 @@ def get_arg_factors(self, blockdata, block_rank, indices): def new_temp_symbol(self, basename): """Create a new code symbol named basename + running counter.""" - L = self.backend.language name = "%s%d" % (basename, self.symbol_counters[basename]) self.symbol_counters[basename] += 1 - return L.Symbol(name) + return L.Symbol(name, dtype=L.DataType.SCALAR) def get_var(self, v): if v._ufl_is_literal_: - return self.backend.ufl_to_language.get(v) + return L.ufl_to_lnodes(v) f = self.scope.get(v) return f def generate_partition(self, symbol, F, mode): """Generate computations of factors of blocks.""" - L = self.backend.language - definitions = [] pre_definitions = dict() intermediates = [] @@ -337,7 +314,7 @@ def generate_partition(self, symbol, F, mode): mt = attr.get('mt') if v._ufl_is_literal_: - vaccess = self.backend.ufl_to_language.get(v) + vaccess = L.ufl_to_lnodes(v) elif mt is not None: # All finite element based terminals have table data, as well # as some, but not all, of the symbolic geometric terminals @@ -366,7 +343,7 @@ def generate_partition(self, symbol, F, mode): # Mapping UFL operator to target language self._ufl_names.add(v._ufl_handler_name_) - vexpr = self.backend.ufl_to_language.get(v, *vops) + vexpr = L.ufl_to_lnodes(v, *vops) # Create a new intermediate for each subexpression # except boolean conditions and its childs @@ -392,7 +369,7 @@ def generate_partition(self, symbol, F, mode): intermediates.append(L.Assign(vaccess, vexpr)) else: scalar_type = self.backend.access.options["scalar_type"] - vaccess = L.Symbol("%s_%d" % (symbol.name, j)) + vaccess = L.Symbol("%s_%d" % (symbol.name, j), dtype=L.DataType.SCALAR) intermediates.append(L.VariableDecl(f"const {scalar_type}", vaccess, vexpr)) # Store access node for future reference @@ -410,7 +387,6 @@ def generate_partition(self, symbol, F, mode): if intermediates: if use_symbol_array: - scalar_type = self.backend.access.options["scalar_type"] - parts += [L.ArrayDecl(scalar_type, symbol, len(intermediates))] + parts += [L.ArrayDecl(symbol, sizes=len(intermediates))] parts += intermediates return parts diff --git a/ffcx/codegeneration/geometry.py b/ffcx/codegeneration/geometry.py index 2df2bcd93..a5b87f215 100644 --- a/ffcx/codegeneration/geometry.py +++ b/ffcx/codegeneration/geometry.py @@ -5,31 +5,31 @@ # SPDX-License-Identifier: LGPL-3.0-or-later import numpy as np - +import ffcx.codegeneration.lnodes as L import basix -def write_table(L, tablename, cellname, type: str): +def write_table(tablename, cellname): if tablename == "facet_edge_vertices": - return facet_edge_vertices(L, tablename, cellname) + return facet_edge_vertices(tablename, 
cellname) if tablename == "reference_facet_jacobian": - return reference_facet_jacobian(L, tablename, cellname, type) + return reference_facet_jacobian(tablename, cellname) if tablename == "reference_cell_volume": - return reference_cell_volume(L, tablename, cellname, type) + return reference_cell_volume(tablename, cellname) if tablename == "reference_facet_volume": - return reference_facet_volume(L, tablename, cellname, type) + return reference_facet_volume(tablename, cellname) if tablename == "reference_edge_vectors": - return reference_edge_vectors(L, tablename, cellname, type) + return reference_edge_vectors(tablename, cellname) if tablename == "facet_reference_edge_vectors": - return facet_reference_edge_vectors(L, tablename, cellname, type) + return facet_reference_edge_vectors(tablename, cellname) if tablename == "reference_facet_normals": - return reference_facet_normals(L, tablename, cellname, type) + return reference_facet_normals(tablename, cellname) if tablename == "facet_orientation": - return facet_orientation(L, tablename, cellname, type) + return facet_orientation(tablename, cellname) raise ValueError(f"Unknown geometry table name: {tablename}") -def facet_edge_vertices(L, tablename, cellname): +def facet_edge_vertices(tablename, cellname): celltype = getattr(basix.CellType, cellname) topology = basix.topology(celltype) triangle_edges = basix.topology(basix.CellType.triangle)[1] @@ -41,47 +41,52 @@ def facet_edge_vertices(L, tablename, cellname): edge_vertices = [] for facet in topology[-2]: if len(facet) == 3: - edge_vertices += [[facet[i] for i in edge] for edge in triangle_edges] + edge_vertices += [[[facet[i] for i in edge] for edge in triangle_edges]] elif len(facet) == 4: - edge_vertices += [[facet[i] for i in edge] for edge in quadrilateral_edges] + edge_vertices += [[[facet[i] for i in edge] for edge in quadrilateral_edges]] else: raise ValueError("Only triangular and quadrilateral faces supported.") out = np.array(edge_vertices, dtype=int) - return L.ArrayDecl("static const unsigned int", f"{cellname}_{tablename}", out.shape, out) + symbol = L.Symbol(f"{cellname}_{tablename}", dtype=L.DataType.INT) + return L.ArrayDecl(symbol, values=out, const=True) -def reference_facet_jacobian(L, tablename, cellname, type: str): +def reference_facet_jacobian(tablename, cellname): celltype = getattr(basix.CellType, cellname) out = basix.cell.facet_jacobians(celltype) - return L.ArrayDecl(f"static const {type}", f"{cellname}_{tablename}", out.shape, out) + symbol = L.Symbol(f"{cellname}_{tablename}", dtype=L.DataType.REAL) + return L.ArrayDecl(symbol, values=out, const=True) -def reference_cell_volume(L, tablename, cellname, type: str): +def reference_cell_volume(tablename, cellname): celltype = getattr(basix.CellType, cellname) out = basix.cell.volume(celltype) - return L.VariableDecl(f"static const {type}", f"{cellname}_{tablename}", out) + symbol = L.Symbol(f"{cellname}_{tablename}", dtype=L.DataType.REAL) + return L.VariableDecl(symbol, out) -def reference_facet_volume(L, tablename, cellname, type: str): +def reference_facet_volume(tablename, cellname): celltype = getattr(basix.CellType, cellname) volumes = basix.cell.facet_reference_volumes(celltype) for i in volumes[1:]: if not np.isclose(i, volumes[0]): raise ValueError("Reference facet volume not supported for this cell type.") - return L.VariableDecl(f"static const {type}", f"{cellname}_{tablename}", volumes[0]) + symbol = L.Symbol(f"{cellname}_{tablename}", dtype=L.DataType.REAL) + return L.VariableDecl(symbol, 
volumes[0]) -def reference_edge_vectors(L, tablename, cellname, type: str): +def reference_edge_vectors(tablename, cellname): celltype = getattr(basix.CellType, cellname) topology = basix.topology(celltype) geometry = basix.geometry(celltype) edge_vectors = [geometry[j] - geometry[i] for i, j in topology[1]] - out = np.array(edge_vectors[cellname]) - return L.ArrayDecl(f"static const {type}", f"{cellname}_{tablename}", out.shape, out) + out = np.array(edge_vectors) + symbol = L.Symbol(f"{cellname}_{tablename}", dtype=L.DataType.REAL) + return L.ArrayDecl(symbol, values=out, const=True) -def facet_reference_edge_vectors(L, tablename, cellname, type: str): +def facet_reference_edge_vectors(tablename, cellname): celltype = getattr(basix.CellType, cellname) topology = basix.topology(celltype) geometry = basix.geometry(celltype) @@ -101,16 +106,19 @@ def facet_reference_edge_vectors(L, tablename, cellname, type: str): raise ValueError("Only triangular and quadrilateral faces supported.") out = np.array(edge_vectors) - return L.ArrayDecl(f"static const {type}", f"{cellname}_{tablename}", out.shape, out) + symbol = L.Symbol(f"{cellname}_{tablename}", dtype=L.DataType.REAL) + return L.ArrayDecl(symbol, values=out, const=True) -def reference_facet_normals(L, tablename, cellname, type: str): +def reference_facet_normals(tablename, cellname): celltype = getattr(basix.CellType, cellname) out = basix.cell.facet_outward_normals(celltype) - return L.ArrayDecl(f"static const {type}", f"{cellname}_{tablename}", out.shape, out) + symbol = L.Symbol(f"{cellname}_{tablename}", dtype=L.DataType.REAL) + return L.ArrayDecl(symbol, values=out, const=True) -def facet_orientation(L, tablename, cellname, type: str): +def facet_orientation(tablename, cellname): celltype = getattr(basix.CellType, cellname) out = basix.cell.facet_orientations(celltype) - return L.ArrayDecl(f"static const {type}", f"{cellname}_{tablename}", len(out), out) + symbol = L.Symbol(f"{cellname}_{tablename}", dtype=L.DataType.REAL) + return L.ArrayDecl(symbol, values=out, const=True) diff --git a/ffcx/codegeneration/integral_generator.py b/ffcx/codegeneration/integral_generator.py index 1ebcef70f..2713f92ad 100644 --- a/ffcx/codegeneration/integral_generator.py +++ b/ffcx/codegeneration/integral_generator.py @@ -1,4 +1,4 @@ -# Copyright (C) 2015-2021 Martin Sandve Alnæs, Michal Habera, Igor Baratta +# Copyright (C) 2015-2023 Martin Sandve Alnæs, Michal Habera, Igor Baratta, Chris Richardson # # This file is part of FFCx. (https://www.fenicsproject.org) # @@ -10,11 +10,11 @@ import ufl from ffcx.codegeneration import geometry -from ffcx.codegeneration.C.cnodes import BinOp, CNode from ffcx.ir.elementtables import piecewise_ttypes from ffcx.ir.integral import BlockDataT +import ffcx.codegeneration.lnodes as L +from ffcx.codegeneration.lnodes import LNode, BinOp from ffcx.ir.representationutils import QuadratureRule -from ffcx.naming import scalar_to_value_type logger = logging.getLogger("ffcx") @@ -25,7 +25,6 @@ def __init__(self, ir, backend): self.ir = ir # Backend specific plugin with attributes - # - language: for translating ufl operators to target language # - symbols: for translating ufl operators to target language # - definitions: for defining backend specific variables # - access: for accessing backend specific variables @@ -57,7 +56,7 @@ def set_var(self, quadrature_rule, v, vaccess): Scope is determined by quadrature_rule which identifies the quadrature loop scope or None if outside quadrature loops. 
- v is the ufl expression and vaccess is the CNodes + v is the ufl expression and vaccess is the LNodes expression to access the value in the code. """ @@ -72,10 +71,10 @@ def get_var(self, quadrature_rule, v): If v is not found in quadrature loop scope, the piecewise scope (None) is checked. - Returns the CNodes expression to access the value in the code. + Returns the LNodes expression to access the value in the code. """ if v._ufl_is_literal_: - return self.backend.ufl_to_language.get(v) + return L.ufl_to_lnodes(v) f = self.scopes[quadrature_rule].get(v) if f is None: f = self.scopes[None].get(v) @@ -83,13 +82,12 @@ def get_var(self, quadrature_rule, v): def new_temp_symbol(self, basename): """Create a new code symbol named basename + running counter.""" - L = self.backend.language name = "%s%d" % (basename, self.symbol_counters[basename]) self.symbol_counters[basename] += 1 - return L.Symbol(name) + return L.Symbol(name, dtype=L.DataType.SCALAR) def get_temp_symbol(self, tempname, key): - key = (tempname, ) + key + key = (tempname,) + key s = self.shared_symbols.get(key) defined = s is not None if not defined: @@ -104,32 +102,21 @@ def generate(self): context that matches a suitable version of the UFC tabulate_tensor signatures. """ - L = self.backend.language - # Assert that scopes are empty: expecting this to be called only # once assert not any(d for d in self.scopes.values()) parts = [] - scalar_type = self.backend.access.options["scalar_type"] - value_type = scalar_to_value_type(scalar_type) - alignment = self.ir.options['assume_aligned'] - if alignment != -1: - scalar_type = self.backend.access.options["scalar_type"] - parts += [L.VerbatimStatement(f"A = ({scalar_type}*)__builtin_assume_aligned(A, {alignment});"), - L.VerbatimStatement(f"w = (const {scalar_type}*)__builtin_assume_aligned(w, {alignment});"), - L.VerbatimStatement(f"c = (const {scalar_type}*)__builtin_assume_aligned(c, {alignment});"), - L.VerbatimStatement(f"coordinate_dofs = (const {value_type}*)__builtin_assume_aligned(coordinate_dofs, {alignment});")] # noqa # Generate the tables of quadrature points and weights - parts += self.generate_quadrature_tables(value_type) + parts += self.generate_quadrature_tables() # Generate the tables of basis function values and # pre-integrated blocks - parts += self.generate_element_tables(value_type) + parts += self.generate_element_tables() # Generate the tables of geometry data that are needed - parts += self.generate_geometry_tables(value_type) + parts += self.generate_geometry_tables() # Loop generation code will produce parts to go before # quadloops, to define the quadloops, and to go after the @@ -160,11 +147,9 @@ def generate(self): return L.StatementList(parts) - def generate_quadrature_tables(self, value_type: str) -> List[str]: + def generate_quadrature_tables(self): """Generate static tables of quadrature points and weights.""" - L = self.backend.language - - parts: List[str] = [] + parts = [] # No quadrature tables for custom (given argument) or point # (evaluation in single vertex) @@ -172,25 +157,18 @@ def generate_quadrature_tables(self, value_type: str) -> List[str]: if self.ir.integral_type in skip: return parts - padlen = self.ir.options["padlen"] - # Loop over quadrature rules for quadrature_rule, integrand in self.ir.integrand.items(): - num_points = quadrature_rule.weights.shape[0] - # Generate quadrature weights array wsym = self.backend.symbols.weights_table(quadrature_rule) - parts += [L.ArrayDecl(f"static const {value_type}", wsym, num_points, - 
quadrature_rule.weights, padlen=padlen)] + parts += [L.ArrayDecl(wsym, values=quadrature_rule.weights, const=True)] # Add leading comment if there are any tables parts = L.commented_code_list(parts, "Quadrature rules") return parts - def generate_geometry_tables(self, float_type: str): + def generate_geometry_tables(self): """Generate static tables of geometry data.""" - L = self.backend.language - ufl_geometry = { ufl.geometry.FacetEdgeVectors: "facet_edge_vertices", ufl.geometry.CellFacetJacobian: "reference_facet_jacobian", @@ -214,17 +192,15 @@ def generate_geometry_tables(self, float_type: str): parts = [] for i, cell_list in cells.items(): for c in cell_list: - parts.append(geometry.write_table(L, ufl_geometry[i], c, float_type)) + parts.append(geometry.write_table(ufl_geometry[i], c)) return parts - def generate_element_tables(self, float_type: str): + def generate_element_tables(self): """Generate static tables with precomputed element basisfunction values in quadrature points.""" - L = self.backend.language parts = [] tables = self.ir.unique_tables table_types = self.ir.unique_table_types - padlen = self.ir.options["padlen"] if self.ir.integral_type in ufl.custom_integral_types: # Define only piecewise tables table_names = [name for name in sorted(tables) if table_types[name] in piecewise_ttypes] @@ -234,7 +210,7 @@ def generate_element_tables(self, float_type: str): for name in table_names: table = tables[name] - parts += self.declare_table(name, table, padlen, float_type) + parts += self.declare_table(name, table) # Add leading comment if there are any tables parts = L.commented_code_list(parts, [ @@ -242,19 +218,18 @@ def generate_element_tables(self, float_type: str): "FE* dimensions: [permutation][entities][points][dofs]"]) return parts - def declare_table(self, name, table, padlen, value_type: str): + def declare_table(self, name, table): """Declare a table. If the dof dimensions of the table have dof rotations, apply these rotations. 
""" - L = self.backend.language - return [L.ArrayDecl(f"static const {value_type}", name, table.shape, table, padlen=padlen)] + table_symbol = L.Symbol(name, dtype=L.DataType.REAL) + return [L.ArrayDecl(table_symbol, values=table, const=True)] def generate_quadrature_loop(self, quadrature_rule: QuadratureRule): """Generate quadrature loop with for this quadrature_rule.""" - L = self.backend.language # Generate varying partition pre_definitions, body = self.generate_varying_partition(quadrature_rule) @@ -278,12 +253,10 @@ def generate_quadrature_loop(self, quadrature_rule: QuadratureRule): return pre_definitions, preparts, quadparts def generate_piecewise_partition(self, quadrature_rule): - L = self.backend.language - # Get annotated graph of factorisation F = self.ir.integrand[quadrature_rule]["factorization"] - arraysymbol = L.Symbol(f"sp_{quadrature_rule.id()}") + arraysymbol = L.Symbol(f"sp_{quadrature_rule.id()}", dtype=L.DataType.SCALAR) pre_definitions, parts = self.generate_partition(arraysymbol, F, "piecewise", None) assert len(pre_definitions) == 0, "Quadrature independent code should have not pre-definitions" parts = L.commented_code_list( @@ -292,19 +265,17 @@ def generate_piecewise_partition(self, quadrature_rule): return parts def generate_varying_partition(self, quadrature_rule): - L = self.backend.language # Get annotated graph of factorisation F = self.ir.integrand[quadrature_rule]["factorization"] - arraysymbol = L.Symbol(f"sv_{quadrature_rule.id()}") + arraysymbol = L.Symbol(f"sv_{quadrature_rule.id()}", dtype=L.DataType.SCALAR) pre_definitions, parts = self.generate_partition(arraysymbol, F, "varying", quadrature_rule) parts = L.commented_code_list(parts, f"Varying computations for quadrature rule {quadrature_rule.id()}") return pre_definitions, parts def generate_partition(self, symbol, F, mode, quadrature_rule): - L = self.backend.language definitions = dict() pre_definitions = dict() @@ -322,7 +293,7 @@ def generate_partition(self, symbol, F, mode, quadrature_rule): # cache if not self.get_var(quadrature_rule, v): if v._ufl_is_literal_: - vaccess = self.backend.ufl_to_language.get(v) + vaccess = L.ufl_to_lnodes(v) elif mt is not None: # All finite element based terminals have table # data, as well as some, but not all, of the @@ -352,7 +323,7 @@ def generate_partition(self, symbol, F, mode, quadrature_rule): # Mapping UFL operator to target language self._ufl_names.add(v._ufl_handler_name_) - vexpr = self.backend.ufl_to_language.get(v, *vops) + vexpr = L.ufl_to_lnodes(v, *vops) # Create a new intermediate for each subexpression # except boolean conditions and its childs @@ -379,9 +350,8 @@ def generate_partition(self, symbol, F, mode, quadrature_rule): vaccess = symbol[j] intermediates.append(L.Assign(vaccess, vexpr)) else: - scalar_type = self.backend.access.options["scalar_type"] vaccess = L.Symbol("%s_%d" % (symbol.name, j)) - intermediates.append(L.VariableDecl(f"const {scalar_type}", vaccess, vexpr)) + intermediates.append(L.VariableDecl(vaccess, vexpr)) # Store access node for future reference self.set_var(quadrature_rule, v, vaccess) @@ -393,9 +363,7 @@ def generate_partition(self, symbol, F, mode, quadrature_rule): if intermediates: if use_symbol_array: - padlen = self.ir.options["padlen"] - parts += [L.ArrayDecl(self.backend.access.options["scalar_type"], - symbol, len(intermediates), padlen=padlen)] + parts += [L.ArrayDecl(symbol, sizes=len(intermediates))] parts += intermediates return pre_definitions, parts @@ -467,11 +435,9 @@ def 
generate_block_parts(self, quadrature_rule: QuadratureRule, blockmap: Tuple, Should be called with quadrature_rule=None for quadloop-independent blocks. """ - L = self.backend.language - # The parts to return - preparts: List[CNode] = [] - quadparts: List[CNode] = [] + preparts: List[LNode] = [] + quadparts: List[LNode] = [] # RHS expressions grouped by LHS "dofmap" rhs_expressions = collections.defaultdict(list) @@ -523,8 +489,7 @@ def generate_block_parts(self, quadrature_rule: QuadratureRule, blockmap: Tuple, key = (quadrature_rule, factor_index, blockdata.all_factors_piecewise) fw, defined = self.get_temp_symbol("fw", key) if not defined: - scalar_type = self.backend.access.options["scalar_type"] - quadparts.append(L.VariableDecl(f"const {scalar_type}", fw, fw_rhs)) + quadparts.append(L.VariableDecl(fw, fw_rhs)) assert not blockdata.transposed, "Not handled yet" A_shape = self.ir.tensor_shape @@ -551,7 +516,7 @@ def generate_block_parts(self, quadrature_rule: QuadratureRule, blockmap: Tuple, # List of statements to keep in the inner loop keep = collections.defaultdict(list) # List of temporary array declarations - pre_loop: List[CNode] = [] + pre_loop: List[LNode] = [] # List of loop invariant expressions to hoist hoist: List[BinOp] = [] @@ -577,34 +542,29 @@ def generate_block_parts(self, quadrature_rule: QuadratureRule, blockmap: Tuple, # floating point operations (factorize expressions by # grouping) for statement in hoist_rhs: - sum = [] - for rhs in hoist_rhs[statement]: - sum.append(L.float_product(rhs)) - sum = L.Sum(sum) + sum = L.Sum([L.float_product(rhs) for rhs in hoist_rhs[statement]]) lhs = None for h in hoist: - if (h.rhs == sum): + if h.rhs == sum: lhs = h.lhs break if lhs: keep[indices].append(L.float_product([statement, lhs])) else: t = self.new_temp_symbol("t") - scalar_type = self.backend.access.options["scalar_type"] - pre_loop.append(L.ArrayDecl(scalar_type, t, blockdims[0])) + pre_loop.append(L.ArrayDecl(t, sizes=blockdims[0])) keep[indices].append(L.float_product([statement, t[B_indices[0]]])) hoist.append(L.Assign(t[B_indices[i - 1]], sum)) else: keep[indices] = rhs_expressions[indices] - hoist_code: List[CNode] = [L.ForRange(B_indices[0], 0, blockdims[0], body=hoist)] if hoist else [] + hoist_code: List[LNode] = [L.ForRange(B_indices[0], 0, blockdims[0], body=hoist)] if hoist else [] - body: List[CNode] = [] + body: List[LNode] = [] for indices in keep: - sum = L.Sum(keep[indices]) - body.append(L.AssignAdd(A[indices], sum)) + body.append(L.AssignAdd(A[indices], L.Sum(keep[indices]))) for i in reversed(range(block_rank)): body = [L.ForRange(B_indices[i], 0, blockdims[i], body=body)] @@ -626,8 +586,6 @@ def fuse_loops(self, definitions): determine how many loops should fuse at a time. 
""" - L = self.backend.language - loops = collections.defaultdict(list) pre_loop = [] for access, definition in definitions.items(): diff --git a/ffcx/codegeneration/lnodes.py b/ffcx/codegeneration/lnodes.py new file mode 100644 index 000000000..463b31892 --- /dev/null +++ b/ffcx/codegeneration/lnodes.py @@ -0,0 +1,874 @@ +# Copyright (C) 2013-2023 Martin Sandve Alnæs, Chris Richardson +# +# This file is part of FFCx.(https://www.fenicsproject.org) +# +# SPDX-License-Identifier: LGPL-3.0-or-later + +import numbers +import ufl +import numpy as np +from enum import Enum + + +class PRECEDENCE: + """An enum-like class for operator precedence levels.""" + + HIGHEST = 0 + LITERAL = 0 + SYMBOL = 0 + SUBSCRIPT = 2 + + NOT = 3 + NEG = 3 + + MUL = 4 + DIV = 4 + + ADD = 5 + SUB = 5 + + LT = 7 + LE = 7 + GT = 7 + GE = 7 + EQ = 8 + NE = 8 + AND = 11 + OR = 12 + CONDITIONAL = 13 + ASSIGN = 13 + LOWEST = 15 + + +"""LNodes is intended as a minimal generic language description. +Formatting is done later, depending on the target language. + +Supported: + Floating point (and complex) and integer variables and multidimensional arrays + Range loops + Simple arithmetic, +-*/ + Math operations + Logic conditions + Comments +Not supported: + Pointers + Function Calls + Flow control (if, switch, while) + Booleans + Strings +""" + + +def is_zero_lexpr(lexpr): + return (isinstance(lexpr, LiteralFloat) and lexpr.value == 0.0) or ( + isinstance(lexpr, LiteralInt) and lexpr.value == 0 + ) + + +def is_one_lexpr(lexpr): + return (isinstance(lexpr, LiteralFloat) and lexpr.value == 1.0) or ( + isinstance(lexpr, LiteralInt) and lexpr.value == 1 + ) + + +def is_negative_one_lexpr(lexpr): + return (isinstance(lexpr, LiteralFloat) and lexpr.value == -1.0) or ( + isinstance(lexpr, LiteralInt) and lexpr.value == -1 + ) + + +def float_product(factors): + """Build product of float factors, simplifying ones and zeros and returning 1.0 if empty sequence.""" + factors = [f for f in factors if not is_one_lexpr(f)] + if len(factors) == 0: + return LiteralFloat(1.0) + elif len(factors) == 1: + return factors[0] + else: + for f in factors: + if is_zero_lexpr(f): + return f + return Product(factors) + + +class DataType(Enum): + """Representation of data types for variables in LNodes. + + These can be REAL (same type as geometry), + SCALAR (same type as tensor), or INT (for entity indices etc.) + """ + + REAL = 0 + SCALAR = 1 + INT = 2 + + +def merge_dtypes(dtype0, dtype1): + # Promote dtype to SCALAR or REAL if either argument matches + if DataType.SCALAR in (dtype0, dtype1): + return DataType.SCALAR + elif DataType.REAL in (dtype0, dtype1): + return DataType.REAL + elif (dtype0 == DataType.INT and dtype1 == DataType.INT): + return DataType.INT + else: + raise ValueError(f"Can't get dtype for binary operation with {dtype0, dtype1}") + + +class LNode(object): + """Base class for all AST nodes.""" + + def __eq__(self, other): + name = self.__class__.__name__ + raise NotImplementedError("Missing implementation of __eq__ in " + name) + + def __ne__(self, other): + return not self.__eq__(other) + + +class LExpr(LNode): + """Base class for all expressions. + + All subtypes should define a 'precedence' class attribute. 
+ """ + + def __getitem__(self, indices): + return ArrayAccess(self, indices) + + def __neg__(self): + if isinstance(self, LiteralFloat): + return LiteralFloat(-self.value) + if isinstance(self, LiteralInt): + return LiteralInt(-self.value) + return Neg(self) + + def __add__(self, other): + other = as_lexpr(other) + if is_zero_lexpr(self): + return other + if is_zero_lexpr(other): + return self + if isinstance(other, Neg): + return Sub(self, other.arg) + return Add(self, other) + + def __radd__(self, other): + other = as_lexpr(other) + if is_zero_lexpr(self): + return other + if is_zero_lexpr(other): + return self + if isinstance(self, Neg): + return Sub(other, self.arg) + return Add(other, self) + + def __sub__(self, other): + other = as_lexpr(other) + if is_zero_lexpr(self): + return -other + if is_zero_lexpr(other): + return self + if isinstance(other, Neg): + return Add(self, other.arg) + if isinstance(self, LiteralInt) and isinstance(other, LiteralInt): + return LiteralInt(self.value - other.value) + return Sub(self, other) + + def __rsub__(self, other): + other = as_lexpr(other) + if is_zero_lexpr(self): + return other + if is_zero_lexpr(other): + return -self + if isinstance(self, Neg): + return Add(other, self.arg) + return Sub(other, self) + + def __mul__(self, other): + other = as_lexpr(other) + if is_zero_lexpr(self): + return self + if is_zero_lexpr(other): + return other + if is_one_lexpr(self): + return other + if is_one_lexpr(other): + return self + if is_negative_one_lexpr(other): + return Neg(self) + if is_negative_one_lexpr(self): + return Neg(other) + if isinstance(self, LiteralInt) and isinstance(other, LiteralInt): + return LiteralInt(self.value * other.value) + return Mul(self, other) + + def __rmul__(self, other): + other = as_lexpr(other) + if is_zero_lexpr(self): + return self + if is_zero_lexpr(other): + return other + if is_one_lexpr(self): + return other + if is_one_lexpr(other): + return self + if is_negative_one_lexpr(other): + return Neg(self) + if is_negative_one_lexpr(self): + return Neg(other) + return Mul(other, self) + + def __div__(self, other): + other = as_lexpr(other) + if is_zero_lexpr(other): + raise ValueError("Division by zero!") + if is_zero_lexpr(self): + return self + return Div(self, other) + + def __rdiv__(self, other): + other = as_lexpr(other) + if is_zero_lexpr(self): + raise ValueError("Division by zero!") + if is_zero_lexpr(other): + return other + return Div(other, self) + + # TODO: Error check types? 
+ __truediv__ = __div__ + __rtruediv__ = __rdiv__ + __floordiv__ = __div__ + __rfloordiv__ = __rdiv__ + + +class LExprOperator(LExpr): + """Base class for all expression operators.""" + + sideeffect = False + + +class LExprTerminal(LExpr): + """Base class for all expression terminals.""" + + sideeffect = False + + +# LExprTerminal types + + +class LiteralFloat(LExprTerminal): + """A floating point literal value.""" + + precedence = PRECEDENCE.LITERAL + + def __init__(self, value): + assert isinstance(value, (float, complex)) + self.value = value + if isinstance(value, complex): + self.dtype = DataType.SCALAR + else: + self.dtype = DataType.REAL + + def __eq__(self, other): + return isinstance(other, LiteralFloat) and self.value == other.value + + def __float__(self): + return float(self.value) + + +class LiteralInt(LExprTerminal): + """An integer literal value.""" + + precedence = PRECEDENCE.LITERAL + + def __init__(self, value): + assert isinstance(value, (int, np.number)) + self.value = value + self.dtype = DataType.INT + + def __eq__(self, other): + return isinstance(other, LiteralInt) and self.value == other.value + + def __hash__(self): + return hash(self.value) + + +class Symbol(LExprTerminal): + """A named symbol.""" + + precedence = PRECEDENCE.SYMBOL + + def __init__(self, name, dtype=None): + assert isinstance(name, str) + self.name = name + self.dtype = dtype + + def __eq__(self, other): + return isinstance(other, Symbol) and self.name == other.name + + def __hash__(self): + return hash(self.name) + + +class PrefixUnaryOp(LExprOperator): + """Base class for unary operators.""" + + def __init__(self, arg): + self.arg = as_lexpr(arg) + + def __eq__(self, other): + return isinstance(other, type(self)) and self.arg == other.arg + + +class BinOp(LExprOperator): + def __init__(self, lhs, rhs): + self.lhs = as_lexpr(lhs) + self.rhs = as_lexpr(rhs) + + def __eq__(self, other): + return ( + isinstance(other, type(self)) + and self.lhs == other.lhs + and self.rhs == other.rhs + ) + + def __hash__(self): + return hash(self.lhs) + hash(self.rhs) + + +class ArithmeticBinOp(BinOp): + def __init__(self, lhs, rhs): + self.lhs = as_lexpr(lhs) + self.rhs = as_lexpr(rhs) + self.dtype = merge_dtypes(self.lhs.dtype, self.rhs.dtype) + + +class NaryOp(LExprOperator): + """Base class for special n-ary operators.""" + + def __init__(self, args): + self.args = [as_lexpr(arg) for arg in args] + + def __eq__(self, other): + return ( + isinstance(other, type(self)) + and len(self.args) == len(other.args) + and all(a == b for a, b in zip(self.args, other.args)) + ) + + +class Neg(PrefixUnaryOp): + precedence = PRECEDENCE.NEG + op = "-" + + def __init__(self, arg): + self.arg = as_lexpr(arg) + self.dtype = self.arg.dtype + + +class Not(PrefixUnaryOp): + precedence = PRECEDENCE.NOT + op = "!" 
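The arithmetic dunder methods defined on LExpr above fold literal zeros and ones away while the expression tree is being built, and the ArithmeticBinOp subclasses that follow merge operand dtypes via merge_dtypes. A minimal usage sketch of this behaviour (illustrative only; it assumes the ffcx.codegeneration.lnodes module added by this patch is importable):

import ffcx.codegeneration.lnodes as L

x = L.Symbol("x", dtype=L.DataType.SCALAR)
w = L.Symbol("w", dtype=L.DataType.REAL)

# Plain Python literals are wrapped by as_lexpr(); multiplying by 1.0 and
# adding 0.0 are simplified away, so only Add(x, w) remains.
expr = x * 1.0 + 0.0 + w
assert isinstance(expr, L.Add)

# SCALAR combined with REAL promotes to SCALAR (see merge_dtypes above).
assert expr.dtype == L.DataType.SCALAR
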
+ + +# Binary operators +# Arithmetic operators preserve the dtype of their operands +# The other operations (logical) do not need a dtype + +class Add(ArithmeticBinOp): + precedence = PRECEDENCE.ADD + op = "+" + + +class Sub(ArithmeticBinOp): + precedence = PRECEDENCE.SUB + op = "-" + + +class Mul(ArithmeticBinOp): + precedence = PRECEDENCE.MUL + op = "*" + + +class Div(ArithmeticBinOp): + precedence = PRECEDENCE.DIV + op = "/" + + +class EQ(BinOp): + precedence = PRECEDENCE.EQ + op = "==" + + +class NE(BinOp): + precedence = PRECEDENCE.NE + op = "!=" + + +class LT(BinOp): + precedence = PRECEDENCE.LT + op = "<" + + +class GT(BinOp): + precedence = PRECEDENCE.GT + op = ">" + + +class LE(BinOp): + precedence = PRECEDENCE.LE + op = "<=" + + +class GE(BinOp): + precedence = PRECEDENCE.GE + op = ">=" + + +class And(BinOp): + precedence = PRECEDENCE.AND + op = "&&" + + +class Or(BinOp): + precedence = PRECEDENCE.OR + op = "||" + + +class Sum(NaryOp): + """Sum of any number of operands.""" + + precedence = PRECEDENCE.ADD + op = "+" + + +class Product(NaryOp): + """Product of any number of operands.""" + + precedence = PRECEDENCE.MUL + op = "*" + + +class MathFunction(LExprOperator): + """A Math Function, with any arguments.""" + + precedence = PRECEDENCE.HIGHEST + + def __init__(self, func, args): + self.function = func + self.args = [as_lexpr(arg) for arg in args] + self.dtype = self.args[0].dtype + + def __eq__(self, other): + return ( + isinstance(other, type(self)) + and self.function == other.function + and len(self.args) == len(other.args) + and all(a == b for a, b in zip(self.args, other.args)) + ) + + +class AssignOp(BinOp): + """Base class for assignment operators.""" + + precedence = PRECEDENCE.ASSIGN + sideeffect = True + + def __init__(self, lhs, rhs): + assert isinstance(lhs, LNode) + BinOp.__init__(self, lhs, rhs) + + +class Assign(AssignOp): + op = "=" + + +class AssignAdd(AssignOp): + op = "+=" + + +class AssignSub(AssignOp): + op = "-=" + + +class AssignMul(AssignOp): + op = "*=" + + +class AssignDiv(AssignOp): + op = "/=" + + +class FlattenedArray(object): + """Syntax carrying object only, will get translated on __getitem__ to ArrayAccess.""" + + def __init__(self, array, dims=None): + assert dims is not None + assert isinstance(array, Symbol) + self.array = array + + # Allow expressions or literals as strides or dims and offset + assert isinstance(dims, (list, tuple)) + dims = tuple(as_lexpr(i) for i in dims) + self.dims = dims + n = len(dims) + literal_one = LiteralInt(1) + strides = [literal_one] * n + for i in range(n - 2, -1, -1): + s = strides[i + 1] + d = dims[i + 1] + if d == literal_one: + strides[i] = s + elif s == literal_one: + strides[i] = d + else: + strides[i] = d * s + + self.strides = strides + + def __getitem__(self, indices): + if not isinstance(indices, (list, tuple)): + indices = (indices,) + n = len(indices) + if n == 0: + # Handle scalar case, allowing dims=() and indices=() for A[0] + if len(self.strides) != 0: + raise ValueError("Empty indices for nonscalar array.") + flat = LiteralInt(0) + else: + i, s = (indices[0], self.strides[0]) + literal_one = LiteralInt(1) + flat = i if s == literal_one else s * i + for i, s in zip(indices[1:n], self.strides[1:n]): + flat = flat + s * i + # Delay applying ArrayAccess until we have all indices + if n == len(self.strides): + return ArrayAccess(self.array, flat) + else: + return FlattenedArray(self.array, strides=self.strides[n:], offset=flat) + + +class ArrayAccess(LExprOperator): + precedence = 
PRECEDENCE.SUBSCRIPT + + def __init__(self, array, indices): + # Typecheck array argument + if isinstance(array, Symbol): + self.array = array + self.dtype = array.dtype + elif isinstance(array, ArrayDecl): + self.array = array.symbol + self.dtype = array.symbol.dtype + else: + raise ValueError("Unexpected array type %s." % (type(array).__name__,)) + + # Allow expressions or literals as indices + if not isinstance(indices, (list, tuple)): + indices = (indices,) + self.indices = tuple(as_lexpr(i) for i in indices) + + # Early error checking for negative array dimensions + if any(isinstance(i, int) and i < 0 for i in self.indices): + raise ValueError("Index value < 0.") + + # Additional dimension checks possible if we get an ArrayDecl instead of just a name + if isinstance(array, ArrayDecl): + if len(self.indices) != len(array.sizes): + raise ValueError("Invalid number of indices.") + ints = (int, LiteralInt) + if any( + (isinstance(i, ints) and isinstance(d, ints) and int(i) >= int(d)) + for i, d in zip(self.indices, array.sizes) + ): + raise ValueError("Index value >= array dimension.") + + def __getitem__(self, indices): + """Handle nested expr[i][j].""" + if isinstance(indices, list): + indices = tuple(indices) + elif not isinstance(indices, tuple): + indices = (indices,) + return ArrayAccess(self.array, self.indices + indices) + + def __eq__(self, other): + return ( + isinstance(other, type(self)) + and self.array == other.array + and self.indices == other.indices + ) + + def __hash__(self): + return hash(self.array) + + +class Conditional(LExprOperator): + precedence = PRECEDENCE.CONDITIONAL + + def __init__(self, condition, true, false): + self.condition = as_lexpr(condition) + self.true = as_lexpr(true) + self.false = as_lexpr(false) + self.dtype = merge_dtypes(self.true.dtype, self.false.dtype) + + def __eq__(self, other): + return ( + isinstance(other, type(self)) + and self.condition == other.condition + and self.true == other.true + and self.false == other.false + ) + + +def as_lexpr(node): + """Typechecks and wraps an object as a valid LExpr. + + Accepts LExpr nodes, treats int and float as literals. + + """ + if isinstance(node, LExpr): + return node + elif isinstance(node, numbers.Integral): + return LiteralInt(node) + elif isinstance(node, numbers.Real): + return LiteralFloat(node) + else: + raise RuntimeError("Unexpected LExpr type %s:\n%s" % (type(node), str(node))) + + +class Statement(LNode): + """Make an expression into a statement.""" + + is_scoped = False + + def __init__(self, expr): + self.expr = as_lexpr(expr) + + def __eq__(self, other): + return isinstance(other, type(self)) and self.expr == other.expr + + +class StatementList(LNode): + """A simple sequence of statements. 
No new scopes are introduced.""" + + def __init__(self, statements): + self.statements = [as_statement(st) for st in statements] + + @property + def is_scoped(self): + return all(st.is_scoped for st in self.statements) + + def __eq__(self, other): + return isinstance(other, type(self)) and self.statements == other.statements + + +class Comment(Statement): + """Line comment(s) used for annotating the generated code with human readable remarks.""" + + is_scoped = True + + def __init__(self, comment): + assert isinstance(comment, str) + self.comment = comment + + def __eq__(self, other): + return isinstance(other, type(self)) and self.comment == other.comment + + +def commented_code_list(code, comments): + """Add comment to code list if the list is not empty.""" + if isinstance(code, LNode): + code = [code] + assert isinstance(code, list) + if code: + if not isinstance(comments, (list, tuple)): + comments = [comments] + comments = [Comment(c) for c in comments] + code = comments + code + return code + + +# Type and variable declarations + + +class VariableDecl(Statement): + """Declare a variable, optionally define initial value.""" + + is_scoped = False + + def __init__(self, symbol, value=None): + + assert isinstance(symbol, Symbol) + assert symbol.dtype is not None + self.symbol = symbol + + if value is not None: + value = as_lexpr(value) + self.value = value + + def __eq__(self, other): + return ( + isinstance(other, type(self)) + and self.typename == other.typename + and self.symbol == other.symbol + and self.value == other.value + ) + + +class ArrayDecl(Statement): + """A declaration or definition of an array. + + Note that just setting values=0 is sufficient to initialize the + entire array to zero. + + Otherwise use nested lists of lists to represent multidimensional + array values to initialize to. + + """ + + is_scoped = False + + def __init__(self, symbol, sizes=None, values=None, const=False): + assert isinstance(symbol, Symbol) + self.symbol = symbol + assert symbol.dtype + + if sizes is None: + assert values is not None + sizes = values.shape + if isinstance(sizes, int): + sizes = (sizes,) + self.sizes = tuple(sizes) + + if values is None: + assert sizes is not None + + # NB! No type checking, assuming nested lists of literal values. Not applying as_lexpr. 
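+        # Lists/tuples of (nested) literal values are stored as a numpy array;
+        # any other value (e.g. the scalar 0 used to zero-initialise the whole
+        # array) is kept as-is.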
+ if isinstance(values, (list, tuple)): + self.values = np.asarray(values) + else: + self.values = values + + self.const = const + + def __eq__(self, other): + attributes = ("typename", "symbol", "sizes", "padlen", "values") + return isinstance(other, type(self)) and all( + getattr(self, name) == getattr(self, name) for name in attributes + ) + + +def is_simple_inner_loop(code): + if isinstance(code, ForRange) and is_simple_inner_loop(code.body): + return True + if isinstance(code, Statement) and isinstance(code.expr, AssignOp): + return True + return False + + +class ForRange(Statement): + """Slightly higher-level for loop assuming incrementing an index over a range.""" + + is_scoped = True + + def __init__(self, index, begin, end, body): + assert isinstance(index, Symbol) + self.index = index + self.begin = as_lexpr(begin) + self.end = as_lexpr(end) + assert isinstance(body, list) + self.body = StatementList(body) + + def __eq__(self, other): + attributes = ("index", "begin", "end", "body") + return isinstance(other, type(self)) and all( + getattr(self, name) == getattr(self, name) for name in attributes + ) + + +def as_statement(node): + """Perform type checking on node and wrap in a suitable statement type if necessary.""" + if isinstance(node, StatementList) and len(node.statements) == 1: + # Cleans up the expression tree a bit + return node.statements[0] + elif isinstance(node, Statement): + # No-op + return node + elif isinstance(node, LExprOperator): + if node.sideeffect: + # Special case for using assignment expressions as statements + return Statement(node) + else: + raise RuntimeError( + "Trying to create a statement of lexprOperator type %s:\n%s" + % (type(node), str(node)) + ) + elif isinstance(node, list): + # Convenience case for list of statements + if len(node) == 1: + # Cleans up the expression tree a bit + return as_statement(node[0]) + else: + return StatementList(node) + else: + raise RuntimeError( + "Unexpected CStatement type %s:\n%s" % (type(node), str(node)) + ) + + +def _math_function(op, *args): + return MathFunction(op._ufl_handler_name_, args) + + +# Lookup table for handler to call when the ufl_to_lnodes method (below) is +# called, depending on the first argument type. 
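+# For example, a ufl.algebra.Sum node whose operands have already been
+# converted to LNodes expressions a and b maps to the LNodes expression a + b,
+# while math functions (Sqrt, Sin, ...) are wrapped in a MathFunction node.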
+_ufl_call_lookup = { + ufl.constantvalue.IntValue: lambda x: LiteralInt(int(x)), + ufl.constantvalue.FloatValue: lambda x: LiteralFloat(float(x)), + ufl.constantvalue.ComplexValue: lambda x: LiteralFloat(x.value()), + ufl.constantvalue.Zero: lambda x: LiteralFloat(0.0), + ufl.algebra.Product: lambda x, a, b: a * b, + ufl.algebra.Sum: lambda x, a, b: a + b, + ufl.algebra.Division: lambda x, a, b: a / b, + ufl.algebra.Abs: _math_function, + ufl.algebra.Power: _math_function, + ufl.algebra.Real: _math_function, + ufl.algebra.Imag: _math_function, + ufl.algebra.Conj: _math_function, + ufl.classes.GT: lambda x, a, b: GT(a, b), + ufl.classes.GE: lambda x, a, b: GE(a, b), + ufl.classes.EQ: lambda x, a, b: EQ(a, b), + ufl.classes.NE: lambda x, a, b: NE(a, b), + ufl.classes.LT: lambda x, a, b: LT(a, b), + ufl.classes.LE: lambda x, a, b: LE(a, b), + ufl.classes.AndCondition: lambda x, a, b: And(a, b), + ufl.classes.OrCondition: lambda x, a, b: Or(a, b), + ufl.classes.NotCondition: lambda x, a: Not(a), + ufl.classes.Conditional: lambda x, c, t, f: Conditional(c, t, f), + ufl.classes.MinValue: _math_function, + ufl.classes.MaxValue: _math_function, + ufl.mathfunctions.Sqrt: _math_function, + ufl.mathfunctions.Ln: _math_function, + ufl.mathfunctions.Exp: _math_function, + ufl.mathfunctions.Cos: _math_function, + ufl.mathfunctions.Sin: _math_function, + ufl.mathfunctions.Tan: _math_function, + ufl.mathfunctions.Cosh: _math_function, + ufl.mathfunctions.Sinh: _math_function, + ufl.mathfunctions.Tanh: _math_function, + ufl.mathfunctions.Acos: _math_function, + ufl.mathfunctions.Asin: _math_function, + ufl.mathfunctions.Atan: _math_function, + ufl.mathfunctions.Erf: _math_function, + ufl.mathfunctions.Atan2: _math_function, + ufl.mathfunctions.MathFunction: _math_function, + ufl.mathfunctions.BesselJ: _math_function, + ufl.mathfunctions.BesselY: _math_function} + + +def ufl_to_lnodes(operator, *args): + # Call appropriate handler, depending on the type of operator + optype = type(operator) + if optype in _ufl_call_lookup: + return _ufl_call_lookup[optype](operator, *args) + else: + raise RuntimeError(f"Missing lookup for expr type {optype}.") diff --git a/ffcx/codegeneration/symbols.py b/ffcx/codegeneration/symbols.py index 7630a0d68..41d6dd20b 100644 --- a/ffcx/codegeneration/symbols.py +++ b/ffcx/codegeneration/symbols.py @@ -7,6 +7,7 @@ import logging import ufl +import ffcx.codegeneration.lnodes as L logger = logging.getLogger("ffcx") @@ -60,10 +61,8 @@ def format_mt_name(basename, mt): class FFCXBackendSymbols(object): """FFCx specific symbol definitions. 
Provides non-ufl symbols.""" - def __init__(self, language, coefficient_numbering, coefficient_offsets, + def __init__(self, coefficient_numbering, coefficient_offsets, original_constant_offsets): - self.L = language - self.S = self.L.Symbol self.coefficient_numbering = coefficient_numbering self.coefficient_offsets = coefficient_offsets @@ -71,71 +70,71 @@ def __init__(self, language, coefficient_numbering, coefficient_offsets, def element_tensor(self): """Symbol for the element tensor itself.""" - return self.S("A") + return L.Symbol("A") def entity(self, entitytype, restriction): """Entity index for lookup in element tables.""" if entitytype == "cell": # Always 0 for cells (even with restriction) - return self.L.LiteralInt(0) + return L.LiteralInt(0) elif entitytype == "facet": postfix = "[0]" if restriction == "-": postfix = "[1]" - return self.S("entity_local_index" + postfix) + return L.Symbol("entity_local_index" + postfix, dtype=L.DataType.INT) elif entitytype == "vertex": - return self.S("entity_local_index[0]") + return L.Symbol("entity_local_index[0]", dtype=L.DataType.INT) else: logging.exception(f"Unknown entitytype {entitytype}") def argument_loop_index(self, iarg): """Loop index for argument #iarg.""" indices = ["i", "j", "k", "l"] - return self.S(indices[iarg]) + return L.Symbol(indices[iarg], dtype=L.DataType.INT) def coefficient_dof_sum_index(self): """Index for loops over coefficient dofs, assumed to never be used in two nested loops.""" - return self.S("ic") + return L.Symbol("ic", dtype=L.DataType.INT) def quadrature_loop_index(self): """Reusing a single index name for all quadrature loops, assumed not to be nested.""" - return self.S("iq") + return L.Symbol("iq", dtype=L.DataType.INT) def quadrature_permutation(self, index): """Quadrature permutation, as input to the function.""" - return self.S("quadrature_permutation")[index] + return L.Symbol("quadrature_permutation", dtype=L.DataType.INT)[index] def custom_weights_table(self): """Table for chunk of custom quadrature weights (including cell measure scaling).""" - return self.S("weights_chunk") + return L.Symbol("weights_chunk", dtype=L.DataType.REAL) def custom_points_table(self): """Table for chunk of custom quadrature points (physical coordinates).""" - return self.S("points_chunk") + return L.Symbol("points_chunk", dtype=L.DataType.REAL) def weights_table(self, quadrature_rule): """Table of quadrature weights.""" - return self.S(f"weights_{quadrature_rule.id()}") + return L.Symbol(f"weights_{quadrature_rule.id()}", dtype=L.DataType.REAL) def points_table(self, quadrature_rule): """Table of quadrature points (points on the reference integration entity).""" - return self.S(f"points_{quadrature_rule.id()}") + return L.Symbol(f"points_{quadrature_rule.id()}", dtype=L.DataType.REAL) def x_component(self, mt): """Physical coordinate component.""" - return self.S(format_mt_name("x", mt)) + return L.Symbol(format_mt_name("x", mt), dtype=L.DataType.REAL) def J_component(self, mt): """Jacobian component.""" # FIXME: Add domain number! - return self.S(format_mt_name("J", mt)) + return L.Symbol(format_mt_name("J", mt), dtype=L.DataType.REAL) def domain_dof_access(self, dof, component, gdim, num_scalar_dofs, restriction): # FIXME: Add domain number or offset! 
offset = 0 if restriction == "-": offset = num_scalar_dofs * 3 - vc = self.S("coordinate_dofs") + vc = L.Symbol("coordinate_dofs", dtype=L.DataType.REAL) return vc[3 * dof + component + offset] def domain_dofs_access(self, gdim, num_scalar_dofs, restriction): @@ -147,14 +146,14 @@ def domain_dofs_access(self, gdim, num_scalar_dofs, restriction): def coefficient_dof_access(self, coefficient, dof_index): offset = self.coefficient_offsets[coefficient] - w = self.S("w") + w = L.Symbol("w", dtype=L.DataType.SCALAR) return w[offset + dof_index] def coefficient_dof_access_blocked(self, coefficient: ufl.Coefficient, index, block_size, dof_offset): coeff_offset = self.coefficient_offsets[coefficient] - w = self.S("w") - _w = self.S(f"_w_{coeff_offset}_{dof_offset}") + w = L.Symbol("w", dtype=L.DataType.SCALAR) + _w = L.Symbol(f"_w_{coeff_offset}_{dof_offset}", dtype=L.DataType.SCALAR) unit_stride_access = _w[index] original_access = w[coeff_offset + index * block_size + dof_offset] return unit_stride_access, original_access @@ -162,17 +161,14 @@ def coefficient_dof_access_blocked(self, coefficient: ufl.Coefficient, index, def coefficient_value(self, mt): """Symbol for variable holding value or derivative component of coefficient.""" c = self.coefficient_numbering[mt.terminal] - return self.S(format_mt_name("w%d" % (c, ), mt)) + return L.Symbol(format_mt_name("w%d" % (c, ), mt), dtype=L.DataType.SCALAR) def constant_index_access(self, constant, index): offset = self.original_constant_offsets[constant] - c = self.S("c") + c = L.Symbol("c", dtype=L.DataType.SCALAR) return c[offset + index] - def named_table(self, name): - return self.S(name) - def element_table(self, tabledata, entitytype, restriction): entity = self.entity(entitytype, restriction) @@ -194,4 +190,4 @@ def element_table(self, tabledata, entitytype, restriction): qp = 0 # Return direct access to element table - return self.named_table(tabledata.name)[qp][entity][iq] + return L.Symbol(tabledata.name, dtype=L.DataType.REAL)[qp][entity][iq] diff --git a/ffcx/codegeneration/utils.py b/ffcx/codegeneration/utils.py new file mode 100644 index 000000000..06497c172 --- /dev/null +++ b/ffcx/codegeneration/utils.py @@ -0,0 +1,34 @@ +# Copyright (C) 2020-2023 Michal Habera and Chris Richardson +# +# This file is part of FFCx.(https://www.fenicsproject.org) +# +# SPDX-License-Identifier: LGPL-3.0-or-later + +def cdtype_to_numpy(cdtype: str): + """Map a C data type string NumPy datatype string.""" + if cdtype == "double": + return "float64" + elif cdtype == "double _Complex": + return "complex128" + elif cdtype == "float": + return "float32" + elif cdtype == "float _Complex": + return "complex64" + elif cdtype == "long double": + return "longdouble" + else: + raise RuntimeError(f"Unknown NumPy type for: {cdtype}") + + +def scalar_to_value_type(scalar_type: str) -> str: + """The C value type associated with a C scalar type. + + Args: + scalar_type: A C type. + + Returns: + The value type associated with ``scalar_type``. E.g., if + ``scalar_type`` is ``float _Complex`` the return value is 'float'. + + """ + return scalar_type.replace(' _Complex', '') diff --git a/test/test_flops.py b/test/test_flops.py deleted file mode 100644 index 9ac99ad77..000000000 --- a/test/test_flops.py +++ /dev/null @@ -1,40 +0,0 @@ -# Copyright (C) 2021 Igor A. Baratta -# -# This file is part of FFCx. 
(https://www.fenicsproject.org) -# -# SPDX-License-Identifier: LGPL-3.0-or-later - - -import ufl -import basix.ufl -from ffcx.codegeneration.flop_count import count_flops - - -def create_form(degree): - mesh = ufl.Mesh(basix.ufl.element("Lagrange", "triangle", 1, rank=1)) - element = basix.ufl.element("Lagrange", "triangle", degree) - V = ufl.FunctionSpace(mesh, element) - - u = ufl.TrialFunction(V) - v = ufl.TestFunction(V) - - return ufl.inner(ufl.grad(u), ufl.grad(v)) * ufl.dx + ufl.inner(u, v) * ufl.ds - - -def test_flops(): - k1, k2 = 2, 4 - a1 = create_form(k1) - a2 = create_form(k2) - - dofs1 = (k1 + 1.) * (k1 + 2.) / 2. - dofs2 = (k2 + 1.) * (k2 + 2.) / 2. - - flops_1 = count_flops(a1) - assert len(flops_1) == 2 - - flops_2 = count_flops(a2) - assert len(flops_2) == 2 - - r = sum(flops_2, 0.) / sum(flops_1, 0.) - - assert r > (dofs2**2 / dofs1**2) From 63f060e6618901e7c082c780f9627bd0b9153bb4 Mon Sep 17 00:00:00 2001 From: Chris Richardson Date: Tue, 5 Sep 2023 11:31:25 +0100 Subject: [PATCH 30/44] Fix issue #594 (#600) * add brackets for neg * Add test --- demo/VectorConstant.py | 39 +++++++++++++++++++++++ ffcx/codegeneration/C/c_implementation.py | 16 ++++------ ffcx/codegeneration/lnodes.py | 15 +++++++++ 3 files changed, 61 insertions(+), 9 deletions(-) create mode 100644 demo/VectorConstant.py diff --git a/demo/VectorConstant.py b/demo/VectorConstant.py new file mode 100644 index 000000000..a91856f12 --- /dev/null +++ b/demo/VectorConstant.py @@ -0,0 +1,39 @@ +# Copyright (C) 2016 Jan Blechta +# +# This file is part of FFCx. +# +# FFCx is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# FFCx is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with FFCx. If not, see . +# +# The bilinear form a(u, v) and linear form L(v) for +# Poisson's equation using bilinear elements on bilinear mesh geometry. 
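+#
+# Note: this demo exercises unary negation of a vector-valued Constant in the
+# coefficient `theta` below; it was added together with the fix for issue #594,
+# which adds brackets around unary negation in the generated C code.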
+import basix.ufl +from ufl import (Constant, Coefficient, FunctionSpace, Mesh, TestFunction, TrialFunction, + dx, grad, inner) + +coords = basix.ufl.element("P", "triangle", 2, rank=1) +mesh = Mesh(coords) +dx = dx(mesh) + +element = basix.ufl.element("P", mesh.ufl_cell().cellname(), 2) +space = FunctionSpace(mesh, element) + +u = TrialFunction(space) +v = TestFunction(space) +f = Coefficient(space) + +L = f * v * dx + +mu = Constant(mesh, shape=(3,)) +theta = - (mu[1] - 2) / mu[0] - (2 * (2 * mu[0] - 2) * (mu[0] - 1)) / (mu[0] * (mu[1] - 2)) +a = theta * inner(grad(u), grad(v)) * dx diff --git a/ffcx/codegeneration/C/c_implementation.py b/ffcx/codegeneration/C/c_implementation.py index f6d18bacd..27b64a024 100644 --- a/ffcx/codegeneration/C/c_implementation.py +++ b/ffcx/codegeneration/C/c_implementation.py @@ -236,13 +236,11 @@ def format_binary_op(self, oper) -> str: # Return combined string return f"{lhs} {oper.op} {rhs}" - def format_neg(self, val) -> str: - arg = self.c_format(val.arg) - return f"-{arg}" - - def format_not(self, val) -> str: - arg = self.c_format(val.arg) - return f"{val.op}({arg})" + def format_unary_op(self, oper) -> str: + arg = self.c_format(oper.arg) + if oper.arg.precedence >= oper.precedence: + return f"{oper.op}({arg})" + return f"{oper.op}{arg}" def format_literal_float(self, val) -> str: value = self._format_number(val.value) @@ -319,13 +317,13 @@ def format_math_function(self, c) -> str: "Assign": format_assign, "AssignAdd": format_assign, "Product": format_nary_op, - "Neg": format_neg, + "Neg": format_unary_op, "Sum": format_nary_op, "Add": format_binary_op, "Sub": format_binary_op, "Mul": format_binary_op, "Div": format_binary_op, - "Not": format_not, + "Not": format_unary_op, "LiteralFloat": format_literal_float, "LiteralInt": format_literal_int, "Symbol": format_symbol, diff --git a/ffcx/codegeneration/lnodes.py b/ffcx/codegeneration/lnodes.py index 463b31892..4b15546e5 100644 --- a/ffcx/codegeneration/lnodes.py +++ b/ffcx/codegeneration/lnodes.py @@ -275,6 +275,9 @@ def __eq__(self, other): def __float__(self): return float(self.value) + def __repr__(self): + return str(self.value) + class LiteralInt(LExprTerminal): """An integer literal value.""" @@ -292,6 +295,9 @@ def __eq__(self, other): def __hash__(self): return hash(self.value) + def __repr__(self): + return str(self.value) + class Symbol(LExprTerminal): """A named symbol.""" @@ -309,6 +315,9 @@ def __eq__(self, other): def __hash__(self): return hash(self.name) + def __repr__(self): + return self.name + class PrefixUnaryOp(LExprOperator): """Base class for unary operators.""" @@ -335,6 +344,9 @@ def __eq__(self, other): def __hash__(self): return hash(self.lhs) + hash(self.rhs) + def __repr__(self): + return str(self.lhs) + str(self.op) + str(self.rhs) + class ArithmeticBinOp(BinOp): def __init__(self, lhs, rhs): @@ -600,6 +612,9 @@ def __eq__(self, other): def __hash__(self): return hash(self.array) + def __repr__(self): + return str(self.array) + "[" + ", ".join(str(i) for i in self.indices) + "]" + class Conditional(LExprOperator): precedence = PRECEDENCE.CONDITIONAL From c78b636c41f6bec53ff0ab0c75405860f0c5011d Mon Sep 17 00:00:00 2001 From: Matthew Scroggs Date: Wed, 6 Sep 2023 11:15:15 +0100 Subject: [PATCH 31/44] Run DOLFINx demos when PR is opened (#601) * run DOLFINx demos when PR is opened * Update .github/workflows/dolfin-tests.yml Co-authored-by: Chris Richardson --------- Co-authored-by: Chris Richardson --- .github/workflows/dolfin-tests.yml | 2 ++ 1 file changed, 2 
insertions(+) diff --git a/.github/workflows/dolfin-tests.yml b/.github/workflows/dolfin-tests.yml index 538490e3d..189989dcd 100644 --- a/.github/workflows/dolfin-tests.yml +++ b/.github/workflows/dolfin-tests.yml @@ -91,3 +91,5 @@ jobs: - name: Run DOLFINx Python unit tests run: python3 -m pytest -n auto dolfinx/python/test/unit + - name: Run DOLFINx Python demos + run: python3 -m pytest -n=2 -m serial dolfinx/python/demo/test.py From fecdd00f7a3cff29868457913a067b87841e5f50 Mon Sep 17 00:00:00 2001 From: Matthew Scroggs Date: Wed, 6 Sep 2023 11:42:18 +0100 Subject: [PATCH 32/44] Fix #602 (#604) * fix #602 * mypy and 0 * parenthesis might be needed here? * less hacky * fix earlier in lnodes --- ffcx/codegeneration/lnodes.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/ffcx/codegeneration/lnodes.py b/ffcx/codegeneration/lnodes.py index 4b15546e5..715f50ad9 100644 --- a/ffcx/codegeneration/lnodes.py +++ b/ffcx/codegeneration/lnodes.py @@ -831,6 +831,14 @@ def as_statement(node): def _math_function(op, *args): + name = op._ufl_handler_name_ + dtype = args[0].dtype + if name in ("conj", "real") and dtype == DataType.REAL: + assert len(args) == 1 + return args[0] + if name == "imag" and dtype == DataType.REAL: + assert len(args) == 1 + return LiteralFloat(0.0) return MathFunction(op._ufl_handler_name_, args) From df5026b47b6b8530e137b133106c7b4014ca42a6 Mon Sep 17 00:00:00 2001 From: Chris Richardson Date: Wed, 6 Sep 2023 12:40:09 +0100 Subject: [PATCH 33/44] Remove unused stuff (#603) * Remove unused options * Minor tidy up * Remove moved code from naming.py * Fix tests --- ffcx/codegeneration/C/expressions.py | 2 +- ffcx/codegeneration/C/integrals.py | 12 +---------- ffcx/codegeneration/lnodes.py | 4 ++-- ffcx/naming.py | 30 ---------------------------- ffcx/options.py | 10 +--------- test/test_add_mode.py | 2 +- test/test_jit_expression.py | 2 +- test/test_jit_forms.py | 2 +- 8 files changed, 8 insertions(+), 56 deletions(-) diff --git a/ffcx/codegeneration/C/expressions.py b/ffcx/codegeneration/C/expressions.py index 530c0ab2a..486fbc538 100644 --- a/ffcx/codegeneration/C/expressions.py +++ b/ffcx/codegeneration/C/expressions.py @@ -10,7 +10,7 @@ from ffcx.codegeneration.expression_generator import ExpressionGenerator from ffcx.codegeneration.backend import FFCXBackend from ffcx.codegeneration.C.c_implementation import CFormatter -from ffcx.naming import cdtype_to_numpy, scalar_to_value_type +from ffcx.codegeneration.utils import cdtype_to_numpy, scalar_to_value_type logger = logging.getLogger("ffcx") diff --git a/ffcx/codegeneration/C/integrals.py b/ffcx/codegeneration/C/integrals.py index 386e3ed40..5cf70d487 100644 --- a/ffcx/codegeneration/C/integrals.py +++ b/ffcx/codegeneration/C/integrals.py @@ -10,7 +10,7 @@ from ffcx.codegeneration.C import integrals_template as ufcx_integrals from ffcx.codegeneration.backend import FFCXBackend from ffcx.codegeneration.C.c_implementation import CFormatter -from ffcx.naming import cdtype_to_numpy, scalar_to_value_type +from ffcx.codegeneration.utils import cdtype_to_numpy, scalar_to_value_type logger = logging.getLogger("ffcx") @@ -41,13 +41,6 @@ def generator(ir, options): # Generate generic FFCx code snippets and add specific parts code = {} - code["class_type"] = ir.integral_type + "_integral" - code["name"] = ir.name - code["members"] = "" - code["constructor"] = "" - code["constructor_arguments"] = "" - code["initializer_list"] = "" - code["destructor"] = "" if len(ir.enabled_coefficients) > 0: values = ", ".join("1" if i else 
"0" for i in ir.enabled_coefficients) @@ -61,9 +54,6 @@ def generator(ir, options): code["additional_includes_set"] = set() # FIXME: Get this out of code[] code["tabulate_tensor"] = body - if options["tabulate_tensor_void"]: - code["tabulate_tensor"] = "" - implementation = ufcx_integrals.factory.format( factory_name=factory_name, enabled_coefficients=code["enabled_coefficients"], diff --git a/ffcx/codegeneration/lnodes.py b/ffcx/codegeneration/lnodes.py index 715f50ad9..af6285ae9 100644 --- a/ffcx/codegeneration/lnodes.py +++ b/ffcx/codegeneration/lnodes.py @@ -766,7 +766,7 @@ def __init__(self, symbol, sizes=None, values=None, const=False): self.const = const def __eq__(self, other): - attributes = ("typename", "symbol", "sizes", "padlen", "values") + attributes = ("typename", "symbol", "sizes", "values") return isinstance(other, type(self)) and all( getattr(self, name) == getattr(self, name) for name in attributes ) @@ -839,7 +839,7 @@ def _math_function(op, *args): if name == "imag" and dtype == DataType.REAL: assert len(args) == 1 return LiteralFloat(0.0) - return MathFunction(op._ufl_handler_name_, args) + return MathFunction(name, args) # Lookup table for handler to call when the ufl_to_lnodes method (below) is diff --git a/ffcx/naming.py b/ffcx/naming.py index a1c260e0f..c3bb9738b 100644 --- a/ffcx/naming.py +++ b/ffcx/naming.py @@ -101,33 +101,3 @@ def expression_name(expression, prefix): assert isinstance(expression[0], ufl.core.expr.Expr) sig = compute_signature([expression], prefix) return f"expression_{sig}" - - -def cdtype_to_numpy(cdtype: str): - """Map a C data type string NumPy datatype string.""" - if cdtype == "double": - return "float64" - elif cdtype == "double _Complex": - return "complex128" - elif cdtype == "float": - return "float32" - elif cdtype == "float _Complex": - return "complex64" - elif cdtype == "long double": - return "longdouble" - else: - raise RuntimeError(f"Unknown NumPy type for: {cdtype}") - - -def scalar_to_value_type(scalar_type: str) -> str: - """The C value type associated with a C scalar type. - - Args: - scalar_type: A C type. - - Returns: - The value type associated with ``scalar_type``. E.g., if - ``scalar_type`` is ``float _Complex`` the return value is 'float'. - - """ - return scalar_type.replace(' _Complex', '') diff --git a/ffcx/options.py b/ffcx/options.py index 3be3cad2e..ab6b01e0e 100644 --- a/ffcx/options.py +++ b/ffcx/options.py @@ -21,18 +21,10 @@ "scalar_type": ("double", """Scalar type used in generated code. Any of real or complex C floating-point types, e.g. float, double, float _Complex, double _Complex, ..."""), - "tabulate_tensor_void": - (False, "True to generate empty tabulation kernels."), "table_rtol": (1e-6, "Relative precision to use when comparing finite element table values for table reuse."), "table_atol": (1e-9, "Absolute precision to use when comparing finite element table values for reuse."), - "assume_aligned": - (-1, """Assumes alignment (in bytes) of pointers to tabulated tensor, coefficients and constants array. - This value must be compatible with alignment of data structures allocated outside FFC. - (-1 means no alignment assumed, safe option)"""), - "padlen": - (1, "Pads every declared array in tabulation kernel such that its last dimension is divisible by given value."), "verbosity": (30, "Logger verbosity. Follows standard logging library levels, i.e. 
INFO=20, DEBUG=10, etc.") } @@ -90,7 +82,7 @@ def get_options(priority_options: Optional[dict] = None) -> dict: Example `ffcx_options.json` file: - { "assume_aligned": 32, "epsilon": 1e-7 } + { "epsilon": 1e-7 } """ options: Dict[str, Any] = {} diff --git a/test/test_add_mode.py b/test/test_add_mode.py index 87d159a78..5582d261c 100644 --- a/test/test_add_mode.py +++ b/test/test_add_mode.py @@ -10,7 +10,7 @@ import ffcx.codegeneration.jit import basix.ufl import ufl -from ffcx.naming import cdtype_to_numpy, scalar_to_value_type +from ffcx.codegeneration.utils import cdtype_to_numpy, scalar_to_value_type @pytest.mark.parametrize("mode", diff --git a/test/test_jit_expression.py b/test/test_jit_expression.py index 5673f6160..d3ab21494 100644 --- a/test/test_jit_expression.py +++ b/test/test_jit_expression.py @@ -12,7 +12,7 @@ import basix.ufl import ffcx.codegeneration.jit import ufl -from ffcx.naming import cdtype_to_numpy, scalar_to_value_type +from ffcx.codegeneration.utils import cdtype_to_numpy, scalar_to_value_type def float_to_type(name): diff --git a/test/test_jit_forms.py b/test/test_jit_forms.py index caa085d35..7ae89ec7c 100644 --- a/test/test_jit_forms.py +++ b/test/test_jit_forms.py @@ -12,7 +12,7 @@ import basix.ufl import ffcx.codegeneration.jit import ufl -from ffcx.naming import cdtype_to_numpy, scalar_to_value_type +from ffcx.codegeneration.utils import cdtype_to_numpy, scalar_to_value_type @pytest.mark.parametrize("mode,expected_result", [ From 5b72761f5b124ac0addb09da39645e8c4cb879fd Mon Sep 17 00:00:00 2001 From: Matthew Scroggs Date: Wed, 6 Sep 2023 15:27:08 +0100 Subject: [PATCH 34/44] Move QuadratureElement and RealElement implementations to Basix (#592) * replace QuadratureElement and RealElement classes with functions that give deprecation warnings * return * new QuadratureElement * use new basix functions * formatting * remove caching of deprecated function * unused import * formatting * main basix --------- Co-authored-by: Chris Richardson --- ffcx/analysis.py | 6 +- ffcx/element_interface.py | 393 ++------------------------------------ 2 files changed, 24 insertions(+), 375 deletions(-) diff --git a/ffcx/analysis.py b/ffcx/analysis.py index 0e55a6e19..dd9cf7aa5 100644 --- a/ffcx/analysis.py +++ b/ffcx/analysis.py @@ -20,7 +20,7 @@ import basix.ufl import ufl -from ffcx.element_interface import QuadratureElement, convert_element +from ffcx.element_interface import convert_element logger = logging.getLogger("ffcx") @@ -174,9 +174,9 @@ def _analyze_form(form: ufl.form.Form, options: typing.Dict) -> ufl.algorithms.f custom_q = None for e in form_data.unique_elements: e = convert_element(e) - if isinstance(e, QuadratureElement): + if e.has_custom_quadrature: if custom_q is None: - custom_q = e._points, e._weights + custom_q = e.custom_quadrature() else: assert np.allclose(e._points, custom_q[0]) assert np.allclose(e._weights, custom_q[1]) diff --git a/ffcx/element_interface.py b/ffcx/element_interface.py index 140a296b5..b4008d863 100644 --- a/ffcx/element_interface.py +++ b/ffcx/element_interface.py @@ -5,12 +5,8 @@ # SPDX-License-Identifier: LGPL-3.0-or-later """Finite element interface.""" -from __future__ import annotations - import typing import warnings -from functools import lru_cache - import basix import basix.ufl @@ -24,45 +20,9 @@ def convert_element(element: ufl.finiteelement.FiniteElementBase) -> basix.ufl._ if isinstance(element, basix.ufl._ElementBase): return element else: - return _cached_conversion(element) - - -@lru_cache() -def 
_cached_conversion(element: ufl.finiteelement.FiniteElementBase) -> basix.ufl._ElementBase: - """Create an FFCx element from a UFL element. - - Args: - element: A UFL finite element - - Returns: - A Basix finite element - """ - warnings.warn( - "Use of elements created by UFL is deprecated. You should create elements directly using Basix.", - DeprecationWarning) - - # Tackle compositional elements, e.g. VectorElement first, then elements - # implemented by FFCx, then finally elements convertible by Basix. - if hasattr(ufl, "VectorElement") and isinstance(element, ufl.VectorElement): - return basix.ufl.blocked_element( - _cached_conversion(element.sub_elements()[0]), shape=(element.num_sub_elements(), )) - elif hasattr(ufl, "TensorElement") and isinstance(element, ufl.TensorElement): - if len(element.symmetry()) == 0: - return basix.ufl.blocked_element(_cached_conversion(element.sub_elements()[0]), shape=element._value_shape) - else: - assert element.symmetry()[(1, 0)] == (0, 1) - return basix.ufl.blocked_element(_cached_conversion(element.sub_elements()[0]), - element._value_shape, symmetry=True) - elif hasattr(ufl, "MixedElement") and isinstance(element, ufl.MixedElement): - return basix.ufl.mixed_element([_cached_conversion(e) for e in element.sub_elements()]) - elif hasattr(ufl, "EnrichedElement") and isinstance(element, ufl.EnrichedElement): - return basix.ufl.enriched_element([_cached_conversion(e) for e in element._elements]) - elif element.family() == "Quadrature": - return QuadratureElement(element.cell().cellname(), element.value_shape(), scheme=element.quadrature_scheme(), - degree=element.degree()) - elif element.family() == "Real": - return RealElement(element) - else: + warnings.warn( + "Use of elements created by UFL is deprecated. You should create elements directly using Basix.", + DeprecationWarning) return basix.ufl.convert_ufl_element(element) @@ -99,334 +59,23 @@ def map_facet_points(points: npt.NDArray[np.float64], facet: int, cellname: str) for p in points], dtype=np.float64) -class QuadratureElement(basix.ufl._ElementBase): - """A quadrature element.""" - - _points: npt.NDArray[np.float64] - _weights: npt.NDArray[np.float64] - _entity_counts: typing.List[int] - _cellname: str - - def __init__(self, cellname: str, value_shape: typing.Tuple[int, ...], scheme: typing.Optional[str] = None, - degree: typing.Optional[int] = None, points: typing.Optional[npt.NDArray[np.float64]] = None, - weights: typing.Optional[npt.NDArray[np.float64]] = None, mapname: str = "identity"): - """Initialise the element.""" - if scheme is not None: - assert degree is not None - assert points is None - assert weights is None - repr = f"QuadratureElement({cellname}, {scheme}, {degree})" - self._points, self._weights = create_quadrature(cellname, degree, scheme, []) - else: - assert degree is None - assert points is not None - assert weights is not None - self._points = points - self._weights = weights - repr = f"QuadratureElement({cellname}, {points}, {weights})" - degree = len(points) - - self._cellname = cellname - basix_cell = basix.cell.string_to_type(cellname) - self._entity_counts = [len(i) for i in basix.topology(basix_cell)] - - super().__init__(repr, "quadrature element", cellname, value_shape, degree, mapname=mapname) - - def basix_sobolev_space(self): - """Return the underlying Sobolev space.""" - return basix.sobolev_spaces.L2 - - def __eq__(self, other) -> bool: - """Check if two elements are equal.""" - return isinstance(other, QuadratureElement) and np.allclose(self._points, 
other._points) and \ - np.allclose(self._weights, other._weights) - - def __hash__(self) -> int: - """Return a hash.""" - return super().__hash__() - - def tabulate(self, nderivs: int, points: npt.NDArray[np.float64]) -> npt.NDArray[np.float64]: - """Tabulate the basis functions of the element. - - Args: - nderivs: Number of derivatives to tabulate. - points: Points to tabulate at - - Returns: - Tabulated basis functions - """ - if nderivs > 0: - raise ValueError("Cannot take derivatives of Quadrature element.") - - if points.shape != self._points.shape: - raise ValueError("Mismatch of tabulation points and element points.") - tables = np.asarray([np.eye(points.shape[0], points.shape[0])]) - return tables - - def get_component_element(self, flat_component: int) -> typing.Tuple[basix.ufl._ElementBase, int, int]: - """Get element that represents a component of the element, and the offset and stride of the component. - - Args: - flat_component: The component - - Returns: - component element, offset of the component, stride of the component - """ - return self, 0, 1 - - @property - def ufcx_element_type(self) -> str: - """Element type.""" - return "ufcx_quadrature_element" - - @property - def dim(self) -> int: - """Number of DOFs the element has.""" - return self._points.shape[0] - - @property - def num_entity_dofs(self) -> typing.List[typing.List[int]]: - """Number of DOFs associated with each entity.""" - dofs = [] - for d in self._entity_counts[:-1]: - dofs += [[0] * d] - - dofs += [[self.dim]] - return dofs - - @property - def entity_dofs(self) -> typing.List[typing.List[typing.List[int]]]: - """DOF numbers associated with each entity.""" - start_dof = 0 - entity_dofs = [] - for i in self.num_entity_dofs: - dofs_list = [] - for j in i: - dofs_list.append([start_dof + k for k in range(j)]) - start_dof += j - entity_dofs.append(dofs_list) - return entity_dofs - - @property - def num_entity_closure_dofs(self) -> typing.List[typing.List[int]]: - """Number of DOFs associated with the closure of each entity.""" - return self.num_entity_dofs - - @property - def entity_closure_dofs(self) -> typing.List[typing.List[typing.List[int]]]: - """DOF numbers associated with the closure of each entity.""" - return self.entity_dofs - - @property - def num_global_support_dofs(self) -> int: - """Get the number of global support DOFs.""" - return 0 - - @property - def reference_topology(self) -> typing.List[typing.List[typing.List[int]]]: - """Topology of the reference element.""" - raise NotImplementedError() - - @property - def reference_geometry(self) -> npt.NDArray[np.float64]: - """Geometry of the reference element.""" - raise NotImplementedError() - - @property - def family_name(self) -> str: - """Family name of the element.""" - return "quadrature" - - @property - def lagrange_variant(self) -> typing.Union[basix.LagrangeVariant, None]: - """Basix Lagrange variant used to initialise the element.""" - return None - - @property - def dpc_variant(self) -> typing.Union[basix.DPCVariant, None]: - """Basix DPC variant used to initialise the element.""" - return None - - @property - def element_family(self) -> typing.Union[basix.ElementFamily, None]: - """Basix element family used to initialise the element.""" - return None - - @property - def cell_type(self) -> basix.CellType: - """Basix cell type used to initialise the element.""" - return basix.cell.string_to_type(self._cellname) - - @property - def discontinuous(self) -> bool: - """True if the discontinuous version of the element is used.""" - return False 
- - @property - def map_type(self) -> basix.MapType: - """The Basix map type.""" - return basix.MapType.identity - - @property - def polyset_type(self) -> basix.PolysetType: - """The polyset type of the element.""" - raise NotImplementedError() - - -class RealElement(basix.ufl._ElementBase): - """A real element.""" - - _family_name: str - _cellname: str - _entity_counts: typing.List[int] - - def __init__(self, element: ufl.finiteelement.FiniteElementBase): - """Initialise the element.""" - self._cellname = element.cell().cellname() - self._family_name = element.family() - tdim = element.cell().topological_dimension() - - self._entity_counts = [] - if tdim >= 1: - self._entity_counts.append(element.cell().num_vertices()) - if tdim >= 2: - self._entity_counts.append(element.cell().num_edges()) - if tdim >= 3: - self._entity_counts.append(element.cell().num_facets()) - self._entity_counts.append(1) - - super().__init__( - f"RealElement({element})", "real element", element.cell().cellname(), element.value_shape(), - element.degree()) - - def __eq__(self, other) -> bool: - """Check if two elements are equal.""" - return isinstance(other, RealElement) - - def __hash__(self) -> int: - """Return a hash.""" - return super().__hash__() - - def tabulate(self, nderivs: int, points: npt.NDArray[np.float64]) -> npt.NDArray[np.float64]: - """Tabulate the basis functions of the element. - - Args: - nderivs: Number of derivatives to tabulate. - points: Points to tabulate at - - Returns: - Tabulated basis functions - """ - out = np.zeros((nderivs + 1, len(points), 1)) - out[0, :] = 1. - return out - - def get_component_element(self, flat_component: int) -> typing.Tuple[basix.ufl._ElementBase, int, int]: - """Get element that represents a component of the element, and the offset and stride of the component. 
- - Args: - flat_component: The component - - Returns: - component element, offset of the component, stride of the component - - """ - assert flat_component < self.value_size - return self, 0, 1 - - @property - def ufcx_element_type(self) -> str: - """Element type.""" - return "ufcx_real_element" - - @property - def dim(self) -> int: - """Number of DOFs the element has.""" - return 0 - - @property - def num_entity_dofs(self) -> typing.List[typing.List[int]]: - """Number of DOFs associated with each entity.""" - dofs = [] - for d in self._entity_counts[:-1]: - dofs += [[0] * d] - - dofs += [[self.dim]] - return dofs - - @property - def entity_dofs(self) -> typing.List[typing.List[typing.List[int]]]: - """DOF numbers associated with each entity.""" - start_dof = 0 - entity_dofs = [] - for i in self.num_entity_dofs: - dofs_list = [] - for j in i: - dofs_list.append([start_dof + k for k in range(j)]) - start_dof += j - entity_dofs.append(dofs_list) - return entity_dofs - - @property - def num_entity_closure_dofs(self) -> typing.List[typing.List[int]]: - """Number of DOFs associated with the closure of each entity.""" - return self.num_entity_dofs - - @property - def entity_closure_dofs(self) -> typing.List[typing.List[typing.List[int]]]: - """DOF numbers associated with the closure of each entity.""" - return self.entity_dofs - - @property - def num_global_support_dofs(self) -> int: - """Get the number of global support DOFs.""" - return 1 - - @property - def reference_topology(self) -> typing.List[typing.List[typing.List[int]]]: - """Topology of the reference element.""" - raise NotImplementedError() - - @property - def reference_geometry(self) -> npt.NDArray[np.float64]: - """Geometry of the reference element.""" - raise NotImplementedError() - - @property - def family_name(self) -> str: - """Family name of the element.""" - return self._family_name - - @property - def lagrange_variant(self) -> typing.Union[basix.LagrangeVariant, None]: - """Basix Lagrange variant used to initialise the element.""" - return None - - @property - def dpc_variant(self) -> typing.Union[basix.DPCVariant, None]: - """Basix DPC variant used to initialise the element.""" - return None - - @property - def element_family(self) -> typing.Union[basix.ElementFamily, None]: - """Basix element family used to initialise the element.""" - return None - - @property - def cell_type(self) -> basix.CellType: - """Basix cell type used to initialise the element.""" - return basix.cell.string_to_type(self._cellname) - - @property - def discontinuous(self) -> bool: - """True if the discontinuous version of the element is used.""" - return False +# TODO: remove this deprecated function +def QuadratureElement( + cellname: str, value_shape: typing.Tuple[int, ...], scheme: typing.Optional[str] = None, + degree: typing.Optional[int] = None, points: typing.Optional[npt.NDArray[np.float64]] = None, + weights: typing.Optional[npt.NDArray[np.float64]] = None, mapname: str = "identity" +) -> basix.ufl._ElementBase: + warnings.warn( + "ffcx.element_interface.QuadratureElement is deprecated and will be removed after December 2023. 
" + "Use basix.ufl.quadrature_element instead.", DeprecationWarning) + return basix.ufl.quadrature_element( + cell=cellname, value_shape=value_shape, scheme=scheme, degree=degree, points=points, weights=weights, + mapname=mapname) - def basix_sobolev_space(self): - """Return the underlying Sobolev space.""" - return basix.sobolev_spaces.Hinf - @property - def map_type(self) -> basix.MapType: - """The Basix map type.""" - return basix.MapType.identity +# TODO: remove this deprecated function +def RealElement(element: ufl.finiteelement.FiniteElementBase) -> basix.ufl._ElementBase: + warnings.warn( + "ffcx.element_interface.RealElement is deprecated and will be removed after December 2023. " + "Use basix.ufl.real_element instead.", DeprecationWarning) + return basix.ufl.real_element(cell=element.cell().cellname(), value_shape=element.value_shape()) From 863350a8a1a720cf358ab5325e3ffd746972d917 Mon Sep 17 00:00:00 2001 From: Chris Richardson Date: Thu, 7 Sep 2023 14:41:31 +0100 Subject: [PATCH 35/44] Add file template (#606) --- ffcx/codegeneration/C/file.py | 48 ++++++++++++ ffcx/codegeneration/C/file_template.py | 45 +++++++++++ ffcx/codegeneration/codegeneration.py | 12 ++- ffcx/formatting.py | 104 +------------------------ 4 files changed, 103 insertions(+), 106 deletions(-) create mode 100644 ffcx/codegeneration/C/file.py create mode 100644 ffcx/codegeneration/C/file_template.py diff --git a/ffcx/codegeneration/C/file.py b/ffcx/codegeneration/C/file.py new file mode 100644 index 000000000..8e24157a5 --- /dev/null +++ b/ffcx/codegeneration/C/file.py @@ -0,0 +1,48 @@ +# Copyright (C) 2009-2018 Anders Logg, Martin Sandve Alnæs and Garth N. Wells +# +# This file is part of FFCx.(https://www.fenicsproject.org) +# +# SPDX-License-Identifier: LGPL-3.0-or-later +# +# Note: Most of the code in this file is a direct translation from the +# old implementation in FFC + +import logging +import pprint +import textwrap + +from ffcx.codegeneration.C import file_template +from ffcx import __version__ as FFCX_VERSION +from ffcx.codegeneration import __version__ as UFC_VERSION + + +logger = logging.getLogger("ffcx") + + +def generator(options): + """Generate UFC code for file output.""" + logger.info("Generating code for file") + + # Attributes + d = {"ffcx_version": FFCX_VERSION, "ufcx_version": UFC_VERSION} + d["options"] = textwrap.indent(pprint.pformat(options), "// ") + extra_c_includes = [] + if "_Complex" in options["scalar_type"]: + extra_c_includes += ["complex.h"] + d["extra_c_includes"] = "\n".join( + f"#include <{header}>" for header in extra_c_includes + ) + + # Format declaration code + code_pre = ( + file_template.declaration_pre.format_map(d), + file_template.implementation_pre.format_map(d), + ) + + # Format implementation code + code_post = ( + file_template.declaration_post.format_map(d), + file_template.implementation_post.format_map(d), + ) + + return code_pre, code_post diff --git a/ffcx/codegeneration/C/file_template.py b/ffcx/codegeneration/C/file_template.py new file mode 100644 index 000000000..0f6c983f4 --- /dev/null +++ b/ffcx/codegeneration/C/file_template.py @@ -0,0 +1,45 @@ +# Code generation format strings for UFC (Unified Form-assembly Code) +# This code is released into the public domain. +# +# The FEniCS Project (http://www.fenicsproject.org/) 2018. + +declaration_pre = """ +// This code conforms with the UFC specification version {ufcx_version} +// and was automatically generated by FFCx version {ffcx_version}. 
+// +// This code was generated with the following options: +// +{options} + +#pragma once +#include + +#ifdef __cplusplus +extern "C" {{ +#endif +""" + +declaration_post = """ +#ifdef __cplusplus +}} +#endif +""" + +implementation_pre = """ +// This code conforms with the UFC specification version {ufcx_version} +// and was automatically generated by FFCx version {ffcx_version}. +// +// This code was generated with the following options: +// +{options} + +#include +#include +#include +#include +#include +{extra_c_includes} + +""" + +implementation_post = "" diff --git a/ffcx/codegeneration/codegeneration.py b/ffcx/codegeneration/codegeneration.py index ece9f2d23..ad5c22764 100644 --- a/ffcx/codegeneration/codegeneration.py +++ b/ffcx/codegeneration/codegeneration.py @@ -20,6 +20,7 @@ generator as finite_element_generator from ffcx.codegeneration.C.form import generator as form_generator from ffcx.codegeneration.C.integrals import generator as integral_generator +from ffcx.codegeneration.C.file import generator as file_generator logger = logging.getLogger("ffcx") @@ -28,14 +29,17 @@ class CodeBlocks(typing.NamedTuple): """ Storage of code blocks of the form (declaration, implementation). - Blocks for elements, dofmaps, integrals, forms and expressions is stored + Blocks for elements, dofmaps, integrals, forms and expressions, + and start and end of file output """ + file_pre: typing.List[typing.Tuple[str, str]] elements: typing.List[typing.Tuple[str, str]] dofmaps: typing.List[typing.Tuple[str, str]] integrals: typing.List[typing.Tuple[str, str]] forms: typing.List[typing.Tuple[str, str]] expressions: typing.List[typing.Tuple[str, str]] + file_post: typing.List[typing.Tuple[str, str]] def generate_code(ir, options) -> CodeBlocks: @@ -50,5 +54,7 @@ def generate_code(ir, options) -> CodeBlocks: code_integrals = [integral_generator(integral_ir, options) for integral_ir in ir.integrals] code_forms = [form_generator(form_ir, options) for form_ir in ir.forms] code_expressions = [expression_generator(expression_ir, options) for expression_ir in ir.expressions] - return CodeBlocks(elements=code_finite_elements, dofmaps=code_dofmaps, - integrals=code_integrals, forms=code_forms, expressions=code_expressions) + code_file_pre, code_file_post = file_generator(options) + return CodeBlocks(file_pre=[code_file_pre], elements=code_finite_elements, dofmaps=code_dofmaps, + integrals=code_integrals, forms=code_forms, expressions=code_expressions, + file_post=[code_file_post]) diff --git a/ffcx/formatting.py b/ffcx/formatting.py index f9932aaae..87698160b 100644 --- a/ffcx/formatting.py +++ b/ffcx/formatting.py @@ -15,48 +15,9 @@ import logging import os -import pprint -import textwrap - -from ffcx import __version__ as FFCX_VERSION -from ffcx.codegeneration import __version__ as UFC_VERSION logger = logging.getLogger("ffcx") -FORMAT_TEMPLATE = { - "ufc comment": - """\ -// This code conforms with the UFC specification version {ufcx_version} -// and was automatically generated by FFCx version {ffcx_version}. -""", - "dolfinx comment": - """\ -// This code conforms with the UFC specification version {ufcx_version} -// and was automatically generated by FFCx version {ffcx_version}. -// -""", - "header_h": - """ -#pragma once - -""", - "header_c": - """ -""", -} - -c_extern_pre = """ -#ifdef __cplusplus -extern "C" { -#endif -""" - -c_extern_post = """ -#ifdef __cplusplus -} -#endif -""" - def format_code(code, options: dict): """Format given code in UFC format. 
Returns two strings with header and source file contents.""" @@ -64,34 +25,12 @@ def format_code(code, options: dict): logger.info("Compiler stage 5: Formatting code") logger.info(79 * "*") - # Generate code for comment at top of file - code_h_pre = _generate_comment(options) + "\n" - code_c_pre = _generate_comment(options) + "\n" - - # Generate code for header - code_h_pre += FORMAT_TEMPLATE["header_h"] - code_c_pre += FORMAT_TEMPLATE["header_c"] - - # Generate includes and add to preamble - includes_h, includes_c = _generate_includes(options) - code_h_pre += includes_h - code_c_pre += includes_c - - # Enclose header with 'extern "C"' - code_h_pre += c_extern_pre - code_h_post = c_extern_post - - code_h = "" code_c = "" - + code_h = "" for parts_code in code: code_h += "".join([c[0] for c in parts_code]) code_c += "".join([c[1] for c in parts_code]) - # Add headers to body - code_h = code_h_pre + code_h + code_h_post - code_c = code_c_pre + code_c - return code_h, code_c @@ -105,44 +44,3 @@ def _write_file(output, prefix, postfix, output_dir): filename = os.path.join(output_dir, prefix + postfix) with open(filename, "w") as hfile: hfile.write(output) - - -def _generate_comment(options): - """Generate code for comment on top of file.""" - # Generate top level comment - comment = FORMAT_TEMPLATE["ufc comment"].format(ffcx_version=FFCX_VERSION, ufcx_version=UFC_VERSION) - - # Add option information - comment += "//\n" - comment += "// This code was generated with the following options:\n" - comment += "//\n" - comment += textwrap.indent(pprint.pformat(options), "// ") - comment += "\n" - - return comment - - -def _generate_includes(options: dict): - - default_h_includes = [ - "#include ", - ] - - default_c_includes = [ - "#include ", # This should really be set by the backend - "#include ", # This should really be set by the backend - "#include ", # This should really be set by the backend - "#include ", # This should really be set by the backend - "#include " - ] - - if "_Complex" in options["scalar_type"]: - default_c_includes += ["#include "] - - s_h = set(default_h_includes) - s_c = set(default_c_includes) - - includes_h = "\n".join(sorted(s_h)) + "\n" if s_h else "" - includes_c = "\n".join(sorted(s_c)) + "\n" if s_c else "" - - return includes_h, includes_c From 019bdc7439b6817af2990c146fe7be12df66798f Mon Sep 17 00:00:00 2001 From: Chris Richardson Date: Fri, 8 Sep 2023 11:26:02 +0100 Subject: [PATCH 36/44] Remove precision option (#609) * Remove precision option * Put in 16sf precision --- ffcx/analysis.py | 25 ++++------------------- ffcx/codegeneration/C/c_implementation.py | 17 +++++---------- ffcx/codegeneration/C/integrals.py | 2 +- ffcx/ir/representation.py | 2 -- 4 files changed, 10 insertions(+), 36 deletions(-) diff --git a/ffcx/analysis.py b/ffcx/analysis.py index dd9cf7aa5..9419c90a5 100644 --- a/ffcx/analysis.py +++ b/ffcx/analysis.py @@ -181,8 +181,8 @@ def _analyze_form(form: ufl.form.Form, options: typing.Dict) -> ufl.algorithms.f assert np.allclose(e._points, custom_q[0]) assert np.allclose(e._weights, custom_q[1]) - # Determine unique quadrature degree, quadrature scheme and - # precision per each integral data + # Determine unique quadrature degree and quadrature scheme + # per each integral data for id, integral_data in enumerate(form_data.integral_data): # Iterate through groups of integral data. 
There is one integral # data for all integrals with same domain, itype, subdomain_id @@ -192,22 +192,6 @@ def _analyze_form(form: ufl.form.Form, options: typing.Dict) -> ufl.algorithms.f # all integrals in this integral data group, i.e. must be the # same for for the same (domain, itype, subdomain_id) - # Extract precision - p_default = -1 - precisions = set([integral.metadata().get("precision", p_default) - for integral in integral_data.integrals]) - precisions.discard(p_default) - - if len(precisions) == 1: - p = precisions.pop() - elif len(precisions) == 0: - # Default precision - p = None - else: - raise RuntimeError("Only one precision allowed within integrals grouped by subdomain.") - - integral_data.metadata["precision"] = p - qd_default = -1 qr_default = "default" @@ -228,12 +212,11 @@ def _analyze_form(form: ufl.form.Form, options: typing.Dict) -> ufl.algorithms.f logger.info(f"Integral {i}, integral group {id}:") logger.info(f"--- quadrature rule: {qr}") logger.info(f"--- quadrature degree: {qd}") - logger.info(f"--- precision: {p}") - metadata.update({"quadrature_degree": qd, "quadrature_rule": qr, "precision": p}) + metadata.update({"quadrature_degree": qd, "quadrature_rule": qr}) else: metadata.update({"quadrature_points": custom_q[0], "quadrature_weights": custom_q[1], - "quadrature_rule": "custom", "precision": p}) + "quadrature_rule": "custom"}) integral_data.integrals[i] = integral.reconstruct(metadata=metadata) diff --git a/ffcx/codegeneration/C/c_implementation.py b/ffcx/codegeneration/C/c_implementation.py index 27b64a024..0e265b8d8 100644 --- a/ffcx/codegeneration/C/c_implementation.py +++ b/ffcx/codegeneration/C/c_implementation.py @@ -6,8 +6,7 @@ import warnings import ffcx.codegeneration.lnodes as L -from ffcx.codegeneration.utils import scalar_to_value_type, cdtype_to_numpy -import numpy as np +from ffcx.codegeneration.utils import scalar_to_value_type math_table = { "double": { @@ -139,22 +138,16 @@ class CFormatter(object): - def __init__(self, scalar, precision=None) -> None: + def __init__(self, scalar) -> None: self.scalar_type = scalar self.real_type = scalar_to_value_type(scalar) - if precision is None: - np_type = cdtype_to_numpy(self.real_type) - self.precision = np.finfo(np_type).precision + 1 - else: - assert isinstance(precision, int) - self.precision = precision def _format_number(self, x): - p = self.precision + # Use 16sf for precision (good for float64 or less) if isinstance(x, complex): - return f"({x.real:.{p}}+I*{x.imag:.{p}})" + return f"({x.real:.16}+I*{x.imag:.16})" elif isinstance(x, float): - return f"{x:.{p}}" + return f"{x:.16}" return str(x) def _build_initializer_lists(self, values): diff --git a/ffcx/codegeneration/C/integrals.py b/ffcx/codegeneration/C/integrals.py index 5cf70d487..4536423c6 100644 --- a/ffcx/codegeneration/C/integrals.py +++ b/ffcx/codegeneration/C/integrals.py @@ -36,7 +36,7 @@ def generator(ir, options): parts = ig.generate() # Format code as string - CF = CFormatter(options["scalar_type"], ir.precision) + CF = CFormatter(options["scalar_type"]) body = CF.c_format(parts) # Generate generic FFCx code snippets and add specific parts diff --git a/ffcx/ir/representation.py b/ffcx/ir/representation.py index 5513bdc3e..fd8158047 100644 --- a/ffcx/ir/representation.py +++ b/ffcx/ir/representation.py @@ -135,7 +135,6 @@ class IntegralIR(typing.NamedTuple): unique_table_types: typing.Dict[str, str] integrand: typing.Dict[QuadratureRule, dict] name: str - precision: int needs_facet_permutations: bool coordinate_element: str 
@@ -486,7 +485,6 @@ def _compute_integral_ir(form_data, form_index, element_numbers, integral_names, _offset += np.prod(constant.ufl_shape, dtype=int) ir["original_constant_offsets"] = original_constant_offsets - ir["precision"] = itg_data.metadata["precision"] # Create map from number of quadrature points -> integrand integrands = {rule: integral.integrand() for rule, integral in sorted_integrals.items()} From de8487984786151bdf1146facccb04f10d641ad6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=B8rgen=20Schartum=20Dokken?= Date: Tue, 12 Sep 2023 15:40:53 +0200 Subject: [PATCH 37/44] Remove generated kernel duplication (#589) * Try repeating fix from prior to https://github.com/FEniCS/ffcx/pull/587 * Flake8 * Update CI * Fix integral count * Add test that fails on main * Fix flake8 * Fix order of offsets. This behavior is very implicit. Introduced in: https://github.com/FEniCS/dolfinx/pull/2744/ * Flatten intermediate representation of integral_names and subdomain_ids following reviewer comments. * Flake8 * Zip strict is only in python >=3.10 * Update .github/workflows/dolfin-tests.yml * Update .github/workflows/pythonapp.yml --------- Co-authored-by: Matthew Scroggs --- ffcx/codegeneration/C/form.py | 18 +++++++++++++--- ffcx/ir/representation.py | 39 +++++++++++++---------------------- test/test_jit_forms.py | 26 +++++++++++++++++++++++ 3 files changed, 55 insertions(+), 28 deletions(-) diff --git a/ffcx/codegeneration/C/form.py b/ffcx/codegeneration/C/form.py index 275fe0d8a..30c72091a 100644 --- a/ffcx/codegeneration/C/form.py +++ b/ffcx/codegeneration/C/form.py @@ -3,12 +3,16 @@ # This file is part of FFCx.(https://www.fenicsproject.org) # # SPDX-License-Identifier: LGPL-3.0-or-later - +# +# Modified by Chris Richardson and Jørgen S. Dokken 2023 +# # Note: Most of the code in this file is a direct translation from the # old implementation in FFC import logging +import numpy + from ffcx.codegeneration.C import form_template logger = logging.getLogger("ffcx") @@ -81,8 +85,16 @@ def generator(ir, options): integral_offsets = [0] # Note: the order of this list is defined by the enum ufcx_integral_type in ufcx.h for itg_type in ("cell", "exterior_facet", "interior_facet"): - integrals += [f"&{itg}" for itg in ir.integral_names[itg_type]] - integral_ids += ir.subdomain_ids[itg_type] + unsorted_integrals = [] + unsorted_ids = [] + for name, id in zip(ir.integral_names[itg_type], ir.subdomain_ids[itg_type]): + unsorted_integrals += [f"&{name}"] + unsorted_ids += [id] + + id_sort = numpy.argsort(unsorted_ids) + integrals += [unsorted_integrals[i] for i in id_sort] + integral_ids += [unsorted_ids[i] for i in id_sort] + integral_offsets.append(len(integrals)) if len(integrals) > 0: diff --git a/ffcx/ir/representation.py b/ffcx/ir/representation.py index fd8158047..fc425391e 100644 --- a/ffcx/ir/representation.py +++ b/ffcx/ir/representation.py @@ -18,7 +18,6 @@ import itertools import logging -import numbers import typing import warnings @@ -560,34 +559,24 @@ def _compute_form_ir(form_data, form_id, prefix, form_names, integral_names, ele ir["name_from_uflfile"] = f"form_{prefix}_{form_name}" # Store names of integrals and subdomain_ids for this form, grouped - # by integral types Since form points to all integrals it contains, + # by integral types since form points to all integrals it contains, # it has to know their names for codegen phase ir["integral_names"] = {} ir["subdomain_ids"] = {} ufcx_integral_types = ("cell", "exterior_facet", "interior_facet") - for integral_type in 
ufcx_integral_types: - ir["subdomain_ids"][integral_type] = [] - ir["integral_names"][integral_type] = [] - - for itg_index, itg_data in enumerate(form_data.integral_data): - if (itg_data.integral_type == integral_type): - if itg_data.subdomain_id == "otherwise": - # UFL is using "otherwise" for default integrals - # (over whole mesh) but FFCx needs integers, so - # otherwise = -1 - if len(ir["subdomain_ids"][integral_type]) > 0 and ir["subdomain_ids"][integral_type][0] == -1: - raise ValueError("Only one default ('otherwise') integral allowed.") - - # Put default integral as first - ir["subdomain_ids"][integral_type] = [-1] + ir["subdomain_ids"][integral_type] - ir["integral_names"][integral_type] = [ - integral_names[(form_id, itg_index)]] + ir["integral_names"][integral_type] - elif itg_data.subdomain_id < 0: - raise ValueError("Integral subdomain ID must be non-negative.") - else: - assert isinstance(itg_data.subdomain_id, numbers.Integral) - ir["subdomain_ids"][integral_type] += [itg_data.subdomain_id] - ir["integral_names"][integral_type] += [integral_names[(form_id, itg_index)]] + ir["subdomain_ids"] = {itg_type: [] for itg_type in ufcx_integral_types} + ir["integral_names"] = {itg_type: [] for itg_type in ufcx_integral_types} + for itg_index, itg_data in enumerate(form_data.integral_data): + # UFL is using "otherwise" for default integrals (over whole mesh) + # but FFCx needs integers, so otherwise = -1 + integral_type = itg_data.integral_type + subdomain_ids = [sid if sid != "otherwise" else -1 for sid in itg_data.subdomain_id] + + if min(subdomain_ids) < -1: + raise ValueError("Integral subdomain IDs must be non-negative.") + ir["subdomain_ids"][integral_type] += subdomain_ids + for _ in range(len(subdomain_ids)): + ir["integral_names"][integral_type] += [integral_names[(form_id, itg_index)]] return FormIR(**ir) diff --git a/test/test_jit_forms.py b/test/test_jit_forms.py index 7ae89ec7c..5ace4f7a7 100644 --- a/test/test_jit_forms.py +++ b/test/test_jit_forms.py @@ -879,3 +879,29 @@ def test_manifold_derivatives(compile_args): ffi.cast('uint8_t *', perm.ctypes.data)) assert np.isclose(J[0], 0.0) + + +def test_integral_grouping(compile_args): + """ + We group integrals with common integrands to avoid duplicated integration kernels. + This means that `inner(u, v)*dx((1,2,3)) + inner(grad(u), grad(v))*dx(2) + inner(u,v)*dx` + is grouped as + 1. `inner(u,v)*dx(("everywhere", 1, 3))` + 2. 
`(inner(grad(u), grad(v)) + inner(u, v))*dx(2)` + Each of the forms has one generated `tabulate_tensor_*` function, which is referred to multiple times in + `integrals_` and `integral_ids_` + """ + mesh = ufl.Mesh(ufl.VectorElement("Lagrange", ufl.triangle, 1)) + V = ufl.FunctionSpace(mesh, ufl.FiniteElement("Lagrange", ufl.triangle, 1)) + u = ufl.TrialFunction(V) + v = ufl.TestFunction(V) + a = ufl.inner(u, v) * ufl.dx((1, 2, 3)) + ufl.inner(ufl.grad(u), ufl.grad(v)) * ufl.dx(2) + ufl.inner(u, v) * ufl.dx + compiled_forms, module, _ = ffcx.codegeneration.jit.compile_forms( + [a], cffi_extra_compile_args=compile_args) + # NOTE: This assumes that the first integral type is cell integrals, see UFCx.h + cell = module.lib.cell + num_integrals = compiled_forms[0].form_integral_offsets[cell + 1] - compiled_forms[0].form_integral_offsets[cell] + assert num_integrals == 4 + unique_integrals = set([compiled_forms[0].form_integrals[compiled_forms[0].form_integral_offsets[cell] + i] + for i in range(num_integrals)]) + assert len(unique_integrals) == 2 From 3425b7feb19e52a5cf1823b62bc2cbf57ce411cd Mon Sep 17 00:00:00 2001 From: Chris Richardson Date: Tue, 12 Sep 2023 18:31:37 +0100 Subject: [PATCH 38/44] Add `MultiIndex` to LNodes and remove `FlattenedArray` (#610) * Updates to MultiIndex * Add a simple formatting rule in C * Replace FlattenedArray * Minor fixes * minor fix to pass the tests * hopefully fix some tests * update quadrature rule * Fix MultiIndex when zero size * Minor tweak * Some fixes * Use MultiIndex in expression * Remove FlattenedArray * Minor edit --------- Co-authored-by: IgorBaratta --- ffcx/codegeneration/C/c_implementation.py | 30 ++-- ffcx/codegeneration/expression_generator.py | 13 +- ffcx/codegeneration/integral_generator.py | 9 +- ffcx/codegeneration/lnodes.py | 184 +++++++++++--------- ffcx/codegeneration/symbols.py | 2 +- 5 files changed, 130 insertions(+), 108 deletions(-) diff --git a/ffcx/codegeneration/C/c_implementation.py b/ffcx/codegeneration/C/c_implementation.py index 0e265b8d8..c95641517 100644 --- a/ffcx/codegeneration/C/c_implementation.py +++ b/ffcx/codegeneration/C/c_implementation.py @@ -142,6 +142,15 @@ def __init__(self, scalar) -> None: self.scalar_type = scalar self.real_type = scalar_to_value_type(scalar) + def _dtype_to_name(self, dtype): + if dtype == L.DataType.SCALAR: + return self.scalar_type + if dtype == L.DataType.REAL: + return self.real_type + if dtype == L.DataType.INT: + return "int" + raise ValueError(f"Invalid dtype: {dtype}") + def _format_number(self, x): # Use 16sf for precision (good for float64 or less) if isinstance(x, complex): @@ -167,16 +176,7 @@ def format_comment(self, c) -> str: def format_array_decl(self, arr) -> str: dtype = arr.symbol.dtype - assert dtype is not None - - if dtype == L.DataType.SCALAR: - typename = self.scalar_type - elif dtype == L.DataType.REAL: - typename = self.real_type - elif dtype == L.DataType.INT: - typename = "int" - else: - raise ValueError(f"Invalid dtype: {dtype}") + typename = self._dtype_to_name(dtype) symbol = self.c_format(arr.symbol) dims = "".join([f"[{i}]" for i in arr.sizes]) @@ -196,11 +196,7 @@ def format_array_access(self, arr) -> str: def format_variable_decl(self, v) -> str: val = self.c_format(v.value) symbol = self.c_format(v.symbol) - assert v.symbol.dtype - if v.symbol.dtype == L.DataType.SCALAR: - typename = self.scalar_type - elif v.symbol.dtype == L.DataType.REAL: - typename = self.real_type + typename = self._dtype_to_name(v.symbol.dtype) return f"{typename} {symbol} = 
{val};\n" def format_nary_op(self, oper) -> str: @@ -283,6 +279,9 @@ def format_conditional(self, s) -> str: def format_symbol(self, s) -> str: return f"{s.name}" + def format_multi_index(self, mi) -> str: + return self.c_format(mi.global_index) + def format_math_function(self, c) -> str: # Get a table of functions for this type, if available arg_type = self.scalar_type @@ -304,6 +303,7 @@ def format_math_function(self, c) -> str: "Comment": format_comment, "ArrayDecl": format_array_decl, "ArrayAccess": format_array_access, + "MultiIndex": format_multi_index, "VariableDecl": format_variable_decl, "ForRange": format_for_range, "Statement": format_statement, diff --git a/ffcx/codegeneration/expression_generator.py b/ffcx/codegeneration/expression_generator.py index c0a81a36e..f00b36a8c 100644 --- a/ffcx/codegeneration/expression_generator.py +++ b/ffcx/codegeneration/expression_generator.py @@ -191,10 +191,8 @@ def generate_block_parts(self, blockmap, blockdata): components = ufl.product(self.ir.expression_shape) num_points = self.quadrature_rule.points.shape[0] - A_shape = self.ir.tensor_shape - Asym = self.backend.symbols.element_tensor() - A = L.FlattenedArray(Asym, dims=[num_points, components] + A_shape) - + A_shape = [num_points, components] + self.ir.tensor_shape + A = self.backend.symbols.element_tensor() iq = self.backend.symbols.quadrature_loop_index() # Check if DOFs in dofrange are equally spaced. @@ -218,7 +216,8 @@ def generate_block_parts(self, blockmap, blockdata): f = self.get_var(F.nodes[fi_ci[0]]["expression"]) arg_factors = self.get_arg_factors(blockdata, block_rank, B_indices) Brhs = L.float_product([f] + arg_factors) - quadparts.append(L.AssignAdd(A[(A_indices[0], fi_ci[1]) + A_indices[1:]], Brhs)) + multi_index = L.MultiIndex([A_indices[0], fi_ci[1]] + A_indices[1:], A_shape) + quadparts.append(L.AssignAdd(A[multi_index], Brhs)) else: # Prepend dimensions of dofmap block with free index @@ -250,7 +249,9 @@ def generate_block_parts(self, blockmap, blockdata): for fi_ci in blockdata.factor_indices_comp_indices: f = self.get_var(F.nodes[fi_ci[0]]["expression"]) Brhs = L.float_product([f] + arg_factors) - body.append(L.AssignAdd(A[(A_indices[0], fi_ci[1]) + A_indices[1:]], Brhs)) + indices = [A_indices[0], fi_ci[1]] + list(A_indices[1:]) + multi_index = L.MultiIndex(indices, A_shape) + body.append(L.AssignAdd(A[multi_index], Brhs)) for i in reversed(range(block_rank)): body = L.ForRange( diff --git a/ffcx/codegeneration/integral_generator.py b/ffcx/codegeneration/integral_generator.py index 2713f92ad..25d1d23c8 100644 --- a/ffcx/codegeneration/integral_generator.py +++ b/ffcx/codegeneration/integral_generator.py @@ -492,10 +492,6 @@ def generate_block_parts(self, quadrature_rule: QuadratureRule, blockmap: Tuple, quadparts.append(L.VariableDecl(fw, fw_rhs)) assert not blockdata.transposed, "Not handled yet" - A_shape = self.ir.tensor_shape - - Asym = self.backend.symbols.element_tensor() - A = L.FlattenedArray(Asym, dims=A_shape) # Fetch code to access modified arguments arg_factors = self.get_arg_factors(blockdata, block_rank, quadrature_rule, iq, B_indices) @@ -563,8 +559,11 @@ def generate_block_parts(self, quadrature_rule: QuadratureRule, blockmap: Tuple, body: List[LNode] = [] + A = self.backend.symbols.element_tensor() + A_shape = self.ir.tensor_shape for indices in keep: - body.append(L.AssignAdd(A[indices], L.Sum(keep[indices]))) + multi_index = L.MultiIndex(list(indices), A_shape) + body.append(L.AssignAdd(A[multi_index], L.Sum(keep[indices]))) for i in 
reversed(range(block_rank)): body = [L.ForRange(B_indices[i], 0, blockdims[i], body=body)] diff --git a/ffcx/codegeneration/lnodes.py b/ffcx/codegeneration/lnodes.py index af6285ae9..317dc8690 100644 --- a/ffcx/codegeneration/lnodes.py +++ b/ffcx/codegeneration/lnodes.py @@ -101,10 +101,13 @@ class DataType(Enum): REAL = 0 SCALAR = 1 INT = 2 + NONE = 3 def merge_dtypes(dtype0, dtype1): # Promote dtype to SCALAR or REAL if either argument matches + if DataType.NONE in (dtype0, dtype1): + raise ValueError(f"Invalid DataType in LNodes {dtype0, dtype1}") if DataType.SCALAR in (dtype0, dtype1): return DataType.SCALAR elif DataType.REAL in (dtype0, dtype1): @@ -132,6 +135,8 @@ class LExpr(LNode): All subtypes should define a 'precedence' class attribute. """ + dtype = DataType.NONE + def __getitem__(self, indices): return ArrayAccess(self, indices) @@ -304,7 +309,7 @@ class Symbol(LExprTerminal): precedence = PRECEDENCE.SYMBOL - def __init__(self, name, dtype=None): + def __init__(self, name: str, dtype): assert isinstance(name, str) self.name = name self.dtype = dtype @@ -319,6 +324,67 @@ def __repr__(self): return self.name +class MultiIndex(LExpr): + """A multi-index for accessing tensors flattened in memory.""" + + def __init__(self, symbols: list, sizes: list): + self.dtype = DataType.INT + self.sizes = sizes + self.symbols = [as_lexpr(sym) for sym in symbols] + for sym in self.symbols: + assert sym.dtype == DataType.INT + + dim = len(sizes) + if dim == 0: + self.global_index: LExpr = LiteralInt(0) + else: + stride = [np.prod(sizes[i:]) for i in range(dim)] + [LiteralInt(1)] + self.global_index = Sum(n * sym for n, sym in zip(stride[1:], symbols)) + + def size(self): + return np.prod(self.sizes) + + def local_index(self, idx): + assert idx < len(self.symbols) + return self.symbols[idx] + + def intersection(self, other): + symbols = [] + sizes = [] + for (sym, size) in zip(self.symbols, self.sizes): + if sym in other.symbols: + i = other.symbols.index(sym) + assert other.sizes[i] == size + symbols.append(sym) + sizes.append(size) + return MultiIndex(symbols, sizes) + + def union(self, other): + # NB result may depend on order a.union(b) != b.union(a) + symbols = self.symbols.copy() + sizes = self.sizes.copy() + for (sym, size) in zip(other.symbols, other.sizes): + if sym in symbols: + i = symbols.index(sym) + assert sizes[i] == size + else: + symbols.append(sym) + sizes.append(size) + return MultiIndex(symbols, sizes) + + def difference(self, other): + symbols = [] + sizes = [] + for (idx, size) in zip(self.symbols, self.sizes): + if idx not in other.symbols: + symbols.append(idx) + sizes.append(size) + return MultiIndex(symbols, sizes) + + def __hash__(self): + return hash(self.global_idx) + + class PrefixUnaryOp(LExprOperator): """Base class for unary operators.""" @@ -345,7 +411,7 @@ def __hash__(self): return hash(self.lhs) + hash(self.rhs) def __repr__(self): - return str(self.lhs) + str(self.op) + str(self.rhs) + return f"({self.lhs} {self.op} {self.rhs})" class ArithmeticBinOp(BinOp): @@ -358,6 +424,8 @@ def __init__(self, lhs, rhs): class NaryOp(LExprOperator): """Base class for special n-ary operators.""" + op = "" + def __init__(self, args): self.args = [as_lexpr(arg) for arg in args] @@ -368,6 +436,9 @@ def __eq__(self, other): and all(a == b for a, b in zip(self.args, other.args)) ) + def __repr__(self) -> str: + return f"{self.op} ".join(f"{i} " for i in self.args) + class Neg(PrefixUnaryOp): precedence = PRECEDENCE.NEG @@ -511,55 +582,6 @@ class AssignDiv(AssignOp): op = "/=" 
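# A minimal sketch of the row-major flattening that the MultiIndex added above
# builds symbolically: the stride of axis i is the product of the trailing sizes,
# and the flat offset is the stride-weighted sum of the per-axis indices. The
# helper below is hypothetical and shown only for clarity, not part of lnodes.
import numpy as np

def flat_index(indices, sizes):
    # Row-major (C-order) strides, as in MultiIndex.global_index
    strides = [int(np.prod(sizes[i + 1:])) for i in range(len(sizes))]
    return sum(s * i for s, i in zip(strides, indices))

# For a 5 x 16 block, entry (1, 2) sits at offset 1*16 + 2 = 18
assert flat_index((1, 2), (5, 16)) == 18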
-class FlattenedArray(object): - """Syntax carrying object only, will get translated on __getitem__ to ArrayAccess.""" - - def __init__(self, array, dims=None): - assert dims is not None - assert isinstance(array, Symbol) - self.array = array - - # Allow expressions or literals as strides or dims and offset - assert isinstance(dims, (list, tuple)) - dims = tuple(as_lexpr(i) for i in dims) - self.dims = dims - n = len(dims) - literal_one = LiteralInt(1) - strides = [literal_one] * n - for i in range(n - 2, -1, -1): - s = strides[i + 1] - d = dims[i + 1] - if d == literal_one: - strides[i] = s - elif s == literal_one: - strides[i] = d - else: - strides[i] = d * s - - self.strides = strides - - def __getitem__(self, indices): - if not isinstance(indices, (list, tuple)): - indices = (indices,) - n = len(indices) - if n == 0: - # Handle scalar case, allowing dims=() and indices=() for A[0] - if len(self.strides) != 0: - raise ValueError("Empty indices for nonscalar array.") - flat = LiteralInt(0) - else: - i, s = (indices[0], self.strides[0]) - literal_one = LiteralInt(1) - flat = i if s == literal_one else s * i - for i, s in zip(indices[1:n], self.strides[1:n]): - flat = flat + s * i - # Delay applying ArrayAccess until we have all indices - if n == len(self.strides): - return ArrayAccess(self.array, flat) - else: - return FlattenedArray(self.array, strides=self.strides[n:], offset=flat) - - class ArrayAccess(LExprOperator): precedence = PRECEDENCE.SUBSCRIPT @@ -662,6 +684,36 @@ def __eq__(self, other): return isinstance(other, type(self)) and self.expr == other.expr +def as_statement(node): + """Perform type checking on node and wrap in a suitable statement type if necessary.""" + if isinstance(node, StatementList) and len(node.statements) == 1: + # Cleans up the expression tree a bit + return node.statements[0] + elif isinstance(node, Statement): + # No-op + return node + elif isinstance(node, LExprOperator): + if node.sideeffect: + # Special case for using assignment expressions as statements + return Statement(node) + else: + raise RuntimeError( + "Trying to create a statement of lexprOperator type %s:\n%s" + % (type(node), str(node)) + ) + elif isinstance(node, list): + # Convenience case for list of statements + if len(node) == 1: + # Cleans up the expression tree a bit + return as_statement(node[0]) + else: + return StatementList(node) + else: + raise RuntimeError( + "Unexpected Statement type %s:\n%s" % (type(node), str(node)) + ) + + class StatementList(LNode): """A simple sequence of statements. 
No new scopes are introduced.""" @@ -800,36 +852,6 @@ def __eq__(self, other): ) -def as_statement(node): - """Perform type checking on node and wrap in a suitable statement type if necessary.""" - if isinstance(node, StatementList) and len(node.statements) == 1: - # Cleans up the expression tree a bit - return node.statements[0] - elif isinstance(node, Statement): - # No-op - return node - elif isinstance(node, LExprOperator): - if node.sideeffect: - # Special case for using assignment expressions as statements - return Statement(node) - else: - raise RuntimeError( - "Trying to create a statement of lexprOperator type %s:\n%s" - % (type(node), str(node)) - ) - elif isinstance(node, list): - # Convenience case for list of statements - if len(node) == 1: - # Cleans up the expression tree a bit - return as_statement(node[0]) - else: - return StatementList(node) - else: - raise RuntimeError( - "Unexpected CStatement type %s:\n%s" % (type(node), str(node)) - ) - - def _math_function(op, *args): name = op._ufl_handler_name_ dtype = args[0].dtype diff --git a/ffcx/codegeneration/symbols.py b/ffcx/codegeneration/symbols.py index 41d6dd20b..51e4aaad1 100644 --- a/ffcx/codegeneration/symbols.py +++ b/ffcx/codegeneration/symbols.py @@ -70,7 +70,7 @@ def __init__(self, coefficient_numbering, coefficient_offsets, def element_tensor(self): """Symbol for the element tensor itself.""" - return L.Symbol("A") + return L.Symbol("A", dtype=L.DataType.SCALAR) def entity(self, entitytype, restriction): """Entity index for lookup in element tables.""" From d4c9d6c8039930441d477f0aeeb126c9fc023b0e Mon Sep 17 00:00:00 2001 From: Matthew Scroggs Date: Wed, 20 Sep 2023 08:17:28 +0100 Subject: [PATCH 39/44] update tests to pass function spaces and not elements into ufl (#612) --- test/test_add_mode.py | 8 ++++-- test/test_cache.py | 4 ++- test/test_jit_forms.py | 58 ++++++++++++++++++++++++++++++------------ 3 files changed, 51 insertions(+), 19 deletions(-) diff --git a/test/test_add_mode.py b/test/test_add_mode.py index 5582d261c..4d6f5f127 100644 --- a/test/test_add_mode.py +++ b/test/test_add_mode.py @@ -23,7 +23,9 @@ ]) def test_additive_facet_integral(mode, compile_args): element = basix.ufl.element("Lagrange", "triangle", 1) - u, v = ufl.TrialFunction(element), ufl.TestFunction(element) + domain = ufl.Mesh(basix.ufl.element("Lagrange", "triangle", 1, rank=1)) + space = ufl.FunctionSpace(domain, element) + u, v = ufl.TrialFunction(space), ufl.TestFunction(space) a = ufl.inner(u, v) * ufl.ds forms = [a] compiled_forms, module, code = ffcx.codegeneration.jit.compile_forms( @@ -73,7 +75,9 @@ def test_additive_facet_integral(mode, compile_args): @pytest.mark.parametrize("mode", ["double", "float", "long double", "double _Complex", "float _Complex"]) def test_additive_cell_integral(mode, compile_args): element = basix.ufl.element("Lagrange", "triangle", 1) - u, v = ufl.TrialFunction(element), ufl.TestFunction(element) + domain = ufl.Mesh(basix.ufl.element("Lagrange", "triangle", 1, rank=1)) + space = ufl.FunctionSpace(domain, element) + u, v = ufl.TrialFunction(space), ufl.TestFunction(space) a = ufl.inner(ufl.grad(u), ufl.grad(v)) * ufl.dx forms = [a] compiled_forms, module, code = ffcx.codegeneration.jit.compile_forms( diff --git a/test/test_cache.py b/test/test_cache.py index beb21b34f..85edf241b 100644 --- a/test/test_cache.py +++ b/test/test_cache.py @@ -13,7 +13,9 @@ def test_cache_modes(compile_args): element = basix.ufl.element("Lagrange", "triangle", 1) - u, v = ufl.TrialFunction(element), 
ufl.TestFunction(element) + domain = ufl.Mesh(basix.ufl.element("Lagrange", "triangle", 1, rank=1)) + space = ufl.FunctionSpace(domain, element) + u, v = ufl.TrialFunction(space), ufl.TestFunction(space) a = ufl.inner(ufl.grad(u), ufl.grad(v)) * ufl.dx forms = [a] diff --git a/test/test_jit_forms.py b/test/test_jit_forms.py index 5ace4f7a7..cc3ca6990 100644 --- a/test/test_jit_forms.py +++ b/test/test_jit_forms.py @@ -25,8 +25,10 @@ ]) def test_laplace_bilinear_form_2d(mode, expected_result, compile_args): element = basix.ufl.element("Lagrange", "triangle", 1) - kappa = ufl.Constant(ufl.triangle, shape=(2, 2)) - u, v = ufl.TrialFunction(element), ufl.TestFunction(element) + domain = ufl.Mesh(basix.ufl.element("Lagrange", "triangle", 1, rank=1)) + space = ufl.FunctionSpace(domain, element) + kappa = ufl.Constant(domain, shape=(2, 2)) + u, v = ufl.TrialFunction(space), ufl.TestFunction(space) a = ufl.tr(kappa) * ufl.inner(ufl.grad(u), ufl.grad(v)) * ufl.dx forms = [a] @@ -99,7 +101,9 @@ def test_laplace_bilinear_form_2d(mode, expected_result, compile_args): ]) def test_mass_bilinear_form_2d(mode, expected_result, compile_args): element = basix.ufl.element("Lagrange", "triangle", 1) - u, v = ufl.TrialFunction(element), ufl.TestFunction(element) + domain = ufl.Mesh(basix.ufl.element("Lagrange", "triangle", 1, rank=1)) + space = ufl.FunctionSpace(domain, element) + u, v = ufl.TrialFunction(space), ufl.TestFunction(space) a = ufl.inner(u, v) * ufl.dx L = ufl.conj(v) * ufl.dx forms = [a, L] @@ -151,7 +155,9 @@ def test_mass_bilinear_form_2d(mode, expected_result, compile_args): ]) def test_helmholtz_form_2d(mode, expected_result, compile_args): element = basix.ufl.element("Lagrange", "triangle", 1) - u, v = ufl.TrialFunction(element), ufl.TestFunction(element) + domain = ufl.Mesh(basix.ufl.element("Lagrange", "triangle", 1, rank=1)) + space = ufl.FunctionSpace(domain, element) + u, v = ufl.TrialFunction(space), ufl.TestFunction(space) if mode == "double": k = 1.0 elif mode == "double _Complex": @@ -206,7 +212,9 @@ def test_helmholtz_form_2d(mode, expected_result, compile_args): ]) def test_laplace_bilinear_form_3d(mode, expected_result, compile_args): element = basix.ufl.element("Lagrange", "tetrahedron", 1) - u, v = ufl.TrialFunction(element), ufl.TestFunction(element) + domain = ufl.Mesh(basix.ufl.element("Lagrange", "tetrahedron", 1, rank=1)) + space = ufl.FunctionSpace(domain, element) + u, v = ufl.TrialFunction(space), ufl.TestFunction(space) a = ufl.inner(ufl.grad(u), ufl.grad(v)) * ufl.dx forms = [a] compiled_forms, module, code = ffcx.codegeneration.jit.compile_forms( @@ -242,8 +250,10 @@ def test_laplace_bilinear_form_3d(mode, expected_result, compile_args): def test_form_coefficient(compile_args): element = basix.ufl.element("Lagrange", "triangle", 1) - u, v = ufl.TestFunction(element), ufl.TrialFunction(element) - g = ufl.Coefficient(element) + domain = ufl.Mesh(basix.ufl.element("Lagrange", "triangle", 1, rank=1)) + space = ufl.FunctionSpace(domain, element) + u, v = ufl.TestFunction(space), ufl.TrialFunction(space) + g = ufl.Coefficient(space) a = g * ufl.inner(u, v) * ufl.dx forms = [a] compiled_forms, module, code = ffcx.codegeneration.jit.compile_forms(forms, cffi_extra_compile_args=compile_args) @@ -277,7 +287,9 @@ def test_form_coefficient(compile_args): def test_subdomains(compile_args): element = basix.ufl.element("Lagrange", "triangle", 1) - u, v = ufl.TrialFunction(element), ufl.TestFunction(element) + domain = ufl.Mesh(basix.ufl.element("Lagrange", "triangle", 1, rank=1)) + 
space = ufl.FunctionSpace(domain, element) + u, v = ufl.TrialFunction(space), ufl.TestFunction(space) a0 = ufl.inner(u, v) * ufl.dx + ufl.inner(u, v) * ufl.dx(2) a1 = ufl.inner(u, v) * ufl.dx(2) + ufl.inner(u, v) * ufl.dx a2 = ufl.inner(u, v) * ufl.dx(2) + ufl.inner(u, v) * ufl.dx(1) @@ -316,7 +328,9 @@ def test_subdomains(compile_args): @pytest.mark.parametrize("mode", ["double", "double _Complex"]) def test_interior_facet_integral(mode, compile_args): element = basix.ufl.element("Lagrange", "triangle", 1) - u, v = ufl.TrialFunction(element), ufl.TestFunction(element) + domain = ufl.Mesh(basix.ufl.element("Lagrange", "triangle", 1, rank=1)) + space = ufl.FunctionSpace(domain, element) + u, v = ufl.TrialFunction(space), ufl.TestFunction(space) a0 = ufl.inner(ufl.jump(ufl.grad(u)), ufl.jump(ufl.grad(v))) * ufl.dS forms = [a0] compiled_forms, module, code = ffcx.codegeneration.jit.compile_forms( @@ -361,8 +375,10 @@ def test_interior_facet_integral(mode, compile_args): @pytest.mark.parametrize("mode", ["double", "double _Complex"]) def test_conditional(mode, compile_args): element = basix.ufl.element("Lagrange", "triangle", 1) - u, v = ufl.TrialFunction(element), ufl.TestFunction(element) - x = ufl.SpatialCoordinate(ufl.triangle) + domain = ufl.Mesh(basix.ufl.element("Lagrange", "triangle", 1, rank=1)) + space = ufl.FunctionSpace(domain, element) + u, v = ufl.TrialFunction(space), ufl.TestFunction(space) + x = ufl.SpatialCoordinate(domain) condition = ufl.Or(ufl.ge(ufl.real(x[0] + x[1]), 0.1), ufl.ge(ufl.real(x[1] + x[1]**2), 0.1)) c1 = ufl.conditional(condition, 2.0, 1.0) @@ -456,7 +472,9 @@ def test_custom_quadrature(compile_args): def test_curl_curl(compile_args): V = basix.ufl.element("N1curl", "triangle", 2) - u, v = ufl.TrialFunction(V), ufl.TestFunction(V) + domain = ufl.Mesh(basix.ufl.element("Lagrange", "triangle", 1, rank=1)) + space = ufl.FunctionSpace(domain, V) + u, v = ufl.TrialFunction(space), ufl.TestFunction(space) a = ufl.inner(ufl.curl(u), ufl.curl(v)) * ufl.dx forms = [a] @@ -509,7 +527,9 @@ def lagrange_triangle_symbolic(order, corners=[(1, 0), (2, 0), (0, 1)], fun=lamb def test_lagrange_triangle(compile_args, order, mode, sym_fun, ufl_fun): sym = lagrange_triangle_symbolic(order, fun=sym_fun) element = basix.ufl.element("Lagrange", "triangle", order) - v = ufl.TestFunction(element) + domain = ufl.Mesh(basix.ufl.element("Lagrange", "triangle", 1, rank=1)) + space = ufl.FunctionSpace(domain, element) + v = ufl.TestFunction(space) a = ufl_fun(v) * ufl.dx forms = [a] @@ -600,7 +620,9 @@ def lagrange_tetrahedron_symbolic(order, corners=[(1, 0, 0), (2, 0, 0), (0, 1, 0 def test_lagrange_tetrahedron(compile_args, order, mode, sym_fun, ufl_fun): sym = lagrange_tetrahedron_symbolic(order, fun=sym_fun) element = basix.ufl.element("Lagrange", "tetrahedron", order) - v = ufl.TestFunction(element) + domain = ufl.Mesh(basix.ufl.element("Lagrange", "tetrahedron", 1, rank=1)) + space = ufl.FunctionSpace(domain, element) + v = ufl.TestFunction(space) a = ufl_fun(v) * ufl.dx forms = [a] @@ -638,7 +660,9 @@ def test_lagrange_tetrahedron(compile_args, order, mode, sym_fun, ufl_fun): def test_prism(compile_args): element = basix.ufl.element("Lagrange", "prism", 1) - v = ufl.TestFunction(element) + domain = ufl.Mesh(basix.ufl.element("Lagrange", "prism", 1, rank=1)) + space = ufl.FunctionSpace(domain, element) + v = ufl.TestFunction(space) L = v * ufl.dx forms = [L] @@ -726,7 +750,9 @@ def test_invalid_function_name(compile_args): ufl.Coefficient.__str__ = lambda self: "invalid function name" 
V = basix.ufl.element("Lagrange", "triangle", 1) - u = ufl.Coefficient(V) + domain = ufl.Mesh(basix.ufl.element("Lagrange", "triangle", 1, rank=1)) + space = ufl.FunctionSpace(domain, V) + u = ufl.Coefficient(space) a = ufl.inner(u, u) * ufl.dx forms = [a] From 398ddae3857ff4f1ca64a4b1eaf25dfdef5f59aa Mon Sep 17 00:00:00 2001 From: "Garth N. Wells" Date: Thu, 21 Sep 2023 08:05:02 +0100 Subject: [PATCH 40/44] Fix dolfinx cmake in CI (#613) --- .github/workflows/dolfin-tests.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/dolfin-tests.yml b/.github/workflows/dolfin-tests.yml index 189989dcd..d86af68f5 100644 --- a/.github/workflows/dolfin-tests.yml +++ b/.github/workflows/dolfin-tests.yml @@ -82,7 +82,7 @@ jobs: - name: Build DOLFINx C++ unit tests run: | - cmake -G Ninja -DCMAKE_BUILD_TYPE=Developer -B build/test/ -S build/test/ + cmake -G Ninja -DCMAKE_BUILD_TYPE=Developer -B build/test/ -S dolfinx/cpp/test/ cmake --build build/test - name: Run DOLFINx C++ unit tests run: | From 48ea74f3081b291d5df1471e9612504822574d48 Mon Sep 17 00:00:00 2001 From: Chris Richardson Date: Mon, 25 Sep 2023 13:02:27 +0100 Subject: [PATCH 41/44] Use ArrayAccess for entity_local_index (#615) * Fix incorrect use of Symbol * Add check --- ffcx/codegeneration/lnodes.py | 1 + ffcx/codegeneration/symbols.py | 12 +++++++----- 2 files changed, 8 insertions(+), 5 deletions(-) diff --git a/ffcx/codegeneration/lnodes.py b/ffcx/codegeneration/lnodes.py index 317dc8690..5b56ad573 100644 --- a/ffcx/codegeneration/lnodes.py +++ b/ffcx/codegeneration/lnodes.py @@ -311,6 +311,7 @@ class Symbol(LExprTerminal): def __init__(self, name: str, dtype): assert isinstance(name, str) + assert name.replace("_", "").isalnum() self.name = name self.dtype = dtype diff --git a/ffcx/codegeneration/symbols.py b/ffcx/codegeneration/symbols.py index 51e4aaad1..bba6864da 100644 --- a/ffcx/codegeneration/symbols.py +++ b/ffcx/codegeneration/symbols.py @@ -77,13 +77,15 @@ def entity(self, entitytype, restriction): if entitytype == "cell": # Always 0 for cells (even with restriction) return L.LiteralInt(0) - elif entitytype == "facet": - postfix = "[0]" + + entity_local_index = L.Symbol("entity_local_index", dtype=L.DataType.INT) + if entitytype == "facet": if restriction == "-": - postfix = "[1]" - return L.Symbol("entity_local_index" + postfix, dtype=L.DataType.INT) + return entity_local_index[1] + else: + return entity_local_index[0] elif entitytype == "vertex": - return L.Symbol("entity_local_index[0]", dtype=L.DataType.INT) + return entity_local_index[0] else: logging.exception(f"Unknown entitytype {entitytype}") From 2b1e8b9763476ee5aec3b75cd5f8310752b5b1e2 Mon Sep 17 00:00:00 2001 From: "Garth N. 
Wells" Date: Thu, 28 Sep 2023 07:52:36 +0100 Subject: [PATCH 42/44] Mutability fix in unit test (#616) --- test/test_jit_forms.py | 28 ++++++++++------------------ 1 file changed, 10 insertions(+), 18 deletions(-) diff --git a/test/test_jit_forms.py b/test/test_jit_forms.py index cc3ca6990..08af851b4 100644 --- a/test/test_jit_forms.py +++ b/test/test_jit_forms.py @@ -545,14 +545,11 @@ def test_lagrange_triangle(compile_args, order, mode, sym_fun, ufl_fun): np_type = cdtype_to_numpy(mode) b = np.zeros((order + 2) * (order + 1) // 2, dtype=np_type) w = np.array([], dtype=np_type) - geom_type = scalar_to_value_type(mode) np_gtype = cdtype_to_numpy(geom_type) - coords = np.array([[1.0, 0.0, 0.0], [2.0, 0.0, 0.0], [0.0, 1.0, 0.0]], dtype=np_gtype) - kernel = getattr(default_integral, f"tabulate_tensor_{np_type}") kernel(ffi.cast('{type} *'.format(type=mode), b.ctypes.data), ffi.cast('{type} *'.format(type=mode), w.ctypes.data), @@ -804,9 +801,7 @@ def test_interval_vertex_quadrature(compile_args): def test_facet_vertex_quadrature(compile_args): - """ - Test facet vertex quadrature - """ + """Test facet vertex quadrature""" c_el = basix.ufl.element("Lagrange", "quadrilateral", 1, shape=(2,)) mesh = ufl.Mesh(c_el) @@ -863,10 +858,7 @@ def test_facet_vertex_quadrature(compile_args): def test_manifold_derivatives(compile_args): - """ - Test higher order derivatives on manifolds - """ - + """Test higher order derivatives on manifolds""" c_el = basix.ufl.element("Lagrange", "interval", 1, shape=(2,), gdim=2) mesh = ufl.Mesh(c_el) @@ -888,8 +880,7 @@ def test_manifold_derivatives(compile_args): default_integral = compiled_forms[0].form_integrals[0] scale = 2.5 coords = np.array([0.0, 0.0, 0.0, 0.0, scale, 0.0], dtype=np.float64) - dof_coords = el.element.points.reshape(-1) - dof_coords *= scale + dof_coords = scale * el.element.points.reshape(-1) w = np.array([d * d_c**order for d_c in dof_coords], dtype=np.float64) c = np.array([], dtype=np.float64) @@ -908,14 +899,15 @@ def test_manifold_derivatives(compile_args): def test_integral_grouping(compile_args): - """ - We group integrals with common integrands to avoid duplicated integration kernels. - This means that `inner(u, v)*dx((1,2,3)) + inner(grad(u), grad(v))*dx(2) + inner(u,v)*dx` - is grouped as + """We group integrals with common integrands to avoid duplicated + integration kernels. This means that `inner(u, v)*dx((1,2,3)) + + inner(grad(u), grad(v))*dx(2) + inner(u,v)*dx` is grouped as 1. `inner(u,v)*dx(("everywhere", 1, 3))` 2. 
`(inner(grad(u), grad(v)) + inner(u, v))*dx(2)` - Each of the forms has one generated `tabulate_tensor_*` function, which is referred to multiple times in - `integrals_` and `integral_ids_` + Each of the forms has one generated `tabulate_tensor_*` function, + which is referred to multiple times in `integrals_` and + `integral_ids_` + """ mesh = ufl.Mesh(ufl.VectorElement("Lagrange", ufl.triangle, 1)) V = ufl.FunctionSpace(mesh, ufl.FiniteElement("Lagrange", ufl.triangle, 1)) From a2e7649005eddb81d17521d2c8e346f7b229f816 Mon Sep 17 00:00:00 2001 From: Chris Richardson Date: Thu, 28 Sep 2023 15:23:43 +0100 Subject: [PATCH 43/44] Add test to LNodes and reuse more Symbols (#617) * Add gemv example * Reuse symbols * A few fixes * Clean up more symbols * Minor updates * Create more symbols apriori * Make table symbols persist * Fix typo --------- Co-authored-by: Igor Baratta --- ffcx/codegeneration/access.py | 4 +- ffcx/codegeneration/definitions.py | 14 +-- ffcx/codegeneration/expression_generator.py | 7 +- ffcx/codegeneration/integral_generator.py | 9 +- ffcx/codegeneration/symbols.py | 82 ++++++++------- test/test_lnodes.py | 106 ++++++++++++++++++++ 6 files changed, 164 insertions(+), 58 deletions(-) create mode 100644 test/test_lnodes.py diff --git a/ffcx/codegeneration/access.py b/ffcx/codegeneration/access.py index b047251ee..cc3ddcb94 100644 --- a/ffcx/codegeneration/access.py +++ b/ffcx/codegeneration/access.py @@ -95,8 +95,8 @@ def spatial_coordinate(self, e, mt, tabledata, num_points): raise RuntimeError("FIXME: Jacobian in custom integrals is not implemented.") # Access predefined quadrature points table - x = self.symbols.custom_points_table() - iq = self.symbols.quadrature_loop_index() + x = self.symbols.custom_points_table + iq = self.symbols.quadrature_loop_index gdim, = mt.terminal.ufl_shape if gdim == 1: index = iq diff --git a/ffcx/codegeneration/definitions.py b/ffcx/codegeneration/definitions.py index 3fbbacbda..7574ef165 100644 --- a/ffcx/codegeneration/definitions.py +++ b/ffcx/codegeneration/definitions.py @@ -81,7 +81,7 @@ def coefficient(self, t, mt, tabledata, quadrature_rule, access): # Get access to element table FE = self.symbols.element_table(tabledata, self.entitytype, mt.restriction) - ic = self.symbols.coefficient_dof_sum_index() + ic = self.symbols.coefficient_dof_sum_index code = [] pre_code = [] @@ -134,17 +134,11 @@ def _define_coordinate_dofs_lincomb(self, e, mt, tabledata, quadrature_rule, acc # Get access to element table FE = self.symbols.element_table(tabledata, self.entitytype, mt.restriction) - ic = self.symbols.coefficient_dof_sum_index() - dof_access = L.Symbol("coordinate_dofs", dtype=L.DataType.REAL) - - # coordinate dofs is always 3d - dim = 3 - offset = 0 - if mt.restriction == "-": - offset = num_scalar_dofs * dim + ic = self.symbols.coefficient_dof_sum_index + dof_access = self.symbols.domain_dof_access(ic, begin, 3, num_scalar_dofs, mt.restriction) code = [] - body = [L.AssignAdd(access, dof_access[ic * dim + begin + offset] * FE[ic])] + body = [L.AssignAdd(access, dof_access * FE[ic])] code += [L.VariableDecl(access, 0.0)] code += [L.ForRange(ic, 0, num_scalar_dofs, body)] diff --git a/ffcx/codegeneration/expression_generator.py b/ffcx/codegeneration/expression_generator.py index f00b36a8c..9dcf8c8b9 100644 --- a/ffcx/codegeneration/expression_generator.py +++ b/ffcx/codegeneration/expression_generator.py @@ -86,6 +86,7 @@ def generate_element_tables(self): for name in table_names: table = tables[name] symbol = L.Symbol(name, 
dtype=L.DataType.REAL) + self.backend.symbols.element_tables[name] = symbol decl = L.ArrayDecl(symbol, sizes=table.shape, values=table, const=True) parts += [decl] @@ -118,7 +119,7 @@ def generate_quadrature_loop(self): # Could happen for integral with everything zero and optimized away quadparts = [] else: - iq = self.backend.symbols.quadrature_loop_index() + iq = self.backend.symbols.quadrature_loop_index num_points = self.quadrature_rule.points.shape[0] quadparts = [L.ForRange(iq, 0, num_points, body=body)] @@ -192,8 +193,8 @@ def generate_block_parts(self, blockmap, blockdata): num_points = self.quadrature_rule.points.shape[0] A_shape = [num_points, components] + self.ir.tensor_shape - A = self.backend.symbols.element_tensor() - iq = self.backend.symbols.quadrature_loop_index() + A = self.backend.symbols.element_tensor + iq = self.backend.symbols.quadrature_loop_index # Check if DOFs in dofrange are equally spaced. expand_loop = False diff --git a/ffcx/codegeneration/integral_generator.py b/ffcx/codegeneration/integral_generator.py index 25d1d23c8..3dc61164b 100644 --- a/ffcx/codegeneration/integral_generator.py +++ b/ffcx/codegeneration/integral_generator.py @@ -226,6 +226,7 @@ def declare_table(self, name, table): """ table_symbol = L.Symbol(name, dtype=L.DataType.REAL) + self.backend.symbols.element_tables[name] = table_symbol return [L.ArrayDecl(table_symbol, values=table, const=True)] def generate_quadrature_loop(self, quadrature_rule: QuadratureRule): @@ -247,7 +248,7 @@ def generate_quadrature_loop(self, quadrature_rule: QuadratureRule): quadparts = [] else: num_points = quadrature_rule.points.shape[0] - iq = self.backend.symbols.quadrature_loop_index() + iq = self.backend.symbols.quadrature_loop_index quadparts = [L.ForRange(iq, 0, num_points, body=body)] return pre_definitions, preparts, quadparts @@ -445,7 +446,7 @@ def generate_block_parts(self, quadrature_rule: QuadratureRule, blockmap: Tuple, block_rank = len(blockmap) blockdims = tuple(len(dofmap) for dofmap in blockmap) - iq = self.backend.symbols.quadrature_loop_index() + iq = self.backend.symbols.quadrature_loop_index # Override dof index with quadrature loop index for arguments # with quadrature element, to index B like B[iq*num_dofs + iq] @@ -474,7 +475,7 @@ def generate_block_parts(self, quadrature_rule: QuadratureRule, blockmap: Tuple, # Quadrature weight was removed in representation, add it back now if self.ir.integral_type in ufl.custom_integral_types: - weights = self.backend.symbols.custom_weights_table() + weights = self.backend.symbols.custom_weights_table weight = weights[iq] else: weights = self.backend.symbols.weights_table(quadrature_rule) @@ -559,7 +560,7 @@ def generate_block_parts(self, quadrature_rule: QuadratureRule, blockmap: Tuple, body: List[LNode] = [] - A = self.backend.symbols.element_tensor() + A = self.backend.symbols.element_tensor A_shape = self.ir.tensor_shape for indices in keep: multi_index = L.MultiIndex(list(indices), A_shape) diff --git a/ffcx/codegeneration/symbols.py b/ffcx/codegeneration/symbols.py index bba6864da..dcccb4606 100644 --- a/ffcx/codegeneration/symbols.py +++ b/ffcx/codegeneration/symbols.py @@ -68,9 +68,29 @@ def __init__(self, coefficient_numbering, coefficient_offsets, self.original_constant_offsets = original_constant_offsets - def element_tensor(self): - """Symbol for the element tensor itself.""" - return L.Symbol("A", dtype=L.DataType.SCALAR) + # Keep tabs on tables, so the symbols can be reused + self.quadrature_weight_tables = {} + self.element_tables = {} + 
+ # Reusing a single symbol for all quadrature loops, assumed not to be nested. + self.quadrature_loop_index = L.Symbol("iq", dtype=L.DataType.INT) + + # Symbols for the tabulate_tensor function arguments + self.element_tensor = L.Symbol("A", dtype=L.DataType.SCALAR) + self.coefficients = L.Symbol("w", dtype=L.DataType.SCALAR) + self.constants = L.Symbol("c", dtype=L.DataType.SCALAR) + self.coordinate_dofs = L.Symbol("coordinate_dofs", dtype=L.DataType.REAL) + self.entity_local_index = L.Symbol("entity_local_index", dtype=L.DataType.INT) + self.quadrature_permutation = L.Symbol("quadrature_permutation", dtype=L.DataType.INT) + + # Index for loops over coefficient dofs, assumed to never be used in two nested loops. + self.coefficient_dof_sum_index = L.Symbol("ic", dtype=L.DataType.INT) + + # Table for chunk of custom quadrature weights (including cell measure scaling). + self.custom_weights_table = L.Symbol("weights_chunk", dtype=L.DataType.REAL) + + # Table for chunk of custom quadrature points (physical coordinates). + self.custom_points_table = L.Symbol("points_chunk", dtype=L.DataType.REAL) def entity(self, entitytype, restriction): """Entity index for lookup in element tables.""" @@ -78,14 +98,13 @@ def entity(self, entitytype, restriction): # Always 0 for cells (even with restriction) return L.LiteralInt(0) - entity_local_index = L.Symbol("entity_local_index", dtype=L.DataType.INT) if entitytype == "facet": if restriction == "-": - return entity_local_index[1] + return self.entity_local_index[1] else: - return entity_local_index[0] + return self.entity_local_index[0] elif entitytype == "vertex": - return entity_local_index[0] + return self.entity_local_index[0] else: logging.exception(f"Unknown entitytype {entitytype}") @@ -94,29 +113,13 @@ def argument_loop_index(self, iarg): indices = ["i", "j", "k", "l"] return L.Symbol(indices[iarg], dtype=L.DataType.INT) - def coefficient_dof_sum_index(self): - """Index for loops over coefficient dofs, assumed to never be used in two nested loops.""" - return L.Symbol("ic", dtype=L.DataType.INT) - - def quadrature_loop_index(self): - """Reusing a single index name for all quadrature loops, assumed not to be nested.""" - return L.Symbol("iq", dtype=L.DataType.INT) - - def quadrature_permutation(self, index): - """Quadrature permutation, as input to the function.""" - return L.Symbol("quadrature_permutation", dtype=L.DataType.INT)[index] - - def custom_weights_table(self): - """Table for chunk of custom quadrature weights (including cell measure scaling).""" - return L.Symbol("weights_chunk", dtype=L.DataType.REAL) - - def custom_points_table(self): - """Table for chunk of custom quadrature points (physical coordinates).""" - return L.Symbol("points_chunk", dtype=L.DataType.REAL) - def weights_table(self, quadrature_rule): """Table of quadrature weights.""" - return L.Symbol(f"weights_{quadrature_rule.id()}", dtype=L.DataType.REAL) + key = f"weights_{quadrature_rule.id()}" + if key not in self.quadrature_weight_tables: + self.quadrature_weight_tables[key] = L.Symbol(f"weights_{quadrature_rule.id()}", + dtype=L.DataType.REAL) + return self.quadrature_weight_tables[key] def points_table(self, quadrature_rule): """Table of quadrature points (points on the reference integration entity).""" @@ -136,8 +139,7 @@ def domain_dof_access(self, dof, component, gdim, num_scalar_dofs, restriction): offset = 0 if restriction == "-": offset = num_scalar_dofs * 3 - vc = L.Symbol("coordinate_dofs", dtype=L.DataType.REAL) - return vc[3 * dof + component + offset] + 
return self.coordinate_dofs[3 * dof + component + offset] def domain_dofs_access(self, gdim, num_scalar_dofs, restriction): # FIXME: Add domain number or offset! @@ -148,13 +150,13 @@ def domain_dofs_access(self, gdim, num_scalar_dofs, restriction): def coefficient_dof_access(self, coefficient, dof_index): offset = self.coefficient_offsets[coefficient] - w = L.Symbol("w", dtype=L.DataType.SCALAR) + w = self.coefficients return w[offset + dof_index] def coefficient_dof_access_blocked(self, coefficient: ufl.Coefficient, index, block_size, dof_offset): coeff_offset = self.coefficient_offsets[coefficient] - w = L.Symbol("w", dtype=L.DataType.SCALAR) + w = self.coefficients _w = L.Symbol(f"_w_{coeff_offset}_{dof_offset}", dtype=L.DataType.SCALAR) unit_stride_access = _w[index] original_access = w[coeff_offset + index * block_size + dof_offset] @@ -167,8 +169,7 @@ def coefficient_value(self, mt): def constant_index_access(self, constant, index): offset = self.original_constant_offsets[constant] - c = L.Symbol("c", dtype=L.DataType.SCALAR) - + c = self.constants return c[offset + index] def element_table(self, tabledata, entitytype, restriction): @@ -182,14 +183,17 @@ def element_table(self, tabledata, entitytype, restriction): if tabledata.is_piecewise: iq = 0 else: - iq = self.quadrature_loop_index() + iq = self.quadrature_loop_index if tabledata.is_permuted: - qp = self.quadrature_permutation(0) + qp = self.quadrature_permutation[0] if restriction == "-": - qp = self.quadrature_permutation(1) + qp = self.quadrature_permutation[1] else: qp = 0 - # Return direct access to element table - return L.Symbol(tabledata.name, dtype=L.DataType.REAL)[qp][entity][iq] + # Return direct access to element table, reusing symbol if possible + if tabledata.name not in self.element_tables: + self.element_tables[tabledata.name] = L.Symbol(tabledata.name, + dtype=L.DataType.REAL) + return self.element_tables[tabledata.name][qp][entity][iq] diff --git a/test/test_lnodes.py b/test/test_lnodes.py new file mode 100644 index 000000000..9b09ceca8 --- /dev/null +++ b/test/test_lnodes.py @@ -0,0 +1,106 @@ + +from ffcx.codegeneration import lnodes as L +from ffcx.codegeneration.C.c_implementation import CFormatter +from cffi import FFI +import numpy as np +import pytest +import importlib + + +@pytest.mark.parametrize("scalar", ("float", "double", "int")) +def test_gemm(scalar): + # Test LNodes simple matrix-matrix multiply in C + p, q, r = 5, 16, 12 + + A = L.Symbol("A", dtype=L.DataType.SCALAR) + B = L.Symbol("B", dtype=L.DataType.SCALAR) + C = L.Symbol("C", dtype=L.DataType.SCALAR) + code = [L.Comment(f"Matrix multiply A{p,r} = B{p,q} * C{q,r}")] + + i = L.Symbol("i", dtype=L.DataType.INT) + j = L.Symbol("j", dtype=L.DataType.INT) + k = L.Symbol("k", dtype=L.DataType.INT) + m_ij = L.MultiIndex([i, j], [p, q]) + m_ik = L.MultiIndex([i, k], [p, r]) + m_jk = L.MultiIndex([j, k], [q, r]) + + body = [L.AssignAdd(A[m_ik], B[m_ij] * C[m_jk])] + body = [L.ForRange(i, 0, p, body=body)] + body = [L.ForRange(j, 0, q, body=body)] + code += [L.ForRange(k, 0, r, body=body)] + + # Format into C and compile with CFFI + Q = CFormatter(scalar=scalar) + decl = f"void gemm({scalar} *A, {scalar} *B, {scalar} *C)" + c_code = decl + "{\n" + \ + Q.c_format(L.StatementList(code)) + "\n}\n" + + ffibuilder = FFI() + ffibuilder.cdef(decl + ";") + ffibuilder.set_source(f"_gemm_{scalar}", c_code) + ffibuilder.compile(verbose=True) + _gemm = importlib.import_module(f"_gemm_{scalar}") + gemm = _gemm.lib.gemm + ffi = _gemm.ffi + + c_to_np = {"double": 
np.float64, "float": np.float32, "int": np.int32} + np_scalar = c_to_np.get(scalar) + A = np.zeros((p, r), dtype=np_scalar) + B = np.ones((p, q), dtype=np_scalar) + C = np.ones((q, r), dtype=np_scalar) + pA = ffi.cast(f"{scalar} *", A.ctypes.data) + pB = ffi.cast(f"{scalar} *", B.ctypes.data) + pC = ffi.cast(f"{scalar} *", C.ctypes.data) + + gemm(pA, pB, pC) + assert np.all(A == q) + + +@pytest.mark.parametrize("scalar", ("float", "double", "int")) +def test_gemv(scalar): + # Test LNodes simple matvec multiply in C + p, q = 5, 16 + + y = L.Symbol("y", dtype=L.DataType.SCALAR) + A = L.Symbol("A", dtype=L.DataType.SCALAR) + x = L.Symbol("x", dtype=L.DataType.SCALAR) + code = [L.Comment(f"Matrix-vector multiply y({p}) = A{p,q} * x({q})")] + + i = L.Symbol("i", dtype=L.DataType.INT) + j = L.Symbol("j", dtype=L.DataType.INT) + m_ij = L.MultiIndex([i, j], [p, q]) + + body = [L.AssignAdd(y[i], A[m_ij] * x[j])] + body = [L.ForRange(i, 0, p, body=body)] + code += [L.ForRange(j, 0, q, body=body)] + + # Format into C and compile with CFFI + Q = CFormatter(scalar=scalar) + decl = f"void gemm({scalar} *y, {scalar} *A, {scalar} *x)" + c_code = decl + "{\n" + \ + Q.c_format(L.StatementList(code)) + "\n}\n" + + ffibuilder = FFI() + ffibuilder.cdef(decl + ";") + ffibuilder.set_source(f"_gemv_{scalar}", c_code) + ffibuilder.compile(verbose=True) + _gemv = importlib.import_module(f"_gemv_{scalar}") + gemv = _gemv.lib.gemm + ffi = _gemv.ffi + + c_to_np = {"double": np.float64, "float": np.float32, "int": np.int32} + np_scalar = c_to_np.get(scalar) + y = np.arange(p, dtype=np_scalar) + x = np.arange(q, dtype=np_scalar) + A = np.outer(y, x) + + py = ffi.cast(f"{scalar} *", y.ctypes.data) + pA = ffi.cast(f"{scalar} *", A.ctypes.data) + px = ffi.cast(f"{scalar} *", x.ctypes.data) + + # Compute expected result + s2 = q * (q - 1) * (2 * q - 1) // 6 + 1 + result = np.arange(p, dtype=np_scalar) * s2 + + gemv(py, pA, px) + assert np.all(y == result) From 1d2723891bdc990334133bf78362cc601b784089 Mon Sep 17 00:00:00 2001 From: Matthew Scroggs Date: Thu, 28 Sep 2023 17:37:24 +0100 Subject: [PATCH 44/44] Don't use `rank=` in Basix elements (#618) * update tests * remove rank from demos --- demo/ComplexPoisson.py | 2 +- demo/Components.py | 2 +- demo/ExpressionInterpolation.py | 4 ++-- demo/HyperElasticity.py | 4 ++-- demo/MetaData.py | 2 +- demo/MixedCoefficient.py | 2 +- demo/Normals.py | 2 +- demo/PoissonQuad.py | 2 +- demo/StabilisedStokes.py | 2 +- demo/VectorConstant.py | 2 +- demo/VectorPoisson.py | 2 +- test/Poisson.py | 2 +- test/test_add_mode.py | 4 ++-- test/test_blocked_elements.py | 4 ++-- test/test_cache.py | 2 +- test/test_jit_expression.py | 6 +++--- test/test_jit_forms.py | 34 ++++++++++++++++----------------- 17 files changed, 39 insertions(+), 39 deletions(-) diff --git a/demo/ComplexPoisson.py b/demo/ComplexPoisson.py index 731ec9c09..0945fcf7a 100644 --- a/demo/ComplexPoisson.py +++ b/demo/ComplexPoisson.py @@ -21,7 +21,7 @@ from ufl import (Coefficient, FunctionSpace, Mesh, TestFunction, TrialFunction, dx, grad, inner) -coords = basix.ufl.element("P", "triangle", 2, rank=1) +coords = basix.ufl.element("P", "triangle", 2, shape=(2, )) mesh = Mesh(coords) dx = dx(mesh) diff --git a/demo/Components.py b/demo/Components.py index c4183061b..67963fa22 100644 --- a/demo/Components.py +++ b/demo/Components.py @@ -19,7 +19,7 @@ import basix.ufl from ufl import Coefficient, TestFunction, as_vector, dot, dx -element = basix.ufl.element("Lagrange", "tetrahedron", 1, rank=1) +element = 
basix.ufl.element("Lagrange", "tetrahedron", 1, shape=(3, )) v = TestFunction(element) f = Coefficient(element) diff --git a/demo/ExpressionInterpolation.py b/demo/ExpressionInterpolation.py index f16a4c9b8..183e89b8a 100644 --- a/demo/ExpressionInterpolation.py +++ b/demo/ExpressionInterpolation.py @@ -25,12 +25,12 @@ # Define mesh cell = "triangle" -v_el = basix.ufl.element("Lagrange", cell, 1, rank=1) +v_el = basix.ufl.element("Lagrange", cell, 1, shape=(2, )) mesh = Mesh(v_el) # Define mixed function space el = basix.ufl.element("P", cell, 2) -el_int = basix.ufl.element("Discontinuous Lagrange", cell, 1, rank=1) +el_int = basix.ufl.element("Discontinuous Lagrange", cell, 1, shape=(2, )) me = basix.ufl.mixed_element([el, el_int]) V = FunctionSpace(mesh, me) u = Coefficient(V) diff --git a/demo/HyperElasticity.py b/demo/HyperElasticity.py index 4c01ea2ef..de608bc57 100644 --- a/demo/HyperElasticity.py +++ b/demo/HyperElasticity.py @@ -17,9 +17,9 @@ x = SpatialCoordinate(cell) # Elements -u_element = basix.ufl.element("P", cell.cellname(), 2, rank=1) +u_element = basix.ufl.element("P", cell.cellname(), 2, shape=(3, )) p_element = basix.ufl.element("P", cell.cellname(), 1) -A_element = basix.ufl.element("P", cell.cellname(), 1, rank=2) +A_element = basix.ufl.element("P", cell.cellname(), 1, shape=(3, 3)) # Test and trial functions v = TestFunction(u_element) diff --git a/demo/MetaData.py b/demo/MetaData.py index 3de96f592..43aaf42f3 100644 --- a/demo/MetaData.py +++ b/demo/MetaData.py @@ -20,7 +20,7 @@ from ufl import Coefficient, TestFunction, TrialFunction, dx, grad, inner element = basix.ufl.element("Lagrange", "triangle", 1) -vector_element = basix.ufl.element("Lagrange", "triangle", 1, rank=1) +vector_element = basix.ufl.element("Lagrange", "triangle", 1, shape=(2, )) u = TrialFunction(element) diff --git a/demo/MixedCoefficient.py b/demo/MixedCoefficient.py index 9082fcc3f..5c2e506ba 100644 --- a/demo/MixedCoefficient.py +++ b/demo/MixedCoefficient.py @@ -21,7 +21,7 @@ import basix.ufl from ufl import Coefficients, dot, dS, dx -DG = basix.ufl.element("DG", "triangle", 0, rank=1) +DG = basix.ufl.element("DG", "triangle", 0, shape=(2, )) CG = basix.ufl.element("Lagrange", "triangle", 2) RT = basix.ufl.element("RT", "triangle", 3) diff --git a/demo/Normals.py b/demo/Normals.py index c93a5f539..402cdc0a8 100644 --- a/demo/Normals.py +++ b/demo/Normals.py @@ -22,7 +22,7 @@ cell = triangle -element = basix.ufl.element("Lagrange", cell.cellname(), 1, rank=1) +element = basix.ufl.element("Lagrange", cell.cellname(), 1, shape=(2, )) n = FacetNormal(cell) diff --git a/demo/PoissonQuad.py b/demo/PoissonQuad.py index 1cdc6c95a..e9286cec5 100644 --- a/demo/PoissonQuad.py +++ b/demo/PoissonQuad.py @@ -21,7 +21,7 @@ from ufl import (Coefficient, FunctionSpace, Mesh, TestFunction, TrialFunction, dx, grad, inner) -coords = basix.ufl.element("P", "triangle", 2, rank=1) +coords = basix.ufl.element("P", "triangle", 2, shape=(2, )) mesh = Mesh(coords) dx = dx(mesh) diff --git a/demo/StabilisedStokes.py b/demo/StabilisedStokes.py index 5d1e7b55c..cbfe2bde6 100644 --- a/demo/StabilisedStokes.py +++ b/demo/StabilisedStokes.py @@ -21,7 +21,7 @@ from ufl import (Coefficient, TestFunctions, TrialFunctions, div, dot, dx, grad, inner) -vector = basix.ufl.element("Lagrange", "triangle", 1, rank=1) +vector = basix.ufl.element("Lagrange", "triangle", 1, shape=(2, )) scalar = basix.ufl.element("Lagrange", "triangle", 1) system = basix.ufl.mixed_element([vector, scalar]) diff --git a/demo/VectorConstant.py 
b/demo/VectorConstant.py
index a91856f12..46d4a5013 100644
--- a/demo/VectorConstant.py
+++ b/demo/VectorConstant.py
@@ -21,7 +21,7 @@
 from ufl import (Constant, Coefficient, FunctionSpace, Mesh, TestFunction,
                  TrialFunction, dx, grad, inner)
-coords = basix.ufl.element("P", "triangle", 2, rank=1)
+coords = basix.ufl.element("P", "triangle", 2, shape=(2, ))
 mesh = Mesh(coords)
 dx = dx(mesh)
diff --git a/demo/VectorPoisson.py b/demo/VectorPoisson.py
index a5e4064b7..96833092f 100644
--- a/demo/VectorPoisson.py
+++ b/demo/VectorPoisson.py
@@ -20,7 +20,7 @@
 import basix.ufl
 from ufl import Coefficient, TestFunction, TrialFunction, dx, grad, inner
-element = basix.ufl.element("Lagrange", "triangle", 1, rank=1)
+element = basix.ufl.element("Lagrange", "triangle", 1, shape=(2, ))
 u = TrialFunction(element)
 v = TestFunction(element)
diff --git a/test/Poisson.py b/test/Poisson.py
index 6dbe7e8b5..b1311e6d5 100644
--- a/test/Poisson.py
+++ b/test/Poisson.py
@@ -23,7 +23,7 @@
                  TrialFunction, dx, grad, inner)
 import basix.ufl
-mesh = Mesh(basix.ufl.element('P', "triangle", 2, rank=1))
+mesh = Mesh(basix.ufl.element('P', "triangle", 2, shape=(2, )))
 e = basix.ufl.element("Lagrange", "triangle", 2)
diff --git a/test/test_add_mode.py b/test/test_add_mode.py
index 4d6f5f127..0283089f8 100644
--- a/test/test_add_mode.py
+++ b/test/test_add_mode.py
@@ -23,7 +23,7 @@
 ])
 def test_additive_facet_integral(mode, compile_args):
     element = basix.ufl.element("Lagrange", "triangle", 1)
-    domain = ufl.Mesh(basix.ufl.element("Lagrange", "triangle", 1, rank=1))
+    domain = ufl.Mesh(basix.ufl.element("Lagrange", "triangle", 1, shape=(2, )))
     space = ufl.FunctionSpace(domain, element)
     u, v = ufl.TrialFunction(space), ufl.TestFunction(space)
     a = ufl.inner(u, v) * ufl.ds
@@ -75,7 +75,7 @@ def test_additive_facet_integral(mode, compile_args):
 @pytest.mark.parametrize("mode", ["double", "float", "long double", "double _Complex", "float _Complex"])
 def test_additive_cell_integral(mode, compile_args):
     element = basix.ufl.element("Lagrange", "triangle", 1)
-    domain = ufl.Mesh(basix.ufl.element("Lagrange", "triangle", 1, rank=1))
+    domain = ufl.Mesh(basix.ufl.element("Lagrange", "triangle", 1, shape=(2, )))
     space = ufl.FunctionSpace(domain, element)
     u, v = ufl.TrialFunction(space), ufl.TestFunction(space)
     a = ufl.inner(ufl.grad(u), ufl.grad(v)) * ufl.dx
diff --git a/test/test_blocked_elements.py b/test/test_blocked_elements.py
index c67660a54..35debe67e 100644
--- a/test/test_blocked_elements.py
+++ b/test/test_blocked_elements.py
@@ -41,7 +41,7 @@ def test_finite_element(compile_args):
 def test_vector_element(compile_args):
-    ufl_element = basix.ufl.element("Lagrange", "triangle", 1, rank=1)
+    ufl_element = basix.ufl.element("Lagrange", "triangle", 1, shape=(2, ))
     jit_compiled_elements, module, code = ffcx.codegeneration.jit.compile_elements(
         [ufl_element], cffi_extra_compile_args=compile_args)
     ufcx_element, ufcx_dofmap = jit_compiled_elements[0]
@@ -71,7 +71,7 @@ def test_vector_element(compile_args):
 def test_tensor_element(compile_args):
-    ufl_element = basix.ufl.element("Lagrange", "triangle", 1, rank=2)
+    ufl_element = basix.ufl.element("Lagrange", "triangle", 1, shape=(2, 2))
     jit_compiled_elements, module, code = ffcx.codegeneration.jit.compile_elements(
         [ufl_element], cffi_extra_compile_args=compile_args)
     ufcx_element, ufcx_dofmap = jit_compiled_elements[0]
diff --git a/test/test_cache.py b/test/test_cache.py
index 85edf241b..3379fac81 100644
--- a/test/test_cache.py
+++ b/test/test_cache.py
@@ -13,7 +13,7 @@
 def test_cache_modes(compile_args):
     element = basix.ufl.element("Lagrange", "triangle", 1)
-    domain = ufl.Mesh(basix.ufl.element("Lagrange", "triangle", 1, rank=1))
+    domain = ufl.Mesh(basix.ufl.element("Lagrange", "triangle", 1, shape=(2, )))
     space = ufl.FunctionSpace(domain, element)
     u, v = ufl.TrialFunction(space), ufl.TestFunction(space)
     a = ufl.inner(ufl.grad(u), ufl.grad(v)) * ufl.dx
diff --git a/test/test_jit_expression.py b/test/test_jit_expression.py
index d3ab21494..0fedb9bf5 100644
--- a/test/test_jit_expression.py
+++ b/test/test_jit_expression.py
@@ -39,7 +39,7 @@ def test_matvec(compile_args):
     of user specified vector-valued finite element function (in P1 space).
     """
-    e = basix.ufl.element("P", "triangle", 1, rank=1)
+    e = basix.ufl.element("P", "triangle", 1, shape=(2, ))
     mesh = ufl.Mesh(e)
     V = ufl.FunctionSpace(mesh, e)
     f = ufl.Coefficient(V)
@@ -103,7 +103,7 @@ def test_rank1(compile_args):
     and evaluates expression [u_y, u_x] + grad(u_x) at specified points.
     """
-    e = basix.ufl.element("P", "triangle", 1, rank=1)
+    e = basix.ufl.element("P", "triangle", 1, shape=(2, ))
     mesh = ufl.Mesh(e)
     V = ufl.FunctionSpace(mesh, e)
@@ -164,7 +164,7 @@ def test_elimiate_zero_tables_tensor(compile_args):
     Test elimination of tensor-valued expressions with zero tables
     """
     cell = "tetrahedron"
-    c_el = basix.ufl.element("P", cell, 1, rank=1)
+    c_el = basix.ufl.element("P", cell, 1, shape=(3, ))
     mesh = ufl.Mesh(c_el)
     e = basix.ufl.element("P", cell, 1)
diff --git a/test/test_jit_forms.py b/test/test_jit_forms.py
index 08af851b4..1fec026ae 100644
--- a/test/test_jit_forms.py
+++ b/test/test_jit_forms.py
@@ -25,7 +25,7 @@
 ])
 def test_laplace_bilinear_form_2d(mode, expected_result, compile_args):
     element = basix.ufl.element("Lagrange", "triangle", 1)
-    domain = ufl.Mesh(basix.ufl.element("Lagrange", "triangle", 1, rank=1))
+    domain = ufl.Mesh(basix.ufl.element("Lagrange", "triangle", 1, shape=(2, )))
     space = ufl.FunctionSpace(domain, element)
     kappa = ufl.Constant(domain, shape=(2, 2))
     u, v = ufl.TrialFunction(space), ufl.TestFunction(space)
@@ -101,7 +101,7 @@ def test_laplace_bilinear_form_2d(mode, expected_result, compile_args):
 ])
 def test_mass_bilinear_form_2d(mode, expected_result, compile_args):
     element = basix.ufl.element("Lagrange", "triangle", 1)
-    domain = ufl.Mesh(basix.ufl.element("Lagrange", "triangle", 1, rank=1))
+    domain = ufl.Mesh(basix.ufl.element("Lagrange", "triangle", 1, shape=(2, )))
     space = ufl.FunctionSpace(domain, element)
     u, v = ufl.TrialFunction(space), ufl.TestFunction(space)
     a = ufl.inner(u, v) * ufl.dx
@@ -155,7 +155,7 @@ def test_mass_bilinear_form_2d(mode, expected_result, compile_args):
 ])
 def test_helmholtz_form_2d(mode, expected_result, compile_args):
     element = basix.ufl.element("Lagrange", "triangle", 1)
-    domain = ufl.Mesh(basix.ufl.element("Lagrange", "triangle", 1, rank=1))
+    domain = ufl.Mesh(basix.ufl.element("Lagrange", "triangle", 1, shape=(2, )))
     space = ufl.FunctionSpace(domain, element)
     u, v = ufl.TrialFunction(space), ufl.TestFunction(space)
     if mode == "double":
@@ -212,7 +212,7 @@ def test_helmholtz_form_2d(mode, expected_result, compile_args):
 ])
 def test_laplace_bilinear_form_3d(mode, expected_result, compile_args):
     element = basix.ufl.element("Lagrange", "tetrahedron", 1)
-    domain = ufl.Mesh(basix.ufl.element("Lagrange", "tetrahedron", 1, rank=1))
+    domain = ufl.Mesh(basix.ufl.element("Lagrange", "tetrahedron", 1, shape=(3, )))
     space = ufl.FunctionSpace(domain, element)
     u, v = ufl.TrialFunction(space), ufl.TestFunction(space)
     a = ufl.inner(ufl.grad(u), ufl.grad(v)) * ufl.dx
@@ -250,7 +250,7 @@ def test_laplace_bilinear_form_3d(mode, expected_result, compile_args):
 def test_form_coefficient(compile_args):
     element = basix.ufl.element("Lagrange", "triangle", 1)
-    domain = ufl.Mesh(basix.ufl.element("Lagrange", "triangle", 1, rank=1))
+    domain = ufl.Mesh(basix.ufl.element("Lagrange", "triangle", 1, shape=(2, )))
     space = ufl.FunctionSpace(domain, element)
     u, v = ufl.TestFunction(space), ufl.TrialFunction(space)
     g = ufl.Coefficient(space)
@@ -287,7 +287,7 @@ def test_form_coefficient(compile_args):
 def test_subdomains(compile_args):
     element = basix.ufl.element("Lagrange", "triangle", 1)
-    domain = ufl.Mesh(basix.ufl.element("Lagrange", "triangle", 1, rank=1))
+    domain = ufl.Mesh(basix.ufl.element("Lagrange", "triangle", 1, shape=(2, )))
     space = ufl.FunctionSpace(domain, element)
     u, v = ufl.TrialFunction(space), ufl.TestFunction(space)
     a0 = ufl.inner(u, v) * ufl.dx + ufl.inner(u, v) * ufl.dx(2)
@@ -328,7 +328,7 @@ def test_subdomains(compile_args):
 @pytest.mark.parametrize("mode", ["double", "double _Complex"])
 def test_interior_facet_integral(mode, compile_args):
     element = basix.ufl.element("Lagrange", "triangle", 1)
-    domain = ufl.Mesh(basix.ufl.element("Lagrange", "triangle", 1, rank=1))
+    domain = ufl.Mesh(basix.ufl.element("Lagrange", "triangle", 1, shape=(2, )))
     space = ufl.FunctionSpace(domain, element)
     u, v = ufl.TrialFunction(space), ufl.TestFunction(space)
     a0 = ufl.inner(ufl.jump(ufl.grad(u)), ufl.jump(ufl.grad(v))) * ufl.dS
@@ -375,7 +375,7 @@ def test_interior_facet_integral(mode, compile_args):
 @pytest.mark.parametrize("mode", ["double", "double _Complex"])
 def test_conditional(mode, compile_args):
     element = basix.ufl.element("Lagrange", "triangle", 1)
-    domain = ufl.Mesh(basix.ufl.element("Lagrange", "triangle", 1, rank=1))
+    domain = ufl.Mesh(basix.ufl.element("Lagrange", "triangle", 1, shape=(2, )))
     space = ufl.FunctionSpace(domain, element)
     u, v = ufl.TrialFunction(space), ufl.TestFunction(space)
     x = ufl.SpatialCoordinate(domain)
@@ -433,7 +433,7 @@ def test_conditional(mode, compile_args):
 def test_custom_quadrature(compile_args):
-    ve = basix.ufl.element("P", "triangle", 1, rank=1)
+    ve = basix.ufl.element("P", "triangle", 1, shape=(2, ))
     mesh = ufl.Mesh(ve)
     e = basix.ufl.element("P", mesh.ufl_cell().cellname(), 2)
@@ -472,7 +472,7 @@ def test_custom_quadrature(compile_args):
 def test_curl_curl(compile_args):
     V = basix.ufl.element("N1curl", "triangle", 2)
-    domain = ufl.Mesh(basix.ufl.element("Lagrange", "triangle", 1, rank=1))
+    domain = ufl.Mesh(basix.ufl.element("Lagrange", "triangle", 1, shape=(2, )))
     space = ufl.FunctionSpace(domain, V)
     u, v = ufl.TrialFunction(space), ufl.TestFunction(space)
     a = ufl.inner(ufl.curl(u), ufl.curl(v)) * ufl.dx
@@ -527,7 +527,7 @@ def lagrange_triangle_symbolic(order, corners=[(1, 0), (2, 0), (0, 1)], fun=lamb
 def test_lagrange_triangle(compile_args, order, mode, sym_fun, ufl_fun):
     sym = lagrange_triangle_symbolic(order, fun=sym_fun)
     element = basix.ufl.element("Lagrange", "triangle", order)
-    domain = ufl.Mesh(basix.ufl.element("Lagrange", "triangle", 1, rank=1))
+    domain = ufl.Mesh(basix.ufl.element("Lagrange", "triangle", 1, shape=(2, )))
     space = ufl.FunctionSpace(domain, element)
     v = ufl.TestFunction(space)
@@ -617,7 +617,7 @@ def lagrange_tetrahedron_symbolic(order, corners=[(1, 0, 0), (2, 0, 0), (0, 1, 0
 def test_lagrange_tetrahedron(compile_args, order, mode, sym_fun, ufl_fun):
     sym = lagrange_tetrahedron_symbolic(order, fun=sym_fun)
     element = basix.ufl.element("Lagrange", "tetrahedron", order)
-    domain = ufl.Mesh(basix.ufl.element("Lagrange", "tetrahedron", 1, rank=1))
+    domain = ufl.Mesh(basix.ufl.element("Lagrange", "tetrahedron", 1, shape=(3, )))
     space = ufl.FunctionSpace(domain, element)
     v = ufl.TestFunction(space)
@@ -657,7 +657,7 @@ def test_lagrange_tetrahedron(compile_args, order, mode, sym_fun, ufl_fun):
 def test_prism(compile_args):
     element = basix.ufl.element("Lagrange", "prism", 1)
-    domain = ufl.Mesh(basix.ufl.element("Lagrange", "prism", 1, rank=1))
+    domain = ufl.Mesh(basix.ufl.element("Lagrange", "prism", 1, shape=(3, )))
     space = ufl.FunctionSpace(domain, element)
     v = ufl.TestFunction(space)
@@ -691,9 +691,9 @@ def test_prism(compile_args):
 def test_complex_operations(compile_args):
     mode = "double _Complex"
     cell = "triangle"
-    c_element = basix.ufl.element("Lagrange", cell, 1, rank=1)
+    c_element = basix.ufl.element("Lagrange", cell, 1, shape=(2, ))
     mesh = ufl.Mesh(c_element)
-    element = basix.ufl.element("DG", cell, 0, rank=1)
+    element = basix.ufl.element("DG", cell, 0, shape=(2, ))
     V = ufl.FunctionSpace(mesh, element)
     u = ufl.Coefficient(V)
     J1 = ufl.real(u)[0] * ufl.imag(u)[1] * ufl.conj(u)[0] * ufl.dx
@@ -747,7 +747,7 @@ def test_invalid_function_name(compile_args):
     ufl.Coefficient.__str__ = lambda self: "invalid function name"
     V = basix.ufl.element("Lagrange", "triangle", 1)
-    domain = ufl.Mesh(basix.ufl.element("Lagrange", "triangle", 1, rank=1))
+    domain = ufl.Mesh(basix.ufl.element("Lagrange", "triangle", 1, shape=(2, )))
     space = ufl.FunctionSpace(domain, V)
     u = ufl.Coefficient(space)
     a = ufl.inner(u, u) * ufl.dx
@@ -768,7 +768,7 @@ def test_invalid_function_name(compile_args):
 def test_interval_vertex_quadrature(compile_args):
-    c_el = basix.ufl.element("Lagrange", "interval", 1, rank=1)
+    c_el = basix.ufl.element("Lagrange", "interval", 1, shape=(1, ))
     mesh = ufl.Mesh(c_el)
     x = ufl.SpatialCoordinate(mesh)