diff --git a/cpp/dolfinx/common/IndexMap.cpp b/cpp/dolfinx/common/IndexMap.cpp index dd3ae3a7145..1c4d4e8b174 100644 --- a/cpp/dolfinx/common/IndexMap.cpp +++ b/cpp/dolfinx/common/IndexMap.cpp @@ -26,8 +26,8 @@ namespace /// @param comm MPI communicator. /// @param owners List of ranks that own each ghost index. /// @return (src ranks, destination ranks). Both lists are sorted. -std::array, 2> build_src_dest(MPI_Comm comm, - std::span owners) +std::array, 2> +build_src_dest(MPI_Comm comm, std::span owners, int tag) { if (dolfinx::MPI::size(comm) == 1) { @@ -40,8 +40,9 @@ std::array, 2> build_src_dest(MPI_Comm comm, auto [unique_end, range_end] = std::ranges::unique(src); src.erase(unique_end, range_end); src.shrink_to_fit(); - std::vector dest = dolfinx::MPI::compute_graph_edges_nbx(comm, src); + std::vector dest = dolfinx::MPI::compute_graph_edges_nbx(comm, src, tag); std::ranges::sort(dest); + return {std::move(src), std::move(dest)}; } @@ -57,9 +58,9 @@ std::array, 2> build_src_dest(MPI_Comm comm, /// @param[in] dest Destination ranks on `comm`. /// @param[in] ghosts Ghost indices on calling process. /// @param[in] owners Owning rank for each entry in `ghosts`. -/// @param[in] include_ghost A list of the same length as `ghosts`, whose -/// ith entry must be non-zero (true) to include `ghost[i]`, otherwise -/// the ghost will be excluded +/// @param[in] include_ghost A list of the same length as `ghosts`, +/// whose ith entry must be non-zero (true) to include `ghost[i]`, +/// otherwise the ghost will be excluded /// @return 1) The ghost indices packed in a buffer for communication /// 2) The received indices (in receive buffer layout) /// 3) A map relating the position of a ghost in the packed @@ -879,8 +880,9 @@ IndexMap::IndexMap(MPI_Comm comm, std::int32_t local_size) : _comm(comm, true) //----------------------------------------------------------------------------- IndexMap::IndexMap(MPI_Comm comm, std::int32_t local_size, std::span ghosts, - std::span owners) - : IndexMap(comm, local_size, build_src_dest(comm, owners), ghosts, owners) + std::span owners, int tag) + : IndexMap(comm, local_size, build_src_dest(comm, owners, tag), ghosts, + owners) { // Do nothing } @@ -1002,7 +1004,7 @@ std::vector IndexMap::global_indices() const //----------------------------------------------------------------------------- MPI_Comm IndexMap::comm() const { return _comm.comm(); } //---------------------------------------------------------------------------- -graph::AdjacencyList IndexMap::index_to_dest_ranks() const +graph::AdjacencyList IndexMap::index_to_dest_ranks(int tag) const { const std::int64_t offset = _local_range[0]; @@ -1011,7 +1013,8 @@ graph::AdjacencyList IndexMap::index_to_dest_ranks() const std::ranges::sort(src); auto [unique_end, range_end] = std::ranges::unique(src); src.erase(unique_end, range_end); - auto dest = dolfinx::MPI::compute_graph_edges_nbx(_comm.comm(), src); + std::vector dest + = dolfinx::MPI::compute_graph_edges_nbx(_comm.comm(), src, tag); std::ranges::sort(dest); // Array (local idx, ghosting rank) pairs for owned indices diff --git a/cpp/dolfinx/common/IndexMap.h b/cpp/dolfinx/common/IndexMap.h index d3075266b40..f75fd0c63c7 100644 --- a/cpp/dolfinx/common/IndexMap.h +++ b/cpp/dolfinx/common/IndexMap.h @@ -119,8 +119,21 @@ class IndexMap /// of owned entries /// @param[in] ghosts The global indices of ghost entries /// @param[in] owners Owner rank (on `comm`) of each entry in `ghosts` + /// @param[in] tag Tag used in non-blocking MPI calls in the consensus + 
/// algorithm.
+  /// @note A tag can sometimes be required when there is a series of
+  /// calls to this constructor, or to other functions that use the
+  /// consensus algorithm, close together in time. If this constructor
+  /// is called a second time on a rank while another rank has not yet
+  /// completed its first consensus algorithm call, communications can
+  /// be corrupted unless each collective call to this constructor uses
+  /// a distinct `tag` value. All ranks in a given collective call must
+  /// pass the same `tag` value. An alternative to passing a tag is to
+  /// have an implicit or explicit MPI barrier before and after the
+  /// call to this constructor.
   IndexMap(MPI_Comm comm, std::int32_t local_size,
-           std::span<const std::int64_t> ghosts, std::span<const int> owners);
+           std::span<const std::int64_t> ghosts, std::span<const int> owners,
+           int tag = static_cast<int>(dolfinx::MPI::tag::consensus_nbx));

   /// @brief Create an overlapping (ghosted) index map.
   ///
@@ -208,8 +221,16 @@ class IndexMap
   ///
   /// @brief Compute map from each local (owned) index to the set of
   /// ranks that have the index as a ghost.
-  /// @return shared indices
-  graph::AdjacencyList<int> index_to_dest_ranks() const;
+  ///
+  /// @note Collective.
+  ///
+  /// @param[in] tag Tag to pass to MPI calls.
+  /// @note See ::IndexMap(MPI_Comm,std::int32_t,std::span<const std::int64_t>,std::span<const int>,int)
+  /// for an explanation of when `tag` is required.
+  /// @return Shared indices.
+  graph::AdjacencyList<int> index_to_dest_ranks(
+      int tag = static_cast<int>(dolfinx::MPI::tag::consensus_nbx)) const;

   /// @brief Build a list of owned indices that are ghosted by another
   /// rank.
diff --git a/cpp/dolfinx/common/MPI.cpp b/cpp/dolfinx/common/MPI.cpp
index d2a90f75819..8b81d9a4bc0 100644
--- a/cpp/dolfinx/common/MPI.cpp
+++ b/cpp/dolfinx/common/MPI.cpp
@@ -159,7 +159,8 @@ dolfinx::MPI::compute_graph_edges_pcx(MPI_Comm comm, std::span<const int> edges)
 }
 //-----------------------------------------------------------------------------
 std::vector<int>
-dolfinx::MPI::compute_graph_edges_nbx(MPI_Comm comm, std::span<const int> edges)
+dolfinx::MPI::compute_graph_edges_nbx(MPI_Comm comm, std::span<const int> edges,
+                                      int tag)
 {
   spdlog::info(
       "Computing communication graph edges (using NBX algorithm). 
Number " @@ -171,9 +172,8 @@ dolfinx::MPI::compute_graph_edges_nbx(MPI_Comm comm, std::span edges) std::vector send_buffer(edges.size()); for (std::size_t e = 0; e < edges.size(); ++e) { - int err = MPI_Issend(send_buffer.data() + e, 1, MPI_BYTE, edges[e], - static_cast(tag::consensus_pex), comm, - &send_requests[e]); + int err = MPI_Issend(send_buffer.data() + e, 1, MPI_BYTE, edges[e], tag, + comm, &send_requests[e]); dolfinx::MPI::check_error(comm, err); } @@ -189,8 +189,7 @@ dolfinx::MPI::compute_graph_edges_nbx(MPI_Comm comm, std::span edges) // Check for message int request_pending; MPI_Status status; - int err = MPI_Iprobe(MPI_ANY_SOURCE, static_cast(tag::consensus_pex), - comm, &request_pending, &status); + int err = MPI_Iprobe(MPI_ANY_SOURCE, tag, comm, &request_pending, &status); dolfinx::MPI::check_error(comm, err); // Check if message is waiting to be processed @@ -199,8 +198,7 @@ dolfinx::MPI::compute_graph_edges_nbx(MPI_Comm comm, std::span edges) // Receive it int other_rank = status.MPI_SOURCE; std::byte buffer_recv; - int err = MPI_Recv(&buffer_recv, 1, MPI_BYTE, other_rank, - static_cast(tag::consensus_pex), comm, + int err = MPI_Recv(&buffer_recv, 1, MPI_BYTE, other_rank, tag, comm, MPI_STATUS_IGNORE); dolfinx::MPI::check_error(comm, err); other_ranks.push_back(other_rank); diff --git a/cpp/dolfinx/common/MPI.h b/cpp/dolfinx/common/MPI.h index 0c32ba144a3..ab04d93cc64 100644 --- a/cpp/dolfinx/common/MPI.h +++ b/cpp/dolfinx/common/MPI.h @@ -33,8 +33,9 @@ namespace dolfinx::MPI /// MPI communication tags enum class tag : int { - consensus_pcx, - consensus_pex + consensus_pcx = 1200, + consensus_pex = 1201, + consensus_nbx = 1202, }; /// @brief A duplicate MPI communicator and manage lifetime of the @@ -72,22 +73,21 @@ class Comm int rank(MPI_Comm comm); /// Return size of the group (number of processes) associated with the -/// communicator +/// communicator. int size(MPI_Comm comm); /// @brief Check MPI error code. If the error code is not equal to /// MPI_SUCCESS, then std::abort is called. -/// @param[in] comm MPI communicator -/// @param[in] code Error code returned by an MPI function call +/// @param[in] comm MPI communicator. +/// @param[in] code Error code returned by an MPI function call. void check_error(MPI_Comm comm, int code); /// @brief Return local range for the calling process, partitioning the /// global [0, N - 1] range across all ranks into partitions of almost /// equal size. /// @param[in] rank MPI rank of the caller -/// @param[in] N The value to partition -/// @param[in] size The number of MPI ranks across which to partition -/// `N` +/// @param[in] N The value to partition. +/// @param[in] size Number of MPI ranks across which to partition `N`. constexpr std::array local_range(int rank, std::int64_t N, int size) { @@ -108,10 +108,10 @@ constexpr std::array local_range(int rank, std::int64_t N, /// @brief Return which rank owns index in global range [0, N - 1] /// (inverse of MPI::local_range). -/// @param[in] size Number of MPI ranks -/// @param[in] index The index to determine owning rank -/// @param[in] N Total number of indices -/// @return The rank of the owning process +/// @param[in] size Number of MPI ranks. +/// @param[in] index The index to determine the owning rank of. +/// @param[in] N Total number of indices. +/// @return Rank of the owning process. 
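The `local_range` and `index_owner` documentation above describes a near-equal block partition of `[0, N)` in which the first `N % size` ranks hold one extra index. The standalone sketch below re-implements that documented scheme purely for illustration (it is not the library code) and checks that ownership and local ranges agree:

```cpp
// Standalone sketch of the partitioning contract documented for
// dolfinx::MPI::local_range and dolfinx::MPI::index_owner. Illustration only.
#include <array>
#include <cassert>
#include <cstdint>
#include <iostream>

constexpr std::array<std::int64_t, 2> local_range(int rank, std::int64_t N, int size)
{
  const std::int64_t n = N / size; // base block size
  const std::int64_t r = N % size; // first r ranks hold n + 1 indices
  if (rank < r)
    return {rank * (n + 1), rank * (n + 1) + n + 1};
  else
    return {rank * n + r, rank * n + r + n};
}

constexpr int index_owner(int size, std::int64_t index, std::int64_t N)
{
  const std::int64_t n = N / size;
  const std::int64_t r = N % size;
  // Indices below r*(n+1) live on the "large" ranks, the rest on size-n blocks
  return index < r * (n + 1) ? index / (n + 1) : r + (index - r * (n + 1)) / n;
}

int main()
{
  constexpr std::int64_t N = 10;
  constexpr int size = 4; // ranges: [0,3), [3,6), [6,8), [8,10)
  for (std::int64_t i = 0; i < N; ++i)
  {
    const int owner = index_owner(size, i, N);
    auto [begin, end] = local_range(owner, N, size);
    assert(i >= begin && i < end); // owner's range must contain the index
  }
  std::cout << "partition/ownership consistent\n";
}
```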
constexpr int index_owner(int size, std::size_t index, std::size_t N)
{
  assert(index < N);
@@ -171,19 +171,21 @@ std::vector<int> compute_graph_edges_pcx(MPI_Comm comm,
/// implements the NBX algorithm presented in
/// https://dx.doi.org/10.1145/1837853.1693476.
///
-/// @note For sparse graphs, this function has \f$O(\log p)\f$ cost,
-/// where \f$p\f$ is the number of MPI ranks. It is suitable for modest
-/// MPI rank counts.
-///
/// @note The order of the returned ranks is not deterministic.
///
/// @note Collective.
///
/// @param[in] comm MPI communicator
/// @param[in] edges Edges (ranks) from this rank (the caller).
-/// @return Ranks that have defined edges from them to this rank.
-std::vector<int> compute_graph_edges_nbx(MPI_Comm comm,
-                                         std::span<const int> edges);
+/// @param[in] tag Tag used in non-blocking MPI calls. A tag can be
+/// required when this function is called a second time on some ranks
+/// before a previous call has completed on all other ranks.
+/// @note An alternative to passing a tag is to ensure that there is an
+/// implicit or explicit barrier before and after the call to this
+/// function.
+/// @return Ranks that have defined edges from them to this rank.
+std::vector<int>
+compute_graph_edges_nbx(MPI_Comm comm, std::span<const int> edges,
+                        int tag = static_cast<int>(tag::consensus_nbx));

/// @brief Distribute row data to 'post office' ranks.
///
diff --git a/cpp/dolfinx/mesh/Topology.cpp b/cpp/dolfinx/mesh/Topology.cpp
index c1bed136587..451965634fd 100644
--- a/cpp/dolfinx/mesh/Topology.cpp
+++ b/cpp/dolfinx/mesh/Topology.cpp
@@ -170,8 +170,7 @@ determine_sharing_ranks(MPI_Comm comm, std::span<const std::int64_t> indices)
        }
        owner.push_back((*it_owner)[2]);
-        // Update number of items to be sent to each rank and record
-        // owner
+        // Update number of items to be sent to each rank and record owner
        for (auto itx = it; itx != it1; ++itx)
        {
          auto& data = *itx;
@@ -713,45 +712,39 @@ std::vector<std::int32_t> convert_to_local_indexing(
} // namespace
//-----------------------------------------------------------------------------
-Topology::Topology(MPI_Comm comm, CellType cell_type)
-    : _comm(comm), _index_map(cell_dim(cell_type) + 1, {nullptr}),
-      _connectivity(
-          cell_dim(cell_type) + 1,
-          std::vector<std::shared_ptr<graph::AdjacencyList<std::int32_t>>>(
-              cell_dim(cell_type) + 1))
+Topology::Topology(
+    MPI_Comm comm, CellType cell_type,
+    std::shared_ptr<const common::IndexMap> vertex_map,
+    std::shared_ptr<const common::IndexMap> cell_map,
+    std::shared_ptr<graph::AdjacencyList<std::int32_t>> cells,
+    const std::optional<std::vector<std::int64_t>>& original_index)
+    : Topology(comm, {cell_type}, vertex_map, {cell_map}, {cells},
+               original_index
+                   ? std::vector<std::vector<std::int64_t>>{*original_index}
+                   : std::optional<std::vector<std::vector<std::int64_t>>>(
+                         std::nullopt))
 {
-  std::int8_t tdim = cell_dim(cell_type);
-
-  // Create all the entity types in mesh, one per dimension for a single cell
-  // type mesh.
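The `tag` parameter added to `compute_graph_edges_nbx` above exists so that overlapping consensus rounds cannot match each other's messages. Below is a minimal sketch of two back-to-back NBX calls with distinct tags, assuming dolfinx and MPI are available; the ring-shaped edge lists are made up for illustration:

```cpp
// Two NBX consensus rounds issued close together, each with its own tag so a
// slow rank still finishing round 0 cannot match messages from round 1.
#include <dolfinx/common/MPI.h>
#include <mpi.h>
#include <vector>

int main(int argc, char* argv[])
{
  MPI_Init(&argc, &argv);
  const int rank = dolfinx::MPI::rank(MPI_COMM_WORLD);
  const int size = dolfinx::MPI::size(MPI_COMM_WORLD);

  // Each rank declares an edge to its right neighbour (ring graph)...
  std::vector<int> edges0 = {(rank + 1) % size};
  // ...and, in a second round, to its left neighbour
  std::vector<int> edges1 = {(rank - 1 + size) % size};

  const int tag0 = static_cast<int>(dolfinx::MPI::tag::consensus_nbx);
  std::vector<int> src0
      = dolfinx::MPI::compute_graph_edges_nbx(MPI_COMM_WORLD, edges0, tag0);
  std::vector<int> src1
      = dolfinx::MPI::compute_graph_edges_nbx(MPI_COMM_WORLD, edges1, tag0 + 1);

  MPI_Finalize();
  return 0;
}
```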
- _entity_type_offsets.resize(tdim + 2); - for (std::int8_t i = 0; i < tdim + 2; ++i) - _entity_type_offsets[i] = i; - - _entity_types = {CellType::point}; - if (tdim > 0) - _entity_types.push_back(CellType::interval); - if (tdim == 2) - _entity_types.push_back(cell_type); - else if (tdim == 3) - { - _entity_types.push_back(cell_facet_type(cell_type, 0)); - _entity_types.push_back(cell_type); - } - // One facet type - _interprocess_facets.resize(1); } //----------------------------------------------------------------------------- -Topology::Topology(MPI_Comm comm, const std::vector& cell_types) - : _comm(comm), _entity_types({mesh::CellType::point}), - _entity_type_offsets({0, 1}) +Topology::Topology( + MPI_Comm comm, std::vector cell_types, + std::shared_ptr vertex_map, + std::vector> cell_maps, + std::vector>> cells, + const std::optional>>& original_index) + : original_cell_index(original_index + ? *original_index + : std::vector>()), + _comm(comm), _entity_types({mesh::CellType::point}), + _entity_type_offsets({0, 1}), _interprocess_facets(1) { assert(!cell_types.empty()); - std::int8_t tdim = cell_dim(cell_types[0]); - assert(tdim > 0); - for ([[maybe_unused]] auto ct : cell_types) + std::int8_t tdim = cell_dim(cell_types.front()); + +#ifndef NDEBUG + for (auto ct : cell_types) assert(cell_dim(ct) == tdim); - _interprocess_facets.resize(1); +#endif // Create all the entity types in the mesh if (tdim > 1) @@ -789,6 +782,18 @@ Topology::Topology(MPI_Comm comm, const std::vector& cell_types) _connectivity.resize(conn_size); for (auto& c : _connectivity) c.resize(conn_size); + + // Set data + this->set_index_map(0, vertex_map); + this->set_connectivity( + std::make_shared>( + vertex_map->size_local() + vertex_map->num_ghosts()), + 0, 0); + for (std::size_t i = 0; i < cell_types.size(); ++i) + { + this->set_index_map(tdim, i, cell_maps[i]); + this->set_connectivity(cells[i], {tdim, i}, {0, 0}); + } } //----------------------------------------------------------------------------- int Topology::dim() const noexcept { return _entity_type_offsets.size() - 2; } @@ -797,6 +802,7 @@ void Topology::set_index_map(int dim, std::shared_ptr map) { assert(dim < (int)_entity_type_offsets.size() - 1); + // Check there is only one index map in this dimension if (_entity_type_offsets[dim + 1] - _entity_type_offsets[dim] != 1) throw std::runtime_error("Cannot set IndexMap on mixed topology mesh"); @@ -808,7 +814,6 @@ void Topology::set_index_map(std::int8_t dim, std::int8_t i, { assert(dim < (std::int8_t)_entity_type_offsets.size() - 1); assert(i < (_entity_type_offsets[dim + 1] - _entity_type_offsets[dim])); - _index_map[_entity_type_offsets[dim] + i] = map; } //----------------------------------------------------------------------------- @@ -830,9 +835,9 @@ Topology::index_maps(std::int8_t dim) const //----------------------------------------------------------------------------- std::int32_t Topology::create_entities(int dim) { - // TODO: is this check sufficient/correct? Does not catch the cell_entity - // entity case. Should there also be a check for - // connectivity(this->dim(), dim) ? + // TODO: is this check sufficient/correct? Does not catch the + // cell_entity entity case. Should there also be a check for + // connectivity(this->dim(), dim)? 
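With the data-carrying constructors above, a `Topology` is fully wired at construction: the vertex index map, the trivial vertex-to-vertex connectivity and the per-cell-type cell connectivities are all set, so no follow-up `set_index_map`/`set_connectivity` calls are needed. A serial sketch for a single triangle follows; header locations and construction details are assumptions made for illustration, not a definitive usage pattern:

```cpp
// Build a Topology directly with the new data-carrying constructor
// (one triangle, one rank, no ghosts).
#include <dolfinx/common/IndexMap.h>
#include <dolfinx/graph/AdjacencyList.h>
#include <dolfinx/mesh/Topology.h>
#include <dolfinx/mesh/cell_types.h>
#include <memory>
#include <mpi.h>
#include <vector>

int main(int argc, char* argv[])
{
  MPI_Init(&argc, &argv);

  // Index maps: 3 owned vertices, 1 owned cell, no ghosts
  auto vertex_map = std::make_shared<dolfinx::common::IndexMap>(MPI_COMM_SELF, 3);
  auto cell_map = std::make_shared<dolfinx::common::IndexMap>(MPI_COMM_SELF, 1);

  // Cell-to-vertex connectivity for one triangle (vertices 0, 1, 2)
  auto cells = std::make_shared<dolfinx::graph::AdjacencyList<std::int32_t>>(
      dolfinx::graph::regular_adjacency_list(std::vector<std::int32_t>{0, 1, 2}, 3));

  // The constructor stores the maps and sets the vertex and cell
  // connectivity, so the Topology is usable immediately.
  dolfinx::mesh::Topology topology(MPI_COMM_SELF, dolfinx::mesh::CellType::triangle,
                                   vertex_map, cell_map, cells);

  MPI_Finalize();
  return 0;
}
```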
// Skip if already computed (vertices (dim=0) should always exist) if (connectivity(dim, 0)) return -1; @@ -864,6 +869,7 @@ std::int32_t Topology::create_entities(int dim) _interprocess_facets[index] = std::move(interprocess_entities); } } + return this->index_maps(dim)[0]->size_local(); } //----------------------------------------------------------------------------- @@ -917,7 +923,6 @@ void Topology::create_entity_permutations() // FIXME: Is this always required? Could it be made cheaper by doing a // local version? This call does quite a lot of parallel work // Create all mesh entities - for (int d = 0; d < tdim; ++d) create_entities(d); @@ -972,7 +977,6 @@ void Topology::set_connectivity( assert(i0 < (_entity_type_offsets[dim0 + 1] - _entity_type_offsets[dim0])); assert(dim1 < (std::int8_t)_entity_type_offsets.size() - 1); assert(i1 < (_entity_type_offsets[dim1 + 1] - _entity_type_offsets[dim1])); - _connectivity[_entity_type_offsets[dim0] + i0] [_entity_type_offsets[dim1] + i1] = c; @@ -1129,18 +1133,19 @@ Topology mesh::create_topology( } // Get global indices of ghost cells - std::vector> cell_ghost_indices(cell_type.size()); - std::vector> index_map_c(cell_type.size()); + std::vector> cell_ghost_indices; + std::vector> index_map_c; for (std::size_t i = 0; i < cell_type.size(); ++i) { std::span cell_idx(original_cell_index[i]); - cell_ghost_indices[i] = graph::build::compute_ghost_indices( + cell_ghost_indices.push_back(graph::build::compute_ghost_indices( comm, cell_idx.first(num_local_cells[i]), - cell_idx.last(ghost_owners[i].size()), ghost_owners[i]); + cell_idx.last(ghost_owners[i].size()), ghost_owners[i])); // Create index maps for each cell type - index_map_c[i] = std::make_shared( - comm, num_local_cells[i], cell_ghost_indices[i], ghost_owners[i]); + index_map_c.push_back(std::make_shared( + comm, num_local_cells[i], cell_ghost_indices[i], ghost_owners[i], + static_cast(dolfinx::MPI::tag::consensus_nbx) + i)); } // Send and receive ((input vertex index) -> (new global index, owner @@ -1243,11 +1248,11 @@ Topology mesh::create_topology( { return {idx0, idx1}; }); std::ranges::sort(global_to_local_vertices); - std::vector> _cells_local_idx(cells.size()); - for (std::size_t i = 0; i < cell_type.size(); ++i) + std::vector> _cells_local_idx; + for (auto& c : cells) { - _cells_local_idx[i] - = convert_to_local_indexing(cells[i], global_to_local_vertices); + _cells_local_idx.push_back( + convert_to_local_indexing(c, global_to_local_vertices)); } // -- Create Topology object @@ -1280,39 +1285,27 @@ Topology mesh::create_topology( dest = dolfinx::MPI::compute_graph_edges_nbx(comm, src); } - Topology topology(comm, cell_type); - const int tdim = topology.dim(); - // Create index map for vertices auto index_map_v = std::make_shared( - comm, owned_vertices.size(), ghost_vertices, ghost_vertex_owners); - auto c0 = std::make_shared>( - index_map_v->size_local() + index_map_v->num_ghosts()); - - // Set vertex index map and 'connectivity' - topology.set_index_map(0, index_map_v); - topology.set_connectivity(c0, 0, 0); + comm, owned_vertices.size(), ghost_vertices, ghost_vertex_owners, + static_cast(dolfinx::MPI::tag::consensus_nbx) + cell_type.size()); // Set cell index map and connectivity + std::vector>> cells_c; for (std::size_t i = 0; i < cell_type.size(); ++i) { - int num_cell_vertices = mesh::num_cell_vertices(cell_type[i]); - auto cells_local_idx = std::make_shared>( + cells_c.push_back(std::make_shared>( graph::regular_adjacency_list(std::move(_cells_local_idx[i]), - 
num_cell_vertices)); - topology.set_index_map(tdim, i, index_map_c[i]); - topology.set_connectivity(cells_local_idx, {tdim, i}, {0, 0}); + mesh::num_cell_vertices(cell_type[i])))); } // Save original cell index - topology.original_cell_index.resize(cell_type.size()); - for (std::size_t i = 0; i < cell_type.size(); ++i) - { - topology.original_cell_index[i].assign(original_cell_index[i].begin(), - original_cell_index[i].end()); - } + std::vector> orig_index; + for (auto& idx : original_cell_index) + orig_index.emplace_back(idx.begin(), idx.end()); - return topology; + return Topology(comm, cell_type, index_map_v, index_map_c, cells_c, + orig_index); } //----------------------------------------------------------------------------- Topology @@ -1322,7 +1315,6 @@ mesh::create_topology(MPI_Comm comm, std::span cells, std::span boundary_vertices) { spdlog::info("Create topology (single cell type)"); - return create_topology(comm, {cell_type}, {cells}, {original_cell_index}, {ghost_owners}, boundary_vertices); } @@ -1373,10 +1365,6 @@ mesh::create_subtopology(const Topology& topology, int dim, subvertices0 = std::move(map_data.second); } - // Sub-topology vertex-to-vertex connectivity (identity) - auto sub_v_to_v = std::make_shared>( - submap0->size_local() + submap0->num_ghosts()); - // Sub-topology entity to vertex connectivity const CellType entity_type = cell_entity_type(topology.cell_type(), dim, 0); int num_vertices_per_entity = cell_num_entities(entity_type, 0); @@ -1410,15 +1398,8 @@ mesh::create_subtopology(const Topology& topology, int dim, auto sub_e_to_v = std::make_shared>( std::move(sub_e_to_v_vec), std::move(sub_e_to_v_offsets)); - // Create sub-topology - Topology subtopology(topology.comm(), entity_type); - subtopology.set_index_map(0, submap0); - subtopology.set_index_map(dim, submap); - subtopology.set_connectivity(sub_v_to_v, 0, 0); - subtopology.set_connectivity(sub_e_to_v, dim, 0); - - return {std::move(subtopology), std::move(subentities), - std::move(subvertices0)}; + return {Topology(topology.comm(), entity_type, submap0, submap, sub_e_to_v), + std::move(subentities), std::move(subvertices0)}; } //----------------------------------------------------------------------------- std::vector diff --git a/cpp/dolfinx/mesh/Topology.h b/cpp/dolfinx/mesh/Topology.h index e3f5cfdb1a2..f8ead9024e6 100644 --- a/cpp/dolfinx/mesh/Topology.h +++ b/cpp/dolfinx/mesh/Topology.h @@ -10,6 +10,7 @@ #include #include #include +#include #include #include #include @@ -43,16 +44,44 @@ enum class CellType; class Topology { public: - /// @brief Empty Topology constructor - /// @param comm MPI communicator - /// @param cell_type Type of cell - Topology(MPI_Comm comm, CellType cell_type); - - /// @brief Create empty mesh topology with multiple cell types - /// @param comm MPI communicator - /// @param cell_type List of cell types + /// @brief Topology constructor. + /// @param[in] comm MPI communicator. + /// @param[in] cell_type Type of cell. + /// @param[in] vertex_map Index map describing the distribution of + /// mesh vertices. + /// @param[in] cell_map Index map describing the distribution of mesh + /// cells. + /// @param[in] cells Cell-to-vertex connectivity. + /// @param[in] original_index Original index for each cell in `cells`. + Topology(MPI_Comm comm, CellType cell_type, + std::shared_ptr vertex_map, + std::shared_ptr cell_map, + std::shared_ptr> cells, + const std::optional>& original_index + = std::nullopt); + + /// @brief Create empty mesh topology with multiple cell types. 
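In `create_topology` above, each `IndexMap` built in a row gets its own tag derived from `tag::consensus_nbx` (one offset per cell type, plus one more for the vertex map), so their NBX rounds cannot interfere. The sketch below shows that offset pattern through a hypothetical `make_map` helper; the helper is not part of dolfinx and exists only to illustrate the idea:

```cpp
// Hypothetical helper: build the i-th of several IndexMaps created in a row,
// offsetting the base consensus tag so the NBX rounds stay separated.
#include <cstdint>
#include <dolfinx/common/IndexMap.h>
#include <dolfinx/common/MPI.h>
#include <memory>
#include <mpi.h>
#include <span>

std::shared_ptr<dolfinx::common::IndexMap>
make_map(MPI_Comm comm, std::int32_t local_size,
         std::span<const std::int64_t> ghosts, std::span<const int> owners,
         int i)
{
  const int base = static_cast<int>(dolfinx::MPI::tag::consensus_nbx);
  return std::make_shared<dolfinx::common::IndexMap>(comm, local_size, ghosts,
                                                     owners, base + i);
}

// Intended call pattern, mirroring create_topology: i = 0, 1, ... for the
// per-cell-type maps, and i = num_cell_types for the vertex map.
```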
+ /// /// @warning Experimental - Topology(MPI_Comm comm, const std::vector& cell_type); + /// + /// @param comm MPI communicator. + /// @param[in] cell_types Types of cells. + /// @param[in] vertex_map Index map describing the distribution of + /// mesh vertices. + /// @param[in] cell_maps Index maps describing the distribution of + /// mesh cells for each cell type in `cell_types`. + /// @param[in] cells Cell-to-vertex connectivities for each cell type + /// in `cell_types`. + /// @param[in] original_cell_index Original indices for each cell in + /// `cells`. + Topology( + MPI_Comm comm, std::vector cell_types, + std::shared_ptr vertex_map, + std::vector> cell_maps, + std::vector>> cells, + const std::optional>>& + original_cell_index + = std::nullopt); /// Copy constructor Topology(const Topology& topology) = default; diff --git a/python/demo/demo_lagrange_variants.py b/python/demo/demo_lagrange_variants.py index b30bfba62de..9348aa86d29 100644 --- a/python/demo/demo_lagrange_variants.py +++ b/python/demo/demo_lagrange_variants.py @@ -44,6 +44,8 @@ # size is 1, so we take the slice `[0, :, :]` to get a 2-dimensional # array. +N = 20 + # + element = basix.ufl.element( basix.ElementFamily.P, @@ -120,7 +122,7 @@ def saw_tooth(x): # elements, and plot the finite element interpolation. # + -msh = mesh.create_unit_interval(MPI.COMM_WORLD, 10) +msh = mesh.create_unit_interval(MPI.COMM_WORLD, N) x = ufl.SpatialCoordinate(msh) u_exact = saw_tooth(x[0]) @@ -135,9 +137,9 @@ def saw_tooth(x): if MPI.COMM_WORLD.size == 1: # Skip this plotting in parallel pts: list[list[float]] = [] cells: list[int] = [] - for cell in range(10): + for cell in range(N): for i in range(51): - pts.append([cell / 10 + i / 50 / 10, 0, 0]) + pts.append([cell / N + i / 50 / N, 0, 0]) cells.append(cell) values = uh.eval(pts, cells) plt.plot(pts, [saw_tooth(i[0]) for i in pts], "k--") diff --git a/python/dolfinx/wrappers/common.cpp b/python/dolfinx/wrappers/common.cpp index d5a890ee552..ff2a5e12516 100644 --- a/python/dolfinx/wrappers/common.cpp +++ b/python/dolfinx/wrappers/common.cpp @@ -82,14 +82,15 @@ void common(nb::module_& m) [](dolfinx::common::IndexMap* self, MPICommWrapper comm, std::int32_t local_size, nb::ndarray, nb::c_contig> ghosts, - nb::ndarray, nb::c_contig> ghost_owners) + nb::ndarray, nb::c_contig> ghost_owners, + int tag) { new (self) dolfinx::common::IndexMap( comm.get(), local_size, std::span(ghosts.data(), ghosts.size()), - std::span(ghost_owners.data(), ghost_owners.size())); + std::span(ghost_owners.data(), ghost_owners.size()), tag); }, nb::arg("comm"), nb::arg("local_size"), nb::arg("ghosts"), - nb::arg("ghost_owners")) + nb::arg("ghost_owners"), nb::arg("tag")) .def( "__init__", [](dolfinx::common::IndexMap* self, MPICommWrapper comm, diff --git a/python/dolfinx/wrappers/mesh.cpp b/python/dolfinx/wrappers/mesh.cpp index 925dd2799ea..780b3e5ee2a 100644 --- a/python/dolfinx/wrappers/mesh.cpp +++ b/python/dolfinx/wrappers/mesh.cpp @@ -26,6 +26,7 @@ #include #include #include +#include #include #include #include @@ -572,9 +573,26 @@ void mesh(nb::module_& m) .def( "__init__", [](dolfinx::mesh::Topology* t, MPICommWrapper comm, - dolfinx::mesh::CellType cell_type) - { new (t) dolfinx::mesh::Topology(comm.get(), cell_type); }, - nb::arg("comm"), nb::arg("cell_type")) + dolfinx::mesh::CellType cell_type, + std::shared_ptr vertex_map, + std::shared_ptr cell_map, + std::shared_ptr> cells, + std::optional< + nb::ndarray, nb::c_contig>> + original_index) + { + std::optional> idx + = original_index + ? 
std::vector(original_index->data(), + original_index->data() + + original_index->size()) + : std::optional>(std::nullopt); + new (t) dolfinx::mesh::Topology(comm.get(), cell_type, vertex_map, + cell_map, cells, idx); + }, + nb::arg("comm"), nb::arg("cell_type"), nb::arg("vertex_map"), + nb::arg("cell_map"), nb::arg("cells"), + nb::arg("original_index").none()) .def("set_connectivity", nb::overload_cast< std::shared_ptr>, diff --git a/python/test/unit/common/test_index_map.py b/python/test/unit/common/test_index_map.py index c4e6c130809..5019a6a9fb9 100644 --- a/python/test/unit/common/test_index_map.py +++ b/python/test/unit/common/test_index_map.py @@ -171,7 +171,7 @@ def test_create_submap_owner_change(): owners = np.array([comm.rank - 1, comm.rank + 1], dtype=np.int32) submap_indices = np.array([0, 2, 3], dtype=np.int32) - imap = dolfinx.common.IndexMap(comm, local_size, ghosts, owners) + imap = dolfinx.common.IndexMap(comm, local_size, ghosts, owners, 1) sub_imap, sub_imap_to_imap = _cpp.common.create_sub_index_map(imap, submap_indices, True) if comm.rank == 0: @@ -235,7 +235,7 @@ def test_sub_index_map_multiple_possible_owners(): submap_size_local_expected = 0 submap_num_ghosts_expected = 0 - imap = dolfinx.common.IndexMap(comm, local_size, ghosts, owners) + imap = dolfinx.common.IndexMap(comm, local_size, ghosts, owners, 0) # Create a submap where both processes 0 and 1 include the index on process 2, # but process 2 does not include it diff --git a/python/test/unit/la/test_matrix_csr.py b/python/test/unit/la/test_matrix_csr.py index 1f8e5c35d09..69cbfffcc51 100644 --- a/python/test/unit/la/test_matrix_csr.py +++ b/python/test/unit/la/test_matrix_csr.py @@ -122,7 +122,7 @@ def test_distributed_csr(dtype): ghosts = np.array(range(n * nbr, n * nbr + nghost), dtype=np.int64) owner = np.ones_like(ghosts, dtype=np.int32) * nbr - im = IndexMap(MPI.COMM_WORLD, n, ghosts, owner) + im = IndexMap(MPI.COMM_WORLD, n, ghosts, owner, 0) sp = SparsityPattern(MPI.COMM_WORLD, [im, im], [1, 1]) for i in range(n): for j in range(n + nghost): @@ -222,7 +222,7 @@ def test_set_diagonal_distributed(dtype): nlocal = index_map.size_local assert (diag[nlocal:] == dtype(0.0)).all() - shared_dofs = index_map.index_to_dest_ranks() + shared_dofs = index_map.index_to_dest_ranks(0) for dof in range(nlocal): owners = shared_dofs.links(dof) assert diag[dof] == len(owners) + 1
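The Python tests above pass explicit tags to `IndexMap` and `index_to_dest_ranks`. For completeness, here is a C++ sketch of the same pattern; the two-rank ghost layout is a made-up placeholder:

```cpp
// Query which ranks ghost each owned index, passing explicit tags as the
// tests above do on the Python side.
#include <cstdint>
#include <dolfinx/common/IndexMap.h>
#include <dolfinx/common/MPI.h>
#include <dolfinx/graph/AdjacencyList.h>
#include <iostream>
#include <mpi.h>
#include <vector>

int main(int argc, char* argv[])
{
  MPI_Init(&argc, &argv);
  MPI_Comm comm = MPI_COMM_WORLD;
  const int rank = dolfinx::MPI::rank(comm);
  const int size = dolfinx::MPI::size(comm);

  // Each rank owns 3 indices; with more than one rank, it also ghosts the
  // first owned index of the next rank (a simple ring layout).
  const std::int32_t local_size = 3;
  std::vector<std::int64_t> ghosts;
  std::vector<int> owners;
  if (size > 1)
  {
    ghosts.push_back(3 * ((rank + 1) % size));
    owners.push_back((rank + 1) % size);
  }

  // Explicit tag, as in the Python tests, to isolate this NBX round
  dolfinx::common::IndexMap map(comm, local_size, ghosts, owners, 0);

  // Collective query: which ranks ghost each owned index?
  dolfinx::graph::AdjacencyList<int> shared = map.index_to_dest_ranks(1);
  for (std::int32_t i = 0; i < local_size; ++i)
  {
    std::cout << "rank " << rank << ": owned index " << i << " ghosted by "
              << shared.links(i).size() << " rank(s)\n";
  }

  MPI_Finalize();
  return 0;
}
```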