diff --git a/src/atlas/functionspace/CellColumns.cc b/src/atlas/functionspace/CellColumns.cc
index 755ce6743..01294f6c5 100644
--- a/src/atlas/functionspace/CellColumns.cc
+++ b/src/atlas/functionspace/CellColumns.cc
@@ -95,7 +95,8 @@ class CellColumnsHaloExchangeCache : public util::Cache
-        value->setup(array::make_view<int, 1>(mesh.cells().partition()).data(),
+        value->setup(mesh.mpi_comm(),
+                     array::make_view<int, 1>(mesh.cells().partition()).data(),
                      array::make_view<idx_t, 1>(mesh.cells().remote_index()).data(), REMOTE_IDX_BASE,
                      mesh.cells().size());
         return value;
@@ -129,7 +130,8 @@ class CellColumnsGatherScatterCache : public util::Cache
-        value->setup(array::make_view<int, 1>(mesh.cells().partition()).data(),
+        value->setup(mesh.mpi_comm(),
+                     array::make_view<int, 1>(mesh.cells().partition()).data(),
                      array::make_view<idx_t, 1>(mesh.cells().remote_index()).data(), REMOTE_IDX_BASE,
                      array::make_view<gidx_t, 1>(mesh.cells().global_index()).data(), mesh.cells().size());
         return value;
@@ -193,7 +195,6 @@ void CellColumns::set_field_metadata(const eckit::Configuration& config, Field&
 }
 
 idx_t CellColumns::config_size(const eckit::Configuration& config) const {
-    const idx_t rank = mpi::rank();
     idx_t size = nb_cells();
     bool global(false);
     if (config.get("global", global)) {
@@ -201,6 +202,7 @@ idx_t CellColumns::config_size(const eckit::Configuration& config) const {
         idx_t owner(0);
         config.get("owner", owner);
         idx_t _nb_cells_global(nb_cells_global());
+        const idx_t rank = mpi::comm(mpi_comm()).rank();
         size = (rank == owner ? _nb_cells_global : 0);
     }
 }
@@ -269,7 +271,7 @@ CellColumns::CellColumns(const Mesh& mesh, const eckit::Configuration& config):
         return nb_cells;
     };
 
-    mesh::actions::build_nodes_parallel_fields(mesh_.nodes());
+    mesh::actions::build_nodes_parallel_fields(mesh_);
     mesh::actions::build_cells_parallel_fields(mesh_);
     mesh::actions::build_periodic_boundaries(mesh_);
diff --git a/src/atlas/functionspace/CellColumns.h b/src/atlas/functionspace/CellColumns.h
index 4635bb899..a53a09268 100644
--- a/src/atlas/functionspace/CellColumns.h
+++ b/src/atlas/functionspace/CellColumns.h
@@ -99,6 +99,8 @@ class CellColumns : public functionspace::FunctionSpaceImpl {
     Field partition() const override;
 
+    std::string mpi_comm() const override { return mesh_.mpi_comm(); }
+
 private:  // methods
     idx_t config_size(const eckit::Configuration& config) const;
     array::DataType config_datatype(const eckit::Configuration&) const;
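Annotation (not part of the patch): the recurring pattern in the hunks above threads the owning mesh's communicator name into every parallel::HaloExchange / GatherScatter setup instead of implicitly using the default communicator. A minimal sketch of what this enables, assuming a registered eckit communicator named "split" and assuming the mesh generator honours an "mpi_comm" option (both are illustrative, not confirmed by this diff):

    #include "atlas/functionspace/CellColumns.h"
    #include "atlas/grid/Grid.h"
    #include "atlas/mesh/Mesh.h"
    #include "atlas/meshgenerator.h"
    #include "atlas/option.h"
    #include "eckit/mpi/Comm.h"

    void halo_exchange_on_subcommunicator() {
        // Register a communicator named "split" holding half of the ranks.
        int color = (eckit::mpi::comm().rank() < eckit::mpi::comm().size() / 2) ? 0 : 1;
        eckit::mpi::comm().split(color, "split");

        atlas::Grid grid("O32");
        atlas::MeshGenerator meshgen("structured", atlas::util::Config("mpi_comm", "split"));
        atlas::Mesh mesh = meshgen.generate(grid);  // mesh.mpi_comm() == "split"

        atlas::functionspace::CellColumns cells(mesh, atlas::option::halo(1));
        atlas::Field f = cells.createField<double>(atlas::option::name("f"));
        cells.haloExchange(f);  // exchange now runs on "split", per the setup() changes above
    }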
(config.get("global", global)) { @@ -202,6 +203,7 @@ idx_t EdgeColumns::config_size(const eckit::Configuration& config) const { idx_t owner(0); config.get("owner", owner); idx_t _nb_edges_global(nb_edges_global()); + const idx_t rank = mpi::comm(mpi_comm()).rank(); size = (rank == owner ? _nb_edges_global : 0); } } @@ -266,7 +268,7 @@ EdgeColumns::EdgeColumns(const Mesh& mesh, const eckit::Configuration& config): return _nb_edges; }; - mesh::actions::build_nodes_parallel_fields(mesh_.nodes()); + mesh::actions::build_nodes_parallel_fields(mesh_); mesh::actions::build_periodic_boundaries(mesh_); if (halo_.size() > 0) { diff --git a/src/atlas/functionspace/EdgeColumns.h b/src/atlas/functionspace/EdgeColumns.h index 7b5963436..b3fac5a98 100644 --- a/src/atlas/functionspace/EdgeColumns.h +++ b/src/atlas/functionspace/EdgeColumns.h @@ -93,6 +93,8 @@ class EdgeColumns : public functionspace::FunctionSpaceImpl { Field partition() const override; + std::string mpi_comm() const override { return mesh_.mpi_comm(); } + private: // methods idx_t config_size(const eckit::Configuration& config) const; array::DataType config_datatype(const eckit::Configuration&) const; diff --git a/src/atlas/functionspace/FunctionSpace.cc b/src/atlas/functionspace/FunctionSpace.cc index 8e794d6d3..cbe395c76 100644 --- a/src/atlas/functionspace/FunctionSpace.cc +++ b/src/atlas/functionspace/FunctionSpace.cc @@ -58,8 +58,12 @@ idx_t FunctionSpace::size() const { return get()->size(); } -idx_t FunctionSpace::nb_partitions() const { - return get()->nb_partitions(); +idx_t FunctionSpace::part() const { + return get()->part(); +} + +idx_t FunctionSpace::nb_parts() const { + return get()->nb_parts(); } Field FunctionSpace::lonlat() const { @@ -114,6 +118,10 @@ const parallel::GatherScatter& FunctionSpace::scatter() const { return get()->scatter(); } +std::string FunctionSpace::mpi_comm() const { + return get()->mpi_comm(); +} + const util::PartitionPolygon& FunctionSpace::polygon(idx_t halo) const { return get()->polygon(halo); } diff --git a/src/atlas/functionspace/FunctionSpace.h b/src/atlas/functionspace/FunctionSpace.h index b2da9a4c5..be4e6001a 100644 --- a/src/atlas/functionspace/FunctionSpace.h +++ b/src/atlas/functionspace/FunctionSpace.h @@ -79,7 +79,9 @@ class FunctionSpace : DOXYGEN_HIDE(public util::ObjectHandlesetup(array::make_view(mesh.nodes().partition()).data(), + value->setup(mesh.mpi_comm(), array::make_view(mesh.nodes().partition()).data(), array::make_view(mesh.nodes().remote_index()).data(), REMOTE_IDX_BASE, nb_nodes); return value; @@ -154,7 +154,8 @@ class NodeColumnsGatherScatterCache : public util::Cachesetup(array::make_view(mesh.nodes().partition()).data(), + value->setup(mesh.mpi_comm(), + array::make_view(mesh.nodes().partition()).data(), array::make_view(mesh.nodes().remote_index()).data(), REMOTE_IDX_BASE, array::make_view(mesh.nodes().global_index()).data(), mask.data(), mesh.nodes().size()); return value; @@ -206,7 +207,7 @@ NodeColumns::NodeColumns(Mesh mesh, const eckit::Configuration& config): else { halo_ = mesh::Halo(mesh); } - mesh::actions::build_nodes_parallel_fields(mesh_.nodes()); + mesh::actions::build_nodes_parallel_fields(mesh_); mesh::actions::build_periodic_boundaries(mesh_); if (halo_.size() > 0) { @@ -255,7 +256,8 @@ idx_t NodeColumns::config_nb_nodes(const eckit::Configuration& config) const { idx_t owner(0); config.get("owner", owner); idx_t _nb_nodes_global = nb_nodes_global(); - size = (mpi::rank() == owner ? 
+        idx_t rank = mpi::comm(mpi_comm()).rank();
+        size       = (rank == owner ? _nb_nodes_global : 0);
     }
 }
 return size;
diff --git a/src/atlas/functionspace/NodeColumns.h b/src/atlas/functionspace/NodeColumns.h
index c5ea4f518..520d8ea71 100644
--- a/src/atlas/functionspace/NodeColumns.h
+++ b/src/atlas/functionspace/NodeColumns.h
@@ -258,7 +258,9 @@ class NodeColumns : public functionspace::FunctionSpaceImpl {
     virtual idx_t size() const override { return nb_nodes_; }
 
-    idx_t nb_partitions() const override { return mesh_.nb_partitions(); }
+    idx_t part() const override { return mesh_.part(); }
+
+    idx_t nb_parts() const override { return mesh_.nb_parts(); }
 
     Field lonlat() const override { return nodes_.lonlat(); }
@@ -276,6 +278,8 @@ class NodeColumns : public functionspace::FunctionSpaceImpl {
     const Projection& projection() const override { return mesh_.projection(); }
 
+    std::string mpi_comm() const override { return mesh_.mpi_comm(); }
+
 private:  // methods
     void constructor();
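Annotation: across the function spaces, nb_partitions() is split into part() (the partition index held by the calling task) and nb_parts() (the number of partitions in the distribution), both forwarded from the underlying mesh. A short sketch of the renamed accessors:

    #include "atlas/functionspace/NodeColumns.h"
    #include "atlas/mesh/Mesh.h"

    void partition_info(const atlas::Mesh& mesh) {
        atlas::functionspace::NodeColumns nodes(mesh);
        atlas::idx_t my_part = nodes.part();      // partition index of this task
        atlas::idx_t n_parts = nodes.nb_parts();  // replaces the removed nb_partitions()
        (void)my_part;
        (void)n_parts;
    }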
diff --git a/src/atlas/functionspace/PointCloud.cc b/src/atlas/functionspace/PointCloud.cc
index 797b9e43c..a62a53233 100644
--- a/src/atlas/functionspace/PointCloud.cc
+++ b/src/atlas/functionspace/PointCloud.cc
@@ -45,8 +45,16 @@ namespace functionspace {
 namespace detail {
 
+static std::string get_mpi_comm(const eckit::Configuration& config) {
+    if (config.has("mpi_comm")) {
+        return config.getString("mpi_comm");
+    }
+    return mpi::comm().name();
+}
+
 template <>
-PointCloud::PointCloud(const std::vector<PointXY>& points) {
+PointCloud::PointCloud(const std::vector<PointXY>& points, const eckit::Configuration& config) {
+    mpi_comm_ = get_mpi_comm(config);
     lonlat_ = Field("lonlat", array::make_datatype<double>(), array::make_shape(points.size(), 2));
     auto lonlat = array::make_view<double, 2>(lonlat_);
     for (idx_t j = 0, size = points.size(); j < size; ++j) {
@@ -56,7 +64,8 @@ PointCloud::PointCloud(const std::vector<PointXY>& points) {
 }
 
 template <>
-PointCloud::PointCloud(const std::vector<PointXYZ>& points) {
+PointCloud::PointCloud(const std::vector<PointXYZ>& points, const eckit::Configuration& config) {
+    mpi_comm_ = get_mpi_comm(config);
     lonlat_   = Field("lonlat", array::make_datatype<double>(), array::make_shape(points.size(), 2));
     vertical_ = Field("vertical", array::make_datatype<double>(), array::make_shape(points.size()));
     auto lonlat = array::make_view<double, 2>(lonlat_);
@@ -68,13 +77,17 @@ PointCloud::PointCloud(const std::vector<PointXYZ>& points) {
     }
 }
 
-PointCloud::PointCloud(const Field& lonlat): lonlat_(lonlat) {}
+PointCloud::PointCloud(const Field& lonlat, const eckit::Configuration& config): lonlat_(lonlat) {
+    mpi_comm_ = get_mpi_comm(config);
+}
 
-PointCloud::PointCloud(const Field& lonlat, const Field& ghost): lonlat_(lonlat), ghost_(ghost) {
+PointCloud::PointCloud(const Field& lonlat, const Field& ghost, const eckit::Configuration& config): lonlat_(lonlat), ghost_(ghost) {
+    mpi_comm_ = get_mpi_comm(config);
     setupHaloExchange();
 }
 
-PointCloud::PointCloud(const FieldSet & flds): lonlat_(flds["lonlat"]) {
+PointCloud::PointCloud(const FieldSet& flds, const eckit::Configuration& config): lonlat_(flds["lonlat"]) {
+    mpi_comm_ = get_mpi_comm(config);
     if (flds.has("ghost")) {
         ghost_ = flds["ghost"];
     }
@@ -93,6 +106,7 @@ PointCloud::PointCloud(const FieldSet& flds): lonlat_(flds["lonlat"]) {
 }
 
 grid::Partitioner make_partitioner(const Grid& grid, const eckit::Configuration& config) {
+    auto mpi_comm = get_mpi_comm(config);
     auto partitioner = grid.partitioner();
     if( config.has("partitioner") ) {
         partitioner.set("type",config.getString("partitioner"));
     }
@@ -100,6 +114,7 @@ grid::Partitioner make_partitioner(const Grid& grid, const eckit::Configuration&
     if( not partitioner.has("type") ) {
         partitioner.set("type","equal_regions");
     }
+    partitioner.set("mpi_comm",mpi_comm);
     return grid::Partitioner(partitioner);
 }
@@ -109,21 +124,23 @@ PointCloud::PointCloud(const Grid& grid, const eckit::Configuration& config) :
 
 PointCloud::PointCloud(const Grid& grid, const grid::Partitioner& _partitioner, const eckit::Configuration& config) {
     ATLAS_TRACE("PointCloud(grid,partitioner,config)");
-    int part = mpi::rank();
+    mpi_comm_  = get_mpi_comm(config);
+    auto& comm = mpi::comm(mpi_comm_);
     double halo_radius;
     config.get("halo_radius", halo_radius = 0.);
 
     grid::Partitioner partitioner(_partitioner);
     if ( not partitioner ) {
-        partitioner = grid::Partitioner("equal_regions");
+        partitioner = grid::Partitioner("equal_regions", util::Config("mpi_comm",mpi_comm_));
     }
+    part_          = comm.rank();
     nb_partitions_ = partitioner.nb_partitions();
     auto distribution = partitioner.partition(grid);
-    auto size_owned = distribution.nb_pts()[part];
+    auto size_owned = distribution.nb_pts()[part_];
     size_owned_ = size_owned;
 
-    if (halo_radius == 0. || mpi::size() == 1) {
+    if (halo_radius == 0. || nb_partitions_ == 1) {
         idx_t size_halo = size_owned;
         ATLAS_ASSERT(size_owned > 0);
         lonlat_ = Field("lonlat", array::make_datatype<double>(), array::make_shape(size_halo, 2));
@@ -136,12 +153,12 @@ PointCloud::PointCloud(const Grid& grid, const grid::Partitioner& _partitioner,
         auto ridx = array::make_indexview<idx_t, 1>(remote_index_);
         auto gidx = array::make_view<gidx_t, 1>(global_index_);
         array::make_view<int, 1>(ghost_).assign(0);
-        array::make_view<int, 1>(partition_).assign(part);
+        array::make_view<int, 1>(partition_).assign(part_);
         idx_t j{0};
         gidx_t g{0};
         for (auto p : grid.lonlat()) {
-            if( distribution.partition(g++) == part ) {
+            if( distribution.partition(g++) == part_ ) {
                 gidx(j) = g+1;
                 ridx(j) = j;
                 lonlat(j, 0) = p.lon();
@@ -167,7 +184,7 @@ PointCloud::PointCloud(const Grid& grid, const grid::Partitioner& _partitioner,
         kdtree.reserve(grid.size());
         idx_t j{0};
         for (auto p : grid.lonlat()) {
-            if( distribution.partition(j) == part ) {
+            if( distribution.partition(j) == part_ ) {
                 owned_lonlat.emplace_back(p);
                 owned_grid_idx.emplace_back(j);
             }
@@ -213,7 +230,7 @@ PointCloud::PointCloud(const Grid& grid, const grid::Partitioner& _partitioner,
             lonlat(j, 0) = p.lon();
             lonlat(j, 1) = p.lat();
             partition(j) = distribution.partition(g);
-            ghost(j) = partition(j) != part;
+            ghost(j)     = partition(j) != part_;
             glb_idx(j) = g+1;
             ++j;
         }
@@ -446,7 +463,7 @@ void PointCloud::haloExchange(const Field& field, bool on_device) const {
 
 void PointCloud::create_remote_index() const {
     ATLAS_TRACE();
-    const eckit::mpi::Comm& comm = atlas::mpi::comm();
+    const auto& comm = mpi::comm(mpi_comm_);
     const int mpi_rank = comm.rank();
     const int mpi_size = comm.size();
     auto size_halo = lonlat_.shape(0);
@@ -690,16 +707,14 @@ void PointCloud::create_remote_index() const {
 
 void PointCloud::setupHaloExchange() {
     ATLAS_TRACE();
-    const eckit::mpi::Comm& comm = atlas::mpi::comm();
-    const int mpi_rank = comm.rank();
-    const int mpi_size = comm.size();
-
-
     if (ghost_ and partition_ and global_index_ and not remote_index_) {
         create_remote_index();
     }
     else if (not partition_ or not remote_index_) {
         ATLAS_TRACE("do setup");
+        const auto& comm   = mpi::comm(mpi_comm_);
+        const int mpi_rank = comm.rank();
+        const int mpi_size = comm.size();
 
         auto lonlat_v = array::make_view<double, 2>(lonlat_);
         // data structure containing a flag to identify the 'ghost points';
@@ -826,11 +841,11 @@ void PointCloud::setupHaloExchange() {
     ATLAS_ASSERT(ghost_.size() == partition_.size());
 
     halo_exchange_.reset(new parallel::HaloExchange());
-    halo_exchange_->setup(array::make_view<int, 1>(partition_).data(),
+    halo_exchange_->setup(mpi_comm_,
+                          array::make_view<int, 1>(partition_).data(),
                           array::make_view<idx_t, 1>(remote_index_).data(), REMOTE_IDX_BASE,
                           ghost_.size());
-
 }
 
 void PointCloud::adjointHaloExchange(const FieldSet& fieldset, bool on_device) const {
@@ -869,25 +884,25 @@ void PointCloud::adjointHaloExchange(const Field& field, bool) const {
 
 PointCloud::PointCloud(const FunctionSpace& functionspace):
     FunctionSpace(functionspace), functionspace_(dynamic_cast<const detail::PointCloud*>(get())) {}
 
-PointCloud::PointCloud(const Field& field):
-    FunctionSpace(new detail::PointCloud(field)), functionspace_(dynamic_cast<const detail::PointCloud*>(get())) {}
+PointCloud::PointCloud(const Field& field, const eckit::Configuration& config):
+    FunctionSpace(new detail::PointCloud(field,config)), functionspace_(dynamic_cast<const detail::PointCloud*>(get())) {}
 
-PointCloud::PointCloud(const Field& field1, const Field& field2):
-    FunctionSpace(new detail::PointCloud(field1, field2)), functionspace_(dynamic_cast<const detail::PointCloud*>(get())) {}
+PointCloud::PointCloud(const Field& field1, const Field& field2, const eckit::Configuration& config):
+    FunctionSpace(new detail::PointCloud(field1, field2, config)), functionspace_(dynamic_cast<const detail::PointCloud*>(get())) {}
 
-PointCloud::PointCloud(const FieldSet& fset):
-    FunctionSpace(new detail::PointCloud(fset)), functionspace_(dynamic_cast<const detail::PointCloud*>(get())) {}
+PointCloud::PointCloud(const FieldSet& fset, const eckit::Configuration& config):
+    FunctionSpace(new detail::PointCloud(fset, config)), functionspace_(dynamic_cast<const detail::PointCloud*>(get())) {}
 
-PointCloud::PointCloud(const std::vector<PointXY>& points):
-    FunctionSpace(new detail::PointCloud(points)), functionspace_(dynamic_cast<const detail::PointCloud*>(get())) {}
+PointCloud::PointCloud(const std::vector<PointXY>& points, const eckit::Configuration& config):
+    FunctionSpace(new detail::PointCloud(points, config)), functionspace_(dynamic_cast<const detail::PointCloud*>(get())) {}
 
-PointCloud::PointCloud(const std::vector<PointXYZ>& points):
-    FunctionSpace(new detail::PointCloud(points)), functionspace_(dynamic_cast<const detail::PointCloud*>(get())) {}
+PointCloud::PointCloud(const std::vector<PointXYZ>& points, const eckit::Configuration& config):
+    FunctionSpace(new detail::PointCloud(points, config)), functionspace_(dynamic_cast<const detail::PointCloud*>(get())) {}
 
-PointCloud::PointCloud(const std::initializer_list<std::initializer_list<double>>& points):
+PointCloud::PointCloud(const std::initializer_list<std::initializer_list<double>>& points, const eckit::Configuration& config):
     FunctionSpace((points.begin()->size() == 2
-                       ? new detail::PointCloud{std::vector<PointXY>(points.begin(), points.end())}
-                       : new detail::PointCloud{std::vector<PointXYZ>(points.begin(), points.end())})),
+                       ? new detail::PointCloud{std::vector<PointXY>(points.begin(), points.end()), config}
+                       : new detail::PointCloud{std::vector<PointXYZ>(points.begin(), points.end()), config})),
     functionspace_(dynamic_cast<const detail::PointCloud*>(get())) {}
 
 PointCloud::PointCloud(const Grid& grid, const eckit::Configuration& config):
diff --git a/src/atlas/functionspace/PointCloud.h b/src/atlas/functionspace/PointCloud.h
index c28a7e774..f603a3146 100644
--- a/src/atlas/functionspace/PointCloud.h
+++ b/src/atlas/functionspace/PointCloud.h
@@ -43,10 +43,10 @@ namespace detail {
 class PointCloud : public functionspace::FunctionSpaceImpl {
 public:
     template <typename Point>
-    PointCloud(const std::vector<Point>&);
-    PointCloud(const Field& lonlat);
-    PointCloud(const Field& lonlat, const Field& ghost);
-    PointCloud(const FieldSet&);  // assuming lonlat ghost ridx and partition present
+    PointCloud(const std::vector<Point>&, const eckit::Configuration& = util::NoConfig());
+    PointCloud(const Field& lonlat, const eckit::Configuration& = util::NoConfig());
+    PointCloud(const Field& lonlat, const Field& ghost, const eckit::Configuration& = util::NoConfig());
+    PointCloud(const FieldSet&, const eckit::Configuration& = util::NoConfig());  // assuming lonlat ghost ridx and partition present
     PointCloud(const Grid&, const eckit::Configuration& = util::NoConfig());
     PointCloud(const Grid&, const grid::Partitioner&, const eckit::Configuration& = util::NoConfig());
     ~PointCloud() override {}
@@ -61,7 +61,9 @@ class PointCloud : public functionspace::FunctionSpaceImpl {
     Field global_index() const override { return global_index_; }
     Field partition() const override { return partition_; }
     idx_t size() const override { return lonlat_.shape(0); }
-    idx_t nb_partitions() const override { return nb_partitions_; }
+    idx_t part() const override { return part_; }
+    idx_t nb_parts() const override { return nb_partitions_; }
+    std::string mpi_comm() const override { return mpi_comm_; }
 
     using FunctionSpaceImpl::createField;
     Field createField(const eckit::Configuration&) const override;
@@ -166,7 +168,9 @@ class PointCloud : public functionspace::FunctionSpaceImpl {
     idx_t max_glb_idx_{0};
     std::unique_ptr<parallel::HaloExchange> halo_exchange_;
     idx_t levels_{0};
+    idx_t part_{0};
     idx_t nb_partitions_{1};
+    std::string mpi_comm_;
 
     void setupHaloExchange();
@@ -181,12 +185,12 @@ class PointCloud : public FunctionSpace {
 public:
     PointCloud(const FunctionSpace&);
-    PointCloud(const Field& points);
-    PointCloud(const Field&, const Field&);
-    PointCloud(const FieldSet& flds);
-    PointCloud(const std::vector<PointXY>&);
-    PointCloud(const std::vector<PointXYZ>&);
-    PointCloud(const std::initializer_list<std::initializer_list<double>>&);
+    PointCloud(const Field& points, const eckit::Configuration& = util::NoConfig());
+    PointCloud(const Field&, const Field&, const eckit::Configuration& = util::NoConfig());
+    PointCloud(const FieldSet& flds, const eckit::Configuration& = util::NoConfig());
+    PointCloud(const std::vector<PointXY>&, const eckit::Configuration& = util::NoConfig());
+    PointCloud(const std::vector<PointXYZ>&, const eckit::Configuration& = util::NoConfig());
+    PointCloud(const std::initializer_list<std::initializer_list<double>>&, const eckit::Configuration& = util::NoConfig());
     PointCloud(const Grid&, const eckit::Configuration& = util::NoConfig());
     PointCloud(const Grid&, const grid::Partitioner&, const eckit::Configuration& = util::NoConfig());
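Annotation: every PointCloud constructor now threads an optional configuration through to detail::PointCloud, where get_mpi_comm() falls back to the default communicator's name when "mpi_comm" is absent. Hypothetical usage, assuming a communicator named "split" has been registered beforehand:

    #include <vector>
    #include "atlas/functionspace/PointCloud.h"
    #include "atlas/util/Config.h"
    #include "atlas/util/Point.h"

    void pointcloud_on_subcommunicator() {
        std::vector<atlas::PointXY> points = {{0., 0.}, {10., 0.}, {20., 0.}};
        atlas::functionspace::PointCloud fs(points, atlas::util::Config("mpi_comm", "split"));
        // fs.mpi_comm() == "split"; partitioning, remote-index creation and halo
        // exchange all resolve this name via mpi::comm(fs.mpi_comm()).
    }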
diff --git a/src/atlas/functionspace/StructuredColumns.h b/src/atlas/functionspace/StructuredColumns.h
index ff458f6d5..4d3851452 100644
--- a/src/atlas/functionspace/StructuredColumns.h
+++ b/src/atlas/functionspace/StructuredColumns.h
@@ -110,7 +110,8 @@ class StructuredColumns : public FunctionSpace {
     void operator()(const Functor& f) const {
         ATLAS_ASSERT(levels);
         if (global) {
-            if (owner == mpi::rank()) {
+            auto mpi_rank = mpi::comm(fs_.mpi_comm()).rank();
+            if (owner == mpi_rank) {
                 const idx_t ny = fs_.grid().ny();
                 std::vector<idx_t> offset(ny);
                 offset[0] = 0;
@@ -143,7 +144,8 @@ class StructuredColumns : public FunctionSpace {
     void operator()(const Functor& f) const {
         ATLAS_ASSERT(levels == 0);
         if (global) {
-            if (owner == mpi::rank()) {
+            auto mpi_rank = mpi::comm(fs_.mpi_comm()).rank();
+            if (owner == mpi_rank) {
                 const idx_t ny = fs_.grid().ny();
                 std::vector<idx_t> offset(ny);
                 offset[0] = 0;
@@ -172,7 +174,8 @@ class StructuredColumns : public FunctionSpace {
     void operator()(const Functor& f) const {
         ATLAS_ASSERT(levels);
         if (global) {
-            if (owner == mpi::rank()) {
+            auto mpi_rank = mpi::comm(fs_.mpi_comm()).rank();
+            if (owner == mpi_rank) {
                 const idx_t size = fs_.grid().size();
                 atlas_omp_parallel_for(idx_t n = 0; n < size; ++n) {
                     for (idx_t k = 0; k < levels; ++k) {
@@ -197,7 +200,8 @@ class StructuredColumns : public FunctionSpace {
     void operator()(const Functor& f) const {
         ATLAS_ASSERT(levels == 0);
         if (global) {
-            if (owner == mpi::rank()) {
+            auto mpi_rank = mpi::comm(fs_.mpi_comm()).rank();
+            if (owner == mpi_rank) {
                 const idx_t size = fs_.grid().size();
                 atlas_omp_parallel_for(idx_t n = 0; n < size; ++n) { f(n); }
             }
         }
diff --git a/src/atlas/functionspace/detail/BlockStructuredColumns.h b/src/atlas/functionspace/detail/BlockStructuredColumns.h
index ca10f9c43..d65ab8b59 100644
--- a/src/atlas/functionspace/detail/BlockStructuredColumns.h
+++ b/src/atlas/functionspace/detail/BlockStructuredColumns.h
@@ -111,7 +111,8 @@ class BlockStructuredColumns : public FunctionSpaceImpl {
     Field index_j() const { return structuredcolumns_->index_j(); }
     Field ghost() const override { return structuredcolumns_->ghost(); }
     size_t footprint() const override { return structuredcolumns_->footprint(); }
-    idx_t nb_partitions() const override { return structuredcolumns_->nb_partitions(); }
+    idx_t part() const override { return structuredcolumns_->part(); }
+    idx_t nb_parts() const override { return structuredcolumns_->nb_parts(); }
     const StructuredColumns& structuredcolumns() const { return *structuredcolumns_; }
     idx_t block_begin(idx_t jblk) const { return jblk * nproma_; }
     idx_t block_size(idx_t jblk) const { return (jblk != nblks_-1 ? nproma_ : endblk_size_); }
diff --git a/src/atlas/functionspace/detail/FunctionSpaceImpl.cc b/src/atlas/functionspace/detail/FunctionSpaceImpl.cc
index c5493edbd..36420b9fc 100644
--- a/src/atlas/functionspace/detail/FunctionSpaceImpl.cc
+++ b/src/atlas/functionspace/detail/FunctionSpaceImpl.cc
@@ -13,6 +13,7 @@
 #include "atlas/option/Options.h"
 #include "atlas/runtime/Exception.h"
 #include "atlas/util/Metadata.h"
+#include "atlas/parallel/mpi/mpi.h"
 
 namespace atlas {
 namespace functionspace {
@@ -95,7 +96,11 @@ Field FunctionSpaceImpl::createField() const {
     return createField(option::datatypeT<T>());
 }
 
-idx_t FunctionSpaceImpl::nb_partitions() const {
+idx_t FunctionSpaceImpl::part() const {
+    ATLAS_NOTIMPLEMENTED;
+}
+
+idx_t FunctionSpaceImpl::nb_parts() const {
     ATLAS_NOTIMPLEMENTED;
 }
@@ -123,6 +128,10 @@ const parallel::GatherScatter& FunctionSpaceImpl::scatter() const {
     ATLAS_NOTIMPLEMENTED;
 }
 
+std::string FunctionSpaceImpl::mpi_comm() const {
+    return mpi::comm().name();
+}
+
 template Field FunctionSpaceImpl::createField<double>() const;
 template Field FunctionSpaceImpl::createField<float>() const;
diff --git a/src/atlas/functionspace/detail/FunctionSpaceImpl.h b/src/atlas/functionspace/detail/FunctionSpaceImpl.h
index 4c3e6323b..f2db8d01f 100644
--- a/src/atlas/functionspace/detail/FunctionSpaceImpl.h
+++ b/src/atlas/functionspace/detail/FunctionSpaceImpl.h
@@ -93,7 +93,9 @@ class FunctionSpaceImpl : public util::Object {
     virtual idx_t size() const = 0;
 
-    virtual idx_t nb_partitions() const;
+    virtual idx_t part() const;
+
+    virtual idx_t nb_parts() const;
 
     virtual const util::PartitionPolygon& polygon(idx_t halo = 0) const;
@@ -111,6 +113,8 @@ class FunctionSpaceImpl : public util::Object {
 
     virtual const Projection& projection() const;
 
+    virtual std::string mpi_comm() const;
+
 private:
     util::Metadata* metadata_;
 };
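Annotation: the base-class default preserves legacy behaviour (the current default communicator's name), so only function spaces that can live on another communicator need to override mpi_comm(). Call sites then resolve the stored name on demand; a minimal sketch of that lookup (the name "world" is eckit's conventional default and is illustrative here):

    #include <string>
    #include "atlas/parallel/mpi/mpi.h"

    void communicator_lookup(const std::string& name) {
        // Resolves a registered eckit communicator by name, e.g. "world".
        const auto& comm = atlas::mpi::comm(name);
        const int rank = int(comm.rank());
        const int size = int(comm.size());
        (void)rank;
        (void)size;
    }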
diff --git a/src/atlas/functionspace/detail/NodeColumns_FieldStatistics.cc b/src/atlas/functionspace/detail/NodeColumns_FieldStatistics.cc
index 4a9930ac2..88d8ed4ac 100644
--- a/src/atlas/functionspace/detail/NodeColumns_FieldStatistics.cc
+++ b/src/atlas/functionspace/detail/NodeColumns_FieldStatistics.cc
@@ -114,7 +114,7 @@ void dispatch_sum(const NodeColumns& fs, const Field& field, T& result, idx_t& N
             }
         }
     }
-    ATLAS_TRACE_MPI(ALLREDUCE) { mpi::comm().allReduce(local_sum, result, eckit::mpi::sum()); }
+    ATLAS_TRACE_MPI(ALLREDUCE) { mpi::comm(fs.mpi_comm()).allReduce(local_sum, result, eckit::mpi::sum()); }
     N = fs.nb_nodes_global() * arr.shape(1);
 }
@@ -184,7 +184,7 @@ void dispatch_sum(const NodeColumns& fs, const Field& field, std::vector& res
         }
     }
 
-    ATLAS_TRACE_MPI(ALLREDUCE) { mpi::comm().allReduce(local_sum, result, eckit::mpi::sum()); }
+    ATLAS_TRACE_MPI(ALLREDUCE) { mpi::comm(fs.mpi_comm()).allReduce(local_sum, result, eckit::mpi::sum()); }
     N = fs.nb_nodes_global() * nlev;
 }
@@ -278,7 +278,7 @@ void dispatch_sum_per_level(const NodeColumns& fs, const Field& field, Field& su
             }
         }
     }
-    ATLAS_TRACE_MPI(ALLREDUCE) { mpi::comm().allReduceInPlace(sum_per_level.data(), sum.size(), eckit::mpi::sum()); }
+    ATLAS_TRACE_MPI(ALLREDUCE) { mpi::comm(fs.mpi_comm()).allReduceInPlace(sum_per_level.data(), sum.size(), eckit::mpi::sum()); }
     N = fs.nb_nodes_global();
 }
@@ -310,7 +310,7 @@ void dispatch_order_independent_sum_2d(const NodeColumns& fs, const Field& field
     for (idx_t jnode = 0; jnode < glb.size(); ++jnode) {
         result += glb(jnode);
     }
-    ATLAS_TRACE_MPI(BROADCAST) { mpi::comm().broadcast(&result, 1, root); }
+    ATLAS_TRACE_MPI(BROADCAST) { mpi::comm(fs.mpi_comm()).broadcast(&result, 1, root); }
     N = fs.nb_nodes_global();
 }
@@ -394,7 +394,7 @@ void dispatch_order_independent_sum_2d(const NodeColumns& fs, const Field& field
         }
     }
     idx_t root = global.metadata().get("owner");
-    ATLAS_TRACE_MPI(BROADCAST) { mpi::comm().broadcast(result, root); }
+    ATLAS_TRACE_MPI(BROADCAST) { mpi::comm(fs.mpi_comm()).broadcast(result, root); }
     N = fs.nb_nodes_global();
 }
@@ -510,7 +510,7 @@ void dispatch_order_independent_sum_per_level(const NodeColumns& fs, const Field
             }
         }
     }
-    mpi::comm().broadcast(sum_array, root);
+    mpi::comm(fs.mpi_comm()).broadcast(sum_array, root);
     if (mpi::rank() != root) {
         idx_t c(0);
         for (idx_t l = 0; l < sum.shape(0); ++l) {
@@ -564,7 +564,7 @@ void dispatch_minimum(const NodeColumns& fs, const Field& field, std::vector&
         }
     }
 
-    ATLAS_TRACE_MPI(ALLREDUCE) { mpi::comm().allReduce(local_minimum, min, eckit::mpi::min()); }
+    ATLAS_TRACE_MPI(ALLREDUCE) { mpi::comm(fs.mpi_comm()).allReduce(local_minimum, min, eckit::mpi::min()); }
 }
 
 template <typename T>
@@ -626,7 +626,7 @@ void dispatch_maximum(const NodeColumns& fs, const Field& field, std::vector&
             }
         }
     }
-    ATLAS_TRACE_MPI(ALLREDUCE) { mpi::comm().allReduce(local_maximum, max, eckit::mpi::max()); }
+    ATLAS_TRACE_MPI(ALLREDUCE) { mpi::comm(fs.mpi_comm()).allReduce(local_maximum, max, eckit::mpi::max()); }
 }
 
 template <typename T>
@@ -722,7 +722,7 @@ void dispatch_minimum_per_level(const NodeColumns& fs, const Field& field, Field
             }
         }
     }
-    ATLAS_TRACE_MPI(ALLREDUCE) { mpi::comm().allReduceInPlace(min.data(), min_field.size(), eckit::mpi::min()); }
+    ATLAS_TRACE_MPI(ALLREDUCE) { mpi::comm(fs.mpi_comm()).allReduceInPlace(min.data(), min_field.size(), eckit::mpi::min()); }
 }
 
 void minimum_per_level(const NodeColumns& fs, const Field& field, Field& min) {
@@ -744,7 +744,7 @@ void minimum_per_level(const NodeColumns& fs, const Field& field, Field& min) {
 }
 
 template <typename T>
-void dispatch_maximum_per_level(const NodeColumns&, const Field& field, Field& max_field) {
+void dispatch_maximum_per_level(const NodeColumns& fs, const Field& field, Field& max_field) {
     array::ArrayShape shape;
     shape.reserve(field.rank() - 1);
     for (idx_t j = 1; j < field.rank(); ++j) {
@@ -786,7 +786,7 @@ void dispatch_maximum_per_level(const NodeColumns&, const Field& field, Field& m
             }
         }
     }
-    ATLAS_TRACE_MPI(ALLREDUCE) { mpi::comm().allReduceInPlace(max.data(), max_field.size(), eckit::mpi::max()); }
+    ATLAS_TRACE_MPI(ALLREDUCE) { mpi::comm(fs.mpi_comm()).allReduceInPlace(max.data(), max_field.size(), eckit::mpi::max()); }
 }
 
 void maximum_per_level(const NodeColumns& fs, const Field& field, Field& max) {
@@ -861,8 +861,8 @@ void dispatch_minimum_and_location(const NodeColumns& fs, const Field& field, st
     }
 
     ATLAS_TRACE_MPI(ALLREDUCE) {
-        mpi::comm().allReduce(min_and_gidx_loc, min_and_gidx_glb, eckit::mpi::minloc());
-        mpi::comm().allReduce(min_and_level_loc, min_and_level_glb, eckit::mpi::minloc());
+        mpi::comm(fs.mpi_comm()).allReduce(min_and_gidx_loc, min_and_gidx_glb, eckit::mpi::minloc());
+        mpi::comm(fs.mpi_comm()).allReduce(min_and_level_loc, min_and_level_glb, eckit::mpi::minloc());
     }
 
     for (idx_t j = 0; j < nvar; ++j) {
@@ -964,8 +964,8 @@ void dispatch_maximum_and_location(const NodeColumns& fs, const Field& field, st
     }
 
     ATLAS_TRACE_MPI(ALLREDUCE) {
-        mpi::comm().allReduce(max_and_gidx_loc, max_and_gidx_glb, eckit::mpi::maxloc());
-        mpi::comm().allReduce(max_and_level_loc, max_and_level_glb, eckit::mpi::maxloc());
+        mpi::comm(fs.mpi_comm()).allReduce(max_and_gidx_loc, max_and_gidx_glb, eckit::mpi::maxloc());
+        mpi::comm(fs.mpi_comm()).allReduce(max_and_level_loc, max_and_level_glb, eckit::mpi::maxloc());
     }
 
     for (idx_t j = 0; j < nvar; ++j) {
@@ -1130,7 +1130,7 @@ void dispatch_minimum_and_location_per_level(const NodeColumns& fs, const Field&
         }
     }
 
-    ATLAS_TRACE_MPI(ALLREDUCE) { mpi::comm().allReduce(min_and_gidx_loc, min_and_gidx_glb, eckit::mpi::minloc()); }
+    ATLAS_TRACE_MPI(ALLREDUCE) { mpi::comm(fs.mpi_comm()).allReduce(min_and_gidx_loc, min_and_gidx_glb, eckit::mpi::minloc()); }
 
     atlas_omp_parallel_for(idx_t l = 0; l < nlev; ++l) {
         for (idx_t j = 0; j < nvar; ++j) {
@@ -1231,7 +1231,7 @@ void dispatch_maximum_and_location_per_level(const NodeColumns& fs, const Field&
         }
     }
 
-    ATLAS_TRACE_MPI(ALLREDUCE) { mpi::comm().allReduce(max_and_gidx_loc, max_and_gidx_glb, eckit::mpi::maxloc()); }
+    ATLAS_TRACE_MPI(ALLREDUCE) { mpi::comm(fs.mpi_comm()).allReduce(max_and_gidx_loc, max_and_gidx_glb, eckit::mpi::maxloc()); }
 
     atlas_omp_parallel_for(idx_t l = 0; l < nlev; ++l) {
         for (idx_t j = 0; j < nvar; ++j) {
diff --git a/src/atlas/functionspace/detail/StructuredColumns.cc b/src/atlas/functionspace/detail/StructuredColumns.cc
index 02bd6cbc9..7ec33057b 100644
--- a/src/atlas/functionspace/detail/StructuredColumns.cc
+++ b/src/atlas/functionspace/detail/StructuredColumns.cc
@@ -129,7 +129,8 @@ class StructuredColumnsHaloExchangeCache : public util::Cache
-        value->setup(array::make_view<int, 1>(funcspace->partition()).data(),
+        value->setup(funcspace->mpi_comm(),
+                     array::make_view<int, 1>(funcspace->partition()).data(),
                      array::make_view<idx_t, 1>(funcspace->remote_index()).data(), REMOTE_IDX_BASE,
                      funcspace->sizeHalo(), funcspace->sizeOwned());
         return value;
@@ -177,7 +178,8 @@ class StructuredColumnsGatherScatterCache : public util::Cache
-        value->setup(array::make_view<int, 1>(funcspace->partition()).data(),
+        value->setup(funcspace->mpi_comm(),
+                     array::make_view<int, 1>(funcspace->partition()).data(),
                      array::make_view<idx_t, 1>(funcspace->remote_index()).data(), REMOTE_IDX_BASE,
                      array::make_view<gidx_t, 1>(funcspace->global_index()).data(), funcspace->sizeOwned());
         return value;
@@ -383,7 +385,8 @@ idx_t StructuredColumns::config_size(const eckit::Configuration& config) const {
     if (global) {
         idx_t owner(0);
         config.get("owner", owner);
-        size = (mpi::rank() == owner ? grid_->size() : 0);
+        idx_t rank = mpi::comm(mpi_comm()).rank();
grid_->size() : 0); } } return size; @@ -406,6 +409,15 @@ void StructuredColumns::throw_outofbounds(idx_t i, idx_t j) const { throw_Exception(ss.str(), Here()); } +namespace { + static std::string extract_mpi_comm(const eckit::Configuration& config) { + if(config.has("mpi_comm")){ + return config.getString("mpi_comm"); + } + return mpi::comm().name(); + } +} + // ---------------------------------------------------------------------------- // Constructor // ---------------------------------------------------------------------------- @@ -421,7 +433,7 @@ StructuredColumns::StructuredColumns(const Grid& grid, const grid::Distribution& StructuredColumns::StructuredColumns(const Grid& grid, const grid::Distribution& distribution, const Vertical& vertical, const eckit::Configuration& config): - vertical_(vertical), nb_levels_(vertical_.size()), grid_(new StructuredGrid(grid)) { + vertical_(vertical), nb_levels_(vertical_.size()), grid_(new StructuredGrid(grid)), mpi_comm_(extract_mpi_comm(config)) { setup(distribution, config); } @@ -434,22 +446,51 @@ StructuredColumns::StructuredColumns(const Grid& grid, const Vertical& vertical, ATLAS_TRACE("StructuredColumns constructor"); grid::Partitioner partitioner(p); + + if (config.has("mpi_comm")) { + mpi_comm_ = config.getString("mpi_comm"); + if (partitioner) { + ATLAS_ASSERT(partitioner.mpi_comm() == mpi_comm_); + } + if (config.has("partitioner.mpi_comm")) { + ATLAS_ASSERT(config.getString("partitioner.mpi_comm") == mpi_comm_); + } + } + else if (config.has("partitioner.mpi_comm")) { + mpi_comm_ = config.getString("partitioner.mpi_comm"); + } + else if (partitioner) { + mpi_comm_ = partitioner.mpi_comm(); + } + else { + mpi_comm_ = mpi::comm().name(); + } + if (not partitioner) { + util::Config partitioner_config; if (config.has("partitioner")) { - partitioner = grid::Partitioner(config.getSubConfiguration("partitioner")); + partitioner_config = config.getSubConfiguration("partitioner"); } else { if (grid_->domain().global()) { - partitioner = grid::Partitioner("equal_regions"); + partitioner_config.set("type", "equal_regions"); } else { - partitioner = grid::Partitioner("checkerboard"); + partitioner_config.set("type", "checkerboard"); } } + if (not partitioner_config.has("mpi_comm")) { + partitioner_config.set("mpi_comm", mpi_comm_); + } + partitioner = grid::Partitioner(partitioner_config); } grid::Distribution distribution; - ATLAS_TRACE_SCOPE("Partitioning grid") { distribution = grid::Distribution(grid, partitioner); } + + { + mpi::Scope mpi_scope(mpi_comm_); + ATLAS_TRACE_SCOPE("Partitioning grid") { distribution = grid::Distribution(grid, partitioner); } + } setup(distribution, config); } diff --git a/src/atlas/functionspace/detail/StructuredColumns.h b/src/atlas/functionspace/detail/StructuredColumns.h index 166373403..07e936ea3 100644 --- a/src/atlas/functionspace/detail/StructuredColumns.h +++ b/src/atlas/functionspace/detail/StructuredColumns.h @@ -167,8 +167,9 @@ class StructuredColumns : public FunctionSpaceImpl { const util::PartitionPolygons& polygons() const override; - idx_t nb_partitions() const override { return nb_partitions_; } - + idx_t part() const override { return part_; } + idx_t nb_parts() const override { return nb_partitions_; } + std::string mpi_comm() const override { return mpi_comm_; } private: // methods idx_t config_size(const eckit::Configuration& config) const; @@ -312,7 +313,9 @@ class StructuredColumns : public FunctionSpaceImpl { idx_t south_pole_included_; idx_t ny_; + idx_t part_; idx_t nb_partitions_; + 
diff --git a/src/atlas/functionspace/detail/StructuredColumns.h b/src/atlas/functionspace/detail/StructuredColumns.h
index 166373403..07e936ea3 100644
--- a/src/atlas/functionspace/detail/StructuredColumns.h
+++ b/src/atlas/functionspace/detail/StructuredColumns.h
@@ -167,8 +167,9 @@ class StructuredColumns : public FunctionSpaceImpl {
     const util::PartitionPolygons& polygons() const override;
 
-    idx_t nb_partitions() const override { return nb_partitions_; }
-
+    idx_t part() const override { return part_; }
+    idx_t nb_parts() const override { return nb_partitions_; }
+    std::string mpi_comm() const override { return mpi_comm_; }
 
 private:  // methods
     idx_t config_size(const eckit::Configuration& config) const;
@@ -312,7 +313,9 @@ class StructuredColumns : public FunctionSpaceImpl {
     idx_t south_pole_included_;
     idx_t ny_;
 
+    idx_t part_;
     idx_t nb_partitions_;
+    std::string mpi_comm_;
 
     friend struct StructuredColumnsFortranAccess;
     friend struct BlockStructuredColumnsFortranAccess;
diff --git a/src/atlas/functionspace/detail/StructuredColumns_create_remote_index.cc b/src/atlas/functionspace/detail/StructuredColumns_create_remote_index.cc
index 7bf5c8c03..bf5292291 100644
--- a/src/atlas/functionspace/detail/StructuredColumns_create_remote_index.cc
+++ b/src/atlas/functionspace/detail/StructuredColumns_create_remote_index.cc
@@ -42,7 +42,7 @@ void StructuredColumns::create_remote_index() const {
     ATLAS_TRACE_SCOPE("Parallelisation ...") {
         auto build_partition_graph = [this]() -> std::unique_ptr {
-            const eckit::mpi::Comm& comm = mpi::comm();
+            const eckit::mpi::Comm& comm = mpi::comm(mpi_comm());
             const int mpi_size = int(comm.size());
             const int mpi_rank = int(comm.rank());
@@ -80,7 +80,7 @@ void StructuredColumns::create_remote_index() const {
         auto p = array::make_view<int, 1>(partition());
         auto g = array::make_view<gidx_t, 1>(global_index());
 
-        const eckit::mpi::Comm& comm = mpi::comm();
+        const eckit::mpi::Comm& comm = mpi::comm(mpi_comm());
         const int mpi_rank = int(comm.rank());
         auto neighbours = graph.nearestNeighbours(mpi_rank);
diff --git a/src/atlas/functionspace/detail/StructuredColumns_setup.cc b/src/atlas/functionspace/detail/StructuredColumns_setup.cc
index 12a08e6de..01a9a1be4 100644
--- a/src/atlas/functionspace/detail/StructuredColumns_setup.cc
+++ b/src/atlas/functionspace/detail/StructuredColumns_setup.cc
@@ -105,11 +105,9 @@ void StructuredColumns::setup(const grid::Distribution& distribution, const ecki
     distribution_ = distribution.type();
 
+    part_          = mpi::comm(mpi_comm()).rank();
     nb_partitions_ = distribution.nb_partitions();
 
-    int mpi_rank = int(mpi::rank());
-    int mpi_size = int(mpi::size());
-
     j_begin_ = std::numeric_limits<idx_t>::max();
     j_end_   = std::numeric_limits<idx_t>::min();
     i_begin_.resize(grid_->ny(), std::numeric_limits<idx_t>::max());
@@ -117,7 +115,7 @@ void StructuredColumns::setup(const grid::Distribution& distribution, const ecki
     idx_t owned(0);
     ATLAS_TRACE_SCOPE("Compute bounds owned") {
-        if (mpi_size == 1) {
+        if (nb_partitions_ == 1) {
             j_begin_ = 0;
             j_end_   = grid_->ny();
             for (idx_t j = j_begin_; j < j_end_; ++j) {
@@ -132,7 +130,7 @@ void StructuredColumns::setup(const grid::Distribution& distribution, const ecki
             idx_t c(0);
             for (idx_t j = 0; j < grid_->ny(); ++j) {
                 for (idx_t i = 0; i < grid_->nx(j); ++i, ++c) {
-                    if (distribution.partition(c) == mpi_rank) {
+                    if (distribution.partition(c) == part_) {
                         j_begin_ = std::min(j_begin_, j);
                         j_end_   = std::max(j_end_, j + 1);
                         i_begin_[j] = std::min(i_begin_[j], i);
@@ -194,7 +192,7 @@ void StructuredColumns::setup(const grid::Distribution& distribution, const ecki
                 auto& __i_begin = thread_reduce_i_begin[j][thread_num];
                 auto& __i_end   = thread_reduce_i_end[j][thread_num];
                 bool j_in_partition{false};
                 for (idx_t i = thread_i_begin[j]; i < thread_i_end[j]; ++i, ++c) {
-                    if (distribution.partition(c) == mpi_rank) {
+                    if (distribution.partition(c) == part_) {
                         j_in_partition = true;
                         __i_begin      = std::min(__i_begin, i);
                         __i_end        = std::max(__i_end, i + 1);
diff --git a/src/atlas/grid/Partitioner.cc b/src/atlas/grid/Partitioner.cc
index 87512a80f..98503c209 100644
--- a/src/atlas/grid/Partitioner.cc
+++ b/src/atlas/grid/Partitioner.cc
@@ -37,12 +37,10 @@ Partitioner::Partitioner(const std::string& type, const idx_t nb_partitions):
 namespace {
 detail::partitioner::Partitioner* partitioner_from_config(const std::string& type, const Partitioner::Config& config) {
-    long partitions = mpi::size();
-    config.get("partitions", partitions);
-    if (partitions==1) {
-        return Factory::build("serial");
Factory::build("serial"); + if (detail::partitioner::Partitioner::extract_nb_partitions(config) == 1) { + return Factory::build("serial",config); } - return Factory::build(type, partitions, config); + return Factory::build(type, config); } detail::partitioner::Partitioner* partitioner_from_config(const Partitioner::Config& config) { std::string type; @@ -75,13 +73,17 @@ std::string Partitioner::type() const { return get()->type(); } +std::string Partitioner::mpi_comm() const { + return get()->mpi_comm(); +} + MatchingPartitioner::MatchingPartitioner(): Partitioner() {} grid::detail::partitioner::Partitioner* matching_mesh_partititioner(const Mesh& mesh, const Partitioner::Config& config) { std::string type("lonlat-polygon"); config.get("type", type); - return grid::detail::partitioner::MatchingPartitionerFactory::build(type, mesh); + return grid::detail::partitioner::MatchingPartitionerFactory::build(type, mesh, config); } MatchingPartitioner::MatchingPartitioner(const Mesh& mesh): MatchingPartitioner(mesh, util::NoConfig()) {} @@ -95,7 +97,7 @@ grid::detail::partitioner::Partitioner* matching_functionspace_partititioner(con const Partitioner::Config& config) { std::string type("lonlat-polygon"); config.get("type", type); - return grid::detail::partitioner::MatchingPartitionerFactory::build(type, functionspace); + return grid::detail::partitioner::MatchingPartitionerFactory::build(type, functionspace, config); } MatchingPartitioner::MatchingPartitioner(const FunctionSpace& functionspace): diff --git a/src/atlas/grid/Partitioner.h b/src/atlas/grid/Partitioner.h index 63fc1c0cb..67b9333f8 100644 --- a/src/atlas/grid/Partitioner.h +++ b/src/atlas/grid/Partitioner.h @@ -76,7 +76,7 @@ class Partitioner : DOXYGEN_HIDE(public util::ObjectHandle(dynamic_cast(fs_).xy()); @@ -449,11 +449,11 @@ util::PartitionPolygon::PointsXY StructuredPartitionPolygon::lonlat() const { void StructuredPartitionPolygon::allGather(util::PartitionPolygons& polygons_) const { ATLAS_TRACE(); - polygons_.clear(); - polygons_.reserve(mpi::size()); + const auto& comm = mpi::comm(fs_.mpi_comm()); + const int mpi_size = int(comm.size()); - const mpi::Comm& comm = mpi::comm(); - const int mpi_size = int(comm.size()); + polygons_.clear(); + polygons_.reserve(mpi_size); auto& poly = *this; diff --git a/src/atlas/grid/detail/distribution/SerialDistribution.cc b/src/atlas/grid/detail/distribution/SerialDistribution.cc index a9b1b16a9..fde06adde 100644 --- a/src/atlas/grid/detail/distribution/SerialDistribution.cc +++ b/src/atlas/grid/detail/distribution/SerialDistribution.cc @@ -21,16 +21,21 @@ namespace grid { namespace detail { namespace distribution { -SerialDistribution::SerialDistribution(const Grid& grid): DistributionFunctionT(grid) { +SerialDistribution::SerialDistribution(const Grid& grid): + SerialDistribution(grid, mpi::rank()) { +} + +SerialDistribution::SerialDistribution(const Grid& grid, int part): DistributionFunctionT(grid) { type_ = "serial"; nb_partitions_ = 1; - rank_ = mpi::rank(); size_ = grid.size(); nb_pts_.resize(nb_partitions_, grid.size()); max_pts_ = *std::max_element(nb_pts_.begin(), nb_pts_.end()); min_pts_ = *std::min_element(nb_pts_.begin(), nb_pts_.end()); + part_ = part; } + } // namespace distribution } // namespace detail } // namespace grid diff --git a/src/atlas/grid/detail/distribution/SerialDistribution.h b/src/atlas/grid/detail/distribution/SerialDistribution.h index 090d1a54c..1fcaf244c 100644 --- a/src/atlas/grid/detail/distribution/SerialDistribution.h +++ 
diff --git a/src/atlas/grid/detail/distribution/SerialDistribution.h b/src/atlas/grid/detail/distribution/SerialDistribution.h
index 090d1a54c..1fcaf244c 100644
--- a/src/atlas/grid/detail/distribution/SerialDistribution.h
+++ b/src/atlas/grid/detail/distribution/SerialDistribution.h
@@ -20,11 +20,12 @@ namespace distribution {
 class SerialDistribution : public DistributionFunctionT {
 public:
     SerialDistribution(const Grid& grid);
+    SerialDistribution(const Grid& grid, int part);
 
-    ATLAS_ALWAYS_INLINE int function(gidx_t gidx) const { return rank_; }
+    ATLAS_ALWAYS_INLINE int function(gidx_t gidx) const { return part_; }
 
 private:
-    int rank_{0};
+    int part_{0};
 };
 
 }  // namespace distribution
diff --git a/src/atlas/grid/detail/partitioner/BandsPartitioner.cc b/src/atlas/grid/detail/partitioner/BandsPartitioner.cc
index 04bbdab90..df9f52e76 100644
--- a/src/atlas/grid/detail/partitioner/BandsPartitioner.cc
+++ b/src/atlas/grid/detail/partitioner/BandsPartitioner.cc
@@ -33,9 +33,9 @@ size_t BandsPartitioner::blocksize(const Grid& grid) const {
     return size_t(blocksize_);
 }
 
-BandsPartitioner::BandsPartitioner(): Partitioner(), blocksize_{1} {}
+BandsPartitioner::BandsPartitioner(const eckit::Parametrisation& config): Partitioner(config), blocksize_{1} {}
 
-BandsPartitioner::BandsPartitioner(int N, int blocksize): Partitioner(N), blocksize_(blocksize) {}
+BandsPartitioner::BandsPartitioner(int N, int blocksize, const eckit::Parametrisation& config): Partitioner(N, config), blocksize_(blocksize) {}
 
 Distribution BandsPartitioner::partition(const Partitioner::Grid& grid) const {
     if (not distribution::BandsDistribution::detectOverflow(grid.size(), nb_partitions(), blocksize((grid)))) {
diff --git a/src/atlas/grid/detail/partitioner/BandsPartitioner.h b/src/atlas/grid/detail/partitioner/BandsPartitioner.h
index e13738065..34500a5aa 100644
--- a/src/atlas/grid/detail/partitioner/BandsPartitioner.h
+++ b/src/atlas/grid/detail/partitioner/BandsPartitioner.h
@@ -35,10 +35,9 @@ class BandsPartitioner : public Partitioner {
     static constexpr int BLOCKSIZE_NX{-1};
 
 public:
-    BandsPartitioner();
-    BandsPartitioner(int N): BandsPartitioner(N, 1) {}
-    BandsPartitioner(int N, int blocksize);
-    BandsPartitioner(int N, const eckit::Parametrisation& config): BandsPartitioner(N, extract_blocksize(config)) {}
+    BandsPartitioner(const eckit::Parametrisation& config = util::NoConfig());
+    BandsPartitioner(int N, const eckit::Parametrisation& config): BandsPartitioner(N, extract_blocksize(config), config) {}
+    BandsPartitioner(int N, int blocksize, const eckit::Parametrisation& config = util::NoConfig());
 
     std::string type() const override { return static_type(); }
     static std::string static_type() { return "bands"; }
diff --git a/src/atlas/grid/detail/partitioner/CheckerboardPartitioner.cc b/src/atlas/grid/detail/partitioner/CheckerboardPartitioner.cc
index d452a99a2..6f175b0bb 100644
--- a/src/atlas/grid/detail/partitioner/CheckerboardPartitioner.cc
+++ b/src/atlas/grid/detail/partitioner/CheckerboardPartitioner.cc
@@ -33,17 +33,24 @@ namespace partitioner {
 
 CheckerboardPartitioner::CheckerboardPartitioner(): Partitioner() {}
 
-CheckerboardPartitioner::CheckerboardPartitioner(int N): Partitioner(N) {}
+// CheckerboardPartitioner::CheckerboardPartitioner(int N): Partitioner(N) {}
 
-CheckerboardPartitioner::CheckerboardPartitioner(int N, const eckit::Parametrisation& config): Partitioner(N) {
+CheckerboardPartitioner::CheckerboardPartitioner(int N, const eckit::Parametrisation& config):
+    Partitioner(N, config) {
     config.get("bands", nbands_);
     config.get("regular", regular_);
 }
 
-CheckerboardPartitioner::CheckerboardPartitioner(int N, int nbands): Partitioner(N), nbands_{nbands} {}
+CheckerboardPartitioner::CheckerboardPartitioner(const eckit::Parametrisation& config):
+    Partitioner(config) {
+    config.get("bands", nbands_);
+    config.get("regular", regular_);
+}
+
+// CheckerboardPartitioner::CheckerboardPartitioner(int N, int nbands): Partitioner(N), nbands_{nbands} {}
 
-CheckerboardPartitioner::CheckerboardPartitioner(int N, int nbands, bool checkerboard):
-    Partitioner(N), nbands_{nbands} {}
+// CheckerboardPartitioner::CheckerboardPartitioner(int N, int nbands, bool checkerboard):
+//     Partitioner(N), nbands_{nbands} {}
 
 CheckerboardPartitioner::Checkerboard CheckerboardPartitioner::checkerboard(const Grid& grid) const {
     // grid dimensions
diff --git a/src/atlas/grid/detail/partitioner/CheckerboardPartitioner.h b/src/atlas/grid/detail/partitioner/CheckerboardPartitioner.h
index db061622e..a4189425b 100644
--- a/src/atlas/grid/detail/partitioner/CheckerboardPartitioner.h
+++ b/src/atlas/grid/detail/partitioner/CheckerboardPartitioner.h
@@ -23,12 +23,14 @@ class CheckerboardPartitioner : public Partitioner {
 public:
     CheckerboardPartitioner();
 
-    CheckerboardPartitioner(int N);  // N is the number of parts (aka MPI tasks)
+    // CheckerboardPartitioner(int N);  // N is the number of parts (aka MPI tasks)
 
     CheckerboardPartitioner(int N, const eckit::Parametrisation&);
     CheckerboardPartitioner(int N, int nbands);
     CheckerboardPartitioner(int N, int nbands, bool checkerboard);
 
+    CheckerboardPartitioner(const eckit::Parametrisation&);
+
     // Node struct that holds the x and y indices (for global, it's longitude and
     // latitude in millidegrees (integers))
     // This structure is used in sorting algorithms, and uses less memory than
diff --git a/src/atlas/grid/detail/partitioner/CubedSpherePartitioner.cc b/src/atlas/grid/detail/partitioner/CubedSpherePartitioner.cc
index aa3e8ea4d..303dcbc39 100644
--- a/src/atlas/grid/detail/partitioner/CubedSpherePartitioner.cc
+++ b/src/atlas/grid/detail/partitioner/CubedSpherePartitioner.cc
@@ -19,7 +19,6 @@
 #include
 #include "atlas/grid/CubedSphereGrid.h"
-#include "atlas/parallel/mpi/mpi.h"
 #include "atlas/runtime/Exception.h"
 #include "atlas/runtime/Log.h"
@@ -77,10 +76,21 @@ std::vector> createOffset(const std::array& globalProcStartPE, const std::vector& globalProcEndPE,
                           const std::vector& nprocx, const std::vector& nprocy):
-    Partitioner(N),
+    Partitioner(N, util::NoConfig()),
     globalProcStartPE_(globalProcStartPE.begin(), globalProcStartPE.end()),
     globalProcEndPE_(globalProcEndPE.begin(), globalProcEndPE.end()),
     nprocx_{nprocx.begin(), nprocx.end()},
diff --git a/src/atlas/grid/detail/partitioner/CubedSpherePartitioner.h b/src/atlas/grid/detail/partitioner/CubedSpherePartitioner.h
index 603fb07b9..657c279df 100644
--- a/src/atlas/grid/detail/partitioner/CubedSpherePartitioner.h
+++ b/src/atlas/grid/detail/partitioner/CubedSpherePartitioner.h
@@ -26,6 +26,7 @@ class CubedSpherePartitioner : public Partitioner {
     CubedSpherePartitioner(int N);  // N is the number of parts (aka MPI tasks)
 
     CubedSpherePartitioner(int N, const eckit::Parametrisation&);
+    CubedSpherePartitioner(const eckit::Parametrisation&);
 
     CubedSpherePartitioner(const int N, const std::vector& globalProcStartPE, const std::vector& globalProcEndPE,
                            const std::vector& nprocx,
diff --git a/src/atlas/grid/detail/partitioner/EqualBandsPartitioner.h b/src/atlas/grid/detail/partitioner/EqualBandsPartitioner.h
index 762327fd3..fae029eb2 100644
--- a/src/atlas/grid/detail/partitioner/EqualBandsPartitioner.h
+++ b/src/atlas/grid/detail/partitioner/EqualBandsPartitioner.h
@@ -22,7 +22,8 @@ namespace partitioner {
 class EqualBandsPartitioner : public BandsPartitioner {
 public:
     EqualBandsPartitioner();
-    EqualBandsPartitioner(int N, const eckit::Parametrisation& config = util::NoConfig()): BandsPartitioner(N, 1) {}
+    EqualBandsPartitioner(const eckit::Parametrisation& config): BandsPartitioner(extract_nb_partitions(config), config) {}
+    EqualBandsPartitioner(int N, const eckit::Parametrisation& config = util::NoConfig()): BandsPartitioner(N, 1, config) {}
     std::string type() const override { return static_type(); }
     static std::string static_type() { return "equal_bands"; }
 };
diff --git a/src/atlas/grid/detail/partitioner/EqualRegionsPartitioner.cc b/src/atlas/grid/detail/partitioner/EqualRegionsPartitioner.cc
index 92b551577..f47c7b3cd 100644
--- a/src/atlas/grid/detail/partitioner/EqualRegionsPartitioner.cc
+++ b/src/atlas/grid/detail/partitioner/EqualRegionsPartitioner.cc
@@ -441,6 +441,7 @@ void eq_regions(int N, double xmin[], double xmax[], double ymin[], double ymax[
 }
 
 void EqualRegionsPartitioner::init() {
+    N_ = nb_partitions();
     std::vector<double> s_cap;
     eq_caps(N_, sectors_, s_cap);
     bands_.resize(s_cap.size());
@@ -449,22 +450,35 @@ void EqualRegionsPartitioner::init() {
     }
 }
 
-EqualRegionsPartitioner::EqualRegionsPartitioner(): Partitioner(), N_(nb_partitions()) {
+EqualRegionsPartitioner::EqualRegionsPartitioner():
+    Partitioner() {
     init();
 }
 
-EqualRegionsPartitioner::EqualRegionsPartitioner(int N): Partitioner(N), N_(N) {
-    init();
+EqualRegionsPartitioner::EqualRegionsPartitioner(int N):
+    EqualRegionsPartitioner(N, util::NoConfig()) {
 }
 
 EqualRegionsPartitioner::EqualRegionsPartitioner(int N, const eckit::Parametrisation& config):
-    EqualRegionsPartitioner(N) {
+    Partitioner(N,config) {
     std::string crds;
     if (config.get("coordinates", crds)) {
         if (crds == "lonlat") {
             coordinates_ = Coordinates::LONLAT;
         }
     }
+    init();
+}
+
+EqualRegionsPartitioner::EqualRegionsPartitioner(const eckit::Parametrisation& config):
+    Partitioner(config) {
+    std::string crds;
+    if (config.get("coordinates", crds)) {
+        if (crds == "lonlat") {
+            coordinates_ = Coordinates::LONLAT;
+        }
+    }
+    init();
 }
 
 int EqualRegionsPartitioner::partition(const double& x, const double& y) const {
@@ -605,7 +619,7 @@ void EqualRegionsPartitioner::partition(const Grid& grid, int part[]) const {
 
     ATLAS_ASSERT(grid.projection().units() == "degrees");
 
-    const auto& comm = mpi::comm();
+    const auto& comm = mpi::comm(mpi_comm());
     int mpi_rank = static_cast<int>(comm.rank());
     int mpi_size = static_cast<int>(comm.size());
diff --git a/src/atlas/grid/detail/partitioner/EqualRegionsPartitioner.h b/src/atlas/grid/detail/partitioner/EqualRegionsPartitioner.h
index ec3cdb051..a795a6f15 100644
--- a/src/atlas/grid/detail/partitioner/EqualRegionsPartitioner.h
+++ b/src/atlas/grid/detail/partitioner/EqualRegionsPartitioner.h
@@ -81,6 +81,7 @@ class EqualRegionsPartitioner : public Partitioner {
     EqualRegionsPartitioner(int N);
 
     EqualRegionsPartitioner(int N, const eckit::Parametrisation& config);
+    EqualRegionsPartitioner(const eckit::Parametrisation& config);
 
     void where(int partition, int& band, int& sector) const;
     int nb_bands() const { return bands_.size(); }
diff --git a/src/atlas/grid/detail/partitioner/MatchingFunctionSpacePartitioner.cc b/src/atlas/grid/detail/partitioner/MatchingFunctionSpacePartitioner.cc
index ef53d501a..44546a090 100644
--- a/src/atlas/grid/detail/partitioner/MatchingFunctionSpacePartitioner.cc
+++ b/src/atlas/grid/detail/partitioner/MatchingFunctionSpacePartitioner.cc
@@ -22,13 +22,13 @@ MatchingFunctionSpacePartitioner::MatchingFunctionSpacePartitioner(): Partitioner() {
     ATLAS_NOTIMPLEMENTED;
 }
 
-MatchingFunctionSpacePartitioner::MatchingFunctionSpacePartitioner(const idx_t nb_partitions):
-    Partitioner(nb_partitions) {
-    ATLAS_NOTIMPLEMENTED;
-}
+// MatchingFunctionSpacePartitioner::MatchingFunctionSpacePartitioner(const idx_t nb_partitions):
+//     Partitioner(nb_partitions) {
+//     ATLAS_NOTIMPLEMENTED;
+// }
 
-MatchingFunctionSpacePartitioner::MatchingFunctionSpacePartitioner(const FunctionSpace& functionspace):
-    Partitioner(functionspace.nb_partitions()), partitioned_(functionspace) {}
+MatchingFunctionSpacePartitioner::MatchingFunctionSpacePartitioner(const FunctionSpace& functionspace, const eckit::Parametrisation&):
+    Partitioner(functionspace.nb_parts(),util::Config("mpi_comm",functionspace.mpi_comm())), partitioned_(functionspace) {}
 
 }  // namespace partitioner
 }  // namespace detail
diff --git a/src/atlas/grid/detail/partitioner/MatchingFunctionSpacePartitioner.h b/src/atlas/grid/detail/partitioner/MatchingFunctionSpacePartitioner.h
index 147ccc576..dd47ac2aa 100644
--- a/src/atlas/grid/detail/partitioner/MatchingFunctionSpacePartitioner.h
+++ b/src/atlas/grid/detail/partitioner/MatchingFunctionSpacePartitioner.h
@@ -24,9 +24,9 @@ class MatchingFunctionSpacePartitioner : public Partitioner {
 public:
     MatchingFunctionSpacePartitioner();
 
-    MatchingFunctionSpacePartitioner(const idx_t nb_partitions);
+    // MatchingFunctionSpacePartitioner(const idx_t nb_partitions);
 
-    MatchingFunctionSpacePartitioner(const FunctionSpace&);
+    MatchingFunctionSpacePartitioner(const FunctionSpace&, const eckit::Parametrisation&);
 
     virtual ~MatchingFunctionSpacePartitioner() override {}
diff --git a/src/atlas/grid/detail/partitioner/MatchingFunctionSpacePartitionerLonLatPolygon.cc b/src/atlas/grid/detail/partitioner/MatchingFunctionSpacePartitionerLonLatPolygon.cc
index 947156953..c202b11dd 100644
--- a/src/atlas/grid/detail/partitioner/MatchingFunctionSpacePartitionerLonLatPolygon.cc
+++ b/src/atlas/grid/detail/partitioner/MatchingFunctionSpacePartitionerLonLatPolygon.cc
@@ -40,14 +40,17 @@ void MatchingFunctionSpacePartitionerLonLatPolygon::partition(const Grid& grid,
     ATLAS_TRACE("MatchingFunctionSpacePartitionerLonLatPolygon");
 
     //atlas::vector part( grid.size() );
 
-    if (mpi::size() == 1) {
+    const auto& comm   = mpi::comm(partitioned_.mpi_comm());
+    const int mpi_rank = int(comm.rank());
+    const int mpi_size = int(comm.size());
+
+    if (mpi_size == 1) {
         // shortcut
         omp::fill(part, part + grid.size(), 0);
     }
     else {
         const auto& p = partitioned_.polygon();
-        int rank = mpi::rank();
 
         util::PolygonXY poly{p};
         {
             ATLAS_TRACE("point-in-polygon check for entire grid (" + std::to_string(grid.size()) + " points)");
@@ -61,7 +64,7 @@ void MatchingFunctionSpacePartitionerLonLatPolygon::partition(const Grid& grid,
                 auto it = grid.xy().begin() + chunk * grid.size() / chunks;
                 for (size_t n = begin; n < end; ++n) {
                     if (poly.contains(*it)) {
-                        part[n] = rank;
+                        part[n] = mpi_rank;
                     }
                     else {
                         part[n] = -1;
@@ -70,15 +73,15 @@ void MatchingFunctionSpacePartitionerLonLatPolygon::partition(const Grid& grid,
                 }
             }
         }
-        ATLAS_TRACE_MPI(ALLREDUCE) { mpi::comm().allReduceInPlace(part, grid.size(), eckit::mpi::max()); }
+        ATLAS_TRACE_MPI(ALLREDUCE) { comm.allReduceInPlace(part, grid.size(), eckit::mpi::max()); }
     }
 }
 
 #if 0
-    const eckit::mpi::Comm& comm = atlas::mpi::comm();
-    const int mpi_rank = int( comm.rank() );
-    const int mpi_size = int( comm.size() );
+    const auto& comm   = mpi::comm(partitioned_.mpi_comm());
+    const int mpi_rank = int(comm.rank());
+    const int mpi_size = int(comm.size());
 
     ATLAS_TRACE( "MatchingFunctionSpacePartitionerLonLatPolygon::partition" );
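Annotation: matching partitioners now inherit both the partition count and the communicator from the pre-partitioned function space (or mesh), so the point-in-polygon reductions above run on that communicator rather than the global default. A sketch of distributing a grid to match an existing function space:

    #include "atlas/functionspace/FunctionSpace.h"
    #include "atlas/grid/Distribution.h"
    #include "atlas/grid/Grid.h"
    #include "atlas/grid/Partitioner.h"

    atlas::grid::Distribution match_distribution(const atlas::FunctionSpace& fs,
                                                 const atlas::Grid& grid) {
        // Carries fs.nb_parts() and fs.mpi_comm() internally, per the
        // MatchingFunctionSpacePartitioner constructor above.
        atlas::grid::MatchingPartitioner matching(fs);
        return matching.partition(grid);
    }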
"MatchingFunctionSpacePartitionerLonLatPolygon::partition" ); diff --git a/src/atlas/grid/detail/partitioner/MatchingFunctionSpacePartitionerLonLatPolygon.h b/src/atlas/grid/detail/partitioner/MatchingFunctionSpacePartitionerLonLatPolygon.h index 63fd0ab6c..91c79fe34 100644 --- a/src/atlas/grid/detail/partitioner/MatchingFunctionSpacePartitionerLonLatPolygon.h +++ b/src/atlas/grid/detail/partitioner/MatchingFunctionSpacePartitionerLonLatPolygon.h @@ -23,10 +23,10 @@ class MatchingFunctionSpacePartitionerLonLatPolygon : public MatchingFunctionSpa public: MatchingFunctionSpacePartitionerLonLatPolygon(): MatchingFunctionSpacePartitioner() {} - MatchingFunctionSpacePartitionerLonLatPolygon(const size_t nb_partitions): - MatchingFunctionSpacePartitioner(nb_partitions) {} - MatchingFunctionSpacePartitionerLonLatPolygon(const FunctionSpace& FunctionSpace): - MatchingFunctionSpacePartitioner(FunctionSpace) {} + // MatchingFunctionSpacePartitionerLonLatPolygon(const size_t nb_partitions): + // MatchingFunctionSpacePartitioner(nb_partitions) {} + MatchingFunctionSpacePartitionerLonLatPolygon(const FunctionSpace& FunctionSpace, const eckit::Parametrisation& config): + MatchingFunctionSpacePartitioner(FunctionSpace, config) {} using MatchingFunctionSpacePartitioner::partition; /** diff --git a/src/atlas/grid/detail/partitioner/MatchingMeshPartitioner.cc b/src/atlas/grid/detail/partitioner/MatchingMeshPartitioner.cc index 99e042c25..3c256c65e 100644 --- a/src/atlas/grid/detail/partitioner/MatchingMeshPartitioner.cc +++ b/src/atlas/grid/detail/partitioner/MatchingMeshPartitioner.cc @@ -20,12 +20,12 @@ MatchingMeshPartitioner::MatchingMeshPartitioner(): Partitioner() { ATLAS_NOTIMPLEMENTED; } -MatchingMeshPartitioner::MatchingMeshPartitioner(const idx_t nb_partitions): Partitioner(nb_partitions) { - ATLAS_NOTIMPLEMENTED; -} +// MatchingMeshPartitioner::MatchingMeshPartitioner(const idx_t nb_partitions): Partitioner(nb_partitions) { + // ATLAS_NOTIMPLEMENTED; +// } -MatchingMeshPartitioner::MatchingMeshPartitioner(const Mesh& mesh): - Partitioner(mesh.nb_partitions()), prePartitionedMesh_(mesh) {} +MatchingMeshPartitioner::MatchingMeshPartitioner(const Mesh& mesh, const eckit::Parametrisation&): + Partitioner(mesh.nb_parts(),util::Config("mpi_comm",mesh.mpi_comm())), prePartitionedMesh_(mesh) {} } // namespace partitioner } // namespace detail diff --git a/src/atlas/grid/detail/partitioner/MatchingMeshPartitioner.h b/src/atlas/grid/detail/partitioner/MatchingMeshPartitioner.h index ecb9761e4..e3e2a5a04 100644 --- a/src/atlas/grid/detail/partitioner/MatchingMeshPartitioner.h +++ b/src/atlas/grid/detail/partitioner/MatchingMeshPartitioner.h @@ -24,9 +24,9 @@ class MatchingMeshPartitioner : public Partitioner { public: MatchingMeshPartitioner(); - MatchingMeshPartitioner(const idx_t nb_partitions); + // MatchingMeshPartitioner(const idx_t nb_partitions); - MatchingMeshPartitioner(const Mesh&); + MatchingMeshPartitioner(const Mesh&, const eckit::Parametrisation&); virtual ~MatchingMeshPartitioner() override {} diff --git a/src/atlas/grid/detail/partitioner/MatchingMeshPartitionerBruteForce.cc b/src/atlas/grid/detail/partitioner/MatchingMeshPartitionerBruteForce.cc index e456b8115..c99afe0c1 100644 --- a/src/atlas/grid/detail/partitioner/MatchingMeshPartitionerBruteForce.cc +++ b/src/atlas/grid/detail/partitioner/MatchingMeshPartitionerBruteForce.cc @@ -75,9 +75,9 @@ bool point_in_quadrilateral(const PointLonLat& P, const PointLonLat& A, const Po void MatchingMeshPartitionerBruteForce::partition(const Grid& 
grid, int partitioning[]) const { ATLAS_TRACE("MatchingMeshPartitionerBruteForce::partition"); - - eckit::mpi::Comm& comm = eckit::mpi::comm(); - const int mpi_rank = int(comm.rank()); + const auto& comm = mpi::comm(prePartitionedMesh_.mpi_comm()); + const int mpi_rank = int(comm.rank()); + const int mpi_size = int(comm.size()); // Point coordinates // - use a bounding box to quickly discard points, diff --git a/src/atlas/grid/detail/partitioner/MatchingMeshPartitionerBruteForce.h b/src/atlas/grid/detail/partitioner/MatchingMeshPartitionerBruteForce.h index cdc068f8d..b3bea1174 100644 --- a/src/atlas/grid/detail/partitioner/MatchingMeshPartitionerBruteForce.h +++ b/src/atlas/grid/detail/partitioner/MatchingMeshPartitionerBruteForce.h @@ -22,11 +22,14 @@ class MatchingMeshPartitionerBruteForce : public MatchingMeshPartitioner { static std::string static_type() { return "brute-force"; } public: + MatchingMeshPartitionerBruteForce(const Mesh& mesh, const eckit::Parametrisation& config): MatchingMeshPartitioner(mesh, config) {} + + MatchingMeshPartitionerBruteForce(): MatchingMeshPartitioner() {} - MatchingMeshPartitionerBruteForce(const idx_t nb_partitions): MatchingMeshPartitioner(nb_partitions) {} + MatchingMeshPartitionerBruteForce(const eckit::Parametrisation&): MatchingMeshPartitioner() {} + MatchingMeshPartitionerBruteForce(const idx_t nb_partitions): MatchingMeshPartitioner() {} MatchingMeshPartitionerBruteForce(const idx_t nb_partitions, const eckit::Parametrisation&): - MatchingMeshPartitioner(nb_partitions) {} - MatchingMeshPartitionerBruteForce(const Mesh& mesh): MatchingMeshPartitioner(mesh) {} + MatchingMeshPartitioner() {} using MatchingMeshPartitioner::partition; virtual void partition(const Grid& grid, int partitioning[]) const; diff --git a/src/atlas/grid/detail/partitioner/MatchingMeshPartitionerCubedSphere.cc b/src/atlas/grid/detail/partitioner/MatchingMeshPartitionerCubedSphere.cc index 74a605866..625740cf5 100644 --- a/src/atlas/grid/detail/partitioner/MatchingMeshPartitionerCubedSphere.cc +++ b/src/atlas/grid/detail/partitioner/MatchingMeshPartitionerCubedSphere.cc @@ -17,6 +17,10 @@ namespace detail { namespace partitioner { void MatchingMeshPartitionerCubedSphere::partition(const Grid& grid, int partitioning[]) const { + const auto& comm = mpi::comm(prePartitionedMesh_.mpi_comm()); + const int mpi_rank = int(comm.rank()); + const int mpi_size = int(comm.size()); + // Make cell finder from owned mesh cells. const auto finder = interpolation::method::cubedsphere::CellFinder(prePartitionedMesh_); @@ -32,12 +36,12 @@ void MatchingMeshPartitionerCubedSphere::partition(const Grid& grid, int partiti // This is probably more expensive than it needs to be, as it performs // a dry run of the cubedsphere interpolation method. const auto& lonlat = *lonlatIt; - partitioning[i] = finder.getCell(lonlat, listSize, edgeEpsilon, epsilon).isect ? mpi::rank() : -1; + partitioning[i] = finder.getCell(lonlat, listSize, edgeEpsilon, epsilon).isect ? mpi_rank : -1; ++lonlatIt; } // AllReduce to get full partitioning array. 
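All the matching partitioners in this patch converge on the same idiom, now expressed against the pre-partitioned mesh's communicator rather than the global default: each rank claims the points it owns, writes -1 for the rest, and the MAX all-reduce just below fills in the owners. A minimal self-contained sketch of that idiom; the function name, comm_name and owns_point are illustrative placeholders, not part of the patch:

    #include <cstddef>
    #include <string>
    #include <vector>
    #include "atlas/parallel/mpi/mpi.h"

    void mark_owners(const std::string& comm_name, bool (*owns_point)(int), std::vector<int>& part) {
        const auto& comm = atlas::mpi::comm(comm_name);    // look up a registered communicator by name
        const int mpi_rank = int(comm.rank());
        for (std::size_t n = 0; n < part.size(); ++n) {
            part[n] = owns_point(int(n)) ? mpi_rank : -1;  // -1 means "not owned on this rank"
        }
        // Every point is owned by exactly one rank, so a MAX reduction over
        // the communicator replaces each remaining -1 with the owning rank.
        comm.allReduceInPlace(part.data(), part.size(), eckit::mpi::max());
    }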
- mpi::comm().allReduceInPlace(partitioning, grid.size(), eckit::mpi::Operation::MAX); + comm.allReduceInPlace(partitioning, grid.size(), eckit::mpi::Operation::MAX); const auto misses = std::count_if(partitioning, partitioning + grid.size(), [](int elem) { return elem < 0; }); if (misses > 0) { throw_Exception( diff --git a/src/atlas/grid/detail/partitioner/MatchingMeshPartitionerCubedSphere.h b/src/atlas/grid/detail/partitioner/MatchingMeshPartitionerCubedSphere.h index d6cf3be36..50ad0dbea 100644 --- a/src/atlas/grid/detail/partitioner/MatchingMeshPartitionerCubedSphere.h +++ b/src/atlas/grid/detail/partitioner/MatchingMeshPartitionerCubedSphere.h @@ -21,10 +21,10 @@ class MatchingMeshPartitionerCubedSphere : public MatchingMeshPartitioner { public: MatchingMeshPartitionerCubedSphere(): MatchingMeshPartitioner() {} - MatchingMeshPartitionerCubedSphere(const idx_t nb_partitions): MatchingMeshPartitioner(nb_partitions) {} - MatchingMeshPartitionerCubedSphere(const idx_t nb_partitions, const eckit::Parametrisation&): - MatchingMeshPartitioner(nb_partitions) {} - MatchingMeshPartitionerCubedSphere(const Mesh& mesh): MatchingMeshPartitioner(mesh) {} + // MatchingMeshPartitionerCubedSphere(const idx_t nb_partitions): MatchingMeshPartitioner(nb_partitions) {} + // MatchingMeshPartitionerCubedSphere(const idx_t nb_partitions, const eckit::Parametrisation&): + // MatchingMeshPartitioner(nb_partitions) {} + MatchingMeshPartitionerCubedSphere(const Mesh& mesh, const eckit::Parametrisation& config): MatchingMeshPartitioner(mesh, config) {} using MatchingMeshPartitioner::partition; virtual void partition(const Grid& grid, int partitioning[]) const; diff --git a/src/atlas/grid/detail/partitioner/MatchingMeshPartitionerLonLatPolygon.cc b/src/atlas/grid/detail/partitioner/MatchingMeshPartitionerLonLatPolygon.cc index d06131320..d517c8b2b 100644 --- a/src/atlas/grid/detail/partitioner/MatchingMeshPartitionerLonLatPolygon.cc +++ b/src/atlas/grid/detail/partitioner/MatchingMeshPartitionerLonLatPolygon.cc @@ -38,9 +38,9 @@ PartitionerBuilder __builder("lonlat-polyg } void MatchingMeshPartitionerLonLatPolygon::partition(const Grid& grid, int partitioning[]) const { - const eckit::mpi::Comm& comm = atlas::mpi::comm(); - const int mpi_rank = int(comm.rank()); - const int mpi_size = int(comm.size()); + const auto& comm = mpi::comm(prePartitionedMesh_.mpi_comm()); + const int mpi_rank = int(comm.rank()); + const int mpi_size = int(comm.size()); ATLAS_TRACE("MatchingMeshPartitionerLonLatPolygon::partition"); diff --git a/src/atlas/grid/detail/partitioner/MatchingMeshPartitionerLonLatPolygon.h b/src/atlas/grid/detail/partitioner/MatchingMeshPartitionerLonLatPolygon.h index b74f009e0..f0b11e5d3 100644 --- a/src/atlas/grid/detail/partitioner/MatchingMeshPartitionerLonLatPolygon.h +++ b/src/atlas/grid/detail/partitioner/MatchingMeshPartitionerLonLatPolygon.h @@ -22,11 +22,14 @@ class MatchingMeshPartitionerLonLatPolygon : public MatchingMeshPartitioner { static std::string static_type() { return "lonlat-polygon"; } public: + MatchingMeshPartitionerLonLatPolygon(const Mesh& mesh, const eckit::Parametrisation& config): MatchingMeshPartitioner(mesh, config) {} + + MatchingMeshPartitionerLonLatPolygon(): MatchingMeshPartitioner() {} - MatchingMeshPartitionerLonLatPolygon(const size_t nb_partitions): MatchingMeshPartitioner(nb_partitions) {} + MatchingMeshPartitionerLonLatPolygon(const eckit::Parametrisation&): MatchingMeshPartitioner() {} + MatchingMeshPartitionerLonLatPolygon(const size_t nb_partitions): 
MatchingMeshPartitioner() {} MatchingMeshPartitionerLonLatPolygon(const size_t nb_partitions, const eckit::Parametrisation& config): - MatchingMeshPartitioner(nb_partitions) {} - MatchingMeshPartitionerLonLatPolygon(const Mesh& mesh): MatchingMeshPartitioner(mesh) {} + MatchingMeshPartitioner() {} using Partitioner::partition; diff --git a/src/atlas/grid/detail/partitioner/MatchingMeshPartitionerSphericalPolygon.cc b/src/atlas/grid/detail/partitioner/MatchingMeshPartitionerSphericalPolygon.cc index 823956b22..b6d5b4266 100644 --- a/src/atlas/grid/detail/partitioner/MatchingMeshPartitionerSphericalPolygon.cc +++ b/src/atlas/grid/detail/partitioner/MatchingMeshPartitionerSphericalPolygon.cc @@ -34,9 +34,9 @@ PartitionerBuilder __builder("spherical } void MatchingMeshPartitionerSphericalPolygon::partition(const Grid& grid, int partitioning[]) const { - const eckit::mpi::Comm& comm = atlas::mpi::comm(); - const int mpi_rank = int(comm.rank()); - const int mpi_size = int(comm.size()); + const auto& comm = mpi::comm(prePartitionedMesh_.mpi_comm()); + const int mpi_rank = int(comm.rank()); + const int mpi_size = int(comm.size()); ATLAS_TRACE("MatchingMeshPartitionerSphericalPolygon::partition"); diff --git a/src/atlas/grid/detail/partitioner/MatchingMeshPartitionerSphericalPolygon.h b/src/atlas/grid/detail/partitioner/MatchingMeshPartitionerSphericalPolygon.h index d94eb5de7..45363dda5 100644 --- a/src/atlas/grid/detail/partitioner/MatchingMeshPartitionerSphericalPolygon.h +++ b/src/atlas/grid/detail/partitioner/MatchingMeshPartitionerSphericalPolygon.h @@ -22,11 +22,14 @@ class MatchingMeshPartitionerSphericalPolygon : public MatchingMeshPartitioner { static std::string static_type() { return "spherical-polygon"; } public: + MatchingMeshPartitionerSphericalPolygon(const Mesh& mesh, const eckit::Parametrisation& config): MatchingMeshPartitioner(mesh, config) {} + + // Following throw ATLAS_NOT_IMPLEMENTED MatchingMeshPartitionerSphericalPolygon(): MatchingMeshPartitioner() {} - MatchingMeshPartitionerSphericalPolygon(const idx_t nb_partitions): MatchingMeshPartitioner(nb_partitions) {} + MatchingMeshPartitionerSphericalPolygon(const idx_t nb_partitions): MatchingMeshPartitioner() {} MatchingMeshPartitionerSphericalPolygon(const idx_t nb_partitions, const eckit::Parametrisation& config): - MatchingMeshPartitioner(nb_partitions) {} - MatchingMeshPartitionerSphericalPolygon(const Mesh& mesh): MatchingMeshPartitioner(mesh) {} + MatchingMeshPartitioner() {} + MatchingMeshPartitionerSphericalPolygon(const eckit::Parametrisation&): MatchingMeshPartitioner() {} using MatchingMeshPartitioner::partition; /** diff --git a/src/atlas/grid/detail/partitioner/Partitioner.cc b/src/atlas/grid/detail/partitioner/Partitioner.cc index 9211c63d5..b671c135b 100644 --- a/src/atlas/grid/detail/partitioner/Partitioner.cc +++ b/src/atlas/grid/detail/partitioner/Partitioner.cc @@ -61,9 +61,44 @@ namespace grid { namespace detail { namespace partitioner { -Partitioner::Partitioner(): nb_partitions_(mpi::size()) {} +int Partitioner::extract_nb_partitions(const eckit::Parametrisation& config) { + int N; + if (config.has("mpi_comm")) { + std::string mpi_comm; + config.get("mpi_comm",mpi_comm); + N = mpi::comm(mpi_comm).size(); + } + else { + N = mpi::size(); + } + config.get("partitions",N); + config.get("nb_partitions",N); + config.get("nb_parts",N); + return N; +} +std::string Partitioner::extract_mpi_comm(const eckit::Parametrisation& config) { + if (config.has("mpi_comm")) { + std::string mpi_comm; + 
config.get("mpi_comm",mpi_comm); + return mpi_comm; + } + return mpi::comm().name(); +} + + +Partitioner::Partitioner(): + Partitioner(util::NoConfig()) { +} -Partitioner::Partitioner(const idx_t nb_partitions): nb_partitions_(nb_partitions) {} +Partitioner::Partitioner(const idx_t nb_partitions, const eckit::Parametrisation& config): + nb_partitions_(nb_partitions), + mpi_comm_(extract_mpi_comm(config)) { +} + +Partitioner::Partitioner(const eckit::Parametrisation& config) : + nb_partitions_(extract_nb_partitions(config)), + mpi_comm_(extract_mpi_comm(config)) { +} Partitioner::~Partitioner() = default; @@ -75,6 +110,8 @@ Distribution Partitioner::partition(const Grid& grid) const { return new distribution::DistributionArray{grid, atlas::grid::Partitioner(this)}; } +std::string Partitioner::mpi_comm() const { return mpi_comm_; } + namespace { template @@ -164,8 +201,7 @@ Partitioner* PartitionerFactory::build(const std::string& name) { } Partitioner* PartitionerFactory::build(const std::string& name, const idx_t nb_partitions) { - atlas::util::Config config; - return build(name, nb_partitions, config); + return build(name, nb_partitions, util::NoConfig()); } Partitioner* PartitionerFactory::build(const std::string& name, const idx_t nb_partitions, @@ -192,28 +228,53 @@ Partitioner* PartitionerFactory::build(const std::string& name, const idx_t nb_p return (*j).second->make(nb_partitions, config); } +Partitioner* PartitionerFactory::build(const std::string& name, + const eckit::Parametrisation& config) { + pthread_once(&once, init); + + eckit::AutoLock lock(local_mutex); + + static force_link static_linking; + + std::map::const_iterator j = m->find(name); + + Log::debug() << "Looking for PartitionerFactory [" << name << "]" << '\n'; + + if (j == m->end()) { + Log::error() << "No PartitionerFactory for [" << name << "]" << '\n'; + Log::error() << "PartitionerFactories are:" << '\n'; + for (j = m->begin(); j != m->end(); ++j) { + Log::error() << " " << (*j).first << '\n'; + } + throw_Exception(std::string("No PartitionerFactory called ") + name); + } + + return (*j).second->make(config); +} + + -Partitioner* MatchingPartitionerFactory::build(const std::string& type, const Mesh& partitioned) { +Partitioner* MatchingPartitionerFactory::build(const std::string& type, const Mesh& partitioned, const eckit::Parametrisation& config) { if (type == MatchingMeshPartitionerSphericalPolygon::static_type()) { - return new MatchingMeshPartitionerSphericalPolygon(partitioned); + return new MatchingMeshPartitionerSphericalPolygon(partitioned, config); } else if (type == MatchingMeshPartitionerLonLatPolygon::static_type()) { - return new MatchingMeshPartitionerLonLatPolygon(partitioned); + return new MatchingMeshPartitionerLonLatPolygon(partitioned, config); } else if (type == MatchingMeshPartitionerBruteForce::static_type()) { - return new MatchingMeshPartitionerBruteForce(partitioned); + return new MatchingMeshPartitionerBruteForce(partitioned, config); } else if (type == MatchingMeshPartitionerCubedSphere::static_type()) { - return new MatchingMeshPartitionerCubedSphere(partitioned); + return new MatchingMeshPartitionerCubedSphere(partitioned, config); } else { ATLAS_NOTIMPLEMENTED; } } -Partitioner* MatchingPartitionerFactory::build(const std::string& type, const FunctionSpace& partitioned) { +Partitioner* MatchingPartitionerFactory::build(const std::string& type, const FunctionSpace& partitioned, const eckit::Parametrisation& config) { if (type == 
MatchingFunctionSpacePartitionerLonLatPolygon::static_type()) { - return new MatchingFunctionSpacePartitionerLonLatPolygon(partitioned); + return new MatchingFunctionSpacePartitionerLonLatPolygon(partitioned, config); } else { ATLAS_NOTIMPLEMENTED; diff --git a/src/atlas/grid/detail/partitioner/Partitioner.h b/src/atlas/grid/detail/partitioner/Partitioner.h index 7befd7499..d30f62124 100644 --- a/src/atlas/grid/detail/partitioner/Partitioner.h +++ b/src/atlas/grid/detail/partitioner/Partitioner.h @@ -36,7 +36,9 @@ class Partitioner : public util::Object { public: Partitioner(); - Partitioner(const idx_t nb_partitions); + Partitioner(const idx_t nb_partitions, const eckit::Parametrisation&); + Partitioner(const eckit::Parametrisation&); + virtual ~Partitioner(); virtual void partition(const Grid& grid, int part[]) const = 0; @@ -47,8 +49,14 @@ class Partitioner : public util::Object { virtual std::string type() const = 0; + std::string mpi_comm() const; + + static int extract_nb_partitions(const eckit::Parametrisation&); + static std::string extract_mpi_comm(const eckit::Parametrisation&); + private: idx_t nb_partitions_; + std::string mpi_comm_; }; // ------------------------------------------------------------------ @@ -65,6 +73,7 @@ class PartitionerFactory { static Partitioner* build(const std::string&); static Partitioner* build(const std::string&, const idx_t nb_partitions); static Partitioner* build(const std::string&, const idx_t nb_partitions, const eckit::Parametrisation&); + static Partitioner* build(const std::string&, const eckit::Parametrisation&); /*! * \brief list all registered partioner builders @@ -77,6 +86,7 @@ class PartitionerFactory { virtual Partitioner* make() = 0; virtual Partitioner* make(const idx_t nb_partitions) = 0; virtual Partitioner* make(const idx_t nb_partitions, const eckit::Parametrisation&) = 0; + virtual Partitioner* make(const eckit::Parametrisation&) = 0; protected: PartitionerFactory(const std::string&); @@ -89,10 +99,13 @@ template class PartitionerBuilder : public PartitionerFactory { virtual Partitioner* make() { return new T(); } - virtual Partitioner* make(const idx_t nb_partitions) { return new T(nb_partitions); } + virtual Partitioner* make(const idx_t nb_partitions) { return new T(nb_partitions, util::NoConfig()); } virtual Partitioner* make(const idx_t nb_partitions, const eckit::Parametrisation& config) { return new T(nb_partitions, config); } + virtual Partitioner* make(const eckit::Parametrisation& config) { + return new T(config); + } public: PartitionerBuilder(const std::string& name): PartitionerFactory(name) {} @@ -102,8 +115,8 @@ class PartitionerBuilder : public PartitionerFactory { class MatchingPartitionerFactory { public: - static Partitioner* build(const std::string& type, const Mesh& partitioned); - static Partitioner* build(const std::string& type, const FunctionSpace& partitioned); + static Partitioner* build(const std::string& type, const Mesh& partitioned, const eckit::Parametrisation& config); + static Partitioner* build(const std::string& type, const FunctionSpace& partitioned, const eckit::Parametrisation& config); }; // ------------------------------------------------------------------ diff --git a/src/atlas/grid/detail/partitioner/RegularBandsPartitioner.h b/src/atlas/grid/detail/partitioner/RegularBandsPartitioner.h index b068180e5..aaf9383d2 100644 --- a/src/atlas/grid/detail/partitioner/RegularBandsPartitioner.h +++ b/src/atlas/grid/detail/partitioner/RegularBandsPartitioner.h @@ -22,7 +22,9 @@ class 
RegularBandsPartitioner : public BandsPartitioner { public: RegularBandsPartitioner(); RegularBandsPartitioner(int N, const eckit::Parametrisation& config = util::NoConfig()): - BandsPartitioner(N, BLOCKSIZE_NX) {} + BandsPartitioner(N, BLOCKSIZE_NX, config) {} + RegularBandsPartitioner(const eckit::Parametrisation& config): + RegularBandsPartitioner(extract_nb_partitions(config), config) {} std::string type() const override { return static_type(); } static std::string static_type() { return "regular_bands"; } }; diff --git a/src/atlas/grid/detail/partitioner/SerialPartitioner.cc b/src/atlas/grid/detail/partitioner/SerialPartitioner.cc index 89e79dd21..239b6f2d4 100644 --- a/src/atlas/grid/detail/partitioner/SerialPartitioner.cc +++ b/src/atlas/grid/detail/partitioner/SerialPartitioner.cc @@ -11,9 +11,26 @@ #include "SerialPartitioner.h" +#include "atlas/parallel/mpi/mpi.h" + namespace { atlas::grid::detail::partitioner::PartitionerBuilder __Serial( atlas::grid::detail::partitioner::SerialPartitioner::static_type()); } -atlas::grid::detail::partitioner::SerialPartitioner::SerialPartitioner(): Partitioner(1) {} +namespace atlas { +namespace grid { +namespace detail { +namespace partitioner { + +SerialPartitioner::SerialPartitioner(const eckit::Parametrisation& config): + Partitioner(config) { + part_ = mpi::comm(mpi_comm()).rank(); + config.get("part", part_); + config.get("partition", part_); +} + +} +} +} +} diff --git a/src/atlas/grid/detail/partitioner/SerialPartitioner.h b/src/atlas/grid/detail/partitioner/SerialPartitioner.h index c1613edf6..0b394257d 100644 --- a/src/atlas/grid/detail/partitioner/SerialPartitioner.h +++ b/src/atlas/grid/detail/partitioner/SerialPartitioner.h @@ -22,24 +22,32 @@ namespace partitioner { class SerialPartitioner : public Partitioner { public: - SerialPartitioner(); - SerialPartitioner(int N, const eckit::Parametrisation& config): Partitioner(1) {} - - SerialPartitioner(int N): Partitioner(N) {} + SerialPartitioner(): + Partitioner(1, util::NoConfig()) { + } + SerialPartitioner(int /*N*/): + SerialPartitioner() { + } + SerialPartitioner(int /*N*/, const eckit::Parametrisation& config): + SerialPartitioner(config) { + } + SerialPartitioner(const eckit::Parametrisation& config); std::string type() const override { return static_type(); } static std::string static_type() { return "serial"; } Distribution partition(const Grid& grid) const override { - return Distribution{new distribution::SerialDistribution{grid}}; + return Distribution{new distribution::SerialDistribution{grid, part_}}; } void partition(const Grid& grid, int part[]) const override { gidx_t gridsize = grid.size(); for (gidx_t n = 0; n < gridsize; ++n) { - part[n] = 0.; + part[n] = part_; } } +private: + int part_{0}; }; diff --git a/src/atlas/grid/detail/partitioner/TransPartitioner.cc b/src/atlas/grid/detail/partitioner/TransPartitioner.cc index 1395e6f3a..6da9140b2 100644 --- a/src/atlas/grid/detail/partitioner/TransPartitioner.cc +++ b/src/atlas/grid/detail/partitioner/TransPartitioner.cc @@ -33,7 +33,16 @@ TransPartitioner::TransPartitioner(): Partitioner() { } } -TransPartitioner::TransPartitioner(const idx_t N, const eckit::Parametrisation&): Partitioner(N) { +TransPartitioner::TransPartitioner(const eckit::Parametrisation& config): Partitioner(config) { + EqualRegionsPartitioner eqreg(config); + nbands_ = eqreg.nb_bands(); + nregions_.resize(nbands_); + for (size_t b = 0; b < nbands_; ++b) { + nregions_[b] = eqreg.nb_regions(b); + } +} + +TransPartitioner::TransPartitioner(const idx_t 
N, const eckit::Parametrisation& config): Partitioner(N,config) { EqualRegionsPartitioner eqreg(nb_partitions()); nbands_ = eqreg.nb_bands(); nregions_.resize(nbands_); diff --git a/src/atlas/grid/detail/partitioner/TransPartitioner.h b/src/atlas/grid/detail/partitioner/TransPartitioner.h index b15c75be1..24cb33514 100644 --- a/src/atlas/grid/detail/partitioner/TransPartitioner.h +++ b/src/atlas/grid/detail/partitioner/TransPartitioner.h @@ -30,6 +30,8 @@ class TransPartitioner : public Partitioner { /// @brief Constructor TransPartitioner(); + TransPartitioner(const eckit::Parametrisation&); + TransPartitioner(const idx_t nb_partitions, const eckit::Parametrisation& = util::NoConfig()); virtual ~TransPartitioner(); diff --git a/src/atlas/interpolation/method/structured/StructuredInterpolation2D.tcc b/src/atlas/interpolation/method/structured/StructuredInterpolation2D.tcc index bf37d4c0a..b40436f97 100644 --- a/src/atlas/interpolation/method/structured/StructuredInterpolation2D.tcc +++ b/src/atlas/interpolation/method/structured/StructuredInterpolation2D.tcc @@ -193,7 +193,7 @@ namespace { src_fs_config.set("source.grid",src_fs.grid().spec()); src_fs_config.set("source.halo",src_fs.halo()); src_fs_config.set("source.distribution",src_fs.distribution()); - src_fs_config.set("source.partitions",src_fs.nb_partitions()); + src_fs_config.set("source.partitions",src_fs.nb_parts()); src_fs_config.set("interpolation","StructuredInterpolation2D<"+interpolation.kernel().className()+">"); f << src_fs_config.json(); } diff --git a/src/atlas/mesh/Mesh.cc b/src/atlas/mesh/Mesh.cc index e7e782e32..411260617 100644 --- a/src/atlas/mesh/Mesh.cc +++ b/src/atlas/mesh/Mesh.cc @@ -13,6 +13,7 @@ #include "atlas/grid/Grid.h" #include "atlas/grid/Partitioner.h" #include "atlas/meshgenerator/MeshGenerator.h" +#include "atlas/parallel/mpi/mpi.h" namespace atlas { @@ -20,19 +21,32 @@ namespace atlas { Mesh::Mesh(): Handle(new Implementation()) {} -Mesh::Mesh(const Grid& grid): +Mesh::Mesh(const Grid& grid, const eckit::Configuration& config): Handle([&]() { - auto meshgenerator = MeshGenerator{grid.meshgenerator()}; - auto mesh = meshgenerator.generate(grid, grid::Partitioner(grid.partitioner())); + if(config.has("mpi_comm")) { + mpi::push(config.getString("mpi_comm")); + } + util::Config cfg = grid.meshgenerator()|util::Config(config); + auto meshgenerator = MeshGenerator{cfg}; + auto mesh = meshgenerator.generate(grid, grid::Partitioner(grid.partitioner()|config)); + if(config.has("mpi_comm")) { + mpi::pop(); + } mesh.get()->attach(); return mesh.get(); }()) { get()->detach(); } -Mesh::Mesh(const Grid& grid, const grid::Partitioner& partitioner): +Mesh::Mesh(const Grid& grid, const grid::Partitioner& partitioner, const eckit::Configuration& config): Handle([&]() { - auto meshgenerator = MeshGenerator{grid.meshgenerator()}; + std::string mpi_comm = partitioner.mpi_comm(); + if(config.has("mpi_comm")) { + mpi_comm = config.getString("mpi_comm"); + ATLAS_ASSERT(mpi_comm == partitioner.mpi_comm()); + } + mpi::Scope mpi_scope(mpi_comm); + auto meshgenerator = MeshGenerator{grid.meshgenerator()|config}; auto mesh = meshgenerator.generate(grid, partitioner); mesh.get()->attach(); return mesh.get(); diff --git a/src/atlas/mesh/Mesh.h b/src/atlas/mesh/Mesh.h index cfe643162..66cef013b 100644 --- a/src/atlas/mesh/Mesh.h +++ b/src/atlas/mesh/Mesh.h @@ -76,9 +76,9 @@ class Mesh : DOXYGEN_HIDE(public util::ObjectHandle) { operator bool() const; /// @brief Generate a mesh from a Grid with recommended
mesh generator and partitioner strategy - Mesh(const Grid&); + Mesh(const Grid&, const eckit::Configuration& = util::NoConfig()); - Mesh(const Grid&, const grid::Partitioner&); + Mesh(const Grid&, const grid::Partitioner&, const eckit::Configuration& = util::NoConfig()); /// @brief Construct a mesh from a Stream (serialization) explicit Mesh(eckit::Stream&); @@ -112,9 +112,11 @@ class Mesh : DOXYGEN_HIDE(public util::ObjectHandle) { /// @brief Return the memory footprint of the mesh size_t footprint() const { return get()->footprint(); } - idx_t partition() const { return get()->partition(); } + idx_t part() const { return get()->part(); } - idx_t nb_partitions() const { return get()->nb_partitions(); } + idx_t nb_parts() const { return get()->nb_parts(); } + + std::string mpi_comm() const { return get()->mpi_comm(); } void updateDevice() const { get()->updateDevice(); } diff --git a/src/atlas/mesh/MeshBuilder.cc b/src/atlas/mesh/MeshBuilder.cc index ca35b0d4b..72a6c6f41 100644 --- a/src/atlas/mesh/MeshBuilder.cc +++ b/src/atlas/mesh/MeshBuilder.cc @@ -164,9 +164,10 @@ Mesh MeshBuilder::operator()(size_t nb_nodes, const double lons[], const double auto mpi_comm_name = [](const auto& config) { return config.getString("mpi_comm", atlas::mpi::comm().name()); }; - const eckit::mpi::Comm& comm = eckit::mpi::comm(mpi_comm_name(config).c_str()); Mesh mesh{}; + mesh.metadata().set("mpi_comm", mpi_comm_name(config)); + auto& comm = mpi::comm(mesh.mpi_comm()); // Setup a grid, if requested via config argument if (config.has("grid")) { diff --git a/src/atlas/mesh/PartitionPolygon.cc b/src/atlas/mesh/PartitionPolygon.cc index 66660b15b..35622df31 100644 --- a/src/atlas/mesh/PartitionPolygon.cc +++ b/src/atlas/mesh/PartitionPolygon.cc @@ -81,9 +81,9 @@ size_t PartitionPolygon::footprint() const { } void PartitionPolygon::outputPythonScript(const eckit::PathName& filepath, const eckit::Configuration& config) const { - const eckit::mpi::Comm& comm = atlas::mpi::comm(); - int mpi_rank = int(comm.rank()); - int mpi_size = int(comm.size()); + const auto& comm = mpi::comm(mesh_.mpi_comm()); + int mpi_rank = int(comm.rank()); + int mpi_size = int(comm.size()); std::string coordinates = config.getString("coordinates", "xy"); if (coordinates == "ij") { @@ -254,11 +254,11 @@ void PartitionPolygon::outputPythonScript(const eckit::PathName& filepath, const void PartitionPolygon::allGather(util::PartitionPolygons& polygons) const { ATLAS_TRACE(); - polygons.clear(); - polygons.reserve(mpi::size()); + const auto& comm = mpi::comm(mesh_.mpi_comm()); + const int mpi_size = int(comm.size()); - const mpi::Comm& comm = mpi::comm(); - const int mpi_size = int(comm.size()); + polygons.clear(); + polygons.reserve(mpi_size); auto& poly = *this; diff --git a/src/atlas/mesh/actions/BuildEdges.cc b/src/atlas/mesh/actions/BuildEdges.cc index 1c7ccda1e..eb6461e91 100644 --- a/src/atlas/mesh/actions/BuildEdges.cc +++ b/src/atlas/mesh/actions/BuildEdges.cc @@ -326,6 +326,8 @@ void build_edges(Mesh& mesh) { void build_edges(Mesh& mesh, const eckit::Configuration& config) { ATLAS_TRACE("BuildEdges"); + mpi::Scope mpi_scope(mesh.mpi_comm()); + int mesh_halo(0); mesh.metadata().get("halo", mesh_halo); diff --git a/src/atlas/mesh/actions/BuildHalo.cc b/src/atlas/mesh/actions/BuildHalo.cc index f0ed02925..b44de535e 100644 --- a/src/atlas/mesh/actions/BuildHalo.cc +++ b/src/atlas/mesh/actions/BuildHalo.cc @@ -1374,6 +1374,8 @@ BuildHalo::BuildHalo(Mesh& mesh): mesh_(mesh), periodic_cells_local_index_(mesh. 
void BuildHalo::operator()(int nb_elems) { ATLAS_TRACE("BuildHalo"); + mpi::Scope mpi_scope(mesh_.mpi_comm()); + int halo = 0; mesh_.metadata().get("halo", halo); diff --git a/src/atlas/mesh/actions/BuildParallelFields.cc b/src/atlas/mesh/actions/BuildParallelFields.cc index 507ce67b2..17f617d18 100644 --- a/src/atlas/mesh/actions/BuildParallelFields.cc +++ b/src/atlas/mesh/actions/BuildParallelFields.cc @@ -94,11 +94,16 @@ struct Node { void build_parallel_fields(Mesh& mesh) { ATLAS_TRACE(); - build_nodes_parallel_fields(mesh.nodes()); + build_nodes_parallel_fields(mesh); } //---------------------------------------------------------------------------------------------------------------------- +void build_nodes_parallel_fields(Mesh& mesh) { + mpi::Scope mpi_scope(mesh.mpi_comm()); + build_nodes_parallel_fields(mesh.nodes()); +} + void build_nodes_parallel_fields(mesh::Nodes& nodes) { ATLAS_TRACE(); bool parallel = false; @@ -115,6 +120,7 @@ void build_nodes_parallel_fields(mesh::Nodes& nodes) { void build_edges_parallel_fields(Mesh& mesh) { ATLAS_TRACE(); + mpi::Scope mpi_scope(mesh.mpi_comm()); build_edges_partition(mesh); build_edges_remote_idx(mesh); /* @@ -142,6 +148,11 @@ Field& build_nodes_global_idx(mesh::Nodes& nodes) { return nodes.global_index(); } +void renumber_nodes_glb_idx(Mesh& mesh) { + mpi::Scope mpi_scope(mesh.mpi_comm()); + renumber_nodes_glb_idx(mesh.nodes()); +} + void renumber_nodes_glb_idx(mesh::Nodes& nodes) { bool human_readable(false); nodes.global_index().metadata().get("human_readable", human_readable); @@ -1117,6 +1128,8 @@ Field& build_cells_remote_idx(mesh::Cells& cells, const mesh::Nodes& nodes) { } void build_cells_parallel_fields(Mesh& mesh) { + mpi::Scope mpi_scope(mesh.mpi_comm()); + bool parallel = false; mesh.cells().metadata().get("parallel", parallel); if (!parallel) { diff --git a/src/atlas/mesh/actions/BuildParallelFields.h b/src/atlas/mesh/actions/BuildParallelFields.h index 091ac6373..bcaec7782 100644 --- a/src/atlas/mesh/actions/BuildParallelFields.h +++ b/src/atlas/mesh/actions/BuildParallelFields.h @@ -39,7 +39,8 @@ void build_parallel_fields(Mesh& mesh); * - partition: set to mpi::rank() for negative values * - remote_idx: rebuild from scratch */ -void build_nodes_parallel_fields(mesh::Nodes& nodes); +void build_nodes_parallel_fields(Mesh& mesh); +void build_nodes_parallel_fields(mesh::Nodes& nodes); // deprecated (WARNING: does not change MPI scope) /* * Build parallel fields for the "edges" function space if they don't exist. 
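For callers, the practical effect of these Mesh-level overloads is that mesh actions no longer assume the default communicator. A usage sketch, assuming a sub-communicator has been created and registered under the name "split" beforehand (e.g. with eckit's Comm::split(); the grid and the color choice are illustrative):

    #include "atlas/grid.h"
    #include "atlas/mesh.h"
    #include "atlas/mesh/actions/BuildParallelFields.h"
    #include "atlas/parallel/mpi/mpi.h"
    #include "atlas/util/Config.h"

    void example() {
        // Illustrative: each half of the ranks registers its own communicator named "split".
        atlas::mpi::comm().split(int(atlas::mpi::rank()) % 2, "split");

        // The mesh records "split" as its communicator ...
        atlas::Mesh mesh(atlas::Grid("O32"), atlas::util::Config("mpi_comm", "split"));

        // ... and the Mesh-level action scopes itself to mesh.mpi_comm(),
        // so the caller needs no manual mpi::push()/mpi::pop() around it.
        atlas::mesh::actions::build_nodes_parallel_fields(mesh);
    }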
@@ -57,7 +58,8 @@ void build_edges_parallel_fields(Mesh& mesh); void build_cells_parallel_fields(Mesh& mesh); -void renumber_nodes_glb_idx(mesh::Nodes& nodes); +void renumber_nodes_glb_idx(Mesh& mesh); +void renumber_nodes_glb_idx(mesh::Nodes& nodes); // deprecated (WARNING: does not change MPI scope) // ------------------------------------------------------------------ // C wrapper interfaces to C++ routines diff --git a/src/atlas/mesh/actions/BuildPeriodicBoundaries.cc b/src/atlas/mesh/actions/BuildPeriodicBoundaries.cc index 9406e977b..fffed8169 100644 --- a/src/atlas/mesh/actions/BuildPeriodicBoundaries.cc +++ b/src/atlas/mesh/actions/BuildPeriodicBoundaries.cc @@ -38,11 +38,12 @@ void build_periodic_boundaries(Mesh& mesh) { ATLAS_TRACE(); bool periodic = false; mesh.metadata().get("periodic", periodic); + if (!periodic) { + mpi::Scope mpi_scope(mesh.mpi_comm()); - auto mpi_size = mpi::size(); - auto mypart = mpi::rank(); + auto mpi_size = mpi::size(); + auto mypart = mpi::rank(); - if (!periodic) { mesh::Nodes& nodes = mesh.nodes(); auto flags = array::make_view(nodes.flags()); diff --git a/src/atlas/mesh/detail/MeshImpl.cc b/src/atlas/mesh/detail/MeshImpl.cc index 68cda5d3f..d8c53854a 100644 --- a/src/atlas/mesh/detail/MeshImpl.cc +++ b/src/atlas/mesh/detail/MeshImpl.cc @@ -39,6 +39,7 @@ void MeshImpl::encode(eckit::Stream&) const { } MeshImpl::MeshImpl(): nodes_(new mesh::Nodes()), dimensionality_(2) { + metadata_.set("mpi_comm",mpi::comm().name()); createElements(); } @@ -116,14 +117,27 @@ void MeshImpl::setGrid(const Grid& grid) { } } -idx_t MeshImpl::nb_partitions() const { - return mpi::size(); +idx_t MeshImpl::nb_parts() const { + idx_t n; + if (not metadata().get("nb_parts", n)) { + n = mpi::comm(mpi_comm()).size(); + } + return n; } -idx_t MeshImpl::partition() const { - return mpi::rank(); +idx_t MeshImpl::part() const { + idx_t p; + if (not metadata().get("part", p)) { + p = mpi::comm(mpi_comm()).rank(); + } + return p; } +std::string MeshImpl::mpi_comm() const { + return metadata().getString("mpi_comm"); +} + + void MeshImpl::updateDevice() const { if (nodes_) { nodes_->updateDevice(); @@ -186,7 +200,7 @@ const PartitionGraph& MeshImpl::partitionGraph() const { } PartitionGraph::Neighbours MeshImpl::nearestNeighbourPartitions() const { - return partitionGraph().nearestNeighbours(partition()); + return partitionGraph().nearestNeighbours(part()); } const PartitionPolygon& MeshImpl::polygon(idx_t halo) const { diff --git a/src/atlas/mesh/detail/MeshImpl.h b/src/atlas/mesh/detail/MeshImpl.h index 349b0e16a..e7940bd2d 100644 --- a/src/atlas/mesh/detail/MeshImpl.h +++ b/src/atlas/mesh/detail/MeshImpl.h @@ -94,8 +94,9 @@ class MeshImpl : public util::Object { /// @brief Return the memory footprint of the mesh size_t footprint() const; - idx_t partition() const; - idx_t nb_partitions() const; + idx_t part() const; + idx_t nb_parts() const; + std::string mpi_comm() const; void updateDevice() const; diff --git a/src/atlas/mesh/detail/PartitionGraph.cc b/src/atlas/mesh/detail/PartitionGraph.cc index ac3bb31cb..74db379e5 100644 --- a/src/atlas/mesh/detail/PartitionGraph.cc +++ b/src/atlas/mesh/detail/PartitionGraph.cc @@ -30,8 +30,8 @@ namespace detail { PartitionGraph* build_partition_graph(const MeshImpl& mesh) { ATLAS_TRACE("build_partition_graph..."); - const eckit::mpi::Comm& comm = mpi::comm(); - const int mpi_size = int(comm.size()); + const auto& comm = mpi::comm(mesh.mpi_comm()); + const int mpi_size = int(comm.size()); const util::Polygon& poly = mesh.polygon(); diff --git 
a/src/atlas/meshgenerator/detail/CubedSphereDualMeshGenerator.cc b/src/atlas/meshgenerator/detail/CubedSphereDualMeshGenerator.cc index 66beb592e..953a84ed8 100644 --- a/src/atlas/meshgenerator/detail/CubedSphereDualMeshGenerator.cc +++ b/src/atlas/meshgenerator/detail/CubedSphereDualMeshGenerator.cc @@ -36,6 +36,11 @@ namespace meshgenerator { // ----------------------------------------------------------------------------- CubedSphereDualMeshGenerator::CubedSphereDualMeshGenerator(const eckit::Parametrisation& p) { + // Get mpi_comm + std::string mpi_comm = mpi::comm().name(); + p.get("mpi_comm", mpi_comm); + options.set("mpi_comm", mpi_comm); + configure_defaults(); // Get number of partitions. @@ -58,7 +63,9 @@ CubedSphereDualMeshGenerator::CubedSphereDualMeshGenerator(const eckit::Parametr // Get partitioner. std::string partitioner; - if (p.get("partitioner", partitioner)) { + try { p.get("partitioner.type", partitioner); } catch( std::exception& ) {} + p.get("partitioner", partitioner); + if( partitioner.size() ) { options.set("partitioner", partitioner); } } @@ -67,11 +74,13 @@ CubedSphereDualMeshGenerator::CubedSphereDualMeshGenerator(const eckit::Parametr void CubedSphereDualMeshGenerator::configure_defaults() { + auto& comm = mpi::comm(options.getString("mpi_comm")); + // This option sets number of partitions. - options.set("nb_parts", mpi::size()); + options.set("nb_parts", comm.size()); // This option sets the part that will be generated. - options.set("part", mpi::rank()); + options.set("part", comm.rank()); // This options sets the number of halo elements around each partition. options.set("halo", 0); @@ -90,6 +99,7 @@ void CubedSphereDualMeshGenerator::generate(const Grid& grid, Mesh& mesh) const auto partConfig = util::Config{}; partConfig.set("type", partType); partConfig.set("partitions", nParts); + partConfig.set("mpi_comm",options.getString("mpi_comm")); // Use lonlat instead of xy for non cubedsphere partitioner. if (partType != "cubedsphere") { @@ -97,6 +107,7 @@ void CubedSphereDualMeshGenerator::generate(const Grid& grid, Mesh& mesh) const } // Set distribution. + mpi::Scope mpi_scope(options.getString("mpi_comm")); const auto partitioner = grid::Partitioner(partConfig); const auto distribution = grid::Distribution(grid, partitioner); @@ -126,6 +137,7 @@ void CubedSphereDualMeshGenerator::generate(const Grid& grid, const grid::Distri // Clone some grid properties. setGrid(mesh, csGrid, distribution); + mpi::Scope mpi_scope(options.getString("mpi_comm")); generate_mesh(csGrid, distribution, mesh); } @@ -462,6 +474,9 @@ void CubedSphereDualMeshGenerator::set_metadata(Mesh& mesh) const { mesh.nodes().metadata().set("parallel", true); mesh.cells().metadata().set("parallel", true); + mesh.metadata().set("mpi_comm",options.getString("mpi_comm")); + + // Loop over nodes and count number of halo elements. 
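Once a generator records "mpi_comm" in the mesh metadata, as set_metadata() does above, downstream code can recover the communicator from the Mesh handle alone. A small consumer-side sketch using the accessors introduced by this patch (the reporting function itself is illustrative):

    #include "atlas/mesh/Mesh.h"
    #include "atlas/parallel/mpi/mpi.h"
    #include "atlas/runtime/Log.h"

    void report(const atlas::Mesh& mesh) {
        // mesh.mpi_comm() returns the name stored in the metadata; part() and
        // nb_parts() fall back to that communicator's rank/size whenever the
        // "part"/"nb_parts" metadata keys are absent.
        const auto& comm = atlas::mpi::comm(mesh.mpi_comm());
        atlas::Log::info() << "mesh part " << mesh.part() << " of " << mesh.nb_parts()
                           << " on communicator '" << mesh.mpi_comm() << "' ("
                           << comm.size() << " ranks)" << std::endl;
    }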
auto nNodes = std::vector(nHalo + 2, 0); const auto nodeHalo = array::make_view(mesh.nodes().halo()); diff --git a/src/atlas/meshgenerator/detail/CubedSphereMeshGenerator.cc b/src/atlas/meshgenerator/detail/CubedSphereMeshGenerator.cc index f719dc461..c2bd898ab 100644 --- a/src/atlas/meshgenerator/detail/CubedSphereMeshGenerator.cc +++ b/src/atlas/meshgenerator/detail/CubedSphereMeshGenerator.cc @@ -46,6 +46,11 @@ namespace meshgenerator { // ----------------------------------------------------------------------------- CubedSphereMeshGenerator::CubedSphereMeshGenerator(const eckit::Parametrisation& p) { + // Get mpi_comm + std::string mpi_comm = mpi::comm().name(); + p.get("mpi_comm", mpi_comm); + options.set("mpi_comm", mpi_comm); + configure_defaults(); // Get number of partitions. @@ -68,7 +73,9 @@ CubedSphereMeshGenerator::CubedSphereMeshGenerator(const eckit::Parametrisation& // Get partitioner. std::string partitioner; - if (p.get("partitioner", partitioner)) { + try { p.get("partitioner.type", partitioner); } catch( std::exception& ) {} + p.get("partitioner", partitioner); + if( partitioner.size() ) { options.set("partitioner", partitioner); } } @@ -77,11 +84,13 @@ CubedSphereMeshGenerator::CubedSphereMeshGenerator(const eckit::Parametrisation& void CubedSphereMeshGenerator::configure_defaults() { + auto& comm = mpi::comm(options.getString("mpi_comm")); + // This option sets number of partitions. - options.set("nb_parts", mpi::size()); + options.set("nb_parts", comm.size()); // This option sets the part that will be generated. - options.set("part", mpi::rank()); + options.set("part", comm.rank()); // This options sets the number of halo elements around each partition. options.set("halo", 0); @@ -107,6 +116,7 @@ void CubedSphereMeshGenerator::generate(const Grid& grid, Mesh& mesh) const { } // Set distribution. + mpi::Scope mpi_scope(options.getString("mpi_comm")); const auto partitioner = grid::Partitioner(partConfig); const auto distribution = grid::Distribution(grid, partitioner); @@ -149,6 +159,7 @@ void CubedSphereMeshGenerator::generate(const Grid& grid, const grid::Distributi // Clone some grid properties. 
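The "Set distribution" step above is the pattern both cubed-sphere generators now share: the communicator is handed to the partitioner by name through its configuration, while an mpi::Scope (used throughout this patch as the RAII counterpart of the mpi::push()/mpi::pop() pair) pins the default communicator for any legacy calls underneath. Condensed into a standalone sketch; the equal_regions type and the helper name are illustrative assumptions:

    #include <string>
    #include "atlas/grid.h"
    #include "atlas/grid/Distribution.h"
    #include "atlas/grid/Partitioner.h"
    #include "atlas/parallel/mpi/mpi.h"
    #include "atlas/util/Config.h"

    atlas::grid::Distribution distribute(const atlas::Grid& grid, const std::string& comm_name) {
        atlas::util::Config partConfig;
        partConfig.set("type", "equal_regions");
        partConfig.set("mpi_comm", comm_name);  // the partitioner takes rank/size from this comm

        atlas::mpi::Scope scope(comm_name);     // pin the default communicator for legacy code paths
        auto partitioner = atlas::grid::Partitioner(partConfig);
        return atlas::grid::Distribution(grid, partitioner);
    }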
setGrid(mesh, csGrid, distribution); + mesh.metadata().set("mpi_comm",options.getString("mpi_comm")); generate_mesh(csGrid, distribution, mesh); } diff --git a/src/atlas/meshgenerator/detail/DelaunayMeshGenerator.cc b/src/atlas/meshgenerator/detail/DelaunayMeshGenerator.cc index f8a3b0a2e..4ab849ba0 100644 --- a/src/atlas/meshgenerator/detail/DelaunayMeshGenerator.cc +++ b/src/atlas/meshgenerator/detail/DelaunayMeshGenerator.cc @@ -42,7 +42,8 @@ namespace meshgenerator { DelaunayMeshGenerator::DelaunayMeshGenerator() = default; DelaunayMeshGenerator::DelaunayMeshGenerator(const eckit::Parametrisation& p) { - p.get("part",part_=mpi::rank()); + p.get("mpi_comm",mpi_comm_=mpi::comm().name()); + p.get("part",part_=mpi::comm(mpi_comm_).rank()); p.get("reshuffle",reshuffle_=true); p.get("remove_duplicate_points",remove_duplicate_points_=true); } @@ -57,6 +58,8 @@ void DelaunayMeshGenerator::hash(eckit::Hash& h) const { void DelaunayMeshGenerator::generate(const Grid& grid, const grid::Distribution& dist, Mesh& mesh) const { + mpi::Scope mpi_scope(mpi_comm_); + auto build_global_mesh = [&](Mesh& mesh) { idx_t nb_nodes = grid.size(); mesh.nodes().resize(nb_nodes); @@ -102,6 +105,7 @@ void DelaunayMeshGenerator::generate(const Grid& grid, const grid::Distribution& if( dist.nb_partitions() == 1 ) { build_global_mesh(mesh); setGrid(mesh, grid, dist.type()); + mesh.metadata().set("mpi_comm",mpi_comm_); return; } @@ -357,10 +361,12 @@ void DelaunayMeshGenerator::generate(const Grid& grid, const grid::Distribution& extract_mesh_partition(global_mesh, mesh); setGrid(mesh, grid, dist.type()); + mesh.metadata().set("mpi_comm",mpi_comm_); } void DelaunayMeshGenerator::generate(const Grid& g, Mesh& mesh) const { - generate( g, grid::Distribution{g}, mesh); + mpi::Scope mpi_scope(mpi_comm_); + generate( g, grid::Distribution{g,g.partitioner()}, mesh); } namespace { diff --git a/src/atlas/meshgenerator/detail/DelaunayMeshGenerator.h b/src/atlas/meshgenerator/detail/DelaunayMeshGenerator.h index f397e6408..ef9c981c6 100644 --- a/src/atlas/meshgenerator/detail/DelaunayMeshGenerator.h +++ b/src/atlas/meshgenerator/detail/DelaunayMeshGenerator.h @@ -39,6 +39,7 @@ class DelaunayMeshGenerator : public MeshGenerator::Implementation { virtual void generate(const Grid&, const grid::Distribution&, Mesh&) const override; virtual void generate(const Grid&, Mesh&) const override; + std::string mpi_comm_; int part_; bool remove_duplicate_points_; bool reshuffle_; diff --git a/src/atlas/meshgenerator/detail/HealpixMeshGenerator.cc b/src/atlas/meshgenerator/detail/HealpixMeshGenerator.cc index d77af3652..8fa3f2a09 100644 --- a/src/atlas/meshgenerator/detail/HealpixMeshGenerator.cc +++ b/src/atlas/meshgenerator/detail/HealpixMeshGenerator.cc @@ -47,6 +47,10 @@ namespace atlas { namespace meshgenerator { HealpixMeshGenerator::HealpixMeshGenerator(const eckit::Parametrisation& p) { + std::string mpi_comm = mpi::comm().name(); + p.get("mpi_comm", mpi_comm); + options.set("mpi_comm",mpi_comm); + configure_defaults(); size_t nb_parts; @@ -85,11 +89,15 @@ HealpixMeshGenerator::HealpixMeshGenerator(const eckit::Parametrisation& p) { } void HealpixMeshGenerator::configure_defaults() { + std::string mpi_comm; + options.get("mpi_comm",mpi_comm); + auto& comm = mpi::comm(mpi_comm); + // This option sets number of parts the mesh will be split in - options.set("nb_parts", mpi::size()); + options.set("nb_parts", comm.size()); // This option sets the part that will be generated - options.set("part", mpi::rank()); + options.set("part", 
comm.rank()); // This option switches between original HEALPix with 1 point at the pole (3d -> true) // or HEALPix with 8 or 4 points at the pole (3d -> false) @@ -100,7 +108,7 @@ void HealpixMeshGenerator::configure_defaults() { // This options sets the default partitioner std::string partitioner; - if (grid::Partitioner::exists("equal_regions") && mpi::size() > 1) { + if (grid::Partitioner::exists("equal_regions") && comm.size() > 1) { partitioner = "equal_regions"; } else { @@ -480,9 +488,10 @@ void HealpixMeshGenerator::generate(const Grid& grid, Mesh& mesh) const { std::string partitioner_type = "equal_regions"; options.get("partitioner", partitioner_type); + mpi::push(options.getString("mpi_comm")); grid::Partitioner partitioner(partitioner_type, nb_parts); grid::Distribution distribution(partitioner.partition(grid)); - + mpi::pop(); generate(grid, distribution, mesh); } @@ -577,7 +586,7 @@ void HealpixMeshGenerator::generate_mesh(const StructuredGrid& grid, const grid: auto compute_part = [&](int iy, gidx_t ii_glb) -> int { // nodes at the pole belong to proc_0 (north) and proc_maxRank (south) // a node gets its proc rank from the element for which this node would be its west vertex - return (iy == 0 ? 0 : (iy == ny - 1 ? mpi::comm().size() - 1 : distribution.partition(ii_glb - nb_pole_nodes))); + return (iy == 0 ? 0 : (iy == ny - 1 ? nparts - 1 : distribution.partition(ii_glb - nb_pole_nodes))); }; #if DEBUG_OUTPUT_DETAIL @@ -1053,6 +1062,9 @@ void HealpixMeshGenerator::generate_mesh(const StructuredGrid& grid, const grid: } #endif + mesh.metadata().set("nb_parts",options.getInt("nb_parts")); + mesh.metadata().set("part",options.getInt("part")); + mesh.metadata().set("mpi_comm",options.getString("mpi_comm")); mesh.metadata().set("nb_nodes_including_halo[0]", nodes.size()); nodes.metadata().set("NbRealPts", size_t(nnodes)); nodes.metadata().set("NbVirtualPts", size_t(0)); diff --git a/src/atlas/meshgenerator/detail/MeshGeneratorImpl.cc b/src/atlas/meshgenerator/detail/MeshGeneratorImpl.cc index d8712f5fc..f0f265342 100644 --- a/src/atlas/meshgenerator/detail/MeshGeneratorImpl.cc +++ b/src/atlas/meshgenerator/detail/MeshGeneratorImpl.cc @@ -76,20 +76,21 @@ Mesh MeshGeneratorImpl::generate(const Grid& grid, const grid::Distribution& dis //---------------------------------------------------------------------------------------------------------------------- void MeshGeneratorImpl::generateGlobalElementNumbering(Mesh& mesh) const { - idx_t mpi_size = static_cast(mpi::size()); + const auto& comm = mpi::comm(mesh.mpi_comm()); + idx_t mpi_size = static_cast(comm.size()); gidx_t loc_nb_elems = mesh.cells().size(); std::vector elem_counts(mpi_size); std::vector elem_displs(mpi_size); - ATLAS_TRACE_MPI(ALLGATHER) { mpi::comm().allGather(loc_nb_elems, elem_counts.begin(), elem_counts.end()); } + ATLAS_TRACE_MPI(ALLGATHER) { comm.allGather(loc_nb_elems, elem_counts.begin(), elem_counts.end()); } elem_displs.at(0) = 0; for (idx_t jpart = 1; jpart < mpi_size; ++jpart) { elem_displs.at(jpart) = elem_displs.at(jpart - 1) + elem_counts.at(jpart - 1); } - gidx_t gid = 1 + elem_displs.at(mpi::rank()); + gidx_t gid = 1 + elem_displs.at(comm.rank()); array::ArrayView glb_idx = array::make_view(mesh.cells().global_index()); diff --git a/src/atlas/meshgenerator/detail/RegularMeshGenerator.cc b/src/atlas/meshgenerator/detail/RegularMeshGenerator.cc index 6035bc64a..cc07bab77 100644 --- a/src/atlas/meshgenerator/detail/RegularMeshGenerator.cc +++ b/src/atlas/meshgenerator/detail/RegularMeshGenerator.cc @@ 
-45,6 +45,11 @@ namespace atlas { namespace meshgenerator { RegularMeshGenerator::RegularMeshGenerator(const eckit::Parametrisation& p) { + + std::string mpi_comm = mpi::comm().name(); + p.get("mpi_comm", mpi_comm); + options.set("mpi_comm",mpi_comm); + configure_defaults(); // options copied from Structured MeshGenerator @@ -87,15 +92,19 @@ RegularMeshGenerator::RegularMeshGenerator(const eckit::Parametrisation& p) { } void RegularMeshGenerator::configure_defaults() { + std::string mpi_comm; + options.get("mpi_comm",mpi_comm); + auto& comm = mpi::comm(mpi_comm); + // This option sets number of parts the mesh will be split in - options.set("nb_parts", mpi::size()); + options.set("nb_parts", comm.size()); // This option sets the part that will be generated - options.set("part", mpi::rank()); + options.set("part", comm.rank()); // This options sets the default partitioner std::string partitioner; - if (grid::Partitioner::exists("ectrans") && mpi::size() > 1) { + if (grid::Partitioner::exists("ectrans") && comm.size() > 1) { partitioner = "ectrans"; } else { @@ -126,8 +135,10 @@ void RegularMeshGenerator::generate(const Grid& grid, Mesh& mesh) const { // if ( nb_parts == 1 || eckit::mpi::size() == 1 ) partitioner_factory = // "equal_regions"; // Only one part --> Trans is slower + mpi::push(options.getString("mpi_comm")); grid::Partitioner partitioner(partitioner_type, nb_parts); grid::Distribution distribution(partitioner.partition(grid)); + mpi::pop(); generate(grid, distribution, mesh); } @@ -162,6 +173,7 @@ void RegularMeshGenerator::generate(const Grid& grid, const grid::Distribution& void RegularMeshGenerator::generate_mesh(const RegularGrid& rg, const grid::Distribution& distribution, // const Region& region, Mesh& mesh) const { + mpi::Scope mpi_scope(options.getString("mpi_comm")); int mypart = options.get("part"); int nparts = options.get("nb_parts"); int nx = rg.nx(); @@ -552,6 +564,10 @@ void RegularMeshGenerator::generate_mesh(const RegularGrid& rg, const grid::Dist } #endif + mesh.metadata().set("nb_parts",options.getInt("nb_parts")); + mesh.metadata().set("part",options.getInt("part")); + mesh.metadata().set("mpi_comm",options.getString("mpi_comm")); + generateGlobalElementNumbering(mesh); nodes.metadata().set("parallel", true); diff --git a/src/atlas/meshgenerator/detail/StructuredMeshGenerator.cc b/src/atlas/meshgenerator/detail/StructuredMeshGenerator.cc index 0c57e83e5..96da93938 100644 --- a/src/atlas/meshgenerator/detail/StructuredMeshGenerator.cc +++ b/src/atlas/meshgenerator/detail/StructuredMeshGenerator.cc @@ -101,6 +101,17 @@ StructuredMeshGenerator::StructuredMeshGenerator(const eckit::Parametrisation& p options.set("three_dimensional", three_dimensional); } + std::string mpi_comm_name = mpi::comm().name(); + p.get("mpi_comm", mpi_comm_name); + options.set("mpi_comm",mpi_comm_name); + auto& comm = mpi::comm(mpi_comm_name); + + // This option sets number of parts the mesh will be split in + options.set("nb_parts", comm.size()); + + // This option sets the part that will be generated + options.set("part", comm.rank()); + size_t nb_parts; if (p.get("nb_parts", nb_parts)) { options.set("nb_parts", nb_parts); @@ -160,12 +171,6 @@ void StructuredMeshGenerator::configure_defaults() { // false options.set("three_dimensional", false); - // This option sets number of parts the mesh will be split in - options.set("nb_parts", mpi::size()); - - // This option sets the part that will be generated - options.set("part", mpi::rank()); - // Experimental option. 
This is what makes tripolar grid patched with quads options.set("patch_quads", false); @@ -204,9 +209,10 @@ void StructuredMeshGenerator::generate(const Grid& grid, Mesh& mesh) const { if (nb_parts == 1) { partitioner_type = "serial"; } - + mpi::push(options.getString("mpi_comm")); grid::Partitioner partitioner(partitioner_type, nb_parts); grid::Distribution distribution(partitioner.partition(grid)); + mpi::pop(); generate(grid, distribution, mesh); } @@ -216,7 +222,7 @@ void StructuredMeshGenerator::hash(eckit::Hash& h) const { } void StructuredMeshGenerator::generate(const Grid& grid, const grid::Distribution& distribution, Mesh& mesh) const { - ATLAS_TRACE(); + ATLAS_TRACE("structuredmeshgenerator(grid,dist,mesh)"); Log::debug() << "StructuredMeshGenerator generating mesh from " << grid.name() << std::endl; const StructuredGrid rg = StructuredGrid(grid); @@ -257,6 +263,10 @@ void StructuredMeshGenerator::generate(const Grid& grid, const grid::Distributio Region region; generate_region(rg, distribution, mypart, region); + + mesh.metadata().set("nb_parts",options.getInt("nb_parts")); + mesh.metadata().set("part",options.getInt("part")); + mesh.metadata().set("mpi_comm",options.getString("mpi_comm")); generate_mesh(rg, distribution, region, mesh); } diff --git a/src/atlas/meshgenerator/detail/StructuredMeshGenerator.h b/src/atlas/meshgenerator/detail/StructuredMeshGenerator.h index 6903c2aa1..8341296e6 100644 --- a/src/atlas/meshgenerator/detail/StructuredMeshGenerator.h +++ b/src/atlas/meshgenerator/detail/StructuredMeshGenerator.h @@ -55,6 +55,7 @@ class StructuredMeshGenerator : public MeshGenerator::Implementation { private: virtual void hash(eckit::Hash&) const override; + void configure_defaults(); void generate_region(const StructuredGrid&, const grid::Distribution& distribution, int mypart, diff --git a/src/atlas/output/detail/GmshIO.cc b/src/atlas/output/detail/GmshIO.cc index b69f6f5e5..8a3979b16 100644 --- a/src/atlas/output/detail/GmshIO.cc +++ b/src/atlas/output/detail/GmshIO.cc @@ -896,6 +896,7 @@ void GmshIO::read(const PathName& file_path, Mesh& mesh) const { } void GmshIO::write(const Mesh& mesh, const PathName& file_path) const { + mpi::Scope scope(mesh.mpi_comm()); int part = mesh.metadata().has("part") ? 
mesh.metadata().get("part") : mpi::rank(); bool include_ghost = options.get("ghost") && options.get("elements"); @@ -1420,6 +1421,7 @@ void GmshIO::write_delegate(const FieldSet& fieldset, const functionspace::Struc // ---------------------------------------------------------------------------- void GmshIO::write(const FieldSet& fieldset, const FunctionSpace& funcspace, const eckit::PathName& file_path, openmode mode) const { + mpi::Scope scope(funcspace.mpi_comm()); if (functionspace::NodeColumns(funcspace)) { write_delegate(fieldset, functionspace::NodeColumns(funcspace), file_path, mode); } @@ -1441,6 +1443,7 @@ void GmshIO::write(const FieldSet& fieldset, const FunctionSpace& funcspace, con // ---------------------------------------------------------------------------- void GmshIO::write(const Field& field, const FunctionSpace& funcspace, const eckit::PathName& file_path, openmode mode) const { + mpi::Scope scope(funcspace.mpi_comm()); if (functionspace::NodeColumns(funcspace)) { write_delegate(field, functionspace::NodeColumns(funcspace), file_path, mode); } diff --git a/src/atlas/parallel/Checksum.cc b/src/atlas/parallel/Checksum.cc index 86b859922..2711709c6 100644 --- a/src/atlas/parallel/Checksum.cc +++ b/src/atlas/parallel/Checksum.cc @@ -23,22 +23,32 @@ Checksum::Checksum(const std::string& name): name_(name) { is_setup_ = false; } -void Checksum::setup(const int part[], const idx_t remote_idx[], const int base, const gidx_t glb_idx[], +void Checksum::setup(const std::string& mpi_comm, const int part[], const idx_t remote_idx[], const int base, const gidx_t glb_idx[], const int parsize) { parsize_ = parsize; gather_ = util::ObjectHandle(new GatherScatter()); - gather_->setup(part, remote_idx, base, glb_idx, parsize); + gather_->setup(mpi_comm, part, remote_idx, base, glb_idx, parsize); is_setup_ = true; } void Checksum::setup(const int part[], const idx_t remote_idx[], const int base, const gidx_t glb_idx[], + const int parsize) { + setup(mpi::comm().name(), part, remote_idx, base, glb_idx, parsize); +} + +void Checksum::setup(const std::string& mpi_comm, const int part[], const idx_t remote_idx[], const int base, const gidx_t glb_idx[], const int mask[], const int parsize) { parsize_ = parsize; gather_ = util::ObjectHandle(new GatherScatter()); - gather_->setup(part, remote_idx, base, glb_idx, mask, parsize); + gather_->setup(mpi_comm, part, remote_idx, base, glb_idx, mask, parsize); is_setup_ = true; } +void Checksum::setup(const int part[], const idx_t remote_idx[], const int base, const gidx_t glb_idx[], + const int mask[], const int parsize) { + setup(mpi::comm().name(), part, remote_idx, base, glb_idx, mask, parsize); +} + void Checksum::setup(const util::ObjectHandle& gather) { gather_ = gather; parsize_ = gather->parsize_; diff --git a/src/atlas/parallel/Checksum.h b/src/atlas/parallel/Checksum.h index b0163ff39..a74f8bdc4 100644 --- a/src/atlas/parallel/Checksum.h +++ b/src/atlas/parallel/Checksum.h @@ -33,6 +33,15 @@ class Checksum : public util::Object { public: // methods std::string name() const { return name_; } + /// @brief Setup + /// @param [in] mpi_comm MPI communicator + /// @param [in] part List of partitions + /// @param [in] remote_idx List of local indices on remote partitions + /// @param [in] base values of remote_idx start at "base" + /// @param [in] glb_idx List of global indices
+ /// @param [in] parsize size of given lists + void setup(const std::string& mpi_comm, const int part[], const idx_t remote_idx[], const int base, const gidx_t glb_idx[], const int parsize); + /// @brief Setup /// @param [in] part List of partitions /// @param [in] remote_idx List of local indices on remote partitions @@ -44,6 +53,18 @@ class Checksum : public util::Object { /// @param [in] parsize size of given lists void setup(const int part[], const idx_t remote_idx[], const int base, const gidx_t glb_idx[], const int parsize); + /// @brief Setup + /// @param [in] mpi_comm MPI communicator + /// @param [in] part List of partitions + /// @param [in] remote_idx List of local indices on remote partitions + /// @param [in] base values of remote_idx start at "base" + /// @param [in] glb_idx List of global indices + /// @param [in] mask Mask indices not to include in the communication + /// pattern (0=include,1=exclude) + /// @param [in] parsize size of given lists + void setup(const std::string& mpi_comm, const int part[], const idx_t remote_idx[], const int base, const gidx_t glb_idx[], const int mask[], + const int parsize); + /// @brief Setup /// @param [in] part List of partitions /// @param [in] remote_idx List of local indices on remote partitions @@ -94,14 +115,14 @@ std::string Checksum::execute(const DATA_TYPE data[], const int var_strides[], c local_checksums[pp] = util::checksum(data + pp * var_size, var_size); } - std::vector global_checksums(mpi::rank() == root ? gather_->glb_dof() : 0); + std::vector global_checksums(gather_->comm().rank() == root ? gather_->glb_dof() : 0); parallel::Field loc(local_checksums.data(), 1); parallel::Field glb(global_checksums.data(), 1); gather_->gather(&loc, &glb, 1); util::checksum_t glb_checksum = util::checksum(global_checksums.data(), global_checksums.size()); - mpi::comm().broadcast(glb_checksum, root); + gather_->comm().broadcast(glb_checksum, root); return eckit::Translator()(glb_checksum); } diff --git a/src/atlas/parallel/GatherScatter.cc b/src/atlas/parallel/GatherScatter.cc index 4439fe8be..33a34aed1 100644 --- a/src/atlas/parallel/GatherScatter.cc +++ b/src/atlas/parallel/GatherScatter.cc @@ -30,11 +30,11 @@ namespace parallel { namespace { struct IsGhostPoint { - IsGhostPoint(const int part[], const idx_t ridx[], const idx_t base, const int N) { part_ = part; ridx_ = ridx; base_ = base; - mypart_ = mpi::rank(); + IsGhostPoint(const int mypart, const int part[], const idx_t ridx[], const idx_t base, const int N) { part_ = part; ridx_ = ridx; base_ = base; + mypart_ = mypart; } bool operator()(idx_t idx) { @@ -72,18 +72,22 @@ struct Node { } // namespace GatherScatter::GatherScatter(): name_(), is_setup_(false) { - myproc = mpi::rank(); - nproc = mpi::size(); } GatherScatter::GatherScatter(const std::string& name): name_(name), is_setup_(false) { - myproc = mpi::rank(); - nproc = mpi::size(); } void GatherScatter::setup(const int part[], const idx_t remote_idx[], const int base, const gidx_t glb_idx[], const int mask[], const idx_t parsize) { + setup(mpi::comm().name(), part, remote_idx, base, glb_idx, mask, parsize); +} + +void GatherScatter::setup(const std::string& mpi_comm, const int part[], const idx_t remote_idx[], const int base, const gidx_t glb_idx[], + const int mask[], const idx_t parsize) { ATLAS_TRACE("GatherScatter::setup"); + comm_ = &mpi::comm(mpi_comm); + myproc = comm().rank(); + nproc = comm().size(); parsize_ = parsize; glbcounts_.resize(nproc); @@ -106,7 +110,7 @@ void
GatherScatter::setup(const int part[], const idx_t remote_idx[], const int } ATLAS_TRACE_MPI(ALLGATHER) { - mpi::comm().allGather(loccnt_, glbcounts_.begin(), glbcounts_.end()); + comm().allGather(loccnt_, glbcounts_.begin(), glbcounts_.end()); } size_t glbcnt_size_t = std::accumulate(glbcounts_.begin(), glbcounts_.end(), size_t(0)); @@ -142,8 +146,8 @@ void GatherScatter::setup(const int part[], const idx_t remote_idx[], const int << eckit::Bytes(glbcnt_ * sizeof(gidx_t))); } ATLAS_TRACE_MPI(ALLGATHER) { - mpi::comm().allGatherv(sendnodes_gidx.begin(), sendnodes_gidx.begin() + loccnt_, recvnodes_gidx.data(), - glbcounts_.data(), glbdispls_.data()); + comm().allGatherv(sendnodes_gidx.begin(), sendnodes_gidx.begin() + loccnt_, recvnodes_gidx.data(), + glbcounts_.data(), glbdispls_.data()); } atlas_omp_parallel_for(idx_t n = 0; n < nb_recv_nodes; ++n) { node_sort[n].g = recvnodes_gidx[n]; @@ -162,8 +166,8 @@ void GatherScatter::setup(const int part[], const idx_t remote_idx[], const int << eckit::Bytes(glbcnt_ * sizeof(int))); } ATLAS_TRACE_MPI(ALLGATHER) { - mpi::comm().allGatherv(sendnodes_part.begin(), sendnodes_part.begin() + loccnt_, recvnodes_part.data(), - glbcounts_.data(), glbdispls_.data()); + comm().allGatherv(sendnodes_part.begin(), sendnodes_part.begin() + loccnt_, recvnodes_part.data(), + glbcounts_.data(), glbdispls_.data()); } atlas_omp_parallel_for(idx_t n = 0; n < nb_recv_nodes; ++n) { node_sort[n].p = recvnodes_part[n]; @@ -181,8 +185,8 @@ void GatherScatter::setup(const int part[], const idx_t remote_idx[], const int << eckit::Bytes(glbcnt_ * sizeof(idx_t))); } ATLAS_TRACE_MPI(ALLGATHER) { - mpi::comm().allGatherv(sendnodes_ridx.begin(), sendnodes_ridx.begin() + loccnt_, recvnodes_ridx.data(), - glbcounts_.data(), glbdispls_.data()); + comm().allGatherv(sendnodes_ridx.begin(), sendnodes_ridx.begin() + loccnt_, recvnodes_ridx.data(), + glbcounts_.data(), glbdispls_.data()); } atlas_omp_parallel_for(idx_t n = 0; n < nb_recv_nodes; ++n) { node_sort[n].i = recvnodes_ridx[n]; @@ -238,12 +242,20 @@ void GatherScatter::setup(const int part[], const idx_t remote_idx[], const int void GatherScatter::setup(const int part[], const idx_t remote_idx[], const int base, const gidx_t glb_idx[], const idx_t parsize) { + setup(mpi::comm().name(), part, remote_idx, base, glb_idx, parsize); +} + +void GatherScatter::setup(const std::string& mpi_comm, const int part[], const idx_t remote_idx[], const int base, const gidx_t glb_idx[], + const idx_t parsize) { std::vector mask(parsize); - IsGhostPoint is_ghost(part, remote_idx, base, parsize); - for (idx_t jj = 0; jj < parsize; ++jj) { - mask[jj] = is_ghost(jj) ? 1 : 0; + { + int mypart = mpi::comm(mpi_comm).rank(); + IsGhostPoint is_ghost(mypart, part, remote_idx, base, parsize); + for (idx_t jj = 0; jj < parsize; ++jj) { + mask[jj] = is_ghost(jj) ? 
1 : 0; + } } - setup(part, remote_idx, base, glb_idx, mask.data(), parsize); + setup(mpi_comm, part, remote_idx, base, glb_idx, mask.data(), parsize); } ///////////////////// diff --git a/src/atlas/parallel/GatherScatter.h b/src/atlas/parallel/GatherScatter.h index d882acd89..9fa4d561c 100644 --- a/src/atlas/parallel/GatherScatter.h +++ b/src/atlas/parallel/GatherScatter.h @@ -102,6 +102,15 @@ class GatherScatter : public util::Object { public: // methods std::string name() const { return name_; } + /// @brief Setup + /// @param [in] mpi_comm Name of mpi communicator + /// @param [in] part List of partitions + /// @param [in] remote_idx List of local indices on remote partitions + /// @param [in] base values of remote_idx start at "base" + /// @param [in] glb_idx List of global indices + /// @param [in] parsize size of given lists + void setup(const std::string& mpi_comm, const int part[], const idx_t remote_idx[], const int base, const gidx_t glb_idx[], const idx_t parsize); + /// @brief Setup /// @param [in] part List of partitions /// @param [in] remote_idx List of local indices on remote partitions @@ -110,6 +119,18 @@ class GatherScatter : public util::Object { /// @param [in] parsize size of given lists void setup(const int part[], const idx_t remote_idx[], const int base, const gidx_t glb_idx[], const idx_t parsize); + /// @brief Setup + /// @param [in] mpi_comm Name of mpi communicator + /// @param [in] part List of partitions + /// @param [in] remote_idx List of local indices on remote partitions + /// @param [in] base values of remote_idx start at "base" + /// @param [in] glb_idx List of global indices + /// @param [in] parsize size of given lists + /// @param [in] mask Mask indices not to include in the communication + /// pattern (0=include,1=exclude) + void setup(const std::string& mpi_comm, const int part[], const idx_t remote_idx[], const int base, const gidx_t glb_idx[], const int mask[], + const idx_t parsize); + /// @brief Setup /// @param [in] part List of partitions /// @param [in] remote_idx List of local indices on remote partitions @@ -163,6 +184,8 @@ class GatherScatter : public util::Object { idx_t loc_dof() const { return loccnt_; } + const mpi::Comm& comm() const { return *comm_; } + private: // methods template void pack_send_buffer(const parallel::Field& field, const std::vector& sendmap, @@ -185,6 +208,7 @@ class GatherScatter : public util::Object { std::vector locmap_; std::vector glbmap_; + const mpi::Comm* comm_; idx_t nproc; idx_t myproc; @@ -231,7 +255,7 @@ void GatherScatter::gather(parallel::Field lfields[], parallel: /// Gather ATLAS_TRACE_MPI(GATHER) { - mpi::comm().gatherv(loc_buffer, glb_buffer, glb_counts, glb_displs, root); + comm().gatherv(loc_buffer, glb_buffer, glb_counts, glb_displs, root); } /// Unpack @@ -282,8 +306,8 @@ void GatherScatter::scatter(parallel::Field gfields[], parallel /// Scatter ATLAS_TRACE_MPI(SCATTER) { - mpi::comm().scatterv(glb_buffer.begin(), glb_buffer.end(), glb_counts, glb_displs, loc_buffer.begin(), - loc_buffer.end(), root); + comm().scatterv(glb_buffer.begin(), glb_buffer.end(), glb_counts, glb_displs, loc_buffer.begin(), + loc_buffer.end(), root); } /// Unpack diff --git a/src/atlas/parallel/HaloExchange.cc b/src/atlas/parallel/HaloExchange.cc index d26857600..dc8053714 100644 --- a/src/atlas/parallel/HaloExchange.cc +++ b/src/atlas/parallel/HaloExchange.cc @@ -27,11 +27,11 @@ namespace parallel { namespace { struct IsGhostPoint { - IsGhostPoint(const int part[], const idx_t ridx[], const idx_t base, const int 
/*N*/) { + IsGhostPoint(const int mypart, const int part[], const idx_t ridx[], const idx_t base, const int /*N*/) { part_ = part; ridx_ = ridx; base_ = base; - mypart_ = mpi::rank(); + mypart_ = mypart; } bool operator()(idx_t idx) { @@ -50,24 +50,34 @@ struct IsGhostPoint { }; } // namespace -HaloExchange::HaloExchange(): name_(), is_setup_(false) { - myproc = mpi::rank(); - nproc = mpi::size(); +HaloExchange::HaloExchange() : + HaloExchange("") { } -HaloExchange::HaloExchange(const std::string& name): name_(name), is_setup_(false) { - myproc = mpi::rank(); - nproc = mpi::size(); +HaloExchange::HaloExchange(const std::string& name): + name_(name), + is_setup_(false) { } HaloExchange::~HaloExchange() = default; void HaloExchange::setup(const int part[], const idx_t remote_idx[], const int base, const idx_t size) { - setup(part, remote_idx, base, size, 0); + setup(mpi::comm().name(), part, remote_idx, base, size); } void HaloExchange::setup(const int part[], const idx_t remote_idx[], const int base, idx_t parsize, idx_t halo_begin) { + setup(mpi::comm().name(), part, remote_idx, base, parsize, halo_begin); +} + +void HaloExchange::setup(const std::string& mpi_comm, const int part[], const idx_t remote_idx[], const int base, const idx_t size) { + setup(mpi_comm, part, remote_idx, base, size, 0); +} + +void HaloExchange::setup(const std::string& mpi_comm, const int part[], const idx_t remote_idx[], const int base, idx_t parsize, idx_t halo_begin) { ATLAS_TRACE("HaloExchange::setup"); + comm_ = &mpi::comm(mpi_comm); + myproc = comm().rank(); + nproc = comm().size(); parsize_ = parsize; sendcounts_.resize(nproc); @@ -83,7 +93,7 @@ void HaloExchange::setup(const int part[], const idx_t remote_idx[], const int b Find the amount of nodes this proc has to receive from each other proc */ - IsGhostPoint is_ghost(part, remote_idx, base, parsize_); + IsGhostPoint is_ghost(myproc, part, remote_idx, base, parsize_); atlas::vector ghost_points(parsize_); idx_t nghost = 0; @@ -103,7 +113,7 @@ void HaloExchange::setup(const int part[], const idx_t remote_idx[], const int b /* Find the amount of nodes this proc has to send to each other proc */ - ATLAS_TRACE_MPI(ALLTOALL) { mpi::comm().allToAll(recvcounts_, sendcounts_); } + ATLAS_TRACE_MPI(ALLTOALL) { comm().allToAll(recvcounts_, sendcounts_); } sendcnt_ = std::accumulate(sendcounts_.begin(), sendcounts_.end(), 0); @@ -142,7 +152,7 @@ void HaloExchange::setup(const int part[], const idx_t remote_idx[], const int b */ ATLAS_TRACE_MPI(ALLTOALL) { - mpi::comm().allToAllv(send_requests.data(), recvcounts_.data(), recvdispls_.data(), recv_requests.data(), + comm().allToAllv(send_requests.data(), recvcounts_.data(), recvdispls_.data(), recv_requests.data(), sendcounts_.data(), senddispls_.data()); } @@ -163,7 +173,7 @@ void HaloExchange::wait_for_send(std::vector& send_counts_init, std::vector ATLAS_TRACE_MPI(WAIT, "mpi-wait send") { for (int jproc = 0; jproc < nproc; ++jproc) { if (send_counts_init[jproc] > 0) { - mpi::comm().wait(send_req[jproc]); + comm().wait(send_req[jproc]); } } } diff --git a/src/atlas/parallel/HaloExchange.h b/src/atlas/parallel/HaloExchange.h index bd089ab98..e3cb3afd7 100644 --- a/src/atlas/parallel/HaloExchange.h +++ b/src/atlas/parallel/HaloExchange.h @@ -51,8 +51,10 @@ class HaloExchange : public util::Object { const std::string& name() const { return name_; } void setup(const int part[], const idx_t remote_idx[], const int base, idx_t size); + void setup(const std::string& mpi_comm, const int part[], const idx_t remote_idx[], const 
int base, idx_t size); void setup(const int part[], const idx_t remote_idx[], const int base, idx_t size, idx_t halo_begin); + void setup(const std::string& mpi_comm, const int part[], const idx_t remote_idx[], const int base, idx_t size, idx_t halo_begin); template void execute(array::Array& field, bool on_device = false) const; @@ -118,6 +120,10 @@ class HaloExchange : public util::Object { void var_info(const array::ArrayView& arr, std::vector& varstrides, std::vector& varshape) const; + const mpi::Comm& comm() const { + return *comm_; + } + private: // data std::string name_; bool is_setup_; @@ -134,6 +140,7 @@ int nproc; int myproc; + const mpi::Comm* comm_; public: struct Backdoor { @@ -287,7 +294,7 @@ void HaloExchange::ireceive(int tag, std::vector& recv_displs, std::vector< for (size_t jproc = 0; jproc < static_cast<size_t>(nproc); ++jproc) { if (recv_counts[jproc] > 0) { recv_req[jproc] = - mpi::comm().iReceive(&recv_buffer[recv_displs[jproc]], recv_counts[jproc], jproc, tag); + comm().iReceive(&recv_buffer[recv_displs[jproc]], recv_counts[jproc], jproc, tag); } } } @@ -302,7 +309,7 @@ void HaloExchange::isend_and_wait_for_receive(int tag, std::vector& recv_co ATLAS_TRACE_MPI(ISEND) { for (size_t jproc = 0; jproc < static_cast<size_t>(nproc); ++jproc) { if (send_counts[jproc] > 0) { - send_req[jproc] = mpi::comm().iSend(&send_buffer[send_displs[jproc]], send_counts[jproc], jproc, tag); + send_req[jproc] = comm().iSend(&send_buffer[send_displs[jproc]], send_counts[jproc], jproc, tag); } } } @@ -311,7 +318,7 @@ void HaloExchange::isend_and_wait_for_receive(int tag, std::vector& recv_co ATLAS_TRACE_MPI(WAIT, "mpi-wait receive") { for (size_t jproc = 0; jproc < static_cast<size_t>(nproc); ++jproc) { if (recv_counts_init[jproc] > 0) { - mpi::comm().wait(recv_req[jproc]); + comm().wait(recv_req[jproc]); } } } diff --git a/src/atlas/parallel/mpi/mpi.cc b/src/atlas/parallel/mpi/mpi.cc index 4ad247b12..6b8f794ac 100644 --- a/src/atlas/parallel/mpi/mpi.cc +++ b/src/atlas/parallel/mpi/mpi.cc @@ -10,6 +10,7 @@ #include "atlas/parallel/mpi/mpi.h" #include "atlas/runtime/Log.h" +#include "atlas/runtime/Exception.h" namespace atlas { namespace mpi { @@ -22,5 +23,50 @@ void finalise() { eckit::mpi::finaliseAllComms(); } +void CommStack::push(std::string_view name) { + if (stack_.size() == size_) { + stack_.resize(2 * size_); + } + stack_[size_++] = name; + eckit::mpi::setCommDefault(name.data()); +} + +void CommStack::pop(std::string_view _name) { + ATLAS_ASSERT(_name == name()); + pop(); +} + +void CommStack::pop() { + --size_; + eckit::mpi::setCommDefault(name().c_str()); +} + +const std::string& CommStack::name() const { + return stack_[size_-1]; +} + +const mpi::Comm& CommStack::comm() const { + return mpi::comm(name()); } + +CommStack::CommStack(): stack_(64) { + stack_[size_++] = mpi::comm().name(); +} + +void push(std::string_view name) { + Log::debug() << "atlas::mpi::push(" << name << ")" << std::endl; + CommStack::instance().push(name); +} + +void pop(std::string_view name) { + Log::debug() << "atlas::mpi::pop(" << name << ")" << std::endl; + CommStack::instance().pop(name); +} + +void pop() { + Log::debug() << "atlas::mpi::pop()" << std::endl; + CommStack::instance().pop(); +} + } // namespace mpi } // namespace atlas diff --git a/src/atlas/parallel/mpi/mpi.h b/src/atlas/parallel/mpi/mpi.h --- a/src/atlas/parallel/mpi/mpi.h +++ b/src/atlas/parallel/mpi/mpi.h @@ ... @@ +#include <string_view> +#include "eckit/mpi/Comm.h" #include "atlas/parallel/mpi/Statistics.h" namespace atlas { @@ -23,6 +24,10 @@ inline const Comm& comm() { return eckit::mpi::comm(); } +inline const Comm& comm(std::string_view name) { + return eckit::mpi::comm(name.data()); +} + inline idx_t rank() { return static_cast<idx_t>(comm().rank()); } @@ -34,5 +39,45 @@ inline int size() { void finalize(); void finalise(); +class CommStack { +public: + using const_iterator = std::vector<std::string>::const_iterator; + +public: + void push(std::string_view name); + void pop(); + void pop(std::string_view name); // verifies that name matches the current top of the stack + const std::string& name() const; + const mpi::Comm& comm() const; + + const_iterator begin() const { return stack_.begin(); } + const_iterator end() const { return stack_.begin() + size_; } + + size_t size() const { return size_; } + + static CommStack& instance() { + static CommStack instance; + return instance; + } + +private: + CommStack(); +private: + std::vector<std::string> stack_; + size_t size_{0}; +}; +void push(std::string_view name); +void pop(std::string_view name); +void pop(); +struct Scope { + Scope(std::string_view name) : name_(name) { + push(name_); + } + ~Scope() { + pop(name_); + } + std::string name_; +}; + } // namespace mpi } // namespace atlas
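A brief usage sketch of the communicator stack introduced above (mpi::comm(name), mpi::CommStack, and the RAII mpi::Scope guard). This is illustrative only and not part of the patch; the communicator name "split" and the helper function are assumptions:

#include "atlas/parallel/mpi/mpi.h"

void run_on_subcommunicator(int color) {
    // Register an eckit communicator named "split" for this rank's color group.
    atlas::mpi::comm().split(color, "split");
    {
        atlas::mpi::Scope scope("split");  // pushes "split" onto the CommStack, making it the default
        auto& comm = atlas::mpi::comm();   // now resolves to "split"
        double x = 1.0;
        comm.allReduceInPlace(x, eckit::mpi::sum());
    }  // ~Scope pops the stack and restores the previous default communicator
    eckit::mpi::deleteComm("split");
}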
diff --git a/src/atlas/redistribution/detail/RedistributeGeneric.cc b/src/atlas/redistribution/detail/RedistributeGeneric.cc index e869e9d56..9e86fff15 100644 --- a/src/atlas/redistribution/detail/RedistributeGeneric.cc +++ b/src/atlas/redistribution/detail/RedistributeGeneric.cc @@ -42,6 +42,7 @@ Field getGhostField(const FunctionSpace& functionspace) { } if (functionspace::EdgeColumns(functionspace) || functionspace::CellColumns(functionspace)) { // TODO: Move something like this into the functionspace::EdgeColumns and functionspace::CellColumns + auto& comm = mpi::comm(functionspace.mpi_comm()); // Get mesh elements. const auto& elems = functionspace::EdgeColumns(functionspace) @@ -58,7 +59,7 @@ Field getGhostField(const FunctionSpace& functionspace) { auto partition = array::make_view(elems.partition()); // Set ghost field. - const auto thisPart = static_cast<int>(mpi::comm().rank()); + const auto thisPart = static_cast<int>(comm.rank()); for (idx_t i = 0; i < ghost.shape(0); ++i) { ghost(i) = partition(i) != thisPart || remote_index(i) != i; } @@ -147,19 +148,20 @@ std::vector getUidVal(const std::vector& uidVec) { } // Communicate UID values, return receive buffer and displacements. -std::pair, std::vector> communicateUid(const std::vector& sendBuffer) { - auto counts = std::vector(mpi::comm().size()); - mpi::comm().allGather(static_cast(sendBuffer.size()), counts.begin(), counts.end()); +std::pair, std::vector> communicateUid(const std::string& mpi_comm, const std::vector& sendBuffer) { + auto& comm = mpi::comm(mpi_comm); + auto counts = std::vector(comm.size()); + comm.allGather(static_cast(sendBuffer.size()), counts.begin(), counts.end()); auto disps = std::vector{}; - disps.reserve(mpi::comm().size() + 1); + disps.reserve(comm.size() + 1); disps.push_back(0); std::partial_sum(counts.begin(), counts.end(), std::back_inserter(disps)); auto recvBuffer = std::vector(static_cast(disps.back())); - mpi::comm().allGatherv(sendBuffer.begin(), sendBuffer.end(), recvBuffer.begin(), counts.data(), disps.data()); + comm.allGatherv(sendBuffer.begin(), sendBuffer.end(), recvBuffer.begin(), counts.data(), disps.data()); return std::make_pair(recvBuffer, disps); } @@ -175,18 +177,22 @@ bool operator<(const uidx_t& lhs, const IdxUid& rhs) { // Find the intersection between local and global UIDs, then return local // indices of intersections and PE displacements in vector. 
-std::pair, std::vector> getUidIntersection(const std::vector& localUids, +std::pair, std::vector> getUidIntersection(const std::string& mpi_comm, + const std::vector& localUids, const std::vector& globalUids, const std::vector& globalDisps) { auto uidIntersection = std::vector{}; uidIntersection.reserve(localUids.size()); + auto& comm = mpi::comm(mpi_comm); + auto mpi_size = comm.size(); + auto disps = std::vector{}; - disps.reserve(mpi::comm().size() + 1); + disps.reserve(mpi_size + 1); disps.push_back(0); // Loop over all PE and find UID intersection. - for (size_t i = 0; i < mpi::comm().size(); ++i) { + for (size_t i = 0; i < mpi_size; ++i) { // Get displaced iterators. auto globalUidsBegin = globalUids.begin() + globalDisps[i]; auto globalUidsEnd = globalUids.begin() + globalDisps[i + 1]; @@ -255,6 +261,10 @@ struct ForEach { } // namespace void RedistributeGeneric::do_setup() { + ATLAS_ASSERT( source().mpi_comm() == target().mpi_comm() ); + + mpi_comm_ = source().mpi_comm(); + // get a unique ID (UID) for each owned member of functionspace. const auto sourceUidVec = getUidVec(source()); const auto targetUidVec = getUidVec(target()); @@ -262,14 +272,14 @@ void RedistributeGeneric::do_setup() { // Communicate UID vectors to all PEs. auto sourceGlobalUids = std::vector{}; auto sourceGlobalDisps = std::vector{}; - std::tie(sourceGlobalUids, sourceGlobalDisps) = communicateUid(getUidVal(sourceUidVec)); + std::tie(sourceGlobalUids, sourceGlobalDisps) = communicateUid(mpi_comm_, getUidVal(sourceUidVec)); auto targetGlobalUids = std::vector{}; auto targetGlobalDisps = std::vector{}; - std::tie(targetGlobalUids, targetGlobalDisps) = communicateUid(getUidVal(targetUidVec)); + std::tie(targetGlobalUids, targetGlobalDisps) = communicateUid(mpi_comm_, getUidVal(targetUidVec)); // Get intersection of local UIDs and Global UIDs. - std::tie(sourceLocalIdx_, sourceDisps_) = getUidIntersection(sourceUidVec, targetGlobalUids, targetGlobalDisps); - std::tie(targetLocalIdx_, targetDisps_) = getUidIntersection(targetUidVec, sourceGlobalUids, sourceGlobalDisps); + std::tie(sourceLocalIdx_, sourceDisps_) = getUidIntersection(mpi_comm_, sourceUidVec, targetGlobalUids, targetGlobalDisps); + std::tie(targetLocalIdx_, targetDisps_) = getUidIntersection(mpi_comm_, targetUidVec, sourceGlobalUids, sourceGlobalDisps); } void RedistributeGeneric::execute(const Field& sourceField, Field& targetField) const { @@ -369,6 +379,9 @@ void RedistributeGeneric::do_execute(const Field& sourceField, Field& targetFiel auto sourceView = array::make_view(sourceField); auto targetView = array::make_view(targetField); + const auto& comm = mpi::comm(mpi_comm_); + auto mpi_size = comm.size(); + // Get number of elems per column. int elemsPerCol = 1; for (int i = 1; i < Rank; ++i) { @@ -377,18 +390,18 @@ void RedistributeGeneric::do_execute(const Field& sourceField, Field& targetFiel // Set send displacement and counts vectors. auto sendDisps = std::vector{}; - sendDisps.reserve(mpi::comm().size() + 1); + sendDisps.reserve(mpi_size + 1); auto sendCounts = std::vector{}; - sendCounts.reserve(mpi::comm().size()); + sendCounts.reserve(mpi_size); std::transform(sourceDisps_.begin(), sourceDisps_.end(), std::back_inserter(sendDisps), [&](const int& disp) { return disp * elemsPerCol; }); std::adjacent_difference(sendDisps.begin() + 1, sendDisps.end(), std::back_inserter(sendCounts)); // Set recv displacement and counts vectors. 
auto recvDisps = std::vector{}; - recvDisps.reserve(mpi::comm().size() + 1); + recvDisps.reserve(mpi_size + 1); auto recvCounts = std::vector{}; - recvCounts.reserve(mpi::comm().size()); + recvCounts.reserve(mpi_size); std::transform(targetDisps_.begin(), targetDisps_.end(), std::back_inserter(recvDisps), [&](const int& disp) { return disp * elemsPerCol; }); std::adjacent_difference(recvDisps.begin() + 1, recvDisps.end(), std::back_inserter(recvCounts)); @@ -403,8 +416,8 @@ void RedistributeGeneric::do_execute(const Field& sourceField, Field& targetFiel ForEach::apply(sourceLocalIdx_, sourceView, [&](const Value& elem) { *sendBufferIt++ = elem; }); // Perform MPI communication. - mpi::comm().allToAllv(sendBuffer.data(), sendCounts.data(), sendDisps.data(), recvBuffer.data(), recvCounts.data(), - recvDisps.data()); + comm.allToAllv(sendBuffer.data(), sendCounts.data(), sendDisps.data(), recvBuffer.data(), recvCounts.data(), + recvDisps.data()); // Copy recvBuffer to targetField. ForEach::apply(targetLocalIdx_, targetView, [&](Value& elem) { elem = *recvBufferIt++; }); diff --git a/src/atlas/redistribution/detail/RedistributeGeneric.h b/src/atlas/redistribution/detail/RedistributeGeneric.h index a408f3e87..a650fd02c 100644 --- a/src/atlas/redistribution/detail/RedistributeGeneric.h +++ b/src/atlas/redistribution/detail/RedistributeGeneric.h @@ -7,6 +7,8 @@ #pragma once +#include + #include "atlas/redistribution/detail/RedistributionImpl.h" namespace atlas { @@ -46,6 +48,8 @@ class RedistributeGeneric : public RedistributionImpl { // Partial sum of number of columns to receive from each PE. std::vector targetDisps_{}; + + std::string mpi_comm_; }; } // namespace detail diff --git a/src/atlas/redistribution/detail/RedistributeStructuredColumns.cc b/src/atlas/redistribution/detail/RedistributeStructuredColumns.cc index 40127920f..3ef241fd7 100644 --- a/src/atlas/redistribution/detail/RedistributeStructuredColumns.cc +++ b/src/atlas/redistribution/detail/RedistributeStructuredColumns.cc @@ -69,6 +69,10 @@ void RedistributeStructuredColumns::do_setup() { // Check levels match. ATLAS_ASSERT(source_.levels() == target_.levels()); + // Check that communicators match. + ATLAS_ASSERT(source_.mpi_comm() == target_.mpi_comm()); + mpi_comm_ = source_.mpi_comm(); + // Get source and target range of this function space. const auto sourceRange = StructuredIndexRange(source_); @@ -225,8 +229,8 @@ void RedistributeStructuredColumns::do_execute(const Field& sourceField, Field& forEachIndex(sendIntersections_, sendFunctor); // Communicate. - mpi::comm().allToAllv(sendBuffer.data(), sendCounts_.data(), sendDisplacements_.data(), recvBuffer.data(), - recvCounts_.data(), recvDisplacements_.data()); + mpi::comm(mpi_comm_).allToAllv(sendBuffer.data(), sendCounts_.data(), sendDisplacements_.data(), recvBuffer.data(), + recvCounts_.data(), recvDisplacements_.data()); // Read data from buffer. forEachIndex(recvIntersections_, recvFunctor); @@ -246,19 +250,23 @@ StructuredIndexRange::StructuredIndexRange(const functionspace::StructuredColumn iBeginEnd_.push_back(std::make_pair(structuredColumns.i_begin(j), structuredColumns.i_end(j))); } + mpi_comm_ = structuredColumns.mpi_comm(); + return; } // Get index ranges from all PEs. StructuredIndexRangeVector StructuredIndexRange::getStructuredIndexRanges() const { + auto& comm = mpi::comm(mpi_comm()); + // Get MPI communicator size. - const auto mpiSize = static_cast(atlas::mpi::comm().size()); + const auto mpiSize = static_cast(comm.size()); // Set recv buffer for j range. 
auto jRecvBuffer = idxPairVector(mpiSize); // Perform all gather. - atlas::mpi::comm().allGather(jBeginEnd_, jRecvBuffer.begin(), jRecvBuffer.end()); + comm.allGather(jBeginEnd_, jRecvBuffer.begin(), jRecvBuffer.end()); // Set i receive counts. auto iRecvCounts = transformVector( @@ -272,8 +280,8 @@ StructuredIndexRangeVector StructuredIndexRange::getStructuredIndexRanges() cons auto irecvBuffer = idxPairVector(static_cast(iRecvDisplacements.back() + iRecvCounts.back())); // Perform all gather. - atlas::mpi::comm().allGatherv(iBeginEnd_.cbegin(), iBeginEnd_.cend(), irecvBuffer.begin(), iRecvCounts.data(), - iRecvDisplacements.data()); + comm.allGatherv(iBeginEnd_.cbegin(), iBeginEnd_.cend(), irecvBuffer.begin(), iRecvCounts.data(), + iRecvDisplacements.data()); // Make vector of indexRange structs. auto indexRanges = StructuredIndexRangeVector{}; diff --git a/src/atlas/redistribution/detail/RedistributeStructuredColumns.h b/src/atlas/redistribution/detail/RedistributeStructuredColumns.h index 34425282a..7972078b8 100644 --- a/src/atlas/redistribution/detail/RedistributeStructuredColumns.h +++ b/src/atlas/redistribution/detail/RedistributeStructuredColumns.h @@ -92,6 +92,8 @@ class RedistributeStructuredColumns : public RedistributionImpl { std::vector sendDisplacements_{}; std::vector recvCounts_{}; std::vector recvDisplacements_{}; + + std::string mpi_comm_; }; /// \brief Helper class for function space intersections. @@ -116,12 +118,16 @@ class StructuredIndexRange { template void forEach(const functorType&) const; + const std::string& mpi_comm() const { return mpi_comm_; } + private: // Begin and end of j range. idxPair jBeginEnd_{}; // Begin and end of i range for each j. idxPairVector iBeginEnd_{}; + + std::string mpi_comm_; }; } // namespace detail diff --git a/src/tests/functionspace/CMakeLists.txt b/src/tests/functionspace/CMakeLists.txt index 137478faf..55b599545 100644 --- a/src/tests/functionspace/CMakeLists.txt +++ b/src/tests/functionspace/CMakeLists.txt @@ -39,6 +39,15 @@ ecbuild_add_test( TARGET atlas_test_functionspace ENVIRONMENT ${ATLAS_TEST_ENVIRONMENT} ) +ecbuild_add_test( TARGET atlas_test_functionspace_splitcomm + MPI 4 + CONDITION eckit_HAVE_MPI + SOURCES test_functionspace_splitcomm.cc + LIBS atlas + ENVIRONMENT ${ATLAS_TEST_ENVIRONMENT} +) + + ecbuild_add_test( TARGET atlas_test_cellcolumns SOURCES test_cellcolumns.cc LIBS atlas diff --git a/src/tests/functionspace/test_functionspace_splitcomm.cc b/src/tests/functionspace/test_functionspace_splitcomm.cc new file mode 100644 index 000000000..68a10b88f --- /dev/null +++ b/src/tests/functionspace/test_functionspace_splitcomm.cc @@ -0,0 +1,192 @@ +/* + * (C) Copyright 2013 ECMWF. + * + * This software is licensed under the terms of the Apache Licence Version 2.0 + * which can be obtained at http://www.apache.org/licenses/LICENSE-2.0. + * In applying this licence, ECMWF does not waive the privileges and immunities + * granted to it by virtue of its status as an intergovernmental organisation + * nor does it submit to any jurisdiction. 
+ */ + +#include "atlas/array.h" +#include "atlas/grid.h" +#include "atlas/mesh/Mesh.h" +#include "atlas/output/Gmsh.h" +#include "tests/AtlasTestEnvironment.h" +#include "atlas/functionspace/NodeColumns.h" +#include "atlas/functionspace/StructuredColumns.h" +#include "atlas/functionspace/BlockStructuredColumns.h" +#include "atlas/grid/Partitioner.h" +#include "atlas/field/for_each.h" + +namespace atlas { +namespace test { + +//----------------------------------------------------------------------------- + +namespace option { + struct mpi_split_comm : public util::Config { + mpi_split_comm() { + set("mpi_comm","split"); + } + }; +} + +int color() { + static int c = mpi::comm("world").rank()%2; + return c; +} + +Grid grid() { + static Grid g (color() == 0 ? "O32" : "N32" ); + return g; +} + +std::string expected_checksum() { + if (grid().name()=="O32") { + return "75e913d400755a0d2782fc65e2035e97"; + } + else if (grid().name()=="N32") { + return "bcb344196d20becbb66f098d91f83abb"; + } + else { + return "unknown"; + } +} + +struct Fixture { + Fixture() { + mpi::comm().split(color(),"split"); + } + ~Fixture() { + if (eckit::mpi::hasComm("split")) { + eckit::mpi::deleteComm("split"); + } + } +}; + +void field_init(Field& field) { + auto fs = field.functionspace(); + auto f = array::make_view<double,1>(field); + auto g = array::make_view<gidx_t,1>(fs.global_index()); + for( idx_t j=0; j<f.size(); ++j ) { + f(j) = g(j); + } +} + +CASE("test FunctionSpace NodeColumns") { + Fixture fixture; + + auto mesh = Mesh(grid(), option::mpi_split_comm()); + auto fs = functionspace::NodeColumns(mesh); + auto field = fs.createField<double>(); + field_init(field); + + // HaloExchange + field.haloExchange(); + // TODO CHECK + + // Gather + auto fieldg = fs.createField<double>(atlas::option::global()); + fs.gather(field,fieldg); + + if (fieldg.size()) { + idx_t g{0}; + field::for_each_value(fieldg,[&](double x) { + EXPECT_EQ(++g,x); + }); + } + + // Checksum + auto checksum = fs.checksum(field); + EXPECT_EQ(checksum, expected_checksum()); + + // Output + output::Gmsh gmsh(grid().name()+".msh"); + gmsh.write(mesh); + gmsh.write(field); +} + +CASE("test FunctionSpace StructuredColumns") { + Fixture fixture; + + auto fs = functionspace::StructuredColumns(grid(),atlas::option::halo(1)|option::mpi_split_comm()); + EXPECT_EQUAL(fs.part(),mpi::comm("split").rank()); + EXPECT_EQUAL(fs.nb_parts(),mpi::comm("split").size()); + + auto field = fs.createField<double>(); + field_init(field); + + // HaloExchange + field.haloExchange(); + // TODO CHECK + + // Gather + auto fieldg = fs.createField<double>(atlas::option::global()); + fs.gather(field,fieldg); + + if (fieldg.size()) { + idx_t g{0}; + field::for_each_value(fieldg,[&](double x) { + EXPECT_EQ(++g,x); + }); + } + + // Checksum + auto checksum = fs.checksum(field); + EXPECT_EQ(checksum, expected_checksum()); +} + +CASE("test FunctionSpace BlockStructuredColumns") { + Fixture fixture; + + auto fs = functionspace::BlockStructuredColumns(grid(),atlas::option::halo(1)|option::mpi_split_comm()); + EXPECT_EQUAL(fs.part(),mpi::comm("split").rank()); + EXPECT_EQUAL(fs.nb_parts(),mpi::comm("split").size()); + + auto field = fs.createField<double>(); + // field_init(field); + + // HaloExchange + // field.haloExchange(); + // TODO CHECK + + // Gather + auto fieldg = fs.createField<double>(atlas::option::global()); + // fs.gather(field,fieldg); + + // if (fieldg.size()) { + // idx_t g{0}; + // field::for_each_value(fieldg,[&](double x) { + // EXPECT_EQ(++g,x); + // }); + // } + + // // Checksum + // auto checksum = fs.checksum(field); + // EXPECT_EQ(checksum, expected_checksum()); +} + +//----------------------------------------------------------------------------- + +CASE("test FunctionSpace StructuredColumns with MatchingPartitioner") { + Fixture fixture; + + auto fs_A = 
functionspace::StructuredColumns(grid(), option::mpi_split_comm()); + auto fs_B = functionspace::StructuredColumns(grid(), grid::MatchingPartitioner(fs_A), option::mpi_split_comm()); + fs_A.polygon().outputPythonScript("fs_A_polygons.py"); + fs_B.polygon().outputPythonScript("fs_B_polygons.py"); +} + +//----------------------------------------------------------------------------- + +} // namespace test +} // namespace atlas + +int main(int argc, char** argv) { + return atlas::test::run(argc, argv); +} diff --git a/src/tests/functionspace/test_pointcloud_haloexchange_2PE.cc b/src/tests/functionspace/test_pointcloud_haloexchange_2PE.cc index 35a93b268..d20c76054 100644 --- a/src/tests/functionspace/test_pointcloud_haloexchange_2PE.cc +++ b/src/tests/functionspace/test_pointcloud_haloexchange_2PE.cc @@ -35,8 +35,8 @@ double innerproductwithhalo(const Field& f1, const Field& f2) { sum += view1(jn, jl) * view2(jn, jl); } } - - atlas::mpi::comm().allReduceInPlace(sum, eckit::mpi::sum()); + auto& comm = atlas::mpi::comm(f1.functionspace().mpi_comm()); + comm.allReduceInPlace(sum, eckit::mpi::sum()); return sum; } @@ -111,8 +111,19 @@ CASE("test_halo_exchange_01") { remote_indexv.assign({0, 1, 2, 3, 1, 0, 0, 2, 2, 3, 3, 1}); - - if (atlas::mpi::rank() == 0) { + int color = 0; + if( mpi::rank() >= mpi::size() - 2) { + color = 1; + } + auto& comm = mpi::comm().split(color,"split"); + if( color == 0 ) { + eckit::mpi::deleteComm("split"); + return; + } + util::Config mpi_comm("mpi_comm","split"); + int mpi_rank = comm.rank(); + + if (mpi_rank == 0) { // center followed by clockwise halo starting from top left lonlatv.assign({-45.0, 45.0, 45.0, 45.0, // center, first row -45.0, -45.0, 45.0, -45.0, // center, second row @@ -126,7 +137,7 @@ CASE("test_halo_exchange_01") { - } else if (atlas::mpi::rank() == 1) { + } else if (mpi_rank == 1) { // center followed by clockwise halo starting from top left lonlatv.assign({135.0, 45.0, 225.0, 45.0, 135.0, -45.0, 225.0, -45.0, @@ -146,7 +157,7 @@ CASE("test_halo_exchange_01") { fset.add(remote_index); fset.add(partition); - auto fs2 = functionspace::PointCloud(fset); + auto fs2 = functionspace::PointCloud(fset, mpi_comm); Field f1 = fs2.createField(option::name("f1") | option::levels(2)); Field f2 = fs2.createField(option::name("f2") | option::levels(2)); auto f1v = array::make_view(f1); @@ -158,7 +169,7 @@ CASE("test_halo_exchange_01") { for (idx_t l = 0; l < f2v.shape(1); ++l) { auto ghostv2 = array::make_view(fs2.ghost()); if (ghostv2(i) == 0) { - f1v(i, l) = (atlas::mpi::rank() +1) * 10.0 + i; + f1v(i, l) = (mpi_rank +1) * 10.0 + i; f2v(i, l) = f1v(i, l); } } @@ -173,8 +184,8 @@ CASE("test_halo_exchange_01") { double sum2 = innerproductwithhalo(f1, f2); - atlas::mpi::comm().allReduceInPlace(sum1, eckit::mpi::sum()); - atlas::mpi::comm().allReduceInPlace(sum2, eckit::mpi::sum()); + comm.allReduceInPlace(sum1, eckit::mpi::sum()); + comm.allReduceInPlace(sum2, eckit::mpi::sum()); EXPECT(std::abs(sum1 - sum2)/ std::abs(sum1) < tolerance); atlas::Log::info() << "adjoint test passed :: " << "sum1 " << sum1 << " sum2 " << sum2 << " normalised difference " @@ -189,6 +200,9 @@ CASE("test_halo_exchange_01") { } atlas::Log::info() << "values from halo followed by halo adjoint are as expected " << std::endl; + + eckit::mpi::deleteComm("split"); + } @@ -202,7 +216,19 @@ CASE("test_halo_exchange_02") { auto lonlatv = array::make_view(lonlat); auto ghostv = array::make_view(ghost); - if (atlas::mpi::rank() == 0) { + int color = 0; + if( mpi::rank() >= mpi::size() - 2) { + 
color = 1; + } + auto& comm = mpi::comm().split(color,"split"); + if( color == 0 ) { + eckit::mpi::deleteComm("split"); + return; + } + util::Config mpi_comm("mpi_comm","split"); + int mpi_rank = comm.rank(); + + if (mpi_rank == 0) { // center followed by clockwise halo starting from top left lonlatv.assign({-45.0, 45.0, 45.0, 45.0, // center, first row -45.0, -45.0, 45.0, -45.0, // center, second row @@ -214,7 +240,7 @@ CASE("test_halo_exchange_02") { ghostv.assign({ 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1}); - } else if (atlas::mpi::rank() == 1) { + } else if (mpi_rank == 1) { // center followed by clockwise halo starting from top left lonlatv.assign({135.0, 45.0, 225.0, 45.0, 135.0, -45.0, 225.0, -45.0, @@ -228,7 +254,7 @@ CASE("test_halo_exchange_02") { } - auto pcfs = functionspace::PointCloud(lonlat, ghost); + auto pcfs = functionspace::PointCloud(lonlat, ghost, mpi_comm); Field f1 = pcfs.createField(option::name("f1") | option::levels(2)); Field f2 = pcfs.createField(option::name("f2") | option::levels(2)); @@ -242,7 +268,7 @@ CASE("test_halo_exchange_02") { for (idx_t l = 0; l < f2v.shape(1); ++l) { auto ghostv2 = array::make_view(pcfs.ghost()); if (ghostv2(i) == 0) { - f1v(i, l) = (atlas::mpi::rank() +1) * 10.0 + i; + f1v(i, l) = (mpi_rank +1) * 10.0 + i; f2v(i, l) = f1v(i, l); } } @@ -257,8 +283,8 @@ CASE("test_halo_exchange_02") { double sum2 = innerproductwithhalo(f1, f2); - atlas::mpi::comm().allReduceInPlace(sum1, eckit::mpi::sum()); - atlas::mpi::comm().allReduceInPlace(sum2, eckit::mpi::sum()); + comm.allReduceInPlace(sum1, eckit::mpi::sum()); + comm.allReduceInPlace(sum2, eckit::mpi::sum()); EXPECT(std::abs(sum1 - sum2)/ std::abs(sum1) < tolerance); atlas::Log::info() << "adjoint test passed :: " << "sum1 " << sum1 << " sum2 " << sum2 << " normalised difference " @@ -274,6 +300,8 @@ CASE("test_halo_exchange_02") { atlas::Log::info() << "values from halo followed by halo adjoint are as expected " << std::endl; + eckit::mpi::deleteComm("split"); + } diff --git a/src/tests/mesh/CMakeLists.txt b/src/tests/mesh/CMakeLists.txt index 148c966d4..c901de5bc 100644 --- a/src/tests/mesh/CMakeLists.txt +++ b/src/tests/mesh/CMakeLists.txt @@ -87,6 +87,13 @@ ecbuild_add_test( TARGET atlas_test_mesh_node2cell ENVIRONMENT ${ATLAS_TEST_ENVIRONMENT} ) +ecbuild_add_test( TARGET atlas_test_meshgen_splitcomm + MPI 4 + CONDITION eckit_HAVE_MPI + SOURCES test_meshgen_splitcomm.cc + LIBS atlas + ENVIRONMENT ${ATLAS_TEST_ENVIRONMENT} +) foreach( test connectivity stream_connectivity elements ll meshgen3d rgg ) ecbuild_add_test( TARGET atlas_test_${test} diff --git a/src/tests/mesh/test_meshgen_splitcomm.cc b/src/tests/mesh/test_meshgen_splitcomm.cc new file mode 100644 index 000000000..70df5770a --- /dev/null +++ b/src/tests/mesh/test_meshgen_splitcomm.cc @@ -0,0 +1,248 @@ +/* + * (C) Copyright 2013 ECMWF. + * + * This software is licensed under the terms of the Apache Licence Version 2.0 + * which can be obtained at http://www.apache.org/licenses/LICENSE-2.0. + * In applying this licence, ECMWF does not waive the privileges and immunities + * granted to it by virtue of its status as an intergovernmental organisation + * nor does it submit to any jurisdiction. 
+ */ + +#include "atlas/grid.h" +#include "atlas/mesh/Mesh.h" +#include "atlas/meshgenerator.h" +#include "atlas/output/Gmsh.h" +#include "atlas/grid/Partitioner.h" + +#include "tests/AtlasTestEnvironment.h" + +namespace atlas { +namespace test { + +//----------------------------------------------------------------------------- + +namespace option { + struct mpi_comm : public util::Config { + mpi_comm(std::string_view name) { + set("mpi_comm",std::string(name)); + } + }; +} + +int color() { + static int c = mpi::comm("world").rank()%2; + return c; +} + +Grid grid() { + static Grid g (color() == 0 ? "O32" : "N32" ); + return g; +} + +Grid grid_A() { + return grid(); +} + +Grid grid_B() { + static Grid g (color() == 0 ? "F64" : "O64" ); + return g; +} + +Grid grid_CS() { + static Grid g (color() == 0 ? "CS-LFR-C-8" : "CS-LFR-C-16" ); + return g; +} + +Grid grid_Regular() { + static Grid g (color() == 0 ? "S8" : "L16" ); + return g; +} + +Grid grid_healpix() { + static Grid g (color() == 0 ? "H8" : "H16" ); + return g; +} + +struct Fixture { + Fixture() { + mpi::comm().split(color(),"split"); + } + ~Fixture() { + if (eckit::mpi::hasComm("split")) { + eckit::mpi::deleteComm("split"); + } + } +}; + +CASE("Partitioners") { + Fixture fixture; + SECTION("default") { + auto partitioner = grid::Partitioner("equal_regions"); + EXPECT_EQ(partitioner.mpi_comm(),mpi::comm().name()); + auto distribution = partitioner.partition(Grid("O8")); + EXPECT_EQ(distribution.nb_partitions(), mpi::comm().size()); + } + SECTION("split") { + auto partitioner = grid::Partitioner("equal_regions", option::mpi_comm("split")); + EXPECT_EQ(partitioner.mpi_comm(),"split"); + auto distribution = partitioner.partition(Grid("O8")); + EXPECT_EQ(distribution.nb_partitions(), mpi::comm("split").size()); + } +} + +CASE("StructuredMeshGenerator") { + Fixture fixture; + + MeshGenerator meshgen{"structured", option::mpi_comm("split")}; + Mesh mesh = meshgen.generate(grid()); + EXPECT_EQUAL(mesh.nb_parts(),mpi::comm("split").size()); + EXPECT_EQUAL(mesh.part(),mpi::comm("split").rank()); + EXPECT_EQUAL(mesh.mpi_comm(),"split"); + EXPECT_EQUAL(mpi::comm().name(),"world"); + output::Gmsh gmsh(grid().name()+"_1.msh"); + gmsh.write(mesh); + + // partitioning graph and polygon output + EXPECT_NO_THROW(mesh.partitionGraph()); + EXPECT_NO_THROW(mesh.polygons()); + mesh.polygon().outputPythonScript(grid().name()+"_polygons_1.py"); +} + +CASE("RegularMeshGenerator") { + Fixture fixture; + + MeshGenerator meshgen{"regular", option::mpi_comm("split")}; + Mesh mesh = meshgen.generate(grid_Regular()); + EXPECT_EQUAL(mesh.nb_parts(),mpi::comm("split").size()); + EXPECT_EQUAL(mesh.part(),mpi::comm("split").rank()); + EXPECT_EQUAL(mesh.mpi_comm(),"split"); + EXPECT_EQUAL(mpi::comm().name(),"world"); + output::Gmsh gmsh(grid().name()+"_regular.msh"); + gmsh.write(mesh); + + // partitioning graph and polygon output + EXPECT_NO_THROW(mesh.partitionGraph()); + EXPECT_NO_THROW(mesh.polygons()); + mesh.polygon().outputPythonScript(grid().name()+"_regular_polygons_1.py"); +} + +CASE("HealpixMeshGenerator") { + Fixture fixture; + + MeshGenerator meshgen{"healpix", option::mpi_comm("split")}; + Mesh mesh = meshgen.generate(grid_healpix()); + EXPECT_EQUAL(mesh.nb_parts(),mpi::comm("split").size()); + EXPECT_EQUAL(mesh.part(),mpi::comm("split").rank()); + EXPECT_EQUAL(mesh.mpi_comm(),"split"); + EXPECT_EQUAL(mpi::comm().name(),"world"); + output::Gmsh gmsh(grid().name()+"_regular.msh"); + gmsh.write(mesh); + + // partitioning graph and polygon output + 
EXPECT_NO_THROW(mesh.partitionGraph()); + EXPECT_NO_THROW(mesh.polygons()); + mesh.polygon().outputPythonScript(grid().name()+"_regular_polygons_1.py"); +} + +CASE("DelaunayMeshGenerator") { +if( ATLAS_HAVE_TESSELATION ) { + Fixture fixture; + + MeshGenerator meshgen{"delaunay", option::mpi_comm("split")}; + Mesh mesh = meshgen.generate(grid_CS()); + EXPECT_EQUAL(mesh.nb_parts(),mpi::comm("split").size()); + EXPECT_EQUAL(mesh.part(),mpi::comm("split").rank()); + EXPECT_EQUAL(mesh.mpi_comm(),"split"); + EXPECT_EQUAL(mpi::comm().name(),"world"); + output::Gmsh gmsh(grid_CS().name()+"_delaunay.msh",util::Config("coordinates","xyz")); + gmsh.write(mesh); +} +else { + Log::warning() << "Not testing DelaunayMeshGenerator as TESSELATION feature is OFF" << std::endl; +} +} + +CASE("CubedSphereDualMeshGenerator") { + Fixture fixture; + + MeshGenerator meshgen{"cubedsphere_dual", option::mpi_comm("split")}; + Mesh mesh = meshgen.generate(grid_CS()); + EXPECT_EQUAL(mesh.nb_parts(),mpi::comm("split").size()); + EXPECT_EQUAL(mesh.part(),mpi::comm("split").rank()); + EXPECT_EQUAL(mesh.mpi_comm(),"split"); + EXPECT_EQUAL(mpi::comm().name(),"world"); + output::Gmsh gmsh(grid_CS().name()+"_1.msh"); + gmsh.write(mesh); +} + +CASE("CubedSphereMeshGenerator") { + Fixture fixture; + + MeshGenerator meshgen{"cubedsphere", option::mpi_comm("split")}; + Mesh mesh = meshgen.generate(grid_CS()); + EXPECT_EQUAL(mesh.nb_parts(),mpi::comm("split").size()); + EXPECT_EQUAL(mesh.part(),mpi::comm("split").rank()); + EXPECT_EQUAL(mesh.mpi_comm(),"split"); + EXPECT_EQUAL(mpi::comm().name(),"world"); + output::Gmsh gmsh(grid_CS().name()+"_2.msh"); + gmsh.write(mesh); +} + + +CASE("Mesh constructor") { + Fixture fixture; + + auto mesh = Mesh(grid(), option::mpi_comm("split")); + EXPECT_EQUAL(mesh.nb_parts(),mpi::comm("split").size()); + EXPECT_EQUAL(mesh.part(),mpi::comm("split").rank()); + EXPECT_EQUAL(mesh.mpi_comm(),"split"); + EXPECT_EQUAL(mpi::comm().name(),"world"); + output::Gmsh gmsh(grid().name()+"_2.msh"); + gmsh.write(mesh); + + // partitioning graph and polygon output + EXPECT_NO_THROW(mesh.partitionGraph()); + EXPECT_NO_THROW(mesh.polygons()); + mesh.polygon().outputPythonScript(grid().name()+"_polygons_2.py"); +} + +CASE("Mesh constructor with partitioner") { + Fixture fixture; + auto partitioner = grid::Partitioner("equal_regions", option::mpi_comm("split")); + + auto mesh = Mesh(grid(), partitioner); + EXPECT_EQUAL(mesh.nb_parts(),mpi::comm("split").size()); + EXPECT_EQUAL(mesh.part(),mpi::comm("split").rank()); + EXPECT_EQUAL(mesh.mpi_comm(),"split"); + EXPECT_EQUAL(mpi::comm().name(),"world"); + output::Gmsh gmsh(grid().name()+"_2.msh"); + gmsh.write(mesh); + + // partitioning graph and polygon output + EXPECT_NO_THROW(mesh.partitionGraph()); + EXPECT_NO_THROW(mesh.polygons()); + mesh.polygon().outputPythonScript(grid().name()+"_polygons_2.py"); +} + + +CASE("MatchingPartitioner") { + Fixture fixture; + + auto mesh_A = Mesh(grid_A(), option::mpi_comm("split")); + auto mesh_B = Mesh(grid_B(), grid::MatchingPartitioner(mesh_A), option::mpi_comm("split")); + + output::Gmsh gmsh_B(grid_B().name()+"_3.msh"); + gmsh_B.write(mesh_B); + + // partitioning graph and polygon output + EXPECT_NO_THROW(mesh_B.partitionGraph()); + EXPECT_NO_THROW(mesh_B.polygons()); + mesh_B.polygon().outputPythonScript(grid_B().name()+"_polygons_3.py"); +} + +} // namespace test +} // namespace atlas + +int main(int argc, char** argv) { + return atlas::test::run(argc, argv); +} diff --git a/src/tests/parallel/test_setcomm.cc 
b/src/tests/parallel/test_setcomm.cc index 8ac653006..bb52a5001 100644 --- a/src/tests/parallel/test_setcomm.cc +++ b/src/tests/parallel/test_setcomm.cc @@ -38,6 +38,23 @@ CASE("test_setcomm") { std::cout << "----- STOP -----" << std::endl; } +CASE("test_split") { + int irank = mpi::comm("world").rank(); + auto& split_comm = mpi::comm("world").split(irank%2, "test_split"); + + EXPECT_EQ(mpi::comm("world").name(), "world"); + EXPECT_EQ(split_comm.name(), "test_split"); + + if( mpi::comm("world").size()%2 == 0) { // even number + EXPECT_EQ( split_comm.size() , mpi::comm("world").size()/2); + } + + auto& split_comm_by_name = mpi::comm("test_split"); + EXPECT_EQ( &split_comm, &split_comm_by_name); +} + + + } // namespace test } // namespace atlas diff --git a/src/tests/redistribution/test_redistribution_generic.cc b/src/tests/redistribution/test_redistribution_generic.cc index be0be22dc..3b0c60793 100644 --- a/src/tests/redistribution/test_redistribution_generic.cc +++ b/src/tests/redistribution/test_redistribution_generic.cc @@ -27,6 +27,23 @@ namespace atlas { namespace test { +int mpi_color() { + static int c = mpi::comm("world").rank()%2; + return c; +} + +struct Fixture { + Fixture() { + mpi::comm().split(mpi_color(),"split"); + } + ~Fixture() { + if (eckit::mpi::hasComm("split")) { + eckit::mpi::deleteComm("split"); + } + } +}; + + // Set floating point tolerance. template Value tolerance() { @@ -111,24 +128,27 @@ Value testPattern(const mesh::Connectivity::Row& elem, const array::ArrayViewcast(); - const auto* edgeColumnsPtr = functionSpace.get()->cast(); - const auto* nodeColumnsPtr = functionSpace.get()->cast(); - const auto* structuredColumnsPtr = functionSpace.get()->cast(); + const auto cellColumns = functionspace::CellColumns(functionSpace); + const auto edgeColumns = functionspace::EdgeColumns(functionSpace); + const auto nodeColumns = functionspace::NodeColumns(functionSpace); + const auto structuredColumns = functionspace::StructuredColumns(functionSpace); auto mesh = Mesh(); - if (cellColumnsPtr) { - mesh = cellColumnsPtr->mesh(); + if (cellColumns) { + mesh = cellColumns.mesh(); } - else if (edgeColumnsPtr) { - mesh = edgeColumnsPtr->mesh(); + else if (edgeColumns) { + mesh = edgeColumns.mesh(); } - else if (nodeColumnsPtr) { - mesh = nodeColumnsPtr->mesh(); + else if (nodeColumns) { + mesh = nodeColumns.mesh(); } - else if (structuredColumnsPtr) { - mesh = MeshGenerator("structured").generate(structuredColumnsPtr->grid(), structuredColumnsPtr->distribution()); + else if (structuredColumns) { + auto mpi_comm = util::Config("mpi_comm",functionSpace.mpi_comm()); + auto grid = structuredColumns.grid(); + auto partitioner = grid::Partitioner(functionSpace.distribution(),mpi_comm); + mesh = MeshGenerator("structured",mpi_comm).generate(grid, partitioner); } return mesh; } @@ -136,14 +156,14 @@ Mesh getMesh(const FunctionSpace& functionSpace) { // Try and get cells or edges node-connectivity from functionspace. const mesh::HybridElements::Connectivity* getConnectivity(const FunctionSpace& functionSpace) { // Try and create a pointer to CellColumns or EdgeColumns. 
- const auto* cellColumnsPtr = functionSpace.get()->cast(); - const auto* edgeColumnsPtr = functionSpace.get()->cast(); + const auto cellColumns = functionspace::CellColumns(functionSpace); + const auto edgeColumns = functionspace::EdgeColumns(functionSpace); - if (cellColumnsPtr) { - return &(cellColumnsPtr->cells().node_connectivity()); + if (cellColumns) { + return &(cellColumns.cells().node_connectivity()); } - else if (edgeColumnsPtr) { - return &(edgeColumnsPtr->edges().node_connectivity()); + else if (edgeColumns) { + return &(edgeColumns.edges().node_connectivity()); } else { return nullptr; @@ -219,7 +239,7 @@ struct TestRedistributionPoints1 : public TestRedistribution { // Perform redistribution. this->redist_.execute(this->sourceFieldSet_, this->targetFieldSet_); - // Perform halo excahnge; + // Perform halo exchange; this->targetFunctionSpace_.haloExchange(this->targetFieldSet_); // Check target field. @@ -229,7 +249,8 @@ struct TestRedistributionPoints1 : public TestRedistribution { testPattern(targetLonlatView(i, LON), targetLonlatView(i, LAT), 0))); ++nCheck; } - mpi::comm().allReduceInPlace(nCheck, eckit::mpi::Operation::SUM); + const auto& comm = mpi::comm(this->sourceFunctionSpace_.mpi_comm()); + comm.allReduceInPlace(nCheck, eckit::mpi::Operation::SUM); Log::debug() << "Checked " << nCheck << " elements." << std::endl; } }; @@ -252,7 +273,7 @@ struct TestRedistributionPoints2 : public TestRedistribution { // Perform redistribution. this->redist_.execute(this->sourceFieldSet_, this->targetFieldSet_); - // Perform halo excahnge; + // Perform halo exchange; this->targetFunctionSpace_.haloExchange(this->targetFieldSet_); // Check target field. @@ -264,7 +285,8 @@ struct TestRedistributionPoints2 : public TestRedistribution { ++nCheck; } } - mpi::comm().allReduceInPlace(nCheck, eckit::mpi::Operation::SUM); + const auto& comm = mpi::comm(this->sourceFunctionSpace_.mpi_comm()); + comm.allReduceInPlace(nCheck, eckit::mpi::Operation::SUM); Log::debug() << "Checked " << nCheck << " elements." << std::endl; } }; @@ -288,7 +310,7 @@ struct TestRedistributionPoints3 : public TestRedistribution { // Perform redistribution. this->redist_.execute(this->sourceFieldSet_, this->targetFieldSet_); - // Perform halo excahnge; + // Perform halo exchange; this->targetFunctionSpace_.haloExchange(this->targetFieldSet_); @@ -304,7 +326,8 @@ struct TestRedistributionPoints3 : public TestRedistribution { ++nCheck; } } - mpi::comm().allReduceInPlace(nCheck, eckit::mpi::Operation::SUM); + const auto& comm = mpi::comm(this->sourceFunctionSpace_.mpi_comm()); + comm.allReduceInPlace(nCheck, eckit::mpi::Operation::SUM); Log::debug() << "Checked " << nCheck << " elements." << std::endl; } }; @@ -330,7 +353,7 @@ struct TestRedistributionElems : public TestRedistribution { // Perform redistribution. this->redist_.execute(this->sourceFieldSet_, this->targetFieldSet_); - // Perform halo excahnge; + // Perform halo exchange; this->targetFunctionSpace_.haloExchange(this->targetFieldSet_); // Check target field. @@ -339,12 +362,12 @@ struct TestRedistributionElems : public TestRedistribution { EXPECT(checkValue(this->targetView_(i), testPattern(targetConnectivity->row(i), targetLonLatView))); ++nCheck; } - mpi::comm().allReduceInPlace(nCheck, eckit::mpi::Operation::SUM); + const auto& comm = mpi::comm(this->sourceFunctionSpace_.mpi_comm()); + comm.allReduceInPlace(nCheck, eckit::mpi::Operation::SUM); Log::debug() << "Checked " << nCheck << " elements." 
<< std::endl; } }; - CASE("Structured grid") { auto grid = atlas::Grid("L24x19"); @@ -529,6 +552,209 @@ CASE("Cubed sphere dual grid") { } } +CASE("Structured grid with split comms") { + Fixture fixture; + + auto grid = mpi_color() == 0 ? atlas::Grid("L24x13") : atlas::Grid("O16"); + auto mpi_comm = util::Config("mpi_comm","split"); + + // auto grid = atlas::Grid("O48"); + // auto mpi_comm = util::Config("mpi_comm","world"); + + // Set mesh config. + const auto sourceMeshConfig = util::Config("partitioner", "equal_regions") | mpi_comm; + const auto targetMeshConfig = util::Config("partitioner", "equal_bands") | mpi_comm; + + auto sourceMesh = MeshGenerator("structured", sourceMeshConfig).generate(grid); + auto targetMesh = MeshGenerator("structured", targetMeshConfig).generate(grid); + + SECTION("NodeColumns") { + const auto sourceFunctionSpace = functionspace::NodeColumns(sourceMesh, util::Config("halo", 2)); + const auto targetFunctionSpace = functionspace::NodeColumns(targetMesh, util::Config("halo", 2)); + + // Test double for different ranks. + auto test1 = TestRedistributionPoints1<double>(sourceFunctionSpace, targetFunctionSpace); + auto test2 = TestRedistributionPoints2<double>(sourceFunctionSpace, targetFunctionSpace); + auto test3 = TestRedistributionPoints3<double>(sourceFunctionSpace, targetFunctionSpace); + + // Test float. + auto test4 = TestRedistributionPoints1<float>(sourceFunctionSpace, targetFunctionSpace); + + // Test int. + auto test5 = TestRedistributionPoints1<int>(sourceFunctionSpace, targetFunctionSpace); + + // Test long. + auto test6 = TestRedistributionPoints1<long>(sourceFunctionSpace, targetFunctionSpace); + + test1.execute(); + test2.execute(); + test3.execute(); + test4.execute(); + test5.execute(); + test6.execute(); + + test2.outputFields("StructuredGrid_NodeColumns_"+std::to_string(mpi_color())); + } + SECTION("CellColumns") { + // No build_cells_global_idx method implemented in mesh/actions/BuildParallelFields.cc. + + Log::debug() << "Structured Grid Cell Columns currently unsupported." << std::endl; + + //const auto sourceFunctionSpace = functionspace::CellColumns( sourceMesh, util::Config( "halo", 0 ) ); + //const auto targetFunctionSpace = functionspace::CellColumns( targetMesh, util::Config( "halo", 0 ) ); + + //auto test = TestRedistributionElems( sourceFunctionSpace, targetFunctionSpace ); + + //test.execute(); + + //test.outputFields( "StructuredGrid_CellColumns_"+std::to_string(mpi_color())); + } + SECTION("EdgeColumns") { + // Note: StructuredGrid EdgeColumns redistribution only works for halo = 0. + + const auto sourceFunctionSpace = functionspace::EdgeColumns(sourceMesh, util::Config("halo", 0)); + const auto targetFunctionSpace = functionspace::EdgeColumns(targetMesh, util::Config("halo", 0)); + + // Test long int. + auto test = TestRedistributionElems<long>(sourceFunctionSpace, targetFunctionSpace); + + test.execute(); + + // EdgeColumns not currently supported by gmsh IO. 
+ } + SECTION("Structured Columns") { + const auto sourceFunctionSpace = functionspace::StructuredColumns( + grid, grid::Partitioner("equal_regions",mpi_comm), util::Config("halo", 2) | util::Config("periodic_points", true) | mpi_comm); + const auto targetFunctionSpace = functionspace::StructuredColumns( + grid, grid::Partitioner("regular_bands",mpi_comm), util::Config("halo", 2) | util::Config("periodic_points", true) | mpi_comm); + + auto test = TestRedistributionPoints2(sourceFunctionSpace, targetFunctionSpace); + + test.execute(); + + test.outputFields("StructuredGrid_StructuredColumns_"+std::to_string(mpi_color())); + } + SECTION("Point Cloud") { + // Make a point cloud from NodeColumns functionspace. + const auto sourceFunctionSpace = functionspace::NodeColumns(sourceMesh, util::Config("halo", 0)); + const auto targetFunctionSpace = functionspace::NodeColumns(targetMesh, util::Config("halo", 0)); + + // Make a vector of lonlats. + auto sourceLonLat = std::vector{}; + auto targetLonLat = std::vector{}; + + const auto sourceGhostView = array::make_view(sourceFunctionSpace.ghost()); + const auto sourceLonLatView = array::make_view(sourceFunctionSpace.lonlat()); + const auto targetGhostView = array::make_view(targetFunctionSpace.ghost()); + const auto targetLonLatView = array::make_view(targetFunctionSpace.lonlat()); + + // Add non-ghost lonlats to vector. + sourceLonLat.reserve(sourceFunctionSpace.size()); + for (idx_t i = 0; i < sourceFunctionSpace.size(); ++i) { + if (!sourceGhostView(i)) { + sourceLonLat.emplace_back(sourceLonLatView(i, LON), sourceLonLatView(i, LAT)); + } + } + targetLonLat.reserve(targetFunctionSpace.size()); + for (idx_t i = 0; i < targetFunctionSpace.size(); ++i) { + if (!targetGhostView(i)) { + targetLonLat.emplace_back(targetLonLatView(i, LON), targetLonLatView(i, LAT)); + } + } + + // Make point cloud functionspaces. + const auto sourcePointCloud = functionspace::PointCloud(sourceLonLat); + const auto targetPointCloud = functionspace::PointCloud(targetLonLat); + + auto test = TestRedistributionPoints2(sourcePointCloud, targetPointCloud); + + test.execute(); + } +} + +CASE("Cubed sphere grid with split comms") { + Fixture fixture; + + auto grid = mpi_color() == 0 ? atlas::Grid("CS-LFR-C-8") : atlas::Grid("CS-LFR-C-16"); + auto mpi_comm = util::Config("mpi_comm","split"); + + // Set mesh config. 
+ const auto sourceMeshConfig = util::Config("partitioner", "equal_regions") | util::Config("halo", "2") | mpi_comm; + const auto targetMeshConfig = util::Config("partitioner", "cubedsphere") | util::Config("halo", "2") | mpi_comm; + + auto sourceMesh = MeshGenerator("cubedsphere", sourceMeshConfig).generate(grid); + auto targetMesh = MeshGenerator("cubedsphere", targetMeshConfig).generate(grid); + + SECTION("CubedSphereNodeColumns") { + const auto sourceFunctionSpace = functionspace::CubedSphereNodeColumns(sourceMesh); + const auto targetFunctionSpace = functionspace::CubedSphereNodeColumns(targetMesh); + + EXPECT_EQ( sourceFunctionSpace.mpi_comm(), "split" ); + + auto test = TestRedistributionPoints2(sourceFunctionSpace, targetFunctionSpace); + + test.execute(); + + test.outputFields("CubedSphere_NodeColumns_"+std::to_string(mpi_color())); + } + + SECTION("CubedSphereCellColumns") { + const auto sourceFunctionSpace = functionspace::CubedSphereCellColumns(sourceMesh); + const auto targetFunctionSpace = functionspace::CubedSphereCellColumns(targetMesh); + + EXPECT_EQ( sourceFunctionSpace.mpi_comm(), "split" ); + + auto test = TestRedistributionElems(sourceFunctionSpace, targetFunctionSpace); + + test.execute(); + + test.outputFields("CubedSphere_CellColumns_"+std::to_string(mpi_color())); + } +} + +CASE("Cubed sphere dual grid with split comms") { + Fixture fixture; + + auto grid = mpi_color() == 0 ? atlas::Grid("CS-LFR-C-8") : atlas::Grid("CS-LFR-C-16"); + auto mpi_comm = util::Config("mpi_comm","split"); + + // Set mesh config. + const auto sourceMeshConfig = util::Config("partitioner", "equal_regions") | util::Config("halo", "0") | mpi_comm; + const auto targetMeshConfig = util::Config("partitioner", "cubedsphere") | util::Config("halo", "0") | mpi_comm; + + auto sourceMesh = MeshGenerator("cubedsphere_dual", sourceMeshConfig).generate(grid); + auto targetMesh = MeshGenerator("cubedsphere_dual", targetMeshConfig).generate(grid); + + EXPECT_EQ( sourceMesh.mpi_comm(), "split" ); + + SECTION("CubedSphereDualNodeColumns") { + const auto sourceFunctionSpace = functionspace::CubedSphereNodeColumns(sourceMesh); + const auto targetFunctionSpace = functionspace::CubedSphereNodeColumns(targetMesh); + + EXPECT_EQ( sourceFunctionSpace.mpi_comm(), "split" ); + + + auto test = TestRedistributionPoints2(sourceFunctionSpace, targetFunctionSpace); + + test.execute(); + + test.outputFields("CubedSphereDual_NodeColumns_"+std::to_string(mpi_color())); + } + + SECTION("CubedSphereDualCellColumns") { + const auto sourceFunctionSpace = functionspace::CubedSphereCellColumns(sourceMesh); + const auto targetFunctionSpace = functionspace::CubedSphereCellColumns(targetMesh); + + EXPECT_EQ( sourceFunctionSpace.mpi_comm(), "split" ); + + auto test = TestRedistributionElems(sourceFunctionSpace, targetFunctionSpace); + + test.execute(); + + test.outputFields("CubedSphereDual_CellColumns_"+std::to_string(mpi_color())); + } +} + } // namespace test } // namespace atlas diff --git a/src/tests/redistribution/test_redistribution_structured.cc b/src/tests/redistribution/test_redistribution_structured.cc index a6b4cc238..706c1df15 100644 --- a/src/tests/redistribution/test_redistribution_structured.cc +++ b/src/tests/redistribution/test_redistribution_structured.cc @@ -22,6 +22,22 @@ namespace atlas { namespace test { +int mpi_color() { + static int c = mpi::comm("world").rank()%2; + return c; +} + +struct Fixture { + Fixture() { + mpi::comm().split(mpi_color(),"split"); + } + ~Fixture() { + if (eckit::mpi::hasComm("split")) 
{ + eckit::mpi::deleteComm("split"); + } + } +}; + // Define test pattern for grid. template T testPattern(const double lambda, const double phi, const idx_t field, const idx_t level) { @@ -128,7 +144,7 @@ bool testStructColsToStructCols(const atlas::Grid& grid, const idx_t nFields, // Write mesh and fields to file. if (gmshOutput) { // Generate meshes. - const auto meshGen = atlas::MeshGenerator("structured"); + const auto meshGen = atlas::MeshGenerator("structured",util::Config("mpi_comm",sourceFunctionSpace.mpi_comm())); const auto sourceMesh = meshGen.generate(grid, sourcePartitioner); const auto targetMesh = meshGen.generate(grid, targetPartitioner); @@ -331,6 +347,28 @@ CASE("Redistribute Structured Columns") { } } +CASE("Redistribute Structured Columns with split comms") { + Fixture fixture; + SECTION("lonlat: checkerboard to equal_regions") { + util::Config mpi_comm("mpi_comm","split"); + std::string id = std::to_string(mpi_color()); + + // Set grid. + idx_t nFields = 5; + + auto grid = atlas::Grid("L48x37"); + + // Set partitioners. + auto sourcePartitioner = atlas::grid::Partitioner(option::type("checkerboard")|mpi_comm); + auto targetPartitioner = atlas::grid::Partitioner(option::type("equal_regions")|mpi_comm); + + // Check redistributer. + EXPECT(testStructColsToStructCols(grid, nFields, sourcePartitioner, targetPartitioner, + funcSpaceDefaultConfig(), funcSpaceDefaultConfig(), + true, id)); + } +} + } // namespace test } // namespace atlas
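The split-communicator tests above all exercise the same end-to-end pattern, sketched below. This sketch is illustrative and not part of the patch: the communicator name "split", the grid names, and the helper function are assumptions mirroring the test fixtures.

#include "atlas/functionspace/StructuredColumns.h"
#include "atlas/grid.h"
#include "atlas/option.h"
#include "atlas/parallel/mpi/mpi.h"
#include "atlas/util/Config.h"

void split_comm_roundtrip() {
    // Each rank joins one of two color groups; each group registers its own
    // sub-communicator under the local name "split".
    int color = atlas::mpi::comm("world").rank() % 2;
    atlas::mpi::comm().split(color, "split");

    // Objects built with "mpi_comm" set to "split" partition, halo-exchange,
    // gather and checksum over their color group only; "world" is untouched.
    atlas::Grid grid(color == 0 ? "O32" : "N32");
    auto fs = atlas::functionspace::StructuredColumns(
        grid, atlas::option::halo(1) | atlas::util::Config("mpi_comm", "split"));

    auto field = fs.createField<double>();
    field.haloExchange();  // runs on "split", not on "world"

    eckit::mpi::deleteComm("split");
}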