diff --git a/src/atlas/redistribution/detail/RedistributeGeneric.cc b/src/atlas/redistribution/detail/RedistributeGeneric.cc
index e869e9d56..9e86fff15 100644
--- a/src/atlas/redistribution/detail/RedistributeGeneric.cc
+++ b/src/atlas/redistribution/detail/RedistributeGeneric.cc
@@ -42,6 +42,7 @@ Field getGhostField(const FunctionSpace& functionspace) {
     }
     if (functionspace::EdgeColumns(functionspace) || functionspace::CellColumns(functionspace)) {
         // TODO: Move something like this into the functionspace::EdgeColumns and functionspace::CellColumns
+        auto& comm = mpi::comm(functionspace.mpi_comm());

         // Get mesh elements.
         const auto& elems = functionspace::EdgeColumns(functionspace)
@@ -58,7 +59,7 @@ Field getGhostField(const FunctionSpace& functionspace) {
         auto partition = array::make_view<int, 1>(elems.partition());

         // Set ghost field.
-        const auto thisPart = static_cast<int>(mpi::comm().rank());
+        const auto thisPart = static_cast<int>(comm.rank());
         for (idx_t i = 0; i < ghost.shape(0); ++i) {
             ghost(i) = partition(i) != thisPart || remote_index(i) != i;
         }
@@ -147,19 +148,20 @@ std::vector<uidx_t> getUidVal(const std::vector<IdxUid>& uidVec) {
 }

 // Communicate UID values, return receive buffer and displacements.
-std::pair<std::vector<uidx_t>, std::vector<int>> communicateUid(const std::vector<uidx_t>& sendBuffer) {
-    auto counts = std::vector<int>(mpi::comm().size());
-    mpi::comm().allGather(static_cast<int>(sendBuffer.size()), counts.begin(), counts.end());
+std::pair<std::vector<uidx_t>, std::vector<int>> communicateUid(const std::string& mpi_comm,
+                                                                const std::vector<uidx_t>& sendBuffer) {
+    auto& comm  = mpi::comm(mpi_comm);
+    auto counts = std::vector<int>(comm.size());
+    comm.allGather(static_cast<int>(sendBuffer.size()), counts.begin(), counts.end());

     auto disps = std::vector<int>{};
-    disps.reserve(mpi::comm().size() + 1);
+    disps.reserve(comm.size() + 1);
     disps.push_back(0);
     std::partial_sum(counts.begin(), counts.end(), std::back_inserter(disps));

     auto recvBuffer = std::vector<uidx_t>(static_cast<size_t>(disps.back()));

-    mpi::comm().allGatherv(sendBuffer.begin(), sendBuffer.end(), recvBuffer.begin(), counts.data(), disps.data());
+    comm.allGatherv(sendBuffer.begin(), sendBuffer.end(), recvBuffer.begin(), counts.data(), disps.data());

     return std::make_pair(recvBuffer, disps);
 }
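communicateUid() is the standard eckit counts-then-allGatherv idiom: gather every rank's element count, turn the counts into displacements by prefix sum, then gather the variable-length payloads in one call. A self-contained sketch of the same pattern (function name and element type are illustrative, not part of the patch):

    #include <cstddef>
    #include <iterator>
    #include <numeric>
    #include <vector>

    #include "eckit/mpi/Comm.h"

    // Sketch: gather a variable-length buffer from every rank onto all ranks.
    std::vector<long> allGatherVariable(const eckit::mpi::Comm& comm, const std::vector<long>& send) {
        // 1) Each rank announces how many elements it will contribute.
        std::vector<int> counts(comm.size());
        comm.allGather(static_cast<int>(send.size()), counts.begin(), counts.end());

        // 2) Displacements are the exclusive prefix sum of the counts.
        std::vector<int> disps{0};
        std::partial_sum(counts.begin(), counts.end(), std::back_inserter(disps));

        // 3) A single allGatherv fills a buffer of disps.back() elements;
        //    rank r's data starts at offset disps[r].
        std::vector<long> recv(static_cast<std::size_t>(disps.back()));
        comm.allGatherv(send.begin(), send.end(), recv.begin(), counts.data(), disps.data());
        return recv;
    }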
@@ -175,18 +177,22 @@ bool operator<(const uidx_t& lhs, const IdxUid& rhs) {

 // Find the intersection between local and global UIDs, then return local
 // indices of intersections and PE displacements in vector.
-std::pair<std::vector<idx_t>, std::vector<int>> getUidIntersection(const std::vector<IdxUid>& localUids,
+std::pair<std::vector<idx_t>, std::vector<int>> getUidIntersection(const std::string& mpi_comm,
+                                                                   const std::vector<IdxUid>& localUids,
                                                                    const std::vector<uidx_t>& globalUids,
                                                                    const std::vector<int>& globalDisps) {
     auto uidIntersection = std::vector<IdxUid>{};
     uidIntersection.reserve(localUids.size());

+    auto& comm    = mpi::comm(mpi_comm);
+    auto mpi_size = comm.size();
+
     auto disps = std::vector<int>{};
-    disps.reserve(mpi::comm().size() + 1);
+    disps.reserve(mpi_size + 1);
     disps.push_back(0);

     // Loop over all PEs and find UID intersection.
-    for (size_t i = 0; i < mpi::comm().size(); ++i) {
+    for (size_t i = 0; i < mpi_size; ++i) {
         // Get displaced iterators.
         auto globalUidsBegin = globalUids.begin() + globalDisps[i];
         auto globalUidsEnd   = globalUids.begin() + globalDisps[i + 1];
@@ -255,6 +261,10 @@ struct ForEach {
 }  // namespace

 void RedistributeGeneric::do_setup() {
+    ATLAS_ASSERT(source().mpi_comm() == target().mpi_comm());
+
+    mpi_comm_ = source().mpi_comm();
+
     // Get a unique ID (UID) for each owned member of functionspace.
     const auto sourceUidVec = getUidVec(source());
     const auto targetUidVec = getUidVec(target());
@@ -262,14 +272,14 @@ void RedistributeGeneric::do_setup() {
     // Communicate UID vectors to all PEs.
     auto sourceGlobalUids  = std::vector<uidx_t>{};
     auto sourceGlobalDisps = std::vector<int>{};
-    std::tie(sourceGlobalUids, sourceGlobalDisps) = communicateUid(getUidVal(sourceUidVec));
+    std::tie(sourceGlobalUids, sourceGlobalDisps) = communicateUid(mpi_comm_, getUidVal(sourceUidVec));
     auto targetGlobalUids  = std::vector<uidx_t>{};
     auto targetGlobalDisps = std::vector<int>{};
-    std::tie(targetGlobalUids, targetGlobalDisps) = communicateUid(getUidVal(targetUidVec));
+    std::tie(targetGlobalUids, targetGlobalDisps) = communicateUid(mpi_comm_, getUidVal(targetUidVec));

     // Get intersection of local UIDs and global UIDs.
-    std::tie(sourceLocalIdx_, sourceDisps_) = getUidIntersection(sourceUidVec, targetGlobalUids, targetGlobalDisps);
-    std::tie(targetLocalIdx_, targetDisps_) = getUidIntersection(targetUidVec, sourceGlobalUids, sourceGlobalDisps);
+    std::tie(sourceLocalIdx_, sourceDisps_) = getUidIntersection(mpi_comm_, sourceUidVec, targetGlobalUids, targetGlobalDisps);
+    std::tie(targetLocalIdx_, targetDisps_) = getUidIntersection(mpi_comm_, targetUidVec, sourceGlobalUids, sourceGlobalDisps);
 }

 void RedistributeGeneric::execute(const Field& sourceField, Field& targetField) const {
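getUidIntersection() relies on both sequences being sorted by UID; the patch expresses the match via std::set_intersection and the heterogeneous operator< overloads visible in the hunk context. An equivalent hand-rolled merge of one rank's slice, to make the index bookkeeping explicit (type aliases and names below are illustrative stand-ins for atlas' idx_t/uidx_t/IdxUid):

    #include <utility>
    #include <vector>

    using Uid    = long;
    using IdxUid = std::pair<long, Uid>;  // (local index, UID), kept sorted by UID

    // Sketch of one iteration of getUidIntersection's per-rank loop: walk two
    // sorted sequences in lockstep and keep the local index wherever the UIDs match.
    std::vector<long> intersectWithRank(const std::vector<IdxUid>& localUids,
                                        std::vector<Uid>::const_iterator remoteBegin,
                                        std::vector<Uid>::const_iterator remoteEnd) {
        std::vector<long> indices;
        auto localIt  = localUids.begin();
        auto remoteIt = remoteBegin;
        while (localIt != localUids.end() && remoteIt != remoteEnd) {
            if (localIt->second < *remoteIt) {
                ++localIt;  // local UID not on remote rank
            }
            else if (*remoteIt < localIt->second) {
                ++remoteIt;  // remote UID not owned locally
            }
            else {
                indices.push_back(localIt->first);  // match: record local index
                ++localIt;
                ++remoteIt;
            }
        }
        return indices;
    }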
@@ -369,6 +379,9 @@ void RedistributeGeneric::do_execute(const Field& sourceField, Field& targetField) const {
     auto sourceView = array::make_view<Value, Rank>(sourceField);
     auto targetView = array::make_view<Value, Rank>(targetField);

+    const auto& comm = mpi::comm(mpi_comm_);
+    auto mpi_size    = comm.size();
+
     // Get number of elems per column.
     int elemsPerCol = 1;
     for (int i = 1; i < Rank; ++i) {
@@ -377,18 +390,18 @@ void RedistributeGeneric::do_execute(const Field& sourceField, Field& targetField) const {

     // Set send displacement and counts vectors.
     auto sendDisps = std::vector<int>{};
-    sendDisps.reserve(mpi::comm().size() + 1);
+    sendDisps.reserve(mpi_size + 1);
     auto sendCounts = std::vector<int>{};
-    sendCounts.reserve(mpi::comm().size());
+    sendCounts.reserve(mpi_size);
     std::transform(sourceDisps_.begin(), sourceDisps_.end(), std::back_inserter(sendDisps),
                    [&](const int& disp) { return disp * elemsPerCol; });
     std::adjacent_difference(sendDisps.begin() + 1, sendDisps.end(), std::back_inserter(sendCounts));

     // Set recv displacement and counts vectors.
     auto recvDisps = std::vector<int>{};
-    recvDisps.reserve(mpi::comm().size() + 1);
+    recvDisps.reserve(mpi_size + 1);
     auto recvCounts = std::vector<int>{};
-    recvCounts.reserve(mpi::comm().size());
+    recvCounts.reserve(mpi_size);
     std::transform(targetDisps_.begin(), targetDisps_.end(), std::back_inserter(recvDisps),
                    [&](const int& disp) { return disp * elemsPerCol; });
     std::adjacent_difference(recvDisps.begin() + 1, recvDisps.end(), std::back_inserter(recvCounts));
@@ -403,8 +416,8 @@ void RedistributeGeneric::do_execute(const Field& sourceField, Field& targetField) const {
     ForEach<Rank>::apply(sourceLocalIdx_, sourceView, [&](const Value& elem) { *sendBufferIt++ = elem; });

     // Perform MPI communication.
-    mpi::comm().allToAllv(sendBuffer.data(), sendCounts.data(), sendDisps.data(), recvBuffer.data(), recvCounts.data(),
-                          recvDisps.data());
+    comm.allToAllv(sendBuffer.data(), sendCounts.data(), sendDisps.data(), recvBuffer.data(), recvCounts.data(),
+                   recvDisps.data());

     // Copy recvBuffer to targetField.
     ForEach<Rank>::apply(targetLocalIdx_, targetView, [&](Value& elem) { elem = *recvBufferIt++; });
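do_execute() scales the column displacements from setup by elemsPerCol and recovers per-rank counts with std::adjacent_difference. The contract allToAllv then expects is easy to state in isolation (a sketch with illustrative names; the real code reuses sourceDisps_/targetDisps_):

    #include <cstddef>
    #include <vector>

    #include "eckit/mpi/Comm.h"

    // Sketch: exchange variable-sized blocks between all ranks. Both disps
    // arrays must be partial sums with disps[0] == 0 and disps[comm.size()]
    // equal to the corresponding buffer size, so counts[r] == disps[r+1] - disps[r].
    void exchangeBlocks(const eckit::mpi::Comm& comm,
                        const std::vector<double>& sendBuffer, const std::vector<int>& sendDisps,
                        std::vector<double>& recvBuffer, const std::vector<int>& recvDisps) {
        const std::size_t size = comm.size();
        std::vector<int> sendCounts(size), recvCounts(size);
        for (std::size_t r = 0; r < size; ++r) {
            sendCounts[r] = sendDisps[r + 1] - sendDisps[r];
            recvCounts[r] = recvDisps[r + 1] - recvDisps[r];
        }
        comm.allToAllv(sendBuffer.data(), sendCounts.data(), sendDisps.data(),
                       recvBuffer.data(), recvCounts.data(), recvDisps.data());
    }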
diff --git a/src/atlas/redistribution/detail/RedistributeGeneric.h b/src/atlas/redistribution/detail/RedistributeGeneric.h
index a408f3e87..a650fd02c 100644
--- a/src/atlas/redistribution/detail/RedistributeGeneric.h
+++ b/src/atlas/redistribution/detail/RedistributeGeneric.h
@@ -7,6 +7,8 @@

 #pragma once

+#include <string>
+
 #include "atlas/redistribution/detail/RedistributionImpl.h"

 namespace atlas {
@@ -46,6 +48,8 @@ class RedistributeGeneric : public RedistributionImpl {

     // Partial sum of number of columns to receive from each PE.
     std::vector<int> targetDisps_{};
+
+    std::string mpi_comm_;
 };

 }  // namespace detail
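The header change above is the whole ownership story: the redistributor keeps only the communicator's registered name and looks it up on each use. A sketch of the idiom (class name illustrative):

    #include <string>

    #include "eckit/mpi/Comm.h"

    // Sketch: hold the communicator by name, resolve it on demand through the
    // eckit registry.
    class CommUser {
    public:
        explicit CommUser(const std::string& name): mpi_comm_(name) {}

        const eckit::mpi::Comm& comm() const { return eckit::mpi::comm(mpi_comm_.c_str()); }

    private:
        std::string mpi_comm_;  // e.g. "world" or "split"
    };

Storing a name rather than a Comm pointer or reference keeps the object cheap to copy and avoids a dangling reference if communicators are re-registered between setup and execution; the trade-off is a registry lookup per call and a hard failure if the name is no longer registered.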
diff --git a/src/atlas/redistribution/detail/RedistributeStructuredColumns.cc b/src/atlas/redistribution/detail/RedistributeStructuredColumns.cc
index 40127920f..3ef241fd7 100644
--- a/src/atlas/redistribution/detail/RedistributeStructuredColumns.cc
+++ b/src/atlas/redistribution/detail/RedistributeStructuredColumns.cc
@@ -69,6 +69,10 @@ void RedistributeStructuredColumns::do_setup() {
     // Check levels match.
     ATLAS_ASSERT(source_.levels() == target_.levels());

+    // Check that communicators match.
+    ATLAS_ASSERT(source_.mpi_comm() == target_.mpi_comm());
+    mpi_comm_ = source_.mpi_comm();
+
     // Get source and target range of this function space.
     const auto sourceRange = StructuredIndexRange(source_);

@@ -225,8 +229,8 @@ void RedistributeStructuredColumns::do_execute(const Field& sourceField, Field& targetField) const {
     forEachIndex(sendIntersections_, sendFunctor);

     // Communicate.
-    mpi::comm().allToAllv(sendBuffer.data(), sendCounts_.data(), sendDisplacements_.data(), recvBuffer.data(),
-                          recvCounts_.data(), recvDisplacements_.data());
+    mpi::comm(mpi_comm_).allToAllv(sendBuffer.data(), sendCounts_.data(), sendDisplacements_.data(), recvBuffer.data(),
+                                   recvCounts_.data(), recvDisplacements_.data());

     // Read data from buffer.
     forEachIndex(recvIntersections_, recvFunctor);
@@ -246,19 +250,23 @@ StructuredIndexRange::StructuredIndexRange(const functionspace::StructuredColumns& structuredColumns) {
         iBeginEnd_.push_back(std::make_pair(structuredColumns.i_begin(j), structuredColumns.i_end(j)));
     }

+    mpi_comm_ = structuredColumns.mpi_comm();
+
     return;
 }

 // Get index ranges from all PEs.
 StructuredIndexRangeVector StructuredIndexRange::getStructuredIndexRanges() const {
+    auto& comm = mpi::comm(mpi_comm());
+
     // Get MPI communicator size.
-    const auto mpiSize = static_cast<size_t>(atlas::mpi::comm().size());
+    const auto mpiSize = static_cast<size_t>(comm.size());

     // Set recv buffer for j range.
     auto jRecvBuffer = idxPairVector(mpiSize);

     // Perform all gather.
-    atlas::mpi::comm().allGather(jBeginEnd_, jRecvBuffer.begin(), jRecvBuffer.end());
+    comm.allGather(jBeginEnd_, jRecvBuffer.begin(), jRecvBuffer.end());

     // Set i receive counts.
     auto iRecvCounts = transformVector(
@@ -272,8 +280,8 @@ StructuredIndexRangeVector StructuredIndexRange::getStructuredIndexRanges() const {
     auto irecvBuffer = idxPairVector(static_cast<size_t>(iRecvDisplacements.back() + iRecvCounts.back()));

     // Perform all gather.
-    atlas::mpi::comm().allGatherv(iBeginEnd_.cbegin(), iBeginEnd_.cend(), irecvBuffer.begin(), iRecvCounts.data(),
-                                  iRecvDisplacements.data());
+    comm.allGatherv(iBeginEnd_.cbegin(), iBeginEnd_.cend(), irecvBuffer.begin(), iRecvCounts.data(),
+                    iRecvDisplacements.data());

     // Make vector of indexRange structs.
     auto indexRanges = StructuredIndexRangeVector{};
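getStructuredIndexRanges() gathers one fixed-size j-range per rank with the scalar allGather overload, then the variable-length i-ranges with allGatherv. A sketch of the fixed-size step, assuming eckit maps std::pair<int, int> onto a native MPI pair type (the patch's own idxPair gather suggests it does):

    #include <utility>
    #include <vector>

    #include "eckit/mpi/Comm.h"

    // Sketch: every rank contributes exactly one (begin, end) range, so a plain
    // allGather with one element per rank suffices; rank r's range lands in all[r].
    std::vector<std::pair<int, int>> gatherRanges(const eckit::mpi::Comm& comm,
                                                  const std::pair<int, int>& mine) {
        std::vector<std::pair<int, int>> all(comm.size());
        comm.allGather(mine, all.begin(), all.end());
        return all;
    }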
diff --git a/src/atlas/redistribution/detail/RedistributeStructuredColumns.h b/src/atlas/redistribution/detail/RedistributeStructuredColumns.h
index 34425282a..7972078b8 100644
--- a/src/atlas/redistribution/detail/RedistributeStructuredColumns.h
+++ b/src/atlas/redistribution/detail/RedistributeStructuredColumns.h
@@ -92,6 +92,8 @@ class RedistributeStructuredColumns : public RedistributionImpl {
     std::vector<int> sendDisplacements_{};
     std::vector<int> recvCounts_{};
     std::vector<int> recvDisplacements_{};
+
+    std::string mpi_comm_;
 };

 /// \brief Helper class for function space intersections.
@@ -116,12 +118,16 @@ class StructuredIndexRange {
     template <typename functorType>
     void forEach(const functorType&) const;

+    const std::string& mpi_comm() const { return mpi_comm_; }
+
 private:
     // Begin and end of j range.
     idxPair jBeginEnd_{};

     // Begin and end of i range for each j.
     idxPairVector iBeginEnd_{};
+
+    std::string mpi_comm_;
 };

 }  // namespace detail
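Both test files below add the same Fixture: rank parity picks a colour, the world communicator is split into a communicator registered as "split", and the destructor deregisters it so later tests can re-split. The eckit calls involved, in one sketch (function name illustrative):

    #include <string>

    #include "eckit/mpi/Comm.h"

    // Sketch of the Fixture lifecycle: split "world" by colour into a named
    // communicator, run work on it, then delete it to avoid a name clash when
    // the next test splits again.
    void withSplitComm(const std::string& name) {
        const int colour = static_cast<int>(eckit::mpi::comm("world").rank()) % 2;
        eckit::mpi::comm("world").split(colour, name);

        // All ranks that passed the same colour now share eckit::mpi::comm(name).
        // ... build meshes / function spaces / redistributions on it here ...

        if (eckit::mpi::hasComm(name.c_str())) {
            eckit::mpi::deleteComm(name.c_str());
        }
    }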
diff --git a/src/tests/redistribution/test_redistribution_generic.cc b/src/tests/redistribution/test_redistribution_generic.cc
index be0be22dc..3b0c60793 100644
--- a/src/tests/redistribution/test_redistribution_generic.cc
+++ b/src/tests/redistribution/test_redistribution_generic.cc
@@ -27,6 +27,23 @@
 namespace atlas {
 namespace test {

+int mpi_color() {
+    static int c = mpi::comm("world").rank() % 2;
+    return c;
+}
+
+struct Fixture {
+    Fixture() {
+        mpi::comm().split(mpi_color(), "split");
+    }
+    ~Fixture() {
+        if (eckit::mpi::hasComm("split")) {
+            eckit::mpi::deleteComm("split");
+        }
+    }
+};
+
 // Set floating point tolerance.
 template <typename Value>
 Value tolerance() {
@@ -111,24 +128,27 @@ Value testPattern(const mesh::Connectivity::Row& elem, const array::ArrayView<double, 2>& lonLatView) {
 // Try and get mesh from functionspace.
 Mesh getMesh(const FunctionSpace& functionSpace) {
-    const auto* cellColumnsPtr       = functionSpace.get()->cast<functionspace::detail::CellColumns>();
-    const auto* edgeColumnsPtr       = functionSpace.get()->cast<functionspace::detail::EdgeColumns>();
-    const auto* nodeColumnsPtr       = functionSpace.get()->cast<functionspace::detail::NodeColumns>();
-    const auto* structuredColumnsPtr = functionSpace.get()->cast<functionspace::detail::StructuredColumns>();
+    const auto cellColumns       = functionspace::CellColumns(functionSpace);
+    const auto edgeColumns       = functionspace::EdgeColumns(functionSpace);
+    const auto nodeColumns       = functionspace::NodeColumns(functionSpace);
+    const auto structuredColumns = functionspace::StructuredColumns(functionSpace);

     auto mesh = Mesh();
-    if (cellColumnsPtr) {
-        mesh = cellColumnsPtr->mesh();
+    if (cellColumns) {
+        mesh = cellColumns.mesh();
     }
-    else if (edgeColumnsPtr) {
-        mesh = edgeColumnsPtr->mesh();
+    else if (edgeColumns) {
+        mesh = edgeColumns.mesh();
     }
-    else if (nodeColumnsPtr) {
-        mesh = nodeColumnsPtr->mesh();
+    else if (nodeColumns) {
+        mesh = nodeColumns.mesh();
     }
-    else if (structuredColumnsPtr) {
-        mesh = MeshGenerator("structured").generate(structuredColumnsPtr->grid(), structuredColumnsPtr->distribution());
+    else if (structuredColumns) {
+        auto mpi_comm    = util::Config("mpi_comm", functionSpace.mpi_comm());
+        auto grid        = structuredColumns.grid();
+        auto partitioner = grid::Partitioner(functionSpace.distribution(), mpi_comm);
+        mesh             = MeshGenerator("structured", mpi_comm).generate(grid, partitioner);
     }
     return mesh;
 }
@@ -136,14 +156,14 @@ Mesh getMesh(const FunctionSpace& functionSpace) {
 // Try and get cells or edges node-connectivity from functionspace.
 const mesh::HybridElements::Connectivity* getConnectivity(const FunctionSpace& functionSpace) {
     // Try and create a pointer to CellColumns or EdgeColumns.
-    const auto* cellColumnsPtr = functionSpace.get()->cast<functionspace::detail::CellColumns>();
-    const auto* edgeColumnsPtr = functionSpace.get()->cast<functionspace::detail::EdgeColumns>();
+    const auto cellColumns = functionspace::CellColumns(functionSpace);
+    const auto edgeColumns = functionspace::EdgeColumns(functionSpace);

-    if (cellColumnsPtr) {
-        return &(cellColumnsPtr->cells().node_connectivity());
+    if (cellColumns) {
+        return &(cellColumns.cells().node_connectivity());
     }
-    else if (edgeColumnsPtr) {
-        return &(edgeColumnsPtr->edges().node_connectivity());
+    else if (edgeColumns) {
+        return &(edgeColumns.edges().node_connectivity());
     }
     else {
         return nullptr;
@@ -219,7 +239,7 @@ struct TestRedistributionPoints1 : public TestRedistribution<Value, 1> {
         // Perform redistribution.
         this->redist_.execute(this->sourceFieldSet_, this->targetFieldSet_);

-        // Perform halo excahnge;
+        // Perform halo exchange.
         this->targetFunctionSpace_.haloExchange(this->targetFieldSet_);

         // Check target field.
@@ -229,7 +249,8 @@ struct TestRedistributionPoints1 : public TestRedistribution<Value, 1> {
                                 testPattern(targetLonlatView(i, LON), targetLonlatView(i, LAT), 0)));
             ++nCheck;
         }
-        mpi::comm().allReduceInPlace(nCheck, eckit::mpi::Operation::SUM);
+        const auto& comm = mpi::comm(this->sourceFunctionSpace_.mpi_comm());
+        comm.allReduceInPlace(nCheck, eckit::mpi::Operation::SUM);
         Log::debug() << "Checked " << nCheck << " elements." << std::endl;
     }
 };
@@ -252,7 +273,7 @@ struct TestRedistributionPoints2 : public TestRedistribution<Value, 2> {
         // Perform redistribution.
         this->redist_.execute(this->sourceFieldSet_, this->targetFieldSet_);

-        // Perform halo excahnge;
+        // Perform halo exchange.
         this->targetFunctionSpace_.haloExchange(this->targetFieldSet_);

         // Check target field.
@@ -264,7 +285,8 @@ struct TestRedistributionPoints2 : public TestRedistribution<Value, 2> {
                 ++nCheck;
             }
         }
-        mpi::comm().allReduceInPlace(nCheck, eckit::mpi::Operation::SUM);
+        const auto& comm = mpi::comm(this->sourceFunctionSpace_.mpi_comm());
+        comm.allReduceInPlace(nCheck, eckit::mpi::Operation::SUM);
         Log::debug() << "Checked " << nCheck << " elements." << std::endl;
     }
 };
@@ -288,7 +310,7 @@ struct TestRedistributionPoints3 : public TestRedistribution<Value, 3> {
         // Perform redistribution.
         this->redist_.execute(this->sourceFieldSet_, this->targetFieldSet_);

-        // Perform halo excahnge;
+        // Perform halo exchange.
         this->targetFunctionSpace_.haloExchange(this->targetFieldSet_);

@@ -304,7 +326,8 @@ struct TestRedistributionPoints3 : public TestRedistribution<Value, 3> {
                 ++nCheck;
             }
         }
-        mpi::comm().allReduceInPlace(nCheck, eckit::mpi::Operation::SUM);
+        const auto& comm = mpi::comm(this->sourceFunctionSpace_.mpi_comm());
+        comm.allReduceInPlace(nCheck, eckit::mpi::Operation::SUM);
         Log::debug() << "Checked " << nCheck << " elements." << std::endl;
     }
 };
@@ -330,7 +353,7 @@ struct TestRedistributionElems : public TestRedistribution<Value, 1> {
         // Perform redistribution.
         this->redist_.execute(this->sourceFieldSet_, this->targetFieldSet_);

-        // Perform halo excahnge;
+        // Perform halo exchange.
         this->targetFunctionSpace_.haloExchange(this->targetFieldSet_);

         // Check target field.
@@ -339,12 +362,12 @@ struct TestRedistributionElems : public TestRedistribution<Value, 1> {
             EXPECT(checkValue(this->targetView_(i), testPattern(targetConnectivity->row(i), targetLonLatView)));
             ++nCheck;
         }
-        mpi::comm().allReduceInPlace(nCheck, eckit::mpi::Operation::SUM);
+        const auto& comm = mpi::comm(this->sourceFunctionSpace_.mpi_comm());
+        comm.allReduceInPlace(nCheck, eckit::mpi::Operation::SUM);
         Log::debug() << "Checked " << nCheck << " elements."
                      << std::endl;
     }
 };

-
 CASE("Structured grid") {
     auto grid = atlas::Grid("L24x19");

@@ -529,6 +552,209 @@ CASE("Cubed sphere dual grid") {
     }
 }

+CASE("Structured grid with split comms") {
+    Fixture fixture;
+
+    auto grid     = mpi_color() == 0 ? atlas::Grid("L24x13") : atlas::Grid("O16");
+    auto mpi_comm = util::Config("mpi_comm", "split");
+
+    // auto grid = atlas::Grid("O48");
+    // auto mpi_comm = util::Config("mpi_comm","world");
+
+    // Set mesh config.
+    const auto sourceMeshConfig = util::Config("partitioner", "equal_regions") | mpi_comm;
+    const auto targetMeshConfig = util::Config("partitioner", "equal_bands") | mpi_comm;
+
+    auto sourceMesh = MeshGenerator("structured", sourceMeshConfig).generate(grid);
+    auto targetMesh = MeshGenerator("structured", targetMeshConfig).generate(grid);
+
+    SECTION("NodeColumns") {
+        const auto sourceFunctionSpace = functionspace::NodeColumns(sourceMesh, util::Config("halo", 2));
+        const auto targetFunctionSpace = functionspace::NodeColumns(targetMesh, util::Config("halo", 2));
+
+        // Test double for different ranks.
+        auto test1 = TestRedistributionPoints1<double>(sourceFunctionSpace, targetFunctionSpace);
+        auto test2 = TestRedistributionPoints2<double>(sourceFunctionSpace, targetFunctionSpace);
+        auto test3 = TestRedistributionPoints3<double>(sourceFunctionSpace, targetFunctionSpace);
+
+        // Test float.
+        auto test4 = TestRedistributionPoints1<float>(sourceFunctionSpace, targetFunctionSpace);
+
+        // Test int.
+        auto test5 = TestRedistributionPoints1<int>(sourceFunctionSpace, targetFunctionSpace);
+
+        // Test long.
+        auto test6 = TestRedistributionPoints1<long>(sourceFunctionSpace, targetFunctionSpace);
+
+        test1.execute();
+        test2.execute();
+        test3.execute();
+        test4.execute();
+        test5.execute();
+        test6.execute();
+
+        test2.outputFields("StructuredGrid_NodeColumns_" + std::to_string(mpi_color()));
+    }
+    SECTION("CellColumns") {
+        // No build_cells_global_idx method implemented in mesh/actions/BuildParallelFields.cc.
+        Log::debug() << "Structured Grid Cell Columns currently unsupported." << std::endl;
+
+        //const auto sourceFunctionSpace = functionspace::CellColumns( sourceMesh, util::Config( "halo", 0 ) );
+        //const auto targetFunctionSpace = functionspace::CellColumns( targetMesh, util::Config( "halo", 0 ) );
+
+        //auto test = TestRedistributionElems<double>( sourceFunctionSpace, targetFunctionSpace );
+
+        //test.execute();
+
+        //test.outputFields( "StructuredGrid_CellColumns_" + std::to_string(mpi_color()) );
+    }
+    SECTION("EdgeColumns") {
+        // Note: StructuredGrid EdgeColumns redistribution currently only works for halo = 0.
+        const auto sourceFunctionSpace = functionspace::EdgeColumns(sourceMesh, util::Config("halo", 0));
+        const auto targetFunctionSpace = functionspace::EdgeColumns(targetMesh, util::Config("halo", 0));
+
+        // Test long int.
+        auto test = TestRedistributionElems<long>(sourceFunctionSpace, targetFunctionSpace);
+
+        test.execute();
+
+        // EdgeColumns not currently supported by gmsh IO.
+    }
+    SECTION("Structured Columns") {
+        const auto sourceFunctionSpace = functionspace::StructuredColumns(
+            grid, grid::Partitioner("equal_regions", mpi_comm),
+            util::Config("halo", 2) | util::Config("periodic_points", true) | mpi_comm);
+        const auto targetFunctionSpace = functionspace::StructuredColumns(
+            grid, grid::Partitioner("regular_bands", mpi_comm),
+            util::Config("halo", 2) | util::Config("periodic_points", true) | mpi_comm);

+        auto test = TestRedistributionPoints2<double>(sourceFunctionSpace, targetFunctionSpace);
+
+        test.execute();
+
+        test.outputFields("StructuredGrid_StructuredColumns_" + std::to_string(mpi_color()));
+    }
+    SECTION("Point Cloud") {
+        // Make a point cloud from NodeColumns functionspace.
+        const auto sourceFunctionSpace = functionspace::NodeColumns(sourceMesh, util::Config("halo", 0));
+        const auto targetFunctionSpace = functionspace::NodeColumns(targetMesh, util::Config("halo", 0));
+
+        // Make a vector of lonlats.
+        auto sourceLonLat = std::vector<PointXY>{};
+        auto targetLonLat = std::vector<PointXY>{};
+
+        const auto sourceGhostView  = array::make_view<int, 1>(sourceFunctionSpace.ghost());
+        const auto sourceLonLatView = array::make_view<double, 2>(sourceFunctionSpace.lonlat());
+        const auto targetGhostView  = array::make_view<int, 1>(targetFunctionSpace.ghost());
+        const auto targetLonLatView = array::make_view<double, 2>(targetFunctionSpace.lonlat());
+
+        // Add non-ghost lonlats to vector.
+        sourceLonLat.reserve(sourceFunctionSpace.size());
+        for (idx_t i = 0; i < sourceFunctionSpace.size(); ++i) {
+            if (!sourceGhostView(i)) {
+                sourceLonLat.emplace_back(sourceLonLatView(i, LON), sourceLonLatView(i, LAT));
+            }
+        }
+        targetLonLat.reserve(targetFunctionSpace.size());
+        for (idx_t i = 0; i < targetFunctionSpace.size(); ++i) {
+            if (!targetGhostView(i)) {
+                targetLonLat.emplace_back(targetLonLatView(i, LON), targetLonLatView(i, LAT));
+            }
+        }
+
+        // Make point cloud functionspaces.
+        const auto sourcePointCloud = functionspace::PointCloud(sourceLonLat);
+        const auto targetPointCloud = functionspace::PointCloud(targetLonLat);
+
+        auto test = TestRedistributionPoints2<double>(sourcePointCloud, targetPointCloud);
+
+        test.execute();
+    }
+}
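Condensed, the pattern this CASE exercises is: build every grid-related object against the named communicator, then let the redistribution assert consistency in do_setup(). A minimal sketch of that flow, assuming the usual Redistribution front-end (not a verbatim excerpt from the test):

    #include "atlas/field.h"
    #include "atlas/functionspace/StructuredColumns.h"
    #include "atlas/grid.h"
    #include "atlas/grid/Partitioner.h"
    #include "atlas/redistribution/Redistribution.h"
    #include "atlas/util/Config.h"

    // Sketch: redistribute a field between two partitionings of the same grid,
    // with every object anchored to the named communicator "split".
    void redistributeOnSplit() {
        const auto mpi_comm = atlas::util::Config("mpi_comm", "split");
        const auto grid     = atlas::Grid("O16");

        const auto source = atlas::functionspace::StructuredColumns(
            grid, atlas::grid::Partitioner("equal_regions", mpi_comm), mpi_comm);
        const auto target = atlas::functionspace::StructuredColumns(
            grid, atlas::grid::Partitioner("regular_bands", mpi_comm), mpi_comm);

        // do_setup() asserts that source and target agree on the communicator.
        auto redist = atlas::Redistribution(source, target);

        auto sourceField = source.createField<double>();
        auto targetField = target.createField<double>();
        // ... fill sourceField ...
        redist.execute(sourceField, targetField);
    }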
+
+CASE("Cubed sphere grid with split comms") {
+    Fixture fixture;
+
+    auto grid     = mpi_color() == 0 ? atlas::Grid("CS-LFR-C-8") : atlas::Grid("CS-LFR-C-16");
+    auto mpi_comm = util::Config("mpi_comm", "split");
+
+    // Set mesh config.
+    const auto sourceMeshConfig = util::Config("partitioner", "equal_regions") | util::Config("halo", "2") | mpi_comm;
+    const auto targetMeshConfig = util::Config("partitioner", "cubedsphere") | util::Config("halo", "2") | mpi_comm;
+
+    auto sourceMesh = MeshGenerator("cubedsphere", sourceMeshConfig).generate(grid);
+    auto targetMesh = MeshGenerator("cubedsphere", targetMeshConfig).generate(grid);
+
+    SECTION("CubedSphereNodeColumns") {
+        const auto sourceFunctionSpace = functionspace::CubedSphereNodeColumns(sourceMesh);
+        const auto targetFunctionSpace = functionspace::CubedSphereNodeColumns(targetMesh);
+
+        EXPECT_EQ(sourceFunctionSpace.mpi_comm(), "split");
+
+        auto test = TestRedistributionPoints2<double>(sourceFunctionSpace, targetFunctionSpace);
+
+        test.execute();
+
+        test.outputFields("CubedSphere_NodeColumns_" + std::to_string(mpi_color()));
+    }
+
+    SECTION("CubedSphereCellColumns") {
+        const auto sourceFunctionSpace = functionspace::CubedSphereCellColumns(sourceMesh);
+        const auto targetFunctionSpace = functionspace::CubedSphereCellColumns(targetMesh);
+
+        EXPECT_EQ(sourceFunctionSpace.mpi_comm(), "split");
+
+        auto test = TestRedistributionElems<double>(sourceFunctionSpace, targetFunctionSpace);
+
+        test.execute();
+
+        test.outputFields("CubedSphere_CellColumns_" + std::to_string(mpi_color()));
+    }
+}
+
+CASE("Cubed sphere dual grid with split comms") {
+    Fixture fixture;
+
+    auto grid     = mpi_color() == 0 ? atlas::Grid("CS-LFR-C-8") : atlas::Grid("CS-LFR-C-16");
+    auto mpi_comm = util::Config("mpi_comm", "split");
+
+    // Set mesh config.
+    const auto sourceMeshConfig = util::Config("partitioner", "equal_regions") | util::Config("halo", "0") | mpi_comm;
+    const auto targetMeshConfig = util::Config("partitioner", "cubedsphere") | util::Config("halo", "0") | mpi_comm;
+
+    auto sourceMesh = MeshGenerator("cubedsphere_dual", sourceMeshConfig).generate(grid);
+    auto targetMesh = MeshGenerator("cubedsphere_dual", targetMeshConfig).generate(grid);
+
+    EXPECT_EQ(sourceMesh.mpi_comm(), "split");
+
+    SECTION("CubedSphereDualNodeColumns") {
+        const auto sourceFunctionSpace = functionspace::CubedSphereNodeColumns(sourceMesh);
+        const auto targetFunctionSpace = functionspace::CubedSphereNodeColumns(targetMesh);
+
+        EXPECT_EQ(sourceFunctionSpace.mpi_comm(), "split");
+
+        auto test = TestRedistributionPoints2<double>(sourceFunctionSpace, targetFunctionSpace);
+
+        test.execute();
+
+        test.outputFields("CubedSphereDual_NodeColumns_" + std::to_string(mpi_color()));
+    }
+
+    SECTION("CubedSphereDualCellColumns") {
+        const auto sourceFunctionSpace = functionspace::CubedSphereCellColumns(sourceMesh);
+        const auto targetFunctionSpace = functionspace::CubedSphereCellColumns(targetMesh);
+
+        EXPECT_EQ(sourceFunctionSpace.mpi_comm(), "split");
+
+        auto test = TestRedistributionElems<double>(sourceFunctionSpace, targetFunctionSpace);
+
+        test.execute();
+
+        test.outputFields("CubedSphereDual_CellColumns_" + std::to_string(mpi_color()));
+    }
+}
+
 }  // namespace test
 }  // namespace atlas
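The structured test below also pins its gmsh mesh generation to the function space's communicator. The config-driven way to route a named communicator into the mesh machinery looks like this (a sketch; helper name and partitioner choice are illustrative):

    #include <string>

    #include "atlas/grid.h"
    #include "atlas/grid/Partitioner.h"
    #include "atlas/mesh/Mesh.h"
    #include "atlas/meshgenerator.h"
    #include "atlas/util/Config.h"

    // Sketch: pass "mpi_comm" through configuration so generator, partitioner,
    // and resulting mesh all live on the same named communicator.
    atlas::Mesh makeMeshOn(const std::string& comm_name, const atlas::Grid& grid) {
        const auto mpi_comm = atlas::util::Config("mpi_comm", comm_name);
        const auto meshGen  = atlas::MeshGenerator("structured", mpi_comm);
        return meshGen.generate(grid, atlas::grid::Partitioner("equal_regions", mpi_comm));
    }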
diff --git a/src/tests/redistribution/test_redistribution_structured.cc b/src/tests/redistribution/test_redistribution_structured.cc
index a6b4cc238..706c1df15 100644
--- a/src/tests/redistribution/test_redistribution_structured.cc
+++ b/src/tests/redistribution/test_redistribution_structured.cc
@@ -22,6 +22,22 @@
 namespace atlas {
 namespace test {

+int mpi_color() {
+    static int c = mpi::comm("world").rank() % 2;
+    return c;
+}
+
+struct Fixture {
+    Fixture() {
+        mpi::comm().split(mpi_color(), "split");
+    }
+    ~Fixture() {
+        if (eckit::mpi::hasComm("split")) {
+            eckit::mpi::deleteComm("split");
+        }
+    }
+};
+
 // Define test pattern for grid.
 template <typename T>
 T testPattern(const double lambda, const double phi, const idx_t field, const idx_t level) {
@@ -128,7 +144,7 @@ bool testStructColsToStructCols(const atlas::Grid& grid, const idx_t nFields,
     // Write mesh and fields to file.
     if (gmshOutput) {
         // Generate meshes.
-        const auto meshGen = atlas::MeshGenerator("structured");
+        const auto meshGen = atlas::MeshGenerator("structured", util::Config("mpi_comm", sourceFunctionSpace.mpi_comm()));
         const auto sourceMesh = meshGen.generate(grid, sourcePartitioner);
         const auto targetMesh = meshGen.generate(grid, targetPartitioner);

@@ -331,6 +347,28 @@ CASE("Redistribute Structured Columns") {
     }
 }

+CASE("Redistribute Structured Columns with split comms") {
+    Fixture fixture;
+    SECTION("lonlat: checkerboard to equal_regions") {
+        util::Config mpi_comm("mpi_comm", "split");
+        std::string id = std::to_string(mpi_color());
+
+        // Set grid.
+        idx_t nFields = 5;
+
+        auto grid = atlas::Grid("L48x37");
+
+        // Set partitioners.
+        auto sourcePartitioner = atlas::grid::Partitioner(option::type("checkerboard") | mpi_comm);
+        auto targetPartitioner = atlas::grid::Partitioner(option::type("equal_regions") | mpi_comm);
+
+        // Check redistribution.
+        EXPECT(testStructColsToStructCols(grid, nFields, sourcePartitioner, targetPartitioner,
+                                          funcSpaceDefaultConfig(), funcSpaceDefaultConfig(), true, id));
+    }
+}
+
 }  // namespace test
 }  // namespace atlas
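Finally, the recurring verification idiom both test files switched to: count locally checked points, then sum over the function space's own communicator rather than the default one. A sketch (helper name illustrative):

    #include <string>

    #include "atlas/parallel/mpi/mpi.h"
    #include "eckit/mpi/Comm.h"

    // Sketch: with split comms, reducing over the default communicator would
    // mix the two colours' counts (each colour checks a different grid), so the
    // reduction must run on the communicator the function space was built on.
    int globalCheckCount(int nCheck, const std::string& comm_name) {
        const auto& comm = atlas::mpi::comm(comm_name);
        comm.allReduceInPlace(nCheck, eckit::mpi::Operation::SUM);
        return nCheck;
    }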