From db0d4e90074a1c0a9e98534a9f435e9c27994931 Mon Sep 17 00:00:00 2001
From: andrthu
Date: Mon, 17 Feb 2020 14:54:29 +0100
Subject: [PATCH 1/2] Enable multiple overlap layers, as well as functionality
 for restricted overlapping Schwarz

---
 opm/grid/CpGrid.hpp                  |  12 ++
 opm/grid/common/GridPartitioning.cpp | 226 ++++++++++++++++++++++-----
 opm/grid/common/GridPartitioning.hpp |   8 +-
 opm/grid/cpgrid/CpGrid.cpp           |  41 ++++-
 4 files changed, 246 insertions(+), 41 deletions(-)

diff --git a/opm/grid/CpGrid.hpp b/opm/grid/CpGrid.hpp
index 765766876b..9ae1d89a87 100644
--- a/opm/grid/CpGrid.hpp
+++ b/opm/grid/CpGrid.hpp
@@ -1327,6 +1327,11 @@ namespace Dune
             return current_view_data_->cell_remote_indices_;
         }
 
+        std::vector<int> cellHasWell() const
+        {
+            return *cell_has_well_;
+        }
+
 #endif
 
     private:
@@ -1366,6 +1371,13 @@ namespace Dune
          * @warning Will only update owner cells
          */
         std::shared_ptr<InterfaceMap> point_scatter_gather_interfaces_;
+
+        /// \brief Mark all cells perforated by wells.
+        void findWellperforatedCells(const std::vector<cpgrid::OpmWellType>* wells,
+                                     std::vector<int>& cell_has_well);
+
+        std::shared_ptr<std::vector<int>> cell_has_well_;
+
     }; // end Class CpGrid
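A note on the CpGrid.hpp hunk above: cellHasWell() exposes the per-cell well flags that the rest of the series keys its owner/overlap/ghost classification on. The standalone sketch below is illustrative only (Attribute and classifyOverlapCell are made-up stand-ins, not symbols from this patch); it shows the rule the series implements: cells perforated by a well are always kept as copy (ghost) cells, because wells are not parallelized, while the remaining halo cells become overlap cells that take part in restricted overlapping Schwarz updates.

    #include <cassert>
    #include <vector>

    enum class Attribute { owner, overlap, copy };

    // A cell on the last overlap level, or a cell perforated by a well,
    // becomes a ghost (copy) cell; other halo cells become overlap cells.
    Attribute classifyOverlapCell(const std::vector<int>& cellHasWell,
                                  int cellIndex, bool lastLayer)
    {
        if (lastLayer || cellHasWell[cellIndex] == 1)
            return Attribute::copy;
        return Attribute::overlap;
    }

    int main()
    {
        std::vector<int> cellHasWell = {0, 1, 0};   // cell 1 is perforated
        assert(classifyOverlapCell(cellHasWell, 1, false) == Attribute::copy);
        assert(classifyOverlapCell(cellHasWell, 0, false) == Attribute::overlap);
    }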
diff --git a/opm/grid/common/GridPartitioning.cpp b/opm/grid/common/GridPartitioning.cpp
index d705f5a483..a329c9461c 100644
--- a/opm/grid/common/GridPartitioning.cpp
+++ b/opm/grid/common/GridPartitioning.cpp
@@ -242,6 +242,97 @@ namespace Dune
         }
     }
 
+/// \brief Separate overlap and ghost cells.
+void seperateOverlapAndGhostCells(const CpGrid& grid, const std::vector<int>& cell_has_well,
+                                  std::vector<int>& part_type, int layers)
+{
+
+    auto lid = grid.localIdSet();
+    auto gid = grid.globalIdSet();
+    part_type.resize(grid.numCells(), 0);
+
+    for (auto it = grid.leafbegin<0>(); it != grid.leafend<0>(); ++it) {
+        auto elem = *it;
+
+        if (elem.partitionType() == InteriorEntity) {
+            auto id = lid.id(elem);
+
+            part_type[id] = 1;
+
+            for (CpGrid::LeafIntersectionIterator iit = elem.ileafbegin(); iit != elem.ileafend(); ++iit) {
+                auto inter = *iit;
+                if ( inter.neighbor() ) {
+                    auto nab = inter.outside();
+                    int nab_lid = lid.id(nab);
+                    if (nab.partitionType() != InteriorEntity && part_type[nab_lid] == 0) {
+                        int nab_gid = gid.id(nab);
+
+                        if ( cell_has_well[nab_gid] == 1 ) {
+                            part_type[nab_lid] = layers + 1;
+                        }
+                        else {
+                            part_type[nab_lid] = 2;
+                        }
+                    }
+                }
+            }
+        }
+    }
+
+    int layer = 2;
+    while (layer < layers + 1) {
+        for (auto it = grid.leafbegin<0>(); it != grid.leafend<0>(); ++it) {
+
+            auto elem = *it;
+            int id = lid.id(elem);
+            bool isLayer = part_type[id] == layer || part_type[id] == layers + 1;
+
+            if (elem.partitionType() != InteriorEntity && isLayer) {
+                for (CpGrid::LeafIntersectionIterator iit = elem.ileafbegin(); iit != elem.ileafend(); ++iit) {
+
+                    auto inter = *iit;
+                    if ( inter.neighbor() ) {
+
+                        auto nab = inter.outside();
+                        int nab_gid = gid.id(nab);
+                        int nab_lid = lid.id(nab);
+
+                        if (nab.partitionType() != InteriorEntity && part_type[nab_lid] == 0) {
+                            if (cell_has_well[nab_gid] == 1) {
+                                part_type[nab_lid] = layers + 1;
+                            }
+                            else {
+                                part_type[nab_lid] = layer + 1;
+                            }
+                        }
+                    }
+                }
+            }
+            /*
+            else if (elem.partitionType() != InteriorEntity && part_type[id] == layers + 1) {
+                int gid_e = gid.id(elem);
+                bool isWell = cell_has_well[gid_e] == 1;
+                if ( isWell ) {
+                    for (CpGrid::LeafIntersectionIterator iit = elem.ileafbegin(); iit != elem.ileafend(); ++iit) {
+                        auto inter = *iit;
+                        if ( inter.neighbor() ) {
+
+                            auto nab = inter.outside();
+                            int nab_gid = gid.id(nab);
+                            int nab_lid = lid.id(nab);
+                            if (nab.partitionType() != InteriorEntity) {
+                                part_type[nab_lid] = layers + 1;
+                            }
+                        }
+                    }
+                }
+            }
+            */
+        }
+        layer++;
+    }
+}
+
 /// \brief Adds cells to the overlap that just share a point with an owner cell.
 void addOverlapCornerCell(const CpGrid& grid, int owner,
                           const CpGrid::Codim<0>::Entity& from,
@@ -364,7 +455,7 @@ void addOverlapLayer(const CpGrid& grid, int index, const CpGrid::Codim<0>::Enti
 void addOverlapLayer(const CpGrid& grid, int index, const CpGrid::Codim<0>::Entity& e,
                      const int owner, const std::vector<int>& cell_part,
                      std::vector<std::tuple<int,int,char>>& exportList,
-                     int recursion_deps)
+                     const std::vector<int>& isWell, int recursion_deps)
 {
     using AttributeSet = Dune::OwnerOverlapCopyAttributeSet::AttributeSet;
     const CpGrid::LeafIndexSet& ix = grid.leafIndexSet();
@@ -373,38 +464,33 @@ void addOverlapLayer(const CpGrid& grid, int index, const CpGrid::Codim<0>::Enti
             int nb_index = ix.index(*(iit->outside()));
             if ( cell_part[nb_index]!=owner )
             {
-                // Note: multiple adds for same process are possible
-                exportList.emplace_back(nb_index, owner, AttributeSet::copy);
-                exportList.emplace_back(index, cell_part[nb_index], AttributeSet::copy);
-                if ( recursion_deps>0 )
-                {
-                    // Add another layer
-                    addOverlapLayer(grid, nb_index, *(iit->outside()), owner,
-                                    cell_part, exportList, recursion_deps-1);
+                // If on the last level, add the cell with type copy; otherwise it gets partition type overlap.
+                if ( recursion_deps < 1 ) {
+                    exportList.emplace_back(nb_index, owner, AttributeSet::copy);
                 }
-                else
+                else {
+                    // If the cell is perforated by a well, add it as a copy cell, since wells are not parallel.
+                    if ( isWell[nb_index] == 1 )
+                        exportList.emplace_back(nb_index, owner, AttributeSet::copy);
+                    else
+                        exportList.emplace_back(nb_index, owner, AttributeSet::overlap);
+                }
+                if ( recursion_deps > 0 )
                 {
-                    // Add cells to the overlap that just share a corner with e.
-                    for (CpGrid::LeafIntersectionIterator iit2 = iit->outside()->ileafbegin();
-                         iit2 != iit->outside()->ileafend(); ++iit2)
-                    {
-                        if ( iit2->neighbor() )
-                        {
-                            int nb_index2 = ix.index(*(iit2->outside()));
-                            if( cell_part[nb_index2]==owner ) continue;
-                            addOverlapCornerCell(grid, owner, e, *(iit2->outside()),
-                                                 cell_part, exportList);
-                        }
-                    }
+                    // Add another layer
+                    addOverlapLayer(grid, index, *(iit->outside()), owner,
+                                    cell_part, exportList, isWell, recursion_deps-1);
                 }
             }
         }
     }
 }
 
-int addOverlapLayer(const CpGrid& grid, const std::vector<int>& cell_part,
+int addOverlapLayer(const CpGrid& grid,
+                    const std::vector<int>& cell_part,
                     std::vector<std::tuple<int,int,char>>& exportList,
                     std::vector<std::tuple<int,int,char,int>>& importList,
+                    const std::vector<int>& cell_has_well,
                     const CollectiveCommunication<Dune::MPIHelper::MPICommunicator>& cc,
                     int layers)
 {
@@ -413,28 +499,52 @@ void addOverlapLayer(const CpGrid& grid, int index, const CpGrid::Codim<0>::Enti
     auto ownerSize = exportList.size();
     const CpGrid::LeafIndexSet& ix = grid.leafIndexSet();
     std::map<int,int> exportProcs, importProcs;
-
+
+    std::vector<std::tuple<int,int,char>> ghostList;
+
     for (CpGrid::Codim<0>::LeafIterator it = grid.leafbegin<0>();
          it != grid.leafend<0>(); ++it) {
         int index = ix.index(*it);
+
         auto owner = cell_part[index];
         exportProcs.insert(std::make_pair(owner, 0));
-        addOverlapLayer(grid, index, *it, owner, cell_part, exportList, layers-1);
+        addOverlapLayer(grid, index, *it, owner, cell_part, exportList, cell_has_well, layers-1);
     }
     // remove multiple entries
+    // This predicate will result in the following ordering of
+    // the exportList tuples (index, rank, Attribute) after std::sort:
+    //
+    // Tuple x comes before tuple y if x[0] < y[0].
+    // Tuple x comes before tuple y if x[0] == y[0] and x[1] < y[1].
+    // Tuple x comes before tuple y if x[0] == y[0] and x[1] == y[1] and (x[2] == overlap and y[2] == copy).
+    // This ordering is needed to make std::unique work properly.
     auto compare = [](const std::tuple<int,int,char>& t1, const std::tuple<int,int,char>& t2)
-    {
-        return (std::get<0>(t1) < std::get<0>(t2)) ||
-               ( ! (std::get<0>(t2) < std::get<0>(t1))
-                 && (std::get<1>(t1) < std::get<1>(t2)) );
-    };
+    {
+        return (std::get<0>(t1) < std::get<0>(t2) ) ||
+               ( (std::get<0>(t2) == std::get<0>(t1))
+                 && (std::get<1>(t1) < std::get<1>(t2)) ) ||
+               ( (std::get<0>(t2) == std::get<0>(t1)) && std::get<2>(t1) == AttributeSet::overlap
+                 && std::get<2>(t2) == AttributeSet::copy && (std::get<1>(t1) == std::get<1>(t2)));
+    };
+
+    // We do not want to add a cell twice as an overlap and copy type. To avoid this we say two tuples
+    // x,y in exportList are equal if x[0] == y[0] and x[1] == y[1]. Since overlap tuples come
+    // before the copy tuples, the copy tuples will be removed.
+    auto overlapEqual = [](const std::tuple<int,int,char>& t1, const std::tuple<int,int,char>& t2)
+    {
+        return ( ( std::get<0>(t1) == std::get<0>(t2) )
+                 && ( std::get<1>(t1) == std::get<1>(t2) ) ) ;
+    };
+
+    // We only sort and remove duplicate cells in the overlap layers
    auto ownerEnd = exportList.begin() + ownerSize;
    std::sort(ownerEnd, exportList.end(), compare);
 
-    auto newEnd = std::unique(ownerEnd, exportList.end());
+    // Remove duplicate cells in the overlap layer.
+    auto newEnd = std::unique(ownerEnd, exportList.end(), overlapEqual);
     exportList.resize(newEnd - exportList.begin());
-
+
     for(const auto& entry: importList)
         importProcs.insert(std::make_pair(std::get<1>(entry), 0));
     //count entries to send
@@ -484,17 +594,61 @@ void addOverlapLayer(const CpGrid& grid, int index, const CpGrid::Codim<0>::Enti
         MPI_Send(sendBuffer.data(), proc.second, MPI_INT, proc.first, tag, cc);
     }
 
-    std::inplace_merge(exportList.begin(), ownerEnd, exportList.end());
+    MPI_Waitall(requests.size(), requests.data(), statuses.data());
+
+    // -------------- Communicate overlap type
+    ++tag;
+    std::vector<std::vector<int> > typeBuffers(importProcs.size());
+    auto tbuffer = typeBuffers.begin();
+    req = requests.begin();
+
+    for(auto&& proc: importProcs)
+    {
+        tbuffer->resize(proc.second);
+        MPI_Irecv(tbuffer->data(), proc.second, MPI_INT, proc.first, tag, cc, &(*req));
+        ++req; ++tbuffer;
+    }
+
+    for(const auto& proc: exportProcs)
+    {
+        std::vector<int> sendBuffer;
+        sendBuffer.reserve(proc.second);
+
+        for (auto t = ownerEnd; t != exportList.end(); ++t) {
+            if ( std::get<1>(*t) == proc.first ) {
+                if ( std::get<2>(*t) == AttributeSet::copy)
+                    sendBuffer.push_back(0);
+                else
+                    sendBuffer.push_back(1);
+            }
+        }
+
+        MPI_Send(sendBuffer.data(), proc.second, MPI_INT, proc.first, tag, cc);
+    }
+
+    std::inplace_merge(exportList.begin(), ownerEnd, exportList.end());
 
     MPI_Waitall(requests.size(), requests.data(), statuses.data());
 
+    // ------------------------------
+
+    buffer = receiveBuffers.begin();
+    tbuffer = typeBuffers.begin();
     auto importOwnerSize = importList.size();
-
+
     for(const auto& proc: importProcs)
     {
-        for(const auto& index: *buffer)
-            importList.emplace_back(index, proc.first, AttributeSet::copy, -1);
+        auto pt = tbuffer->begin();
+        for(const auto& index: *buffer) {
+
+            if (*pt == 0) {
+                importList.emplace_back(index, proc.first, AttributeSet::copy, -1);
+            }
+            else {
+                importList.emplace_back(index, proc.first, AttributeSet::overlap, -1);
+            }
+            ++pt;
+        }
         ++buffer;
+        ++tbuffer;
     }
     std::sort(importList.begin() + importOwnerSize, importList.end(),
               [](const std::tuple<int,int,char,int>& t1, const std::tuple<int,int,char,int>& t2)
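An aside on the ordering and deduplication logic added above -- the same idiom, stripped of grid context, as a self-contained program (the enum stands in for the Dune attribute values; everything else mirrors the patch):

    #include <algorithm>
    #include <cassert>
    #include <tuple>
    #include <vector>

    int main()
    {
        enum : char { overlap = 0, copy = 1 };              // stand-ins for AttributeSet values
        std::vector<std::tuple<int,int,char>> exportList;   // (global index, rank, attribute)
        exportList.emplace_back(7, 2, copy);
        exportList.emplace_back(7, 2, overlap);
        exportList.emplace_back(3, 1, copy);

        // Order by index, then rank, then overlap-before-copy, exactly as the
        // comment in the patch describes.
        auto compare = [](const std::tuple<int,int,char>& t1, const std::tuple<int,int,char>& t2)
        {
            return (std::get<0>(t1) < std::get<0>(t2)) ||
                   (std::get<0>(t1) == std::get<0>(t2) && std::get<1>(t1) < std::get<1>(t2)) ||
                   (std::get<0>(t1) == std::get<0>(t2) && std::get<1>(t1) == std::get<1>(t2)
                    && std::get<2>(t1) == overlap && std::get<2>(t2) == copy);
        };
        // Two entries are "equal" if they name the same cell and rank; the
        // attribute is ignored, so the overlap entry sorted first survives.
        auto overlapEqual = [](const std::tuple<int,int,char>& t1, const std::tuple<int,int,char>& t2)
        {
            return std::get<0>(t1) == std::get<0>(t2) && std::get<1>(t1) == std::get<1>(t2);
        };

        std::sort(exportList.begin(), exportList.end(), compare);
        exportList.erase(std::unique(exportList.begin(), exportList.end(), overlapEqual),
                         exportList.end());

        assert(exportList.size() == 2);
        assert(std::get<2>(exportList[1]) == overlap);   // (7,2): overlap won over copy
    }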
diff --git a/opm/grid/common/GridPartitioning.hpp b/opm/grid/common/GridPartitioning.hpp
index 67dcca7b42..2714d24c87 100644
--- a/opm/grid/common/GridPartitioning.hpp
+++ b/opm/grid/common/GridPartitioning.hpp
@@ -43,6 +43,7 @@
 #include <vector>
 #include <tuple>
 
+
 namespace Dune
 {
@@ -71,6 +72,9 @@ namespace Dune
                        bool recursive = false,
                        bool ensureConnectivity = true);
 
+    void seperateOverlapAndGhostCells(const CpGrid& grid, const std::vector<int>& cell_has_well,
+                                      std::vector<int>& part_type, int layers);
+
     /// \brief Adds a layer of overlap cells to a partitioning.
     /// \param[in] grid The grid that is partitioned.
     /// \param[in] cell_part a vector containing each cells partition number.
@@ -94,9 +98,11 @@ namespace Dune
     /// index here
     /// \param[in] cc The communication object
     /// \param[in] layer Number of overlap layers
-    int addOverlapLayer(const CpGrid& grid, const std::vector<int>& cell_part,
+    int addOverlapLayer(const CpGrid& grid,
+                        const std::vector<int>& cell_part,
                         std::vector<std::tuple<int,int,char>>& exportList,
                         std::vector<std::tuple<int,int,char,int>>& importList,
+                        const std::vector<int>& cell_has_well,
                         const CollectiveCommunication<Dune::MPIHelper::MPICommunicator>& cc,
                         int layers = 1);
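For reference while reading the two list parameters documented above, the tuple layouts written out as type aliases (purely illustrative; the code itself passes the tuples around unnamed):

    #include <tuple>
    #include <vector>

    using ExportEntry = std::tuple<int, int, char>;      // (global index, rank to export to, attribute there)
    using ImportEntry = std::tuple<int, int, char, int>; // (global index, rank to import from, attribute here, local index)

    int main()
    {
        char attr = 0;  // placeholder; the real code stores Dune attribute-set values here
        std::vector<ExportEntry> exportList{ ExportEntry(42, 1, attr) };
        std::vector<ImportEntry> importList{ ImportEntry(42, 0, attr, -1) };  // local index not known yet: -1
        return exportList.size() == importList.size() ? 0 : 1;
    }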
diff --git a/opm/grid/cpgrid/CpGrid.cpp b/opm/grid/cpgrid/CpGrid.cpp
index 576e9b9798..b7da86b044 100644
--- a/opm/grid/cpgrid/CpGrid.cpp
+++ b/opm/grid/cpgrid/CpGrid.cpp
@@ -146,7 +146,6 @@ CpGrid::scatterGrid(EdgeWeightMethod method, const std::vector<cpgrid::OpmWellType>* wells,
             ());
     }
-
 #if HAVE_MPI
 
     auto& cc = data_->ccobj_;
@@ -195,13 +194,19 @@ CpGrid::scatterGrid(EdgeWeightMethod method, const std::vector<cpgrid::OpmWellType>* wells,
 
+    this->cell_has_well_.reset( new std::vector<int> );
+    this->findWellperforatedCells(wells, *this->cell_has_well_);
+
     // map from process to global cell indices in overlap
     std::map<int,std::set<int> > overlap;
 
-    auto noImportedOwner = addOverlapLayer(*this, cell_part, exportList, importList, cc);
+    auto noImportedOwner = addOverlapLayer(*this, cell_part, exportList, importList,
+                                           *this->cell_has_well_, cc, overlapLayers);
     // importList contains all the indices that will be here.
     auto compareImport = [](const std::tuple<int,int,char,int>& t1,
                             const std::tuple<int,int,char,int>& t2)
     {
         return std::get<0>(t1) < std::get<0>(t2);
     };
+
     if ( ! ownersFirst )
     {
         // merge owner and overlap sorted by global index
@@ -236,7 +242,10 @@ CpGrid::scatterGrid(EdgeWeightMethod method, const std::vector<cpgrid::OpmWellType>* wells,
     distributed_data_->cell_indexset_.beginResize();
     for(const auto& entry: importList)
     {
-        distributed_data_->cell_indexset_.add(std::get<0>(entry), ParallelIndexSet::LocalIndex(std::get<3>(entry), AttributeSet(std::get<2>(entry)), true));
+        distributed_data_->cell_indexset_.add(std::get<0>(entry),
+                                              ParallelIndexSet::LocalIndex(std::get<3>(entry),
+                                                                           AttributeSet(std::get<2>(entry)),
+                                                                           true));
     }
     distributed_data_->cell_indexset_.endResize();
     // add an interface for gathering/scattering data with communication
@@ -272,6 +281,30 @@ CpGrid::scatterGrid(EdgeWeightMethod method, const std::vector<cpgrid::OpmWellType>* wells,
 }
 
+void CpGrid::findWellperforatedCells(const std::vector<cpgrid::OpmWellType>* wells,
+                                     std::vector<int>& cell_has_well)
+{
+    cell_has_well.resize(this->numCells(), 0);
+    cpgrid::WellConnections well_indices;
+    const auto& cpgdim = this->logicalCartesianSize();
+
+    std::vector<int> cartesian_to_compressed(cpgdim[0]*cpgdim[1]*cpgdim[2], -1);
+
+    for( int i=0; i < this->numCells(); ++i )
+    {
+        cartesian_to_compressed[this->globalCell()[i]] = i;
+    }
+    well_indices.init(*wells, cpgdim, cartesian_to_compressed);
+
+    for(const auto& well: well_indices)
+    {
+        for( auto well_idx = well.begin(); well_idx != well.end();
+             ++well_idx)
+        {
+            cell_has_well[*well_idx] = 1;
+        }
+    }
+}
 
 void CpGrid::createCartesian(const std::array<int, 3>& dims,
                              const std::array<double, 3>& cellsize)
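Between the two patches, a note on findWellperforatedCells above: globalCell() maps each compressed (active) cell to its Cartesian index, and the function builds the inverse of that mapping so that well perforations, which are specified on the Cartesian grid, can be flagged per active cell. The same inverse-mapping idiom in isolation, with made-up data:

    #include <cassert>
    #include <initializer_list>
    #include <vector>

    int main()
    {
        // Cartesian grid of 6 cells; cells 1 and 4 are inactive.
        std::vector<int> globalCell = {0, 2, 3, 5};        // compressed -> Cartesian
        std::vector<int> cartesianToCompressed(6, -1);     // Cartesian -> compressed

        for (std::size_t i = 0; i < globalCell.size(); ++i)
            cartesianToCompressed[globalCell[i]] = static_cast<int>(i);

        // A hypothetical well perforating Cartesian cells 2 and 5:
        std::vector<int> cellHasWell(globalCell.size(), 0);
        for (int cartIdx : {2, 5}) {
            int compressed = cartesianToCompressed[cartIdx];
            if (compressed != -1)                          // skip inactive cells
                cellHasWell[compressed] = 1;
        }

        assert(cellHasWell[1] == 1 && cellHasWell[3] == 1);
    }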
From 2a345ea7a6f78e9893d62734f8ba609e61c1f505 Mon Sep 17 00:00:00 2001
From: andrthu
Date: Tue, 25 Feb 2020 13:55:56 +0100
Subject: [PATCH 2/2] Add GhostEntity for cell partitionType(). Remove the
 unused separate copy/overlap

---
 opm/grid/common/GridPartitioning.cpp | 122 +++------------------------
 opm/grid/common/GridPartitioning.hpp |  10 +--
 opm/grid/common/ZoltanPartition.cpp  |   4 +-
 opm/grid/common/ZoltanPartition.hpp  |   9 +-
 opm/grid/cpgrid/CpGrid.cpp           |   2 +-
 opm/grid/cpgrid/CpGridData.cpp       |   5 +-
 6 files changed, 30 insertions(+), 122 deletions(-)

diff --git a/opm/grid/common/GridPartitioning.cpp b/opm/grid/common/GridPartitioning.cpp
index a329c9461c..c66e94ad03 100644
--- a/opm/grid/common/GridPartitioning.cpp
+++ b/opm/grid/common/GridPartitioning.cpp
@@ -242,97 +242,6 @@ namespace Dune
         }
     }
 
-/// \brief Separate overlap and ghost cells.
-void seperateOverlapAndGhostCells(const CpGrid& grid, const std::vector<int>& cell_has_well,
-                                  std::vector<int>& part_type, int layers)
-{
-
-    auto lid = grid.localIdSet();
-    auto gid = grid.globalIdSet();
-    part_type.resize(grid.numCells(), 0);
-
-    for (auto it = grid.leafbegin<0>(); it != grid.leafend<0>(); ++it) {
-        auto elem = *it;
-
-        if (elem.partitionType() == InteriorEntity) {
-            auto id = lid.id(elem);
-
-            part_type[id] = 1;
-
-            for (CpGrid::LeafIntersectionIterator iit = elem.ileafbegin(); iit != elem.ileafend(); ++iit) {
-                auto inter = *iit;
-                if ( inter.neighbor() ) {
-                    auto nab = inter.outside();
-                    int nab_lid = lid.id(nab);
-                    if (nab.partitionType() != InteriorEntity && part_type[nab_lid] == 0) {
-                        int nab_gid = gid.id(nab);
-
-                        if ( cell_has_well[nab_gid] == 1 ) {
-                            part_type[nab_lid] = layers + 1;
-                        }
-                        else {
-                            part_type[nab_lid] = 2;
-                        }
-                    }
-                }
-            }
-        }
-    }
-
-    int layer = 2;
-    while (layer < layers + 1) {
-        for (auto it = grid.leafbegin<0>(); it != grid.leafend<0>(); ++it) {
-
-            auto elem = *it;
-            int id = lid.id(elem);
-            bool isLayer = part_type[id] == layer || part_type[id] == layers + 1;
-
-            if (elem.partitionType() != InteriorEntity && isLayer) {
-                for (CpGrid::LeafIntersectionIterator iit = elem.ileafbegin(); iit != elem.ileafend(); ++iit) {
-
-                    auto inter = *iit;
-                    if ( inter.neighbor() ) {
-
-                        auto nab = inter.outside();
-                        int nab_gid = gid.id(nab);
-                        int nab_lid = lid.id(nab);
-
-                        if (nab.partitionType() != InteriorEntity && part_type[nab_lid] == 0) {
-                            if (cell_has_well[nab_gid] == 1) {
-                                part_type[nab_lid] = layers + 1;
-                            }
-                            else {
-                                part_type[nab_lid] = layer + 1;
-                            }
-                        }
-                    }
-                }
-            }
-            /*
-            else if (elem.partitionType() != InteriorEntity && part_type[id] == layers + 1) {
-                int gid_e = gid.id(elem);
-                bool isWell = cell_has_well[gid_e] == 1;
-                if ( isWell ) {
-                    for (CpGrid::LeafIntersectionIterator iit = elem.ileafbegin(); iit != elem.ileafend(); ++iit) {
-                        auto inter = *iit;
-                        if ( inter.neighbor() ) {
-
-                            auto nab = inter.outside();
-                            int nab_gid = gid.id(nab);
-                            int nab_lid = lid.id(nab);
-                            if (nab.partitionType() != InteriorEntity) {
-                                part_type[nab_lid] = layers + 1;
-                            }
-                        }
-                    }
-                }
-            }
-            */
-        }
-        layer++;
-    }
-}
-
 /// \brief Adds cells to the overlap that just share a point with an owner cell.
 void addOverlapCornerCell(const CpGrid& grid, int owner,
                           const CpGrid::Codim<0>::Entity& from,
@@ -499,9 +408,7 @@ void addOverlapLayer(const CpGrid& grid, int index, const CpGrid::Codim<0>::Enti
     auto ownerSize = exportList.size();
     const CpGrid::LeafIndexSet& ix = grid.leafIndexSet();
     std::map<int,int> exportProcs, importProcs;
-
-    std::vector<std::tuple<int,int,char>> ghostList;
-
+
     for (CpGrid::Codim<0>::LeafIterator it = grid.leafbegin<0>();
          it != grid.leafend<0>(); ++it) {
         int index = ix.index(*it);
@@ -544,7 +451,7 @@ void addOverlapLayer(const CpGrid& grid, int index, const CpGrid::Codim<0>::Enti
     // Remove duplicate cells in the overlap layer.
     auto newEnd = std::unique(ownerEnd, exportList.end(), overlapEqual);
     exportList.resize(newEnd - exportList.begin());
-
+
     for(const auto& entry: importList)
         importProcs.insert(std::make_pair(std::get<1>(entry), 0));
     //count entries to send
@@ -596,24 +503,24 @@ void addOverlapLayer(const CpGrid& grid, int index, const CpGrid::Codim<0>::Enti
 
     MPI_Waitall(requests.size(), requests.data(), statuses.data());
 
-    // -------------- Communicate overlap type
+    // Communicate overlap type
     ++tag;
     std::vector<std::vector<int> > typeBuffers(importProcs.size());
-    auto tbuffer = typeBuffers.begin();
+    auto partitionTypeBuffer = typeBuffers.begin();
     req = requests.begin();
 
     for(auto&& proc: importProcs)
     {
-        tbuffer->resize(proc.second);
-        MPI_Irecv(tbuffer->data(), proc.second, MPI_INT, proc.first, tag, cc, &(*req));
-        ++req; ++tbuffer;
+        partitionTypeBuffer->resize(proc.second);
+        MPI_Irecv(partitionTypeBuffer->data(), proc.second, MPI_INT, proc.first, tag, cc, &(*req));
+        ++req; ++partitionTypeBuffer;
     }
 
     for(const auto& proc: exportProcs)
     {
         std::vector<int> sendBuffer;
         sendBuffer.reserve(proc.second);
-
+
         for (auto t = ownerEnd; t != exportList.end(); ++t) {
             if ( std::get<1>(*t) == proc.first ) {
                 if ( std::get<2>(*t) == AttributeSet::copy)
@@ -622,21 +529,20 @@ void addOverlapLayer(const CpGrid& grid, int index, const CpGrid::Codim<0>::Enti
                 sendBuffer.push_back(1);
             }
         }
-
         MPI_Send(sendBuffer.data(), proc.second, MPI_INT, proc.first, tag, cc);
     }
 
     std::inplace_merge(exportList.begin(), ownerEnd, exportList.end());
 
     MPI_Waitall(requests.size(), requests.data(), statuses.data());
 
-    // ------------------------------
-
+
+    // Add the overlap layer to the import list on each process.
     buffer = receiveBuffers.begin();
-    tbuffer = typeBuffers.begin();
+    partitionTypeBuffer = typeBuffers.begin();
     auto importOwnerSize = importList.size();
-
+
     for(const auto& proc: importProcs)
     {
-        auto pt = tbuffer->begin();
+        auto pt = partitionTypeBuffer->begin();
         for(const auto& index: *buffer) {
 
             if (*pt == 0) {
@@ -648,7 +554,7 @@ void addOverlapLayer(const CpGrid& grid, int index, const CpGrid::Codim<0>::Enti
             ++pt;
         }
         ++buffer;
-        ++tbuffer;
+        ++partitionTypeBuffer;
     }
     std::sort(importList.begin() + importOwnerSize, importList.end(),
               [](const std::tuple<int,int,char,int>& t1, const std::tuple<int,int,char,int>& t2)
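The reworked exchange above follows a common MPI pattern: post all nonblocking receives, perform the matching blocking sends, then wait for the receives to complete. The same skeleton reduced to two ranks (a sketch, not the patch's code; run with mpirun -np 2):

    // mpicxx pattern.cpp && mpirun -np 2 ./a.out
    #include <mpi.h>
    #include <vector>

    int main(int argc, char** argv)
    {
        MPI_Init(&argc, &argv);
        int rank;
        MPI_Comm_rank(MPI_COMM_WORLD, &rank);

        const int tag = 42;
        std::vector<int> sendBuffer = {0, 1, 1};   // 0 = copy (ghost), 1 = overlap
        std::vector<int> recvBuffer(sendBuffer.size());
        MPI_Request request;

        int peer = 1 - rank;                       // assumes exactly two ranks
        // Post the receive first so the matching send cannot deadlock.
        MPI_Irecv(recvBuffer.data(), static_cast<int>(recvBuffer.size()), MPI_INT,
                  peer, tag, MPI_COMM_WORLD, &request);
        MPI_Send(sendBuffer.data(), static_cast<int>(sendBuffer.size()), MPI_INT,
                 peer, tag, MPI_COMM_WORLD);
        MPI_Wait(&request, MPI_STATUS_IGNORE);

        MPI_Finalize();
    }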
diff --git a/opm/grid/common/GridPartitioning.hpp b/opm/grid/common/GridPartitioning.hpp
index 2714d24c87..c4b608b30e 100644
--- a/opm/grid/common/GridPartitioning.hpp
+++ b/opm/grid/common/GridPartitioning.hpp
@@ -43,7 +43,6 @@
 #include <vector>
 #include <tuple>
 
-
 namespace Dune
 {
@@ -72,9 +71,6 @@ namespace Dune
                        bool recursive = false,
                        bool ensureConnectivity = true);
 
-    void seperateOverlapAndGhostCells(const CpGrid& grid, const std::vector<int>& cell_has_well,
-                                      std::vector<int>& part_type, int layers);
-
     /// \brief Adds a layer of overlap cells to a partitioning.
     /// \param[in] grid The grid that is partitioned.
     /// \param[in] cell_part a vector containing each cells partition number.
@@ -96,13 +92,15 @@ namespace Dune
     /// \param[inout] importList List indices to import, each entry is a tuple
     /// of global index, process rank (to import from), attribute here, local
     /// index here
+    /// \param[in] cell_has_well integer list that indicates whether cell i is
+    /// perforated by a well.
     /// \param[in] cc The communication object
     /// \param[in] layer Number of overlap layers
     int addOverlapLayer(const CpGrid& grid,
-                        const std::vector<int>& cell_part,
+                        const std::vector<int>& cell_part,
                         std::vector<std::tuple<int,int,char>>& exportList,
                         std::vector<std::tuple<int,int,char,int>>& importList,
-                        const std::vector<int>& cell_has_well,
+                        const std::vector<int>& cell_has_well,
                         const CollectiveCommunication<Dune::MPIHelper::MPICommunicator>& cc,
                         int layers = 1);
diff --git a/opm/grid/common/ZoltanPartition.cpp b/opm/grid/common/ZoltanPartition.cpp
index ec3e612979..8c560a4b58 100644
--- a/opm/grid/common/ZoltanPartition.cpp
+++ b/opm/grid/common/ZoltanPartition.cpp
@@ -77,7 +77,7 @@ zoltanGraphPartitionGridOnRoot(const CpGrid& cpgrid,
                                             wells,
                                             transmissibilities,
                                             partitionIsEmpty,
-                                            edgeWeightsMethod));
+                                            edgeWeightsMethod));
         Dune::cpgrid::setCpGridZoltanGraphFunctions(zz, *grid_and_wells,
                                                     partitionIsEmpty);
     }
@@ -104,7 +104,7 @@ zoltanGraphPartitionGridOnRoot(const CpGrid& cpgrid,
     int rank = cc.rank();
     std::vector<int> parts(size, rank);
     std::vector<std::vector<int> > wells_on_proc;
-    // List entry: process to export to, (global) index, attribute there (not needed?)
+    // List entry: (global) index, process to export to, attribute there
     std::vector<std::tuple<int,int,char>> myExportList(numExport);
     // List entry: process to import from, global index, attribute here, local index
     // (determined later)
diff --git a/opm/grid/common/ZoltanPartition.hpp b/opm/grid/common/ZoltanPartition.hpp
index 7daff44cb7..ef54d9cfcd 100644
--- a/opm/grid/common/ZoltanPartition.hpp
+++ b/opm/grid/common/ZoltanPartition.hpp
@@ -46,10 +46,13 @@ namespace cpgrid
 /// @param edgeWeightMethod The method used to calculate the weights associated
 ///        with the edges of the graph (uniform, transmissibilities, log thereof)
 /// @param root The process number that holds the global grid.
-/// @return A pair consisting of a vector that contains for each local cell of the grid the
+/// @return A tuple consisting of a vector that contains for each local cell of the grid
 ///         the number of the process that owns it after repartitioning,
-///         and a set of names of wells that should be defunct in a parallel
-///         simulation.
+///         a set of names of wells that should be defunct in a parallel
+///         simulation, a vector of tuples exportList(global id, owner rank, attribute)
+///         containing information each rank needs for distributing the grid and a second
+///         vector of tuples importList(global id, send rank, attribute, local id) containing
+///         information about the cells of the grid each rank will receive.
 std::tuple<std::vector<int>, std::unordered_set<std::string>,
            std::vector<std::tuple<int,int,char> >,
            std::vector<std::tuple<int,int,char,int> > >
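Given the corrected @return documentation above, a caller-side sketch of unpacking the four-element tuple (the names are illustrative; only the element types follow the declaration):

    #include <string>
    #include <tuple>
    #include <unordered_set>
    #include <vector>

    using PartitionResult =
        std::tuple<std::vector<int>,                            // owner rank per local cell
                   std::unordered_set<std::string>,             // defunct well names
                   std::vector<std::tuple<int,int,char>>,       // exportList
                   std::vector<std::tuple<int,int,char,int>>>;  // importList

    void consume(const PartitionResult& result)
    {
        const auto& parts        = std::get<0>(result);
        const auto& defunctWells = std::get<1>(result);
        const auto& exportList   = std::get<2>(result);
        const auto& importList   = std::get<3>(result);
        (void)parts; (void)defunctWells; (void)exportList; (void)importList;
    }

    int main() { consume(PartitionResult{}); }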
diff --git a/opm/grid/cpgrid/CpGrid.cpp b/opm/grid/cpgrid/CpGrid.cpp
index b7da86b044..0e32993b2a 100644
--- a/opm/grid/cpgrid/CpGrid.cpp
+++ b/opm/grid/cpgrid/CpGrid.cpp
@@ -146,6 +146,7 @@ CpGrid::scatterGrid(EdgeWeightMethod method, const std::vector<cpgrid::OpmWellType>* wells,
             ());
     }
+
 #if HAVE_MPI
 
     auto& cc = data_->ccobj_;
@@ -214,7 +215,6 @@ CpGrid::scatterGrid(EdgeWeightMethod method, const std::vector<cpgrid::OpmWellType>* wells,
     {
         return std::get<0>(t1) < std::get<0>(t2);
     };
-
     if ( ! ownersFirst )
     {
         // merge owner and overlap sorted by global index
diff --git a/opm/grid/cpgrid/CpGridData.cpp b/opm/grid/cpgrid/CpGridData.cpp
index 2e305c1d04..6170949297 100644
--- a/opm/grid/cpgrid/CpGridData.cpp
+++ b/opm/grid/cpgrid/CpGridData.cpp
@@ -1574,9 +1574,10 @@ void CpGridData::distributeGlobalGrid(CpGrid& grid,
     partition_type_indicator_->cell_indicator_.resize(cell_indexset_.size());
     for(const auto i: cell_indexset_)
     {
+        auto ci_attr = i.local().attribute();
         partition_type_indicator_->cell_indicator_[i.local()]=
-            i.local().attribute()==AttributeSet::owner?
-            InteriorEntity:OverlapEntity;
+            ci_attr==AttributeSet::owner?
+            InteriorEntity: ci_attr==AttributeSet::copy? GhostEntity:OverlapEntity;
     }
 
     // Compute partition type for points
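Finally, the CpGridData.cpp hunk maps index-set attributes to Dune partition types: owner cells stay InteriorEntity, copy cells now become GhostEntity, and everything else remains OverlapEntity. The same decision as a small pure function (with illustrative stand-ins for the Dune enums):

    enum class Attribute { owner, overlap, copy };
    enum class PartitionType { Interior, Overlap, Ghost };

    constexpr PartitionType partitionType(Attribute attr)
    {
        return attr == Attribute::owner ? PartitionType::Interior
             : attr == Attribute::copy  ? PartitionType::Ghost
                                        : PartitionType::Overlap;
    }

    static_assert(partitionType(Attribute::copy) == PartitionType::Ghost,
                  "copy cells become ghost cells");
    static_assert(partitionType(Attribute::overlap) == PartitionType::Overlap,
                  "overlap cells keep their partition type");

    int main() {}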