diff --git a/opm/grid/CpGrid.hpp b/opm/grid/CpGrid.hpp
index 765766876b..9ae1d89a87 100644
--- a/opm/grid/CpGrid.hpp
+++ b/opm/grid/CpGrid.hpp
@@ -1327,6 +1327,11 @@ namespace Dune
             return current_view_data_->cell_remote_indices_;
         }
 
+        std::vector<int> cellHasWell() const
+        {
+            return *cell_has_well_;
+        }
+
 #endif
 
     private:
@@ -1366,6 +1371,13 @@ namespace Dune
         * @warning Will only update owner cells
         */
        std::shared_ptr point_scatter_gather_interfaces_;
+
+       /// \brief Mark all cells perforated by wells.
+       void findWellperforatedCells(const std::vector<cpgrid::OpmWellType> * wells,
+                                    std::vector<int>& cell_has_well);
+
+       std::shared_ptr<std::vector<int>> cell_has_well_;
+
    }; // end Class CpGrid
 
diff --git a/opm/grid/common/GridPartitioning.cpp b/opm/grid/common/GridPartitioning.cpp
index d705f5a483..c66e94ad03 100644
--- a/opm/grid/common/GridPartitioning.cpp
+++ b/opm/grid/common/GridPartitioning.cpp
@@ -364,7 +364,7 @@ void addOverlapLayer(const CpGrid& grid, int index, const CpGrid::Codim<0>::Enti
 void addOverlapLayer(const CpGrid& grid, int index, const CpGrid::Codim<0>::Entity& e,
                      const int owner, const std::vector<int>& cell_part,
                      std::vector<std::tuple<int,int,char>>& exportList,
-                     int recursion_deps)
+                     const std::vector<int>& isWell, int recursion_deps)
 {
     using AttributeSet = Dune::OwnerOverlapCopyAttributeSet::AttributeSet;
     const CpGrid::LeafIndexSet& ix = grid.leafIndexSet();
@@ -373,38 +373,33 @@ void addOverlapLayer(const CpGrid& grid, int index, const CpGrid::Codim<0>::Enti
             int nb_index = ix.index(*(iit->outside()));
             if ( cell_part[nb_index]!=owner )
             {
-                // Note: multiple adds for same process are possible
-                exportList.emplace_back(nb_index, owner, AttributeSet::copy);
-                exportList.emplace_back(index, cell_part[nb_index], AttributeSet::copy);
-                if ( recursion_deps>0 )
-                {
-                    // Add another layer
-                    addOverlapLayer(grid, nb_index, *(iit->outside()), owner,
-                                    cell_part, exportList, recursion_deps-1);
+                // On the last level, add the cell with partition type copy. Otherwise it gets partition type overlap.
+                if ( recursion_deps < 1 ) {
+                    exportList.emplace_back(nb_index, owner, AttributeSet::copy);
                 }
-                else
+                else {
+                    // If the cell is perforated by a well, add it as a copy cell, since wells are not parallelized.
+                    if ( isWell[nb_index] == 1 )
+                        exportList.emplace_back(nb_index, owner, AttributeSet::copy);
+                    else
+                        exportList.emplace_back(nb_index, owner, AttributeSet::overlap);
+                }
+                if ( recursion_deps > 0 )
                 {
-                    // Add cells to the overlap that just share a corner with e.
-                    for (CpGrid::LeafIntersectionIterator iit2 = iit->outside()->ileafbegin();
-                         iit2 != iit->outside()->ileafend(); ++iit2)
-                    {
-                        if ( iit2->neighbor() )
-                        {
-                            int nb_index2 = ix.index(*(iit2->outside()));
-                            if( cell_part[nb_index2]==owner ) continue;
-                            addOverlapCornerCell(grid, owner, e, *(iit2->outside()),
-                                                 cell_part, exportList);
-                        }
-                    }
+                    // Add another layer
+                    addOverlapLayer(grid, index, *(iit->outside()), owner,
+                                    cell_part, exportList, isWell, recursion_deps-1);
                 }
             }
         }
     }
 }
 
-int addOverlapLayer(const CpGrid& grid, const std::vector<int>& cell_part,
+int addOverlapLayer(const CpGrid& grid,
+                    const std::vector<int>& cell_part,
                     std::vector<std::tuple<int,int,char>>& exportList,
                     std::vector<std::tuple<int,int,char,int>>& importList,
+                    const std::vector<int>& cell_has_well,
                     const CollectiveCommunication& cc,
                     int layers)
 {
@@ -417,22 +412,44 @@ void addOverlapLayer(const CpGrid& grid, int index, const CpGrid::Codim<0>::Enti
     for (CpGrid::Codim<0>::LeafIterator it = grid.leafbegin<0>();
          it != grid.leafend<0>(); ++it) {
         int index = ix.index(*it);
+        auto owner = cell_part[index];
         exportProcs.insert(std::make_pair(owner, 0));
-        addOverlapLayer(grid, index, *it, owner, cell_part, exportList, layers-1);
+        addOverlapLayer(grid, index, *it, owner, cell_part, exportList, cell_has_well, layers-1);
     }
+    // Remove multiple entries.
+    // This predicate will result in the following ordering of
+    // the exportList tuples (index, rank, Attribute) after std::sort:
+    //
+    // Tuple x comes before tuple y if x[0] < y[0].
+    // Tuple x comes before tuple y if x[0] == y[0] and x[1] < y[1].
+    // Tuple x comes before tuple y if x[0] == y[0] and x[1] == y[1] and (x[2] == overlap and y[2] == copy).
+    // This ordering is needed to make std::unique work properly.
     auto compare = [](const std::tuple<int,int,char>& t1, const std::tuple<int,int,char>& t2)
-    {
-        return (std::get<0>(t1) < std::get<0>(t2)) ||
-               ( ! (std::get<0>(t2) < std::get<0>(t1))
-                 && (std::get<1>(t1) < std::get<1>(t2)) );
-    };
+    {
+        return (std::get<0>(t1) < std::get<0>(t2) ) ||
+               ( (std::get<0>(t2) == std::get<0>(t1))
+                 && (std::get<1>(t1) < std::get<1>(t2)) ) ||
+               ( (std::get<0>(t2) == std::get<0>(t1)) && std::get<2>(t1) == AttributeSet::overlap
+                 && std::get<2>(t2) == AttributeSet::copy && (std::get<1>(t1) == std::get<1>(t2)));
+    };
+
+    // We do not want to add a cell twice as an overlap and copy type. To avoid this we say two tuples
+    // x,y in exportList are equal if x[0] == y[0] and x[1] == y[1]. Since overlap tuples come
+    // before the copy tuples, the copy tuples will be removed.
+    auto overlapEqual = [](const std::tuple<int,int,char>& t1, const std::tuple<int,int,char>& t2)
+    {
+        return ( ( std::get<0>(t1) == std::get<0>(t2) )
+                 && ( std::get<1>(t1) == std::get<1>(t2) ) ) ;
+    };
+
+    // We only sort and remove duplicate cells in the overlap layers.
     auto ownerEnd = exportList.begin() + ownerSize;
     std::sort(ownerEnd, exportList.end(), compare);
-    auto newEnd = std::unique(ownerEnd, exportList.end());
+    // Remove duplicate cells in the overlap layer.
+    auto newEnd = std::unique(ownerEnd, exportList.end(), overlapEqual);
     exportList.resize(newEnd - exportList.begin());
 
     for(const auto& entry: importList)
@@ -484,17 +501,60 @@ void addOverlapLayer(const CpGrid& grid, int index, const CpGrid::Codim<0>::Enti
         MPI_Send(sendBuffer.data(), proc.second, MPI_INT, proc.first, tag, cc);
     }
 
-    std::inplace_merge(exportList.begin(), ownerEnd, exportList.end());
+    MPI_Waitall(requests.size(), requests.data(), statuses.data());
+
+    // Communicate the overlap type.
+    ++tag;
+    std::vector<std::vector<int> > typeBuffers(importProcs.size());
+    auto partitionTypeBuffer = typeBuffers.begin();
+    req = requests.begin();
+    for(auto&& proc: importProcs)
+    {
+        partitionTypeBuffer->resize(proc.second);
+        MPI_Irecv(partitionTypeBuffer->data(), proc.second, MPI_INT, proc.first, tag, cc, &(*req));
+        ++req; ++partitionTypeBuffer;
+    }
+
+    for(const auto& proc: exportProcs)
+    {
+        std::vector<int> sendBuffer;
+        sendBuffer.reserve(proc.second);
+
+        for (auto t = ownerEnd; t != exportList.end(); ++t) {
+            if ( std::get<1>(*t) == proc.first ) {
+                if ( std::get<2>(*t) == AttributeSet::copy)
+                    sendBuffer.push_back(0);
+                else
+                    sendBuffer.push_back(1);
+            }
+        }
+        MPI_Send(sendBuffer.data(), proc.second, MPI_INT, proc.first, tag, cc);
+    }
+
+    std::inplace_merge(exportList.begin(), ownerEnd, exportList.end());
 
     MPI_Waitall(requests.size(), requests.data(), statuses.data());
+
+    // Add the overlap layer to the import list on each process.
     buffer = receiveBuffers.begin();
+    partitionTypeBuffer = typeBuffers.begin();
     auto importOwnerSize = importList.size();
 
     for(const auto& proc: importProcs)
     {
-        for(const auto& index: *buffer)
-            importList.emplace_back(index, proc.first, AttributeSet::copy, -1);
+        auto pt = partitionTypeBuffer->begin();
+        for(const auto& index: *buffer) {
+
+            if (*pt == 0) {
+                importList.emplace_back(index, proc.first, AttributeSet::copy, -1);
+            }
+            else {
+                importList.emplace_back(index, proc.first, AttributeSet::overlap, -1);
+            }
+            ++pt;
+        }
         ++buffer;
+        ++partitionTypeBuffer;
     }
     std::sort(importList.begin() + importOwnerSize, importList.end(),
               [](const std::tuple<int,int,char,int>& t1, const std::tuple<int,int,char,int>& t2)
diff --git a/opm/grid/common/GridPartitioning.hpp b/opm/grid/common/GridPartitioning.hpp
index 67dcca7b42..c4b608b30e 100644
--- a/opm/grid/common/GridPartitioning.hpp
+++ b/opm/grid/common/GridPartitioning.hpp
@@ -92,11 +92,15 @@ namespace Dune
     /// \param[inout] importList List indices to import, each entry is a tuple
     /// of global index, process rank (to import from), attribute here, local
     /// index here
+    /// \param[in] cell_has_well Integer list that indicates whether cell i is perforated
+    /// by a well.
     /// \param[in] cc The communication object
     /// \param[in] layer Number of overlap layers
-    int addOverlapLayer(const CpGrid& grid, const std::vector<int>& cell_part,
+    int addOverlapLayer(const CpGrid& grid,
+                        const std::vector<int>& cell_part,
                         std::vector<std::tuple<int,int,char>>& exportList,
                         std::vector<std::tuple<int,int,char,int>>& importList,
+                        const std::vector<int>& cell_has_well,
                         const CollectiveCommunication& cc,
                         int layers = 1);
 
diff --git a/opm/grid/common/ZoltanPartition.cpp b/opm/grid/common/ZoltanPartition.cpp
index ec3e612979..8c560a4b58 100644
--- a/opm/grid/common/ZoltanPartition.cpp
+++ b/opm/grid/common/ZoltanPartition.cpp
@@ -77,7 +77,7 @@ zoltanGraphPartitionGridOnRoot(const CpGrid& cpgrid,
                                                               wells,
                                                               transmissibilities,
                                                               partitionIsEmpty,
-                                                             edgeWeightsMethod));
+                                                              edgeWeightsMethod));
         Dune::cpgrid::setCpGridZoltanGraphFunctions(zz, *grid_and_wells,
                                                     partitionIsEmpty);
     }
@@ -104,7 +104,7 @@ zoltanGraphPartitionGridOnRoot(const CpGrid& cpgrid,
     int rank = cc.rank();
     std::vector<int> parts(size, rank);
     std::vector<std::vector<int> > wells_on_proc;
-    // List entry: process to export to, (global) index, attribute there (not needed?)
+    // List entry: (global) index, process to export to, attribute there
     std::vector<std::tuple<int,int,char>> myExportList(numExport);
     // List entry: process to import from, global index, attribute here, local index
     // (determined later)
diff --git a/opm/grid/common/ZoltanPartition.hpp b/opm/grid/common/ZoltanPartition.hpp
index 7daff44cb7..ef54d9cfcd 100644
--- a/opm/grid/common/ZoltanPartition.hpp
+++ b/opm/grid/common/ZoltanPartition.hpp
@@ -46,10 +46,13 @@ namespace cpgrid
/// @param edgeWeightMethod The method used to calculate the weights associated
/// with the edges of the graph (uniform, transmissibilities, log thereof)
/// @param root The process number that holds the global grid.
-/// @return A pair consisting of a vector that contains for each local cell of the grid the
+/// @return A tuple consisting of a vector that contains for each local cell of the grid
/// the number of the process that owns it after repartitioning,
-/// and a set of names of wells that should be defunct in a parallel
-/// simulation.
+/// a set of names of wells that should be defunct in a parallel
+/// simulation, a vector of tuples exportList(global id, owner rank, attribute)
+/// containing information each rank needs for distributing the grid, and a second
+/// vector of tuples importList(global id, send rank, attribute, local id) containing
+/// information about the cells of the grid each rank will receive.
std::tuple<std::vector<int>, std::unordered_set<std::string>,
           std::vector<std::tuple<int,int,char> >,
           std::vector<std::tuple<int,int,char,int> > >
diff --git a/opm/grid/cpgrid/CpGrid.cpp b/opm/grid/cpgrid/CpGrid.cpp
index 576e9b9798..0e32993b2a 100644
--- a/opm/grid/cpgrid/CpGrid.cpp
+++ b/opm/grid/cpgrid/CpGrid.cpp
@@ -195,13 +195,19 @@ CpGrid::scatterGrid(EdgeWeightMethod method, const std::vector
+        this->cell_has_well_.reset( new std::vector<int> );
+        this->findWellperforatedCells(wells, *this->cell_has_well_);
+
         // map from process to global cell indices in overlap
         std::map<int,std::set<int> > overlap;
 
-        auto noImportedOwner = addOverlapLayer(*this, cell_part, exportList, importList, cc);
+        auto noImportedOwner = addOverlapLayer(*this, cell_part, exportList, importList,
+                                               *this->cell_has_well_, cc, overlapLayers);
         // importList contains all the indices that will be here.
         auto compareImport = [](const std::tuple<int,int,char,int>& t1, const std::tuple<int,int,char,int>&t2)
@@ -236,7 +242,10 @@ CpGrid::scatterGrid(EdgeWeightMethod method, const std::vector
         distributed_data_->cell_indexset_.beginResize();
         for(const auto& entry: importList)
         {
-            distributed_data_->cell_indexset_.add(std::get<0>(entry), ParallelIndexSet::LocalIndex(std::get<3>(entry), AttributeSet(std::get<2>(entry)), true));
+            distributed_data_->cell_indexset_.add(std::get<0>(entry),
+                                                  ParallelIndexSet::LocalIndex(std::get<3>(entry),
+                                                                               AttributeSet(std::get<2>(entry)),
+                                                                               true));
         }
         distributed_data_->cell_indexset_.endResize();
         // add an interface for gathering/scattering data with communication
@@ -272,6 +281,30 @@ CpGrid::scatterGrid(EdgeWeightMethod method, const std::vector
+void CpGrid::findWellperforatedCells(const std::vector<cpgrid::OpmWellType> * wells,
+                                     std::vector<int>& cell_has_well)
+{
+    cell_has_well.resize(this->numCells(), 0);
+    cpgrid::WellConnections well_indices;
+    const auto& cpgdim = this->logicalCartesianSize();
+
+    std::vector<int> cartesian_to_compressed(cpgdim[0]*cpgdim[1]*cpgdim[2], -1);
+
+    for( int i=0; i < this->numCells(); ++i )
+    {
+        cartesian_to_compressed[this->globalCell()[i]] = i;
+    }
+    well_indices.init(*wells, cpgdim, cartesian_to_compressed);
+
+    for(const auto& well: well_indices)
+    {
+        for( auto well_idx = well.begin(); well_idx != well.end();
+             ++well_idx)
+        {
+            cell_has_well[*well_idx] = 1;
+        }
+    }
+}
 
 void CpGrid::createCartesian(const std::array<int, 3>& dims, const std::array<double, 3>& cellsize)
diff --git a/opm/grid/cpgrid/CpGridData.cpp b/opm/grid/cpgrid/CpGridData.cpp
index 2e305c1d04..6170949297 100644
--- a/opm/grid/cpgrid/CpGridData.cpp
+++ b/opm/grid/cpgrid/CpGridData.cpp
@@ -1574,9 +1574,10 @@ void CpGridData::distributeGlobalGrid(CpGrid& grid,
     partition_type_indicator_->cell_indicator_.resize(cell_indexset_.size());
     for(const auto i: cell_indexset_)
     {
+        auto ci_attr = i.local().attribute();
         partition_type_indicator_->cell_indicator_[i.local()]=
-            i.local().attribute()==AttributeSet::owner?
-            InteriorEntity:OverlapEntity;
+            ci_attr==AttributeSet::owner?
+            InteriorEntity: ci_attr==AttributeSet::copy? GhostEntity:OverlapEntity;
     }
 
     // Compute partition type for points
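
Reviewer note, not part of the patch: the interplay of the new compare and overlapEqual lambdas in GridPartitioning.cpp is easiest to see in isolation. The following is a minimal standalone sketch of the same idea with a stand-in AttributeSet enum and invented data; it sorts export entries so that, for an identical (global index, target rank) pair, the overlap entry precedes the copy entry, and then lets std::unique with equality on (global index, target rank) drop the copy duplicate.

    // Standalone illustration of the overlap de-duplication; AttributeSet here is a
    // stand-in for Dune::OwnerOverlapCopyAttributeSet::AttributeSet.
    #include <algorithm>
    #include <iostream>
    #include <tuple>
    #include <vector>

    enum class AttributeSet : char { owner, overlap, copy };

    int main()
    {
        using Entry = std::tuple<int, int, AttributeSet>; // (global index, target rank, attribute)
        std::vector<Entry> exportList = {
            std::make_tuple(7, 1, AttributeSet::copy),    // added again on the last layer
            std::make_tuple(7, 1, AttributeSet::overlap), // same cell, already marked overlap
            std::make_tuple(3, 2, AttributeSet::copy),
        };

        // Order by index, then rank; for equal (index, rank) put overlap before copy.
        auto compare = [](const Entry& a, const Entry& b) {
            if (std::get<0>(a) != std::get<0>(b)) return std::get<0>(a) < std::get<0>(b);
            if (std::get<1>(a) != std::get<1>(b)) return std::get<1>(a) < std::get<1>(b);
            return std::get<2>(a) == AttributeSet::overlap && std::get<2>(b) == AttributeSet::copy;
        };
        // Two entries describe the same cell if index and rank agree; the attribute is ignored.
        auto overlapEqual = [](const Entry& a, const Entry& b) {
            return std::get<0>(a) == std::get<0>(b) && std::get<1>(a) == std::get<1>(b);
        };

        std::sort(exportList.begin(), exportList.end(), compare);
        exportList.erase(std::unique(exportList.begin(), exportList.end(), overlapEqual),
                         exportList.end());

        for (const auto& e : exportList)
            std::cout << "cell " << std::get<0>(e) << " -> rank " << std::get<1>(e) << " as "
                      << (std::get<2>(e) == AttributeSet::overlap ? "overlap" : "copy") << "\n";
        // Prints: cell 3 -> rank 2 as copy, then cell 7 -> rank 1 as overlap.
    }

In the patch itself only the range past ownerEnd is sorted and de-duplicated, so the owner entries are never touched.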
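
Reviewer note, not part of the patch: findWellperforatedCells is essentially an index inversion followed by flag setting. The sketch below reproduces that logic without the CpGrid and cpgrid::WellConnections types; markWellCells, globalCell and wellCartesianCells are illustrative names, and the well perforations are given here directly as Cartesian indices, whereas the real code obtains compressed cell indices from WellConnections.

    #include <array>
    #include <vector>

    // Return a 0/1 flag per compressed (active) cell that is perforated by some well.
    std::vector<int> markWellCells(const std::array<int, 3>& cartDims,
                                   const std::vector<int>& globalCell,   // compressed -> Cartesian index
                                   const std::vector<std::vector<int>>& wellCartesianCells)
    {
        const int numCells = static_cast<int>(globalCell.size());

        // Invert the compressed-to-Cartesian mapping; inactive cells stay at -1.
        std::vector<int> cartesianToCompressed(cartDims[0] * cartDims[1] * cartDims[2], -1);
        for (int c = 0; c < numCells; ++c)
            cartesianToCompressed[globalCell[c]] = c;

        std::vector<int> cellHasWell(numCells, 0);
        for (const auto& well : wellCartesianCells)
            for (int cartIdx : well) {
                const int compressed = cartesianToCompressed[cartIdx];
                if (compressed >= 0)          // skip perforations in inactive cells
                    cellHasWell[compressed] = 1;
            }
        return cellHasWell;
    }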
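
Reviewer note, not part of the patch: the CpGridData.cpp hunk turns the former two-way owner/overlap classification of cells into a three-way one. With stand-in enums for Dune's attribute set and partition types, the mapping it implements is:

    enum class Attribute { owner, overlap, copy };
    enum class PartitionType { InteriorEntity, OverlapEntity, GhostEntity };

    // Owner cells are interior, copy cells (those added on the outermost overlap level
    // and well-perforated overlap cells) become ghosts, the remaining cells stay overlap.
    inline PartitionType cellPartitionType(Attribute a)
    {
        return a == Attribute::owner ? PartitionType::InteriorEntity
             : a == Attribute::copy  ? PartitionType::GhostEntity
                                     : PartitionType::OverlapEntity;
    }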