3 #ifndef DUNE_ISTL_REPARTITION_HH
4 #define DUNE_ISTL_REPARTITION_HH
21 #include <dune/common/timer.hh>
22 #include <dune/common/enumset.hh>
23 #include <dune/common/stdstreams.hh>
24 #include <dune/common/parallel/mpitraits.hh>
25 #include <dune/common/parallel/communicator.hh>
26 #include <dune/common/parallel/indexset.hh>
27 #include <dune/common/parallel/indicessyncer.hh>
28 #include <dune/common/parallel/remoteindices.hh>
29 #include <dune/common/rangeutilities.hh>
48 #if HAVE_PARMETIS && defined(REALTYPEWIDTH)
54 #if HAVE_PARMETIS && defined(IDXTYPEWIDTH)
56 #elif HAVE_PARMETIS && defined(HAVE_SCOTCH_NUM_TYPE)
57 using idx_t = SCOTCH_Num;
80 template<class G, class T1, class T2>
84 typedef typename IndexSet::LocalIndex::Attribute Attribute;
86 IndexSet& indexSet = oocomm.indexSet();
89 std::size_t sum=0, needed = graph.noVertices()-indexSet.size();
90 std::vector<std::size_t> neededall(oocomm.communicator().size(), 0);
92 MPI_Allgather(&needed, 1, MPITraits<std::size_t>::getType(), &(neededall[0]), 1, MPITraits<std::size_t>::getType(), oocomm.communicator());
102 auto end = indexSet.end();
103 for(auto it = indexSet.begin(); it != end; ++it)
104 maxgi=std::max(maxgi,it->global());
113 maxgi=maxgi+neededall[i];
116 std::map<int,SLList<std::pair<T1,Attribute> > > globalIndices;
117 storeGlobalIndicesOfRemoteIndices(globalIndices, oocomm.remoteIndices());
118 indexSet.beginResize();
120 for(auto vertex = graph.begin(), vend=graph.end(); vertex != vend; ++vertex) {
121 const typename IndexSet::IndexPair* pair=lookup.pair(*vertex);
129 indexSet.endResize();
131 repairLocalIndexPointers(globalIndices, oocomm.remoteIndices(), indexSet);
136 std::cout<<"Holes are filled!"<<std::endl;
144 class ParmetisDuneIndexMap
147 template<class Graph, class OOComm>
148 ParmetisDuneIndexMap(const Graph& graph, const OOComm& com);
149 int toParmetis(int i) const
151 return duneToParmetis[i];
153 int toLocalParmetis(int i) const
155 return duneToParmetis[i]-base_;
157 int operator[](int i) const
159 return duneToParmetis[i];
161 int toDune(int i) const
163 return parmetisToDune[i];
165 std::vector<int>::size_type numOfOwnVtx() const
167 return parmetisToDune.size();
176 std::vector<int> duneToParmetis;
177 std::vector<int> parmetisToDune;
179 std::vector<Metis::idx_t> vtxDist_;
182 template<class G, class OOComm>
183 ParmetisDuneIndexMap::ParmetisDuneIndexMap(const G& graph, const OOComm& oocomm)
184 : duneToParmetis(graph.noVertices(), -1), vtxDist_(oocomm.communicator().size()+1)
186 int npes=oocomm.communicator().size(), mype=oocomm.communicator().rank();
188 typedef typename OOComm::OwnerSet OwnerSet;
191 auto end = oocomm.indexSet().end();
192 for(auto index = oocomm.indexSet().begin(); index != end; ++index) {
193 if (OwnerSet::contains(index->local().attribute())) {
197 parmetisToDune.resize(numOfOwnVtx);
198 std::vector<int> globalNumOfVtx(npes);
200 MPI_Allgather(&numOfOwnVtx, 1, MPI_INT, &(globalNumOfVtx[0]), 1, MPI_INT, oocomm.communicator());
204 for(int i=0; i<npes; i++) {
206 base += globalNumOfVtx[i];
208 vtxDist_[i+1] = vtxDist_[i] + globalNumOfVtx[i];
214 std::cout << oocomm.communicator().rank()<<" vtxDist: ";
215 for(int i=0; i<= npes; ++i)
216 std::cout << vtxDist_[i]<<" ";
217 std::cout<<std::endl;
224 auto vend = graph.end();
225 for(auto vertex = graph.begin(); vertex != vend; ++vertex) {
226 const typename OOComm::ParallelIndexSet::IndexPair* index=oocomm.globalLookup().pair(*vertex);
228 if (OwnerSet::contains(index->local().attribute())) {
230 parmetisToDune[base-base_]=index->local();
231 duneToParmetis[index->local()] = base++;
241 std::cout <<oocomm.communicator().rank()<<": before ";
242 for(std::size_t i=0; i<duneToParmetis.size(); ++i)
243 std::cout<<duneToParmetis[i]<<" ";
244 std::cout<<std::endl;
246 oocomm.copyOwnerToAll(duneToParmetis,duneToParmetis);
248 std::cout <<oocomm.communicator().rank()<<": after ";
249 for(std::size_t i=0; i<duneToParmetis.size(); ++i)
250 std::cout<<duneToParmetis[i]<<" ";
251 std::cout<<std::endl;
263 template<class Flags, class IS>
266 std::map<int,int> sizes;
268 for(auto i=idxset.begin(), end=idxset.end(); i!=end; ++i)
269 if(Flags::contains(i->local().attribute()))
270 ++sizes[toPart[i->local()]];
273 for(auto i=sizes.begin(), end=sizes.end(); i!=end; ++i)
274 interfaces()[i->first].first.reserve(i->second);
277 for(auto i=idxset.begin(), end=idxset.end(); i!=end; ++i)
278 if(Flags::contains(i->local().attribute()))
279 interfaces()[toPart[i->local()]].first.add(i->local());
284 interfaces()[proc].second.reserve(size);
288 interfaces()[proc].second.add(idx);
290 template<typename TG>
294 for(auto idx=indices.begin(); idx!= indices.end(); ++idx) {
295 interfaces()[idx->second].second.add(i++);
316 void createSendBuf(std::vector<GI>& ownerVec, std::set<GI>& overlapVec, std::set<int>& neighbors, char *sendBuf, int buffersize, MPI_Comm comm) {
318 std::size_t s=ownerVec.size();
322 MPI_Pack(&s, 1, MPITraits<std::size_t>::getType(), sendBuf, buffersize, &pos, comm);
323 MPI_Pack(&(ownerVec[0]), s, MPITraits<GI>::getType(), sendBuf, buffersize, &pos, comm);
324 s = overlapVec.size();
325 MPI_Pack(&s, 1, MPITraits<std::size_t>::getType(), sendBuf, buffersize, &pos, comm);
326 for(auto i=overlapVec.begin(), end= overlapVec.end(); i != end; ++i)
327 MPI_Pack(const_cast<GI*>(&(*i)), 1, MPITraits<GI>::getType(), sendBuf, buffersize, &pos, comm);
330 MPI_Pack(&s, 1, MPITraits<std::size_t>::getType(), sendBuf, buffersize, &pos, comm);
332 for(auto i=neighbors.begin(), end= neighbors.end(); i != end; ++i)
333 MPI_Pack(const_cast<int*>(&(*i)), 1, MPI_INT, sendBuf, buffersize, &pos, comm);
344 void saveRecvBuf(char *recvBuf, int bufferSize, std::vector<std::pair<GI,int> >& ownerVec,
345 std::set<GI>& overlapVec, std::set<int>& neighbors, RedistributeInterface& inf, int from, MPI_Comm comm) {
349 MPI_Unpack(recvBuf, bufferSize, &pos, &size, 1, MPITraits<std::size_t>::getType(), comm);
350 inf.reserveSpaceForReceiveInterface(from, size);
351 ownerVec.reserve(ownerVec.size()+size);
352 for(; size!=0; --size) {
354 MPI_Unpack(recvBuf, bufferSize, &pos, &gi, 1, MPITraits<GI>::getType(), comm);
355 ownerVec.push_back(std::make_pair(gi,from));
358 MPI_Unpack(recvBuf, bufferSize, &pos, &size, 1, MPITraits<std::size_t>::getType(), comm);
359 typename std::set<GI>::iterator ipos = overlapVec.begin();
360 Dune::dverb << "unpacking "<<size<<" overlap"<<std::endl;
361 for(; size!=0; --size) {
363 MPI_Unpack(recvBuf, bufferSize, &pos, &gi, 1, MPITraits<GI>::getType(), comm);
364 ipos=overlapVec.insert(ipos, gi);
367 MPI_Unpack(recvBuf, bufferSize, &pos, &size, 1, MPITraits<std::size_t>::getType(), comm);
368 Dune::dverb << "unpacking "<<size<<" neighbors"<<std::endl;
369 typename std::set<int>::iterator npos = neighbors.begin();
370 for(; size!=0; --size) {
372 MPI_Unpack(recvBuf, bufferSize, &pos, &n, 1, MPI_INT, comm);
373 npos=neighbors.insert(npos, n);
391 void getDomain(const MPI_Comm& comm, T *part, int numOfOwnVtx, int nparts, int *myDomain, std::vector<int> &domainMapping) {
393 MPI_Comm_size(comm, &npes);
394 MPI_Comm_rank(comm, &mype);
401 std::vector<int> domain(nparts, 0);
402 std::vector<int> assigned(npes, 0);
404 domainMapping.assign(domainMapping.size(), -1);
407 for (i=0; i<numOfOwnVtx; i++) {
411 std::vector<int> domainMatrix(npes * nparts, -1);
414 int *buf = new int[nparts];
415 for (i=0; i<nparts; i++) {
417 domainMatrix[mype*nparts+i] = domain[i];
420 int src = (mype-1+npes)%npes;
421 int dest = (mype+1)%npes;
423 for (i=0; i<npes-1; i++) {
424 MPI_Sendrecv_replace(buf, nparts, MPI_INT, dest, 0, src, 0, comm, &status);
426 pe = ((mype-1-i)+npes)%npes;
427 for(j=0; j<nparts; j++) {
429 domainMatrix[pe*nparts+j] = buf[j];
437 int maxOccurance = 0;
439 std::set<std::size_t> unassigned;
441 for(i=0; i<nparts; i++) {
442 for(j=0; j<npes; j++) {
444 if (assigned[j]==0) {
445 if (maxOccurance < domainMatrix[j*nparts+i]) {
446 maxOccurance = domainMatrix[j*nparts+i];
454 domainMapping[i] = pe;
464 unassigned.insert(i);
469 typename std::vector<int>::iterator next_free = assigned.begin();
471 for(auto udomain = unassigned.begin(),
472 end = unassigned.end(); udomain != end; ++udomain)
474 next_free = std::find_if(next_free, assigned.end(), std::bind(std::less<int>(), std::placeholders::_1, 1));
475 assert(next_free != assigned.end());
476 domainMapping[*udomain] = next_free-assigned.begin();
484 bool operator()(const T& t1, const T& t2) const
502 void mergeVec(std::vector<std::pair<GI, int> >& ownerVec, std::set<GI>& overlapSet) {
506 if(ownerVec.size()>0)
508 auto old=ownerVec.begin();
509 for(auto i=old+1, end=ownerVec.end(); i != end; old=i++)
511 if(i->first==old->first)
513 std::cerr<<"Value at index "<<old-ownerVec.begin()<<" is the same as at index "
514 <<i-ownerVec.begin()<<" ["<<old->first<<","<<old->second<<"]==["
515 <<i->first<<","<<i->second<<"]"<<std::endl;
523 auto v=ownerVec.begin(), vend=ownerVec.end();
524 for(auto s=overlapSet.begin(), send=overlapSet.end(); s!=send;)
526 while(v!=vend && v->first<*s) ++v;
527 if(v!=vend && v->first==*s) {
532 overlapSet.erase(tmp);
552 template<class OwnerSet, class Graph, class IS, class GI>
553 void getNeighbor(const Graph& g, std::vector<int>& part,
554 typename Graph::VertexDescriptor vtx, const IS& indexSet,
555 int toPe, std::set<GI>& neighbor, std::set<int>& neighborProcs) {
556 for(auto edge=g.beginEdges(vtx), end=g.endEdges(vtx); edge!=end; ++edge)
558 const typename IS::IndexPair* pindex = indexSet.pair(edge.target());
560 if(part[pindex->local()]!=toPe || !OwnerSet::contains(pindex->local().attribute()))
563 neighbor.insert(pindex->global());
564 neighborProcs.insert(part[pindex->local()]);
569 template<class T, class I>
570 void my_push_back(std::vector<T>& ownerVec, const I& index, [[maybe_unused]] int proc)
572 ownerVec.push_back(index);
575 template<class T, class I>
576 void my_push_back(std::vector<std::pair<T,int> >& ownerVec, const I& index, int proc)
578 ownerVec.push_back(std::make_pair(index,proc));
581 void reserve(std::vector<T>&, RedistributeInterface&, int)
584 void reserve(std::vector<std::pair<T,int> >& ownerVec, RedistributeInterface& redist, int proc)
586 redist.reserveSpaceForReceiveInterface(proc, ownerVec.size());
607 template<class OwnerSet, class G, class IS, class T, class GI>
608 void getOwnerOverlapVec(const G& graph, std::vector<int>& part, IS& indexSet,
609 [[maybe_unused]] int myPe, int toPe, std::vector<T>& ownerVec, std::set<GI>& overlapSet,
610 RedistributeInterface& redist, std::set<int>& neighborProcs) {
611 for(auto index = indexSet.begin(); index != indexSet.end(); ++index) {
613 if(OwnerSet::contains(index->local().attribute()))
615 if(part[index->local()]==toPe)
617 getNeighbor<OwnerSet>(graph, part, index->local(), indexSet,
618 toPe, overlapSet, neighborProcs);
619 my_push_back(ownerVec, index->global(), toPe);
623 reserve(ownerVec, redist, toPe);
634 template<class F, class IS>
635 inline bool isOwner(IS& indexSet, int index) {
637 const typename IS::IndexPair* pindex=indexSet.pair(index);
640 return F::contains(pindex->local().attribute());
644 class BaseEdgeFunctor
647 BaseEdgeFunctor(Metis::idx_t* adj, const ParmetisDuneIndexMap& data)
648 : i_(), adj_(adj), data_(data)
652 void operator()(const T& edge)
656 adj_[i_] = data_.toParmetis(edge.target());
667 const ParmetisDuneIndexMap& data_;
672 : public BaseEdgeFunctor
674 EdgeFunctor(Metis::idx_t* adj, const ParmetisDuneIndexMap& data, std::size_t)
675 : BaseEdgeFunctor(adj, data)
685 template<class G, class V, class E, class VM, class EM>
686 class EdgeFunctor<Dune::Amg::PropertiesGraph<G,V,E,VM,EM> >
687 : public BaseEdgeFunctor
690 EdgeFunctor(Metis::idx_t* adj, const ParmetisDuneIndexMap& data, std::size_t s)
691 : BaseEdgeFunctor(adj, data)
697 void operator()(const T& edge)
699 weight_[index()]=edge.properties().depends() ? 3 : 1;
700 BaseEdgeFunctor::operator()(edge);
731 template<class F, class G, class IS, class EW>
732 void getAdjArrays(G& graph, IS& indexSet, Metis::idx_t *xadj,
736 auto vend = graph.end();
738 for(auto vertex = graph.begin(); vertex != vend; ++vertex) {
739 if (isOwner<F>(indexSet,*vertex)) {
741 auto eend = vertex.end();
742 xadj[j] = ew.index();
744 for(auto edge = vertex.begin(); edge != eend; ++edge) {
749 xadj[j] = ew.index();
753 template<class G, class T1, class T2>
757 RedistributeInterface& redistInf,
760 #ifndef METIS_VER_MAJOR
765 Metis::idx_t *adjwgt, int *wgtflag, int *numflag, int *nparts,
769 Metis::idx_t *adjwgt, int *wgtflag, int *numflag, int *nparts,
775 template<class S, class T>
778 for(T *cur=array, *end=array+l; cur!=end; ++cur)
782 template<class S, class T>
783 inline bool isValidGraph(std::size_t noVtx, std::size_t gnoVtx, S noEdges, T* xadj,
784 T* adjncy, bool checkSymmetry)
790 if(static_cast<S>(xadj[vtx])>noEdges || signbit(xadj[vtx])) {
791 std::cerr << "Check graph: xadj["<<vtx<<"]="<<xadj[vtx]<<" (>"
792 <<noEdges<<") out of range!"<<std::endl;
795 if(static_cast<S>(xadj[vtx+1])>noEdges || signbit(xadj[vtx+1])) {
796 std::cerr << "Check graph: xadj["<<vtx+1<<"]="<<xadj[vtx+1]<<" (>"
797 <<noEdges<<") out of range!"<<std::endl;
802 if(signbit(adjncy[i]) || ((std::size_t)adjncy[i])>gnoVtx) {
803 std::cerr<<" Edge "<<adjncy[i]<<" out of range ["<<0<<","<<noVtx<<")"
813 for(Metis::idx_t j=xadj[target]; j< xadj[target+1]; ++j)
817 std::cerr<<"Edge ("<<target<<","<<vtx<<") "<<i<<" time"<<std::endl;
826 template<class M, class T1, class T2>
834 std::cout<<"Repartitioning from "<<oocomm.communicator().size()
835 <<" to "<<nparts<<" parts"<<std::endl;
839 int* part = new int[1];
880 xadj[1]=noNeighbours;
923 if(n->first != rank) {
933 noNeighbours, xadj, adjncy, false));
942 for(int i=0; i<nparts; ++i)
943 tpwgts[i]=1.0/nparts;
946 Dune::dinfo<<rank<<" vtxdist: ";
948 Dune::dinfo<<std::endl<<rank<<" xadj: ";
950 Dune::dinfo<<std::endl<<rank<<" adjncy: ";
954 Dune::dinfo<<std::endl<<rank<<" vwgt: ";
956 Dune::dinfo<<std::endl<<rank<<" adwgt: ";
959 Dune::dinfo<<std::endl;
962 std::cout<<"Creating comm graph took "<<time.elapsed()<<std::endl;
965 #ifdef PARALLEL_PARTITION
968 int options[5] ={ 0,1,15,0,0};
973 ParMETIS_V3_PartKway(vtxdist, xadj, adjncy,
974 vwgt, adjwgt, &wgtflag,
975 &numflag, &ncon, &nparts, tpwgts, &ubvec, options, &edgecut, part,
978 std::cout<<"ParMETIS took "<<time.elapsed()<<std::endl;
982 std::size_t gnoedges=0;
985 Dune::dverb<<"noNeighbours: "<<noNeighbours<<std::endl;
987 MPI_Allgather(&noNeighbours,1,MPI_INT,noedges,1, MPI_INT,oocomm.communicator());
990 std::cout<<"Gathering noedges took "<<time1.elapsed()<<std::endl;
1005 std::size_t localNoVtx=vtxdist[rank+1]-vtxdist[rank];
1010 Dune::dinfo<<"noedges: ";
1012 Dune::dinfo<<std::endl;
1020 noxs[i]=vtxdist[i+1]-vtxdist[i]+1;
1021 novs[i]=vtxdist[i+1]-vtxdist[i];
1026 for(int *xcurr = xdispl, *vcurr = vdispl, *end=vdispl+oocomm.communicator().size();
1027 vcurr!=end; ++vcurr, ++xcurr, ++so, ++offset) {
1029 *xcurr = offset + *so;
1035 for(int *curr=noedges, *end=noedges+oocomm.communicator().size()-1;
1036 curr!=end; ++curr) {
1041 Dune::dinfo<<"displ: ";
1043 Dune::dinfo<<std::endl;
1047 for(int *curr=noedges, *end=noedges+oocomm.communicator().size();
1052 Dune::dinfo<<"gxadjlen: "<<gxadjlen<<" noVertices: "<<noVertices
1053 <<" gnoedges: "<<gnoedges<<std::endl;
1064 std::cout<<"Preparing global graph took "<<time1.elapsed()<<std::endl;
1068 MPI_Allgatherv(xadj,2,MPITraits<Metis::idx_t>::getType(),
1069 gxadj,noxs,xdispl,MPITraits<Metis::idx_t>::getType(),
1071 MPI_Allgatherv(adjncy,noNeighbours,MPITraits<Metis::idx_t>::getType(),
1072 gadjncy,noedges,displ,MPITraits<Metis::idx_t>::getType(),
1075 MPI_Allgatherv(adjwgt,noNeighbours,MPITraits<Metis::idx_t>::getType(),
1076 gadjwgt,noedges,displ,MPITraits<Metis::idx_t>::getType(),
1078 MPI_Allgatherv(vwgt,localNoVtx,MPITraits<Metis::idx_t>::getType(),
1079 gvwgt,novs,vdispl,MPITraits<Metis::idx_t>::getType(),
1083 std::cout<<"Gathering global graph data took "<<time1.elapsed()<<std::endl;
1097 int lprev = vtxdist[i]-vtxdist[i-1];
1098 int l = vtxdist[i+1]-vtxdist[i];
1100 assert((start+l+offset)-gxadj<=static_cast<Metis::idx_t>(gxadjlen));
1101 increment = *(start-1);
1102 std::transform(start+offset, start+l+offset, start, std::bind(std::plus<Metis::idx_t>(), std::placeholders::_1, increment));
1104 Dune::dinfo<<std::endl<<"shifted xadj:";
1106 Dune::dinfo<<std::endl<<" gadjncy: ";
1109 Dune::dinfo<<std::endl<<" gvwgt: ";
1111 Dune::dinfo<<std::endl<<"adjwgt: ";
1113 Dune::dinfo<<std::endl;
1117 std::cout<<"Postprocessing global graph data took "<<time1.elapsed()<<std::endl;
1121 gxadj, gadjncy, true));
1125 std::cout<<"Creating graph on 1 process took "<<time.elapsed()<<std::endl;
1127 #if METIS_VER_MAJOR >= 5
1130 METIS_SetDefaultOptions(moptions);
1131 moptions[METIS_OPTION_NUMBERING] = numflag;
1132 METIS_PartGraphRecursive(&noVertices, &ncon, gxadj, gadjncy, gvwgt, NULL, gadjwgt,
1133 &nparts, NULL, NULL, moptions, &edgecut, gpart);
1135 int options[5] = {0, 1, 1, 3, 3};
1137 METIS_PartGraphRecursive(&noVertices, gxadj, gadjncy, gvwgt, gadjwgt, &wgtflag,
1138 &numflag, &nparts, options, &edgecut, gpart);
1142 std::cout<<"METIS took "<<time.elapsed()<<std::endl;
1145 Dune::dinfo<<std::endl<<"part:";
1156 MPI_Scatter(gpart, 1, MPITraits<Metis::idx_t>::getType(), part, 1,
1157 MPITraits<Metis::idx_t>::getType(), 0, comm);
1181 Dune::dinfo<<" repart "<<rank <<" -> "<< part[0]<<std::endl;
1183 std::vector<int> realpart(mat.N(), part[0]);
1189 std::cout<<"Scattering repartitioning took "<<time.elapsed()<<std::endl;
1197 std::cout<<"Filling index set took "<<time.elapsed()<<std::endl;
1205 std::cout<<"Average no neighbours was "<<noNeighbours<<std::endl;
1210 std::cout<<"Building index sets took "<<time.elapsed()<<std::endl;
1232 template<class G, class T1, class T2>
1245 std::cout<<"Filling holes took "<<time.elapsed()<<std::endl;
1252 double t1=0.0, t2=0.0, t3=0.0, t4=0.0, tSum=0.0;
1280 typedef typename OOComm::OwnerSet OwnerSet;
1285 ParmetisDuneIndexMap indexMap(graph,oocomm);
1287 for(std::size_t i=0; i < indexMap.numOfOwnVtx(); ++i)
1292 std::cerr<<"ParMETIS not activated. Will repartition to 1 domain instead of requested "
1293 <<nparts<<" domains."<<std::endl;
1302 EdgeFunctor<G> ef(adjncy, indexMap, graph.noEdges());
1303 getAdjArrays<OwnerSet>(graph, oocomm.globalLookup(), xadj, ef);
1309 Metis::idx_t numflag=0, wgtflag=0, options[3], edgecut=0, ncon=1;
1312 for(int i=0; i<nparts; ++i)
1313 tpwgts[i]=1.0/nparts;
1322 wgtflag = (ef.getWeights()!=NULL) ? 1 : 0;
1330 std::cout<<std::endl;
1331 std::cout<<"Testing ParMETIS_V3_PartKway with options[1-2] = {"
1332 <<options[1]<<" "<<options[2]<<"}, Ncon: "
1333 <<ncon<<", Nparts: "<<nparts<<std::endl;
1346 std::cout<<"Preparing for parmetis took "<<time.elapsed()<<std::endl;
1353 ParMETIS_V3_PartKway(indexMap.vtxDist(), xadj, adjncy,
1354 NULL, ef.getWeights(), &wgtflag,
1355 &numflag, &ncon, &nparts, tpwgts, ubvec, options, &edgecut, part, &const_cast<MPI_Comm&>(comm));
1366 std::cout<<std::endl;
1367 std::cout<<"ParMETIS_V3_PartKway reported a cut of "<<edgecut<<std::endl;
1368 std::cout<<std::endl;
1370 std::cout<<mype<<": PARMETIS-Result: ";
1371 for(int i=0; i < indexMap.vtxDist()[mype+1]-indexMap.vtxDist()[mype]; ++i) {
1372 std::cout<<part[i]<<" ";
1374 std::cout<<std::endl;
1375 std::cout<<"Testing ParMETIS_V3_PartKway with options[1-2] = {"
1376 <<options[1]<<" "<<options[2]<<"}, Ncon: "
1377 <<ncon<<", Nparts: "<<nparts<<std::endl;
1390 std::cout<<"Parmetis took "<<time.elapsed()<<std::endl;
1397 for(std::size_t i=0; i<indexMap.numOfOwnVtx(); ++i)
1407 std::vector<int> domainMapping(nparts);
1409 getDomain(comm, part, indexMap.numOfOwnVtx(), nparts, &myDomain, domainMapping);
1414 std::cout<<mype<<": myDomain: "<<myDomain<<std::endl;
1415 std::cout<<mype<<": DomainMapping: ";
1416 for(auto j : range(nparts)) {
1417 std::cout<<" do: "<<j<<" pe: "<<domainMapping[j]<<" ";
1419 std::cout<<std::endl;
1426 std::vector<int> setPartition(oocomm.indexSet().size(), -1);
1429 for(auto index = oocomm.indexSet().begin(); index != oocomm.indexSet().end(); ++index)
1430 if(OwnerSet::contains(index->local().attribute())) {
1431 setPartition[index->local()]=domainMapping[part[i++]];
1438 if (SolverCategory::category(oocomm) ==
1439 static_cast<int>(SolverCategory::nonoverlapping))
1446 std::cout<<"Creating indexsets took "<<time.elapsed()<<std::endl;
1453 template<class G, class T1, class T2>
1461 typedef typename OOComm::OwnerSet OwnerSet;
1495 std::set<int> recvFrom;
1504 std::set<int> tsendTo;
1505 for(auto i=setPartition.begin(), iend = setPartition.end(); i!=iend; ++i)
1508 noSendTo = tsendTo.size();
1509 sendTo = new int[noSendTo];
1511 for(auto i=tsendTo.begin(); i != tsendTo.end(); ++i, ++idx)
1517 int* gsendToDispl = new int[oocomm.communicator().size()+1];
1519 MPI_Allgather(&noSendTo, 1, MPI_INT, gnoSend, 1,
1523 int totalNoRecv = 0;
1524 for(int i=0; i<npes; ++i)
1525 totalNoRecv += gnoSend[i];
1527 int *gsendTo = new int[totalNoRecv];
1531 for(int i=0; i<npes; ++i)
1532 gsendToDispl[i+1]=gsendToDispl[i]+gnoSend[i];
1535 MPI_Allgatherv(sendTo, noSendTo, MPI_INT, gsendTo, gnoSend, gsendToDispl,
1539 for(int proc=0; proc < npes; ++proc)
1540 for(int i=gsendToDispl[proc]; i < gsendToDispl[proc+1]; ++i)
1541 if(gsendTo[i]==mype)
1542 recvFrom.insert(proc);
1544 bool existentOnNextLevel = recvFrom.size()>0;
1548 delete[] gsendToDispl;
1553 if(recvFrom.size()) {
1554 std::cout<<mype<<": recvFrom: ";
1555 for(auto i=recvFrom.begin(); i!= recvFrom.end(); ++i) {
1560 std::cout<<std::endl<<std::endl;
1561 std::cout<<mype<<": sendTo: ";
1562 for(int i=0; i<noSendTo; i++) {
1563 std::cout<<sendTo[i]<<" ";
1565 std::cout<<std::endl<<std::endl;
1570 std::cout<<" Communicating the receive information took "<<
1571 time.elapsed()<<std::endl;
1585 typedef typename OOComm::ParallelIndexSet::GlobalIndex GI;
1586 typedef std::vector<GI> GlobalVector;
1587 std::vector<std::pair<GI,int> > myOwnerVec;
1588 std::set<GI> myOverlapSet;
1589 GlobalVector sendOwnerVec;
1590 std::set<GI> sendOverlapSet;
1591 std::set<int> myNeighbors;
1596 char **sendBuffers= new char*[noSendTo];
1597 MPI_Request *requests = new MPI_Request[noSendTo];
1600 for(int i=0; i < noSendTo; ++i) {
1602 sendOwnerVec.clear();
1603 sendOverlapSet.clear();
1606 std::set<int> neighbors;
1607 getOwnerOverlapVec<OwnerSet>(graph, setPartition, oocomm.globalLookup(),
1608 mype, sendTo[i], sendOwnerVec, sendOverlapSet, redistInf,
1614 MPI_Pack_size(1, MPITraits<std::size_t>::getType(), oocomm.communicator(), &buffersize);
1615 MPI_Pack_size(sendOwnerVec.size(), MPITraits<GI>::getType(), oocomm.communicator(), &tsize);
1617 MPI_Pack_size(1, MPITraits<std::size_t>::getType(), oocomm.communicator(), &tsize);
1619 MPI_Pack_size(sendOverlapSet.size(), MPITraits<GI>::getType(), oocomm.communicator(), &tsize);
1620 buffersize += tsize;
1621 MPI_Pack_size(1, MPITraits<std::size_t>::getType(), oocomm.communicator(), &tsize);
1622 buffersize += tsize;
1623 MPI_Pack_size(neighbors.size(), MPI_INT, oocomm.communicator(), &tsize);
1624 buffersize += tsize;
1626 sendBuffers[i] = new char[buffersize];
1629 std::cout<<mype<<" sending "<<sendOwnerVec.size()<<" owner and "<<
1630 sendOverlapSet.size()<<" overlap to "<<sendTo[i]<<" buffersize="<<buffersize<<std::endl;
1632 createSendBuf(sendOwnerVec, sendOverlapSet, neighbors, sendBuffers[i], buffersize, oocomm.communicator());
1633 MPI_Issend(sendBuffers[i], buffersize, MPI_PACKED, sendTo[i], 99, oocomm.communicator(), requests+i);
1639 std::cout<<" Creating sends took "<<
1640 time.elapsed()<<std::endl;
1645 int noRecv = recvFrom.size();
1646 int oldbuffersize=0;
1651 MPI_Probe(MPI_ANY_SOURCE, 99, oocomm.communicator(), &stat);
1653 MPI_Get_count(&stat, MPI_PACKED, &buffersize);
1655 if(oldbuffersize<buffersize) {
1658 recvBuf = new char[buffersize];
1659 oldbuffersize = buffersize;
1661 MPI_Recv(recvBuf, buffersize, MPI_PACKED, stat.MPI_SOURCE, 99, oocomm.communicator(), &stat);
1662 saveRecvBuf(recvBuf, buffersize, myOwnerVec, myOverlapSet, myNeighbors, redistInf,
1672 MPI_Status *statuses = new MPI_Status[noSendTo];
1673 int send = MPI_Waitall(noSendTo, requests, statuses);
1676 if(send==MPI_ERR_IN_STATUS) {
1677 std::cerr<<mype<<": Error in sending :"<<std::endl;
1679 for(int i=0; i< noSendTo; i++)
1680 if(statuses[i].MPI_ERROR!=MPI_SUCCESS) {
1683 MPI_Error_string(statuses[i].MPI_ERROR, message, &messageLength);
1684 std::cerr<<" source="<<statuses[i].MPI_SOURCE<<" message: ";
1685 for(int j = 0; j < messageLength; j++)
1686 std::cout<<message[j];
1688 std::cerr<<std::endl;
1694 std::cout<<" Receiving and saving took "<<
1695 time.elapsed()<<std::endl;
1699 for(int i=0; i < noSendTo; ++i)
1700 delete[] sendBuffers[i];
1702 delete[] sendBuffers;
1717 if (!existentOnNextLevel) {
1719 color= MPI_UNDEFINED;
1721 MPI_Comm outputComm;
1724 outcomm = std::make_shared<OOComm>(outputComm,SolverCategory::category(oocomm), true);
1727 int newrank=outcomm->communicator().rank();
1729 std::vector<int> tneighbors;
1730 tneighbors.reserve(myNeighbors.size());
1732 typename OOComm::ParallelIndexSet& outputIndexSet = outcomm->indexSet();
1734 MPI_Allgather(&newrank, 1, MPI_INT, newranks, 1,
1739 for(auto i=myNeighbors.begin(), end=myNeighbors.end();
1741 assert(newranks[*i]>=0);
1742 std::cout<<*i<<"->"<<newranks[*i]<<" ";
1743 tneighbors.push_back(newranks[*i]);
1745 std::cout<<std::endl;
1747 for(auto i=myNeighbors.begin(), end=myNeighbors.end();
1749 tneighbors.push_back(newranks[*i]);
1753 myNeighbors.clear();
1758 std::cout<<" Calculating new neighbours ("<<tneighbors.size()<<") took "<<
1759 time.elapsed()<<std::endl;
1764 outputIndexSet.beginResize();
1767 std::sort(myOwnerVec.begin(), myOwnerVec.end(), SortFirst());
1772 using LocalIndexT = typename OOComm::ParallelIndexSet::LocalIndex;
1773 for(auto g=myOwnerVec.begin(), end =myOwnerVec.end(); g!=end; ++g, ++i ) {
1774 outputIndexSet.add(g->first,LocalIndexT(i, OwnerOverlapCopyAttributeSet::owner, true));
1781 std::cout<<" Adding owner indices took "<<
1782 time.elapsed()<<std::endl;
1791 mergeVec(myOwnerVec, myOverlapSet);
1795 myOwnerVec.swap(myOwnerVec);
1800 std::cout<<" Merging indices took "<<
1801 time.elapsed()<<std::endl;
1807 for(auto g=myOverlapSet.begin(), end=myOverlapSet.end(); g!=end; ++g, i++) {
1808 outputIndexSet.add(*g,LocalIndexT(i, OwnerOverlapCopyAttributeSet::copy, true));
1810 myOverlapSet.clear();
1811 outputIndexSet.endResize();
1813 #ifdef DUNE_ISTL_WITH_CHECKING
1815 auto end = outputIndexSet.end();
1816 for(auto index = outputIndexSet.begin(); index != end; ++index) {
1817 if (OwnerSet::contains(index->local().attribute())) {
1828 std::is_sorted(outputIndexSet.begin(), outputIndexSet.end(),
1829 [](const auto& v1, const auto& v2){ return v1.global() < v2.global();});
1834 std::cout<<" Adding overlap indices took "<<
1835 time.elapsed()<<std::endl;
1840 if(color != MPI_UNDEFINED) {
1841 outcomm->remoteIndices().setNeighbours(tneighbors);
1842 outcomm->remoteIndices().template rebuild<true>();
1852 std::cout<<" Storing indexsets took "<<
1853 time.elapsed()<<std::endl;
1859 tSum = t1 + t2 + t3 + t4;
1860 std::cout<<std::endl
1861 <<mype<<": WTime for step 1): "<<t1
1869 return color!=MPI_UNDEFINED;
1873 template<class G, class P, class T1, class T2, class R>
1875 std::shared_ptr<P>& outcomm,
1879 if(nparts!=oocomm.size())
1880 DUNE_THROW(NotImplemented, "only available for MPI programs");
1884 template<class G, class P, class T1, class T2, class R>
1886 std::shared_ptr<P>& outcomm,
1890 if(nparts!=oocomm.size())
1891 DUNE_THROW(NotImplemented, "only available for MPI programs");
owneroverlapcopy.hh: Classes providing communication interfaces for overlapping Schwarz methods.
graph.hh: Provides classes for building the matrix graph.
int globalOwnerVertices
Definition: repartition.hh:173
void fillIndexSetHoles(const G &graph, Dune::OwnerOverlapCopyCommunication< T1, T2 > &oocomm)
Fills the holes in an index set.
Definition: repartition.hh:81
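As a usage illustration (not part of this header): a minimal sketch assuming a distributed BCRSMatrix A and an already populated OwnerOverlapCopyCommunication comm exist; the helper name ensureGlobalIndices is made up for this example.

  #include <dune/common/fmatrix.hh>
  #include <dune/istl/bcrsmatrix.hh>
  #include <dune/istl/owneroverlapcopy.hh>
  #include <dune/istl/paamg/graph.hh>
  #include <dune/istl/repartition.hh>

  using Block  = Dune::FieldMatrix<double,1,1>;
  using Matrix = Dune::BCRSMatrix<Block>;
  using Comm   = Dune::OwnerOverlapCopyCommunication<int,int>;

  // Hypothetical helper: make sure every local matrix row has a global index
  // before any of the repartitioning routines below are called.
  void ensureGlobalIndices(Matrix& A, Comm& comm)
  {
    Dune::Amg::MatrixGraph<Matrix> graph(A); // view the sparsity pattern as a graph
    Dune::fillIndexSetHoles(graph, comm);    // adds the missing global indices to comm.indexSet()
  }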
bool graphRepartition(const G &graph, Dune::OwnerOverlapCopyCommunication< T1, T2 > &oocomm, Metis::idx_t nparts, std::shared_ptr< Dune::OwnerOverlapCopyCommunication< T1, T2 >> &outcomm, RedistributeInterface &redistInf, bool verbose=false)
Execute a graph repartition for a given graph and index set.
Definition: repartition.hh:1233
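A hedged calling sketch, reusing the Matrix and Comm aliases from the previous example; the matrix A, the communication object comm and the requested number of parts are assumed to come from the surrounding application, and the follow-up data redistribution is application specific.

  #include <memory>

  bool repartitionTo(Matrix& A, Comm& comm, int nparts)
  {
    Dune::Amg::MatrixGraph<Matrix> graph(A);
    std::shared_ptr<Comm> newComm;          // communication object of the new partitioning
    Dune::RedistributeInterface redistInf;  // records which indices move to which rank
    bool participates = Dune::graphRepartition(graph, comm,
                                               static_cast<Dune::Metis::idx_t>(nparts),
                                               newComm, redistInf, /*verbose=*/false);
    // participates is false on ranks that hold no data in the new partitioning;
    // newComm and redistInf can then drive the actual data redistribution.
    return participates;
  }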
void print_carray(S &os, T *array, std::size_t l)
Definition: repartition.hh:776
bool isValidGraph(std::size_t noVtx, std::size_t gnoVtx, S noEdges, T *xadj, T *adjncy, bool checkSymmetry)
Definition: repartition.hh:783
bool commGraphRepartition(const M &mat, Dune::OwnerOverlapCopyCommunication< T1, T2 > &oocomm, Metis::idx_t nparts, std::shared_ptr< Dune::OwnerOverlapCopyCommunication< T1, T2 >> &outcomm, RedistributeInterface &redistInf, bool verbose=false)
Definition: repartition.hh:827
bool buildCommunication(const G &graph, std::vector< int > &realparts, Dune::OwnerOverlapCopyCommunication< T1, T2 > &oocomm, std::shared_ptr< Dune::OwnerOverlapCopyCommunication< T1, T2 >> &outcomm, RedistributeInterface &redistInf, bool verbose=false)
Definition: repartition.hh:1454
float real_t
Definition: repartition.hh:51
std::size_t idx_t
Definition: repartition.hh:61
OwnerOverlapCopyAttributeSet::owner (enum value)
Definition: owneroverlapcopy.hh:59
Dune::OwnerOverlapCopyCommunication
A class setting up standard communication for a two-valued attribute set with owner/overlap/copy semantics.
Definition: owneroverlapcopy.hh:172
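A small, purely illustrative sketch of populating such a communication object by hand (MPI is assumed to be initialized already, e.g. via Dune::MPIHelper; the chosen global indices are arbitrary):

  using Comm = Dune::OwnerOverlapCopyCommunication<int,int>;
  using LI   = Comm::ParallelIndexSet::LocalIndex;
  using Attr = Dune::OwnerOverlapCopyAttributeSet;

  void buildExampleCommunication()
  {
    Comm comm(MPI_COMM_WORLD, Dune::SolverCategory::overlapping);
    comm.indexSet().beginResize();
    comm.indexSet().add(0, LI(0, Attr::owner, true)); // global index 0 is owned on this rank
    comm.indexSet().add(1, LI(1, Attr::copy,  true)); // global index 1 is a copy of a remote row
    comm.indexSet().endResize();
    comm.remoteIndices().rebuild<false>();            // exchange index information with the other ranks
  }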
const CollectiveCommunication< MPI_Comm > & communicator() const
Definition: owneroverlapcopy.hh:297
const ParallelIndexSet & indexSet() const
Get the underlying parallel index set.
Definition: owneroverlapcopy.hh:460
void copyCopyToAll(const T &source, T &dest) const
Communicate values from copy data points to all other data points.
Definition: owneroverlapcopy.hh:326
Dune::GlobalLookupIndexSet< ParallelIndexSet > GlobalLookupIndexSet
The type of the reverse lookup of indices.
Definition: owneroverlapcopy.hh:454
void buildGlobalLookup()
Definition: owneroverlapcopy.hh:493
const RemoteIndices & remoteIndices() const
Get the underlying remote indices.
Definition: owneroverlapcopy.hh:469
void copyOwnerToAll(const T &source, T &dest) const
Communicate values from owner data points to all other data points.
Definition: owneroverlapcopy.hh:309
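In this header that pattern propagates the duneToParmetis numbering from owner rows to their copies (repartition.hh:246); a generic hedged sketch:

  #include <vector>

  // Make a distributed vector consistent: every copy/overlap entry receives the
  // value stored on the rank that owns the corresponding index.
  template<class Comm>
  void makeConsistent(const Comm& comm, std::vector<int>& data)
  {
    // owner ranks are assumed to have filled in their entries of data already
    comm.copyOwnerToAll(data, data);
  }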
void freeGlobalLookup()
Definition: owneroverlapcopy.hh:518
Dune::ParallelIndexSet< GlobalIdType, LI, 512 > ParallelIndexSet
The type of the parallel index set.
Definition: owneroverlapcopy.hh:447
const GlobalLookupIndexSet & globalLookup() const
Definition: owneroverlapcopy.hh:524
Dune::Amg::MatrixGraph
The (undirected) graph of a matrix.
Definition: graph.hh:49
Dune::RedistributeInterface
Definition: repartition.hh:258
void reserveSpaceForReceiveInterface(int proc, int size)
Definition: repartition.hh:282
void buildReceiveInterface(std::vector< std::pair< TG, int > > &indices)
Definition: repartition.hh:291
~RedistributeInterface()
Definition: repartition.hh:299
void setCommunicator(MPI_Comm comm)
Definition: repartition.hh:259
void buildSendInterface(const std::vector< int > &toPart, const IS &idxset)
Definition: repartition.hh:264
void addReceiveIndex(int proc, std::size_t idx)
Definition: repartition.hh:286