//===========================================================================
//
// File: CpGridData.hpp
//
// Created: Sep 17 21:11:41 2013
//
// Author(s): Atgeirr F Rasmussen <atgeirr@sintef.no>
//            Bård Skaflestad    <bard.skaflestad@sintef.no>
//            Markus Blatt       <markus@dr-blatt.de>
//
// Comment: Major parts of this file originated in dune/grid/CpGrid.hpp
//          and were transferred here during refactoring for the parallelization.
//
// $Date$
//
// $Revision$
//
//===========================================================================

/*
  Copyright 2009, 2010 SINTEF ICT, Applied Mathematics.
  Copyright 2009, 2010, 2013 Statoil ASA.
  Copyright 2013 Dr. Blatt - HPC-Simulation-Software & Services

  This file is part of The Open Porous Media project (OPM).

  OPM is free software: you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation, either version 3 of the License, or
  (at your option) any later version.

  OPM is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with OPM. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef OPM_CPGRIDDATA_HEADER
#define OPM_CPGRIDDATA_HEADER

// Warning suppression for Dune includes.
#include <opm/grid/utility/platform_dependent/disable_warnings.h>

#include <dune/common/version.hh>
#include <dune/common/parallel/mpihelper.hh>
#ifdef HAVE_DUNE_ISTL
#include <dune/istl/owneroverlapcopy.hh>
#endif

#include <dune/common/parallel/collectivecommunication.hh>
#include <dune/common/parallel/indexset.hh>
#include <dune/common/parallel/interface.hh>
#include <dune/common/parallel/plocalindex.hh>
#include <dune/common/parallel/variablesizecommunicator.hh>
#include <dune/grid/common/gridenums.hh>

#include <opm/grid/utility/platform_dependent/reenable_warnings.h>

// Needed for std::transform, std::plus and std::accumulate used below.
#include <algorithm>
#include <array>
#include <functional>
#include <numeric>
#include <tuple>

#include "OrientedEntityTable.hpp"
#include "DefaultGeometryPolicy.hpp"

#include <opm/grid/utility/OpmParserIncludes.hpp>

#include "Entity2IndexDataHandle.hpp"
#include "GlobalIdMapping.hpp"
namespace Dune
{
class CpGrid;

namespace cpgrid
{

class IndexSet;
class IdSet;
class GlobalIdSet;
class PartitionTypeIndicator;
template<int,int> class Geometry;
template<int> class Entity;
template<int> class EntityRep;

namespace mover
{
template<class T, int i> struct Mover;
}

/// Struct that holds all the data needed to represent a CpGrid.
class CpGridData
{
    template<class T, int i> friend struct mover::Mover;

private:
    // Copying is not allowed.
    CpGridData(const CpGridData& g);

public:
    /// Constructor for the data of a given grid.
    explicit CpGridData(CpGrid& grid);
#if HAVE_MPI
    /// Constructor using a communicator.
    CpGridData(MPI_Comm comm);
#endif
    /// Constructor.
    CpGridData();
    /// Destructor.
    ~CpGridData();

    /// Number of leaf entities per codim in this process.
    int size(int codim) const;

    /// Number of leaf entities per geometry type in this process.
    int size(GeometryType type) const
    {
        if (type.isCube()) {
            return size(3 - type.dim());
        } else {
            return 0;
        }
    }
    /// Read the Sintef legacy grid format ('topogeom').
    void readSintefLegacyFormat(const std::string& grid_prefix);

    /// Write the Sintef legacy grid format ('topogeom').
    void writeSintefLegacyFormat(const std::string& grid_prefix) const;

    /// Read the Eclipse grid format ('grdecl').
    void readEclipseFormat(const std::string& filename, bool periodic_extension, bool turn_normals = false);

#if HAVE_OPM_PARSER
    /// Process an already parsed Eclipse deck.
    void processEclipseFormat(const Opm::Deck& deck, bool periodic_extension,
                              bool turn_normals = false, bool clip_z = false,
                              const std::vector<double>& poreVolume = std::vector<double>());

    /// Process an already constructed Eclipse grid.
    void processEclipseFormat(const Opm::EclipseGrid& ecl_grid, bool periodic_extension,
                              bool turn_normals = false, bool clip_z = false,
                              const std::vector<double>& poreVolume = std::vector<double>());
#endif

    /// Read the Eclipse grid format ('grdecl').
    void processEclipseFormat(const grdecl& input_data, double z_tolerance,
                              bool remove_ij_boundary, bool turn_normals = false);

    /// Extract Cartesian index triplet (i,j,k) of an active cell.
    /// \param c the active cell index.
    /// \param ijk the Cartesian index triplet.
    void getIJK(int c, std::array<int,3>& ijk) const
    {
        int gc = global_cell_[c];
        ijk[0] = gc % logical_cartesian_size_[0];  gc /= logical_cartesian_size_[0];
        ijk[1] = gc % logical_cartesian_size_[1];
        ijk[2] = gc / logical_cartesian_size_[1];
    }
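    // A minimal usage sketch for getIJK(); grid_data and cell are
    // hypothetical names:
    //
    //   std::array<int,3> ijk;
    //   grid_data.getIJK(cell, ijk);
    //   // ijk now holds the Cartesian (i,j,k) triplet of active cell 'cell'.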

    /// Make unique boundary ids for all intersections.
    void computeUniqueBoundaryIds();

    /// Is the grid currently using unique boundary ids?
    /// \return true if each boundary intersection has a unique id.
    bool uniqueBoundaryIds() const
    {
        return use_unique_boundary_ids_;
    }

    /// Set whether we want to have unique boundary ids.
    /// \param uids if true, unique boundary ids will be used.
    void setUniqueBoundaryIds(bool uids)
    {
        use_unique_boundary_ids_ = uids;
        if (use_unique_boundary_ids_ && unique_boundary_ids_.empty()) {
            computeUniqueBoundaryIds();
        }
    }

    /// Return the internalized ZCORN copy from the grid processing.
    const std::vector<double>& zcornData() const
    {
        return zcorn;
    }

    /// Get the index set.
    const IndexSet& indexSet() const
    {
        return *index_set_;
    }

    /// The logical Cartesian size of the grid.
    const std::array<int, 3>& logicalCartesianSize() const
    {
        return logical_cartesian_size_;
    }

    /// Redistribute a global grid.
    /// \param grid the grid the data is associated with.
    /// \param view_data the data of the global grid view.
    /// \param cell_part the partition number of each cell.
    /// \param overlap_layers the number of layers of overlap cells.
    void distributeGlobalGrid(const CpGrid& grid,
                              const CpGridData& view_data,
                              const std::vector<int>& cell_part,
                              int overlap_layers);

    /// Communicate objects for all codims on a given level.
    /// \param data the data handle describing the data and the communication.
    /// \param iftype the interface to use for the communication.
    /// \param dir the direction of the communication.
    template<class DataHandle>
    void communicate(DataHandle& data, InterfaceType iftype, CommunicationDirection dir);
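    // A minimal usage sketch for communicate(), assuming MyDataHandle is a
    // user-written handle providing the contains()/size()/gather()/scatter()
    // interface that the Dune communication machinery expects:
    //
    //   MyDataHandle handle(cell_data);
    //   grid_data.communicate(handle, Dune::InteriorBorder_All_Interface,
    //                         Dune::ForwardCommunication);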

private:

#if HAVE_MPI

    /// Gather data from a distributed grid view onto the global view.
    template<class DataHandle>
    void gatherData(DataHandle& data, CpGridData* global_view,
                    CpGridData* distributed_view);

    /// Gather data of one codimension onto the global view.
    template<int codim, class DataHandle>
    void gatherCodimData(DataHandle& data, CpGridData* global_data,
                         CpGridData* distributed_data);

    /// Scatter data from the global view to a distributed grid view.
    template<class DataHandle>
    void scatterData(DataHandle& data, CpGridData* global_data,
                     CpGridData* distributed_data);

    /// Scatter data of one codimension to a distributed grid view.
    template<int codim, class DataHandle>
    void scatterCodimData(DataHandle& data, CpGridData* global_data,
                          CpGridData* distributed_data);

    /// The type of the map describing communication interfaces.
    typedef VariableSizeCommunicator<>::InterfaceMap InterfaceMap;

    /// Communicate data of entities of a given codim over a Dune interface.
    template<int codim, class DataHandle>
    void communicateCodim(DataHandle& data, CommunicationDirection dir,
                          const Interface& interface);

    /// Communicate data of entities of a given codim over an interface map.
    template<int codim, class DataHandle>
    void communicateCodim(DataHandle& data, CommunicationDirection dir,
                          const InterfaceMap& interface);
#endif
    // Representing the topology

    /// Lookup of the faces attached to each cell.
    cpgrid::OrientedEntityTable<0, 1> cell_to_face_;
    /// Lookup of the attached cells for each face.
    cpgrid::OrientedEntityTable<1, 0> face_to_cell_;
    /// Lookup of the points of each face.
    Opm::SparseTable<int> face_to_point_;
    /// The eight corner points of each cell.
    std::vector< std::array<int,8> > cell_to_point_;
    /// The size of the underlying logical Cartesian grid.
    std::array<int, 3> logical_cartesian_size_;
    /// The logical Cartesian index of each active cell.
    std::vector<int> global_cell_;
    /// The tag of each face.
    cpgrid::EntityVariable<enum face_tag, 1> face_tag_; // {LEFT, BACK, TOP}
    /// The geometries of the grid (used by geomVector() below).
    cpgrid::DefaultGeometryPolicy geometry_;
    /// The type of a point in the grid.
    typedef FieldVector<double, 3> PointType;
    /// All corner points of the grid.
    std::vector<PointType> allcorners_; // Yes, this is already stored in the point geometries. \TODO Improve by removing it.
    /// Unique ids of the boundary intersections, if used.
    cpgrid::EntityVariable<int, 1> unique_boundary_ids_;
    /// The index set of the grid.
    cpgrid::IndexSet* index_set_;
    /// The local id set.
    const cpgrid::IdSet* local_id_set_;
    /// The global id set.
    GlobalIdSet* global_id_set_;
    /// Indicator of the partition type of the entities.
    PartitionTypeIndicator* partition_type_indicator_;

    /// The collective communication object.
    typedef MPIHelper::MPICommunicator MPICommunicator;
    typedef Dune::CollectiveCommunication<MPICommunicator> CollectiveCommunication;
    CollectiveCommunication ccobj_;

    // Boundary information (optional).
    bool use_unique_boundary_ids_;

    /// ZCORN data retained from the grid processing; may be empty.
    std::vector<double> zcorn;

#ifdef HAVE_DUNE_ISTL
    typedef Dune::OwnerOverlapCopyAttributeSet::AttributeSet AttributeSet;
#else
    /// The possible attributes of an entity in the parallel index set.
    enum AttributeSet{owner, overlap, copy};
#endif

#if HAVE_MPI

    /// The type of the parallel index set of the cells.
    typedef Dune::ParallelIndexSet<int,ParallelLocalIndex<AttributeSet>,512> ParallelIndexSet;

    /// The parallel index set of the cells.
    ParallelIndexSet cell_indexset_;

    /// The type of the remote indices information.
    typedef Dune::RemoteIndices<ParallelIndexSet> RemoteIndices;

    /// The remote index information of the cells.
    RemoteIndices cell_remote_indices_;

    /// Communication interfaces for the cells, one per InterfaceType.
    std::tuple<Interface,Interface,Interface,Interface,Interface> cell_interfaces_;
    /*
    // code deactivated, because users cannot access face indices and therefore
    // communication on faces makes no sense!
    std::tuple<InterfaceMap,InterfaceMap,InterfaceMap,InterfaceMap,InterfaceMap>
    face_interfaces_;
    */
    /// Communication interfaces for the points, one per InterfaceType.
    std::tuple<InterfaceMap,InterfaceMap,InterfaceMap,InterfaceMap,InterfaceMap>
    point_interfaces_;
    /// Interface for gathering and scattering cell data.
    InterfaceMap cell_gather_scatter_interface;
    /// Interface for gathering and scattering point data.
    InterfaceMap point_gather_scatter_interface;

#endif

    // Return the geometry vector corresponding to the given codim.
    template <int codim>
    const EntityVariable<Geometry<3 - codim, 3>, codim>& geomVector() const
    {
        return geometry_.geomVector<codim>();
    }

    friend class Dune::CpGrid;
    template<int> friend class Entity;
    template<int> friend class EntityRep;
    template<int> friend class EntityPointer;
    friend class Intersection;
    friend class PartitionTypeIndicator;
};

#if HAVE_MPI

namespace
{
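// The case labels below mirror the numeric values of Dune's InterfaceType
// enumerators, from InteriorBorder_InteriorBorder_Interface == 0 up to
// All_All_Interface == 4 (see dune/grid/common/gridenums.hh).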
template<class T>
T& getInterface(InterfaceType iftype,
                std::tuple<T,T,T,T,T>& interfaces)
{
    switch(iftype)
    {
    case 0:
        return std::get<0>(interfaces);
    case 1:
        return std::get<1>(interfaces);
    case 2:
        return std::get<2>(interfaces);
    case 3:
        return std::get<3>(interfaces);
    case 4:
        return std::get<4>(interfaces);
    }
    OPM_THROW(std::runtime_error, "Invalid Interface type was used during communication");
}

} // end unnamed namespace

template<int codim, class DataHandle>
void CpGridData::communicateCodim(DataHandle& data, CommunicationDirection dir,
                                  const Interface& interface)
{
    this->template communicateCodim<codim>(data, dir, interface.interfaces());
}

template<int codim, class DataHandle>
void CpGridData::communicateCodim(DataHandle& data, CommunicationDirection dir,
                                  const InterfaceMap& interface)
{
    if ( interface.empty() )
    {
        // The communication interface is empty, do nothing.
        // Otherwise we will produce a memory error in
        // VariableSizeCommunicator prior to DUNE 2.4.
        return;
    }
    Entity2IndexDataHandle<DataHandle, codim> data_wrapper(*this, data);
    VariableSizeCommunicator<> comm(ccobj_, interface);
    if(dir == ForwardCommunication)
        comm.forward(data_wrapper);
    else
        comm.backward(data_wrapper);
}
#endif

template<class DataHandle>
void CpGridData::communicate(DataHandle& data, InterfaceType iftype,
                             CommunicationDirection dir)
{
#if HAVE_MPI
    if(data.contains(3,0))
        communicateCodim<0>(data, dir, getInterface(iftype, cell_interfaces_));
    if(data.contains(3,3))
        communicateCodim<3>(data, dir, getInterface(iftype, point_interfaces_));
#else
    // Suppress warnings for unused arguments.
    (void) data;
    (void) iftype;
    (void) dir;
#endif
}

} // end namespace cpgrid
} // end namespace Dune

#if HAVE_MPI
#include <dune/grid/cpgrid/Entity.hpp>
#include <dune/grid/cpgrid/Indexsets.hpp>

namespace Dune {
namespace cpgrid {

namespace mover
{
/// A buffer for writing data items sequentially and reading them back.
template<class T>
class MoveBuffer
{
    friend class Dune::cpgrid::CpGridData;
public:
    void read(T& data)
    {
        data = buffer_[index_++];
    }
    void write(const T& data)
    {
        buffer_[index_++] = data;
    }
    void reset()
    {
        index_ = 0;
    }
    void resize(std::size_t size)
    {
        buffer_.resize(size);
        index_ = 0;
    }
private:
    std::vector<T> buffer_;
    typename std::vector<T>::size_type index_;
};
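// A minimal sketch of the intended buffer protocol, assuming 'data' is a
// data handle and 'entity' an entity (both hypothetical names):
//
//   MoveBuffer<typename DataHandle::DataType> buffer;
//   buffer.resize(data.size(entity)); // allocate and rewind
//   data.gather(buffer, entity);      // fills the buffer via write()
//   buffer.reset();                   // rewind before reading
//   data.scatter(buffer, entity, data.size(entity)); // drains via read()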

/// Primary template; only the specializations for codim 0, 1 and 3 are used.
template<class DataHandle, int codim>
struct Mover
{
};

/// Base class that moves the data of one entity via gather and scatter
/// through an intermediate buffer.
template<class DataHandle>
struct BaseMover
{
    BaseMover(DataHandle& data)
        : data_(data)
    {}
    template<class E>
    void moveData(const E& from, const E& to)
    {
        std::size_t size = data_.size(from);
        buffer.resize(size);
        data_.gather(buffer, from);
        buffer.reset();
        data_.scatter(buffer, to, size);
    }
    DataHandle& data_;
    MoveBuffer<typename DataHandle::DataType> buffer;
};

/// Mover for cell (codim 0) data.
template<class DataHandle>
struct Mover<DataHandle,0> : public BaseMover<DataHandle>
{
    Mover(DataHandle& data, CpGridData* gatherView,
          CpGridData* scatterView)
        : BaseMover<DataHandle>(data), gatherView_(gatherView), scatterView_(scatterView)
    {}

    void operator()(std::size_t from_cell_index, std::size_t to_cell_index)
    {
        Entity<0> from_entity = Entity<0>(*gatherView_, from_cell_index, true);
        Entity<0> to_entity = Entity<0>(*scatterView_, to_cell_index, true);
        this->moveData(from_entity, to_entity);
    }
    CpGridData* gatherView_;
    CpGridData* scatterView_;
};

/// Mover for face (codim 1) data; moves the data of all faces of a cell.
template<class DataHandle>
struct Mover<DataHandle,1> : public BaseMover<DataHandle>
{
    Mover(DataHandle& data, CpGridData* gatherView,
          CpGridData* scatterView)
        : BaseMover<DataHandle>(data), gatherView_(gatherView), scatterView_(scatterView)
    {}

    void operator()(std::size_t from_cell_index, std::size_t to_cell_index)
    {
        typedef typename OrientedEntityTable<0,1>::row_type row_type;
        EntityRep<0> from_cell = EntityRep<0>(from_cell_index, true);
        EntityRep<0> to_cell = EntityRep<0>(to_cell_index, true);
        OrientedEntityTable<0,1>& table = gatherView_->cell_to_face_;
        row_type from_faces = table.operator[](from_cell);
        row_type to_faces = scatterView_->cell_to_face_[to_cell];

        for(int i = 0; i < from_faces.size(); ++i)
            this->moveData(from_faces[i], to_faces[i]);
    }
    CpGridData* gatherView_;
    CpGridData* scatterView_;
};

/// Mover for point (codim 3) data; moves the data of the eight corner
/// points of a cell.
template<class DataHandle>
struct Mover<DataHandle,3> : public BaseMover<DataHandle>
{
    Mover(DataHandle& data, CpGridData* gatherView,
          CpGridData* scatterView)
        : BaseMover<DataHandle>(data), gatherView_(gatherView), scatterView_(scatterView)
    {}
    void operator()(std::size_t from_cell_index, std::size_t to_cell_index)
    {
        const std::array<int,8>& from_cell_points =
            gatherView_->cell_to_point_[from_cell_index];
        const std::array<int,8>& to_cell_points =
            scatterView_->cell_to_point_[to_cell_index];
        for(std::size_t i = 0; i < 8; ++i)
        {
            this->moveData(Entity<3>(*gatherView_, from_cell_points[i], true),
                           Entity<3>(*scatterView_, to_cell_points[i], true));
        }
    }
    CpGridData* gatherView_;
    CpGridData* scatterView_;
};

} // end mover namespace

template<class DataHandle>
void CpGridData::scatterData(DataHandle& data, CpGridData* global_data,
                             CpGridData* distributed_data)
{
#if HAVE_MPI
    if(data.contains(3,0))
        scatterCodimData<0>(data, global_data, distributed_data);
    if(data.contains(3,3))
        scatterCodimData<3>(data, global_data, distributed_data);
#endif
}

template<int codim, class DataHandle>
void CpGridData::scatterCodimData(DataHandle& data, CpGridData* global_data,
                                  CpGridData* distributed_data)
{
    CpGridData *gather_view, *scatter_view;
    gather_view = global_data;
    scatter_view = distributed_data;

    mover::Mover<DataHandle,codim> mover(data, gather_view, scatter_view);

    typedef typename ParallelIndexSet::const_iterator Iter;
    for(Iter index = distributed_data->cell_indexset_.begin(),
            end = distributed_data->cell_indexset_.end();
        index != end; ++index)
    {
        std::size_t from = index->global();
        std::size_t to = index->local();
        mover(from, to);
    }
}
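
// Note: the loop above relies on each index set entry's global() index
// being the cell's index in the global (sequential) view, so data can be
// copied directly from the global to the distributed representation.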

namespace
{

/// Apply func to every interior entity of the given codim, passing the
/// corresponding mapping entry and the entity itself.
template<int codim, class T, class F>
void visitInterior(CpGridData& distributed_data, T begin, T endit, F& func)
{
    for(T it = begin; it != endit; ++it)
    {
        Entity<codim> entity(distributed_data, it - begin, true);
        PartitionType pt = entity.partitionType();
        if(pt == Dune::InteriorEntity)
        {
            func(*it, entity);
        }
    }
}

/// Functor recording the global index and data size of each visited entity.
template<class DataHandle>
struct GlobalIndexSizeGatherer
{
    GlobalIndexSizeGatherer(DataHandle& data_,
                            std::vector<int>& ownedGlobalIndices_,
                            std::vector<int>& ownedSizes_)
        : data(data_), ownedGlobalIndices(ownedGlobalIndices_), ownedSizes(ownedSizes_)
    {}

    template<class T, class E>
    void operator()(T& i, E& entity)
    {
        ownedGlobalIndices.push_back(i);
        ownedSizes.push_back(data.size(entity));
    }
    DataHandle& data;
    std::vector<int>& ownedGlobalIndices;
    std::vector<int>& ownedSizes;
};

/// Functor gathering the data of each visited entity into a buffer.
template<class DataHandle>
struct DataGatherer
{
    DataGatherer(mover::MoveBuffer<typename DataHandle::DataType>& buffer_,
                 DataHandle& data_)
        : buffer(buffer_), data(data_)
    {}

    template<class T, class E>
    void operator()(T& /* it */, E& entity)
    {
        data.gather(buffer, entity);
    }
    mover::MoveBuffer<typename DataHandle::DataType>& buffer;
    DataHandle& data;
};

} // end unnamed namespace

template<class DataHandle>
void CpGridData::gatherData(DataHandle& data, CpGridData* global_data,
                            CpGridData* distributed_data)
{
#if HAVE_MPI
    if(data.contains(3,0))
        gatherCodimData<0>(data, global_data, distributed_data);
    if(data.contains(3,3))
        gatherCodimData<3>(data, global_data, distributed_data);
#endif
}

template<int codim, class DataHandle>
void CpGridData::gatherCodimData(DataHandle& data, CpGridData* global_data,
                                 CpGridData* distributed_data)
{
#if HAVE_MPI
    // Get the mapping to global index from the global id set.
    const std::vector<int>& mapping =
        distributed_data->global_id_set_->getMapping<codim>();

    // Get the global indices and data size for the entities whose data is
    // to be sent, i.e. the ones that we own.
    std::vector<int> owned_global_indices;
    std::vector<int> owned_sizes;
    owned_global_indices.reserve(mapping.size());
    owned_sizes.reserve(mapping.size());

    GlobalIndexSizeGatherer<DataHandle> gisg(data, owned_global_indices, owned_sizes);
    visitInterior<codim>(*distributed_data, mapping.begin(), mapping.end(), gisg);

    // Communicate the number of indices that each processor sends.
    int no_indices = owned_sizes.size();
    // We will take the address of the first element for MPI_Allgather below.
    // Make sure the containers have such an element.
    if ( owned_global_indices.empty() )
        owned_global_indices.resize(1);
    if ( owned_sizes.empty() )
        owned_sizes.resize(1);
    std::vector<int> no_indices_to_recv(distributed_data->ccobj_.size());
    distributed_data->ccobj_.allgather(&no_indices, 1, &(no_indices_to_recv[0]));
    // Compute the size of a vector capable of receiving all indices,
    // and allgather the global indices and the sizes.
    // Calculate displacements.
    std::vector<int> displ(distributed_data->ccobj_.size()+1, 0);
    std::transform(displ.begin(), displ.end()-1, no_indices_to_recv.begin(), displ.begin()+1,
                   std::plus<int>());
    int global_size = displ[displ.size()-1]; //+no_indices_to_recv[displ.size()-1];
    std::vector<int> global_indices(global_size);
    std::vector<int> global_sizes(global_size);
    MPI_Allgatherv(&(owned_global_indices[0]), no_indices, MPITraits<int>::getType(),
                   &(global_indices[0]), &(no_indices_to_recv[0]), &(displ[0]),
                   MPITraits<int>::getType(),
                   distributed_data->ccobj_);
    MPI_Allgatherv(&(owned_sizes[0]), no_indices, MPITraits<int>::getType(),
                   &(global_sizes[0]), &(no_indices_to_recv[0]), &(displ[0]),
                   MPITraits<int>::getType(),
                   distributed_data->ccobj_);
    std::vector<int>().swap(owned_global_indices); // Free data for reuse.
    // Compute the number of data items to send.
    std::vector<int> no_data_send(distributed_data->ccobj_.size());
    for(typename std::vector<int>::iterator begin = no_data_send.begin(),
            i = begin, end = no_data_send.end(); i != end; ++i)
        *i = std::accumulate(global_sizes.begin()+displ[i-begin],
                             global_sizes.begin()+displ[i-begin+1], std::size_t());
    // Free at least some memory that can be reused.
    std::vector<int>().swap(owned_sizes);
    // Compute the displacements for receiving with allgatherv.
    displ[0] = 0;
    std::transform(displ.begin(), displ.end()-1, no_data_send.begin(), displ.begin()+1,
                   std::plus<std::size_t>());
    // Compute the number of data items we will receive.
    int no_data_recv = displ[displ.size()-1]; //+global_sizes[displ.size()-1];

    // Collect the data to send, gather it.
    mover::MoveBuffer<typename DataHandle::DataType> local_data_buffer, global_data_buffer;
    if ( no_data_send[distributed_data->ccobj_.rank()] )
    {
        local_data_buffer.resize(no_data_send[distributed_data->ccobj_.rank()]);
    }
    else
    {
        local_data_buffer.resize(1);
    }
    global_data_buffer.resize(no_data_recv);

    DataGatherer<DataHandle> gatherer(local_data_buffer, data);
    visitInterior<codim>(*distributed_data, mapping.begin(), mapping.end(), gatherer);
    MPI_Allgatherv(&(local_data_buffer.buffer_[0]), no_data_send[distributed_data->ccobj_.rank()],
                   MPITraits<typename DataHandle::DataType>::getType(),
                   &(global_data_buffer.buffer_[0]), &(no_data_send[0]), &(displ[0]),
                   MPITraits<typename DataHandle::DataType>::getType(),
                   distributed_data->ccobj_);
    Entity2IndexDataHandle<DataHandle, codim> edata(*global_data, data);
    int offset = 0;
    for(int i = 0; i < codim; ++i)
        offset += global_data->size(i);

    typename std::vector<int>::const_iterator s = global_sizes.begin();
    for(typename std::vector<int>::const_iterator i = global_indices.begin(),
            end = global_indices.end();
        i != end; ++s, ++i)
    {
        edata.scatter(global_data_buffer, *i-offset, *s);
    }
#endif
}

} // end namespace cpgrid
} // end namespace Dune

#endif // HAVE_MPI

#endif // OPM_CPGRIDDATA_HEADER