Actual source code: dasub.c
/*
  Code for manipulating distributed regular arrays in parallel.
*/

#include <petsc/private/dmdaimpl.h>
/*@
  DMDAGetLogicalCoordinate - Returns the i,j,k logical coordinate of the mesh point closest to a given x,y,z point in the coordinates of the DMDA

  Collective on da

  Input Parameters:
+ da - the distributed array
. x - the first physical coordinate
. y - the second physical coordinate
- z - the third physical coordinate

  Output Parameters:
+ II - the first logical coordinate (-1 on processes that do not contain that point)
. JJ - the second logical coordinate (-1 on processes that do not contain that point)
. KK - the third logical coordinate (-1 on processes that do not contain that point)
. X - (optional) the first coordinate of the located grid point
. Y - (optional) the second coordinate of the located grid point
- Z - (optional) the third coordinate of the located grid point

  Level: advanced

  Notes:
  All processors that share the DMDA must call this with the same coordinate values

  Only the first two coordinates are examined by the current implementation, so KK and Z are not set

@*/
PetscErrorCode DMDAGetLogicalCoordinate(DM da,PetscScalar x,PetscScalar y,PetscScalar z,PetscInt *II,PetscInt *JJ,PetscInt *KK,PetscScalar *X,PetscScalar *Y,PetscScalar *Z)
{
  Vec         coors;
  DM          dacoors;
  DMDACoor2d  **c;
  PetscInt    i,j,xs,xm,ys,ym;
  PetscReal   d,D = PETSC_MAX_REAL,Dv;
  PetscMPIInt rank,root;

  *II = -1;
  *JJ = -1;

  DMGetCoordinateDM(da,&dacoors);
  DMDAGetCorners(dacoors,&xs,&ys,NULL,&xm,&ym,NULL);
  DMGetCoordinates(da,&coors);
  DMDAVecGetArrayRead(dacoors,coors,&c);
  /* find the locally owned coordinate point closest to (x,y) */
  for (j=ys; j<ys+ym; j++) {
    for (i=xs; i<xs+xm; i++) {
      d = PetscSqrtReal(PetscRealPart((c[j][i].x - x)*(c[j][i].x - x) + (c[j][i].y - y)*(c[j][i].y - y)));
      if (d < D) {
        D   = d;
        *II = i;
        *JJ = j;
      }
    }
  }

  /* only the process whose local minimum matches the global minimum keeps the point */
  MPIU_Allreduce(&D,&Dv,1,MPIU_REAL,MPIU_MIN,PetscObjectComm((PetscObject)da));
  if (D != Dv) {
    *II  = -1;
    *JJ  = -1;
    rank = 0;
  } else {
    *X = c[*JJ][*II].x;
    *Y = c[*JJ][*II].y;
    MPI_Comm_rank(PetscObjectComm((PetscObject)da),&rank);
    rank++;
  }

  /* every process contributes rank+1 if it kept the point and 0 otherwise, so the sum identifies the owner, which then broadcasts the coordinates */
  MPIU_Allreduce(&rank,&root,1,MPI_INT,MPI_SUM,PetscObjectComm((PetscObject)da));
  root--;
  MPI_Bcast(X,1,MPIU_SCALAR,root,PetscObjectComm((PetscObject)da));
  MPI_Bcast(Y,1,MPIU_SCALAR,root,PetscObjectComm((PetscObject)da));
  DMDAVecRestoreArrayRead(dacoors,coors,&c);
  return 0;
}
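
/*
  Usage sketch (illustrative; not part of the PETSc source): locate the grid point of a
  2d DMDA nearest to the physical point (0.5,0.5) and report it on the owning process.
  Error checking is omitted, and the DMDA "da" is assumed to have been created and given
  coordinates elsewhere.

    PetscInt    i,j,k;
    PetscScalar X,Y,Z;

    DMDAGetLogicalCoordinate(da,0.5,0.5,0.0,&i,&j,&k,&X,&Y,&Z);
    if (i >= 0) {  // negative indices mean this process does not own the point
      PetscPrintf(PETSC_COMM_SELF,"nearest point (%D,%D) at (%g,%g)\n",i,j,(double)PetscRealPart(X),(double)PetscRealPart(Y));
    }
*/
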
/*@
  DMDAGetRay - Returns a vector on process zero that contains a row or column of the values in a DMDA vector

  Collective on da

  Input Parameters:
+ da - the distributed array
. dir - Cartesian direction, either DM_X, DM_Y, or DM_Z
- gp - global grid point number in this direction

  Output Parameters:
+ newvec - the new vector that can hold the values (size zero on all processes except process 0)
- scatter - the VecScatter that will map from the original vector to the slice

  Level: advanced

  Notes:
  All processors that share the DMDA must call this with the same gp value

@*/
PetscErrorCode DMDAGetRay(DM da,DMDirection dir,PetscInt gp,Vec *newvec,VecScatter *scatter)
{
  PetscMPIInt rank;
  DM_DA       *dd = (DM_DA*)da->data;
  IS          is;
  AO          ao;
  Vec         vec;
  PetscInt    *indices,i,j;

  MPI_Comm_rank(PetscObjectComm((PetscObject) da), &rank);
  DMDAGetAO(da, &ao);
  if (rank == 0) {
    if (da->dim == 1) {
      if (dir == DM_X) {
        /* 1d: the ray is the dd->w degrees of freedom stored at grid point gp */
        PetscMalloc1(dd->w, &indices);
        indices[0] = dd->w*gp;
        for (i = 1; i < dd->w; ++i) indices[i] = indices[i-1] + 1;
        AOApplicationToPetsc(ao, dd->w, indices);
        VecCreate(PETSC_COMM_SELF, newvec);
        VecSetBlockSize(*newvec, dd->w);
        VecSetSizes(*newvec, dd->w, PETSC_DETERMINE);
        VecSetType(*newvec, VECSEQ);
        ISCreateGeneral(PETSC_COMM_SELF, dd->w, indices, PETSC_OWN_POINTER, &is);
      } else SETERRQ(PetscObjectComm((PetscObject) da), PETSC_ERR_ARG_OUTOFRANGE, "Unknown DMDirection");
    } else {
      if (dir == DM_Y) {
        /* 2d row (fixed y = gp): dd->M*dd->w contiguous entries in the natural ordering */
        PetscMalloc1(dd->w*dd->M,&indices);
        indices[0] = gp*dd->M*dd->w;
        for (i=1; i<dd->M*dd->w; i++) indices[i] = indices[i-1] + 1;

        AOApplicationToPetsc(ao,dd->M*dd->w,indices);
        VecCreate(PETSC_COMM_SELF,newvec);
        VecSetBlockSize(*newvec,dd->w);
        VecSetSizes(*newvec,dd->M*dd->w,PETSC_DETERMINE);
        VecSetType(*newvec,VECSEQ);
        ISCreateGeneral(PETSC_COMM_SELF,dd->w*dd->M,indices,PETSC_OWN_POINTER,&is);
      } else if (dir == DM_X) {
        /* 2d column (fixed x = gp): dd->w entries per grid line, strided by dd->w*dd->M in the natural ordering */
        PetscMalloc1(dd->w*dd->N,&indices);
        indices[0] = dd->w*gp;
        for (j=1; j<dd->w; j++) indices[j] = indices[j-1] + 1;
        for (i=1; i<dd->N; i++) {
          indices[i*dd->w] = indices[i*dd->w-1] + dd->w*dd->M - dd->w + 1;
          for (j=1; j<dd->w; j++) indices[i*dd->w + j] = indices[i*dd->w + j - 1] + 1;
        }
        AOApplicationToPetsc(ao,dd->w*dd->N,indices);
        VecCreate(PETSC_COMM_SELF,newvec);
        VecSetBlockSize(*newvec,dd->w);
        VecSetSizes(*newvec,dd->N*dd->w,PETSC_DETERMINE);
        VecSetType(*newvec,VECSEQ);
        ISCreateGeneral(PETSC_COMM_SELF,dd->w*dd->N,indices,PETSC_OWN_POINTER,&is);
      } else SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Unknown DMDirection");
    }
  } else {
    VecCreateSeq(PETSC_COMM_SELF, 0, newvec);
    ISCreateGeneral(PETSC_COMM_SELF, 0, NULL, PETSC_COPY_VALUES, &is);
  }
  DMGetGlobalVector(da, &vec);
  VecScatterCreate(vec, is, *newvec, NULL, scatter);
  DMRestoreGlobalVector(da, &vec);
  ISDestroy(&is);
  return 0;
}
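
/*
  Usage sketch (illustrative; not part of the PETSc source): gather row j = 2 of a 2d DMDA
  vector onto process zero.  Error checking is omitted; the DMDA "da" and its global vector
  "gvec" are assumed to exist.

    Vec        ray;
    VecScatter scatter;

    DMDAGetRay(da,DM_Y,2,&ray,&scatter);
    VecScatterBegin(scatter,gvec,ray,INSERT_VALUES,SCATTER_FORWARD);
    VecScatterEnd(scatter,gvec,ray,INSERT_VALUES,SCATTER_FORWARD);
    // "ray" now holds the row values on process 0 (it has length zero elsewhere)
    VecScatterDestroy(&scatter);
    VecDestroy(&ray);
*/
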
/*@C
  DMDAGetProcessorSubset - Returns a communicator consisting only of the
  processors in a DMDA that own a particular global x, y, or z grid point
  (corresponding to a logical plane in a 3D grid or a line in a 2D grid).

  Collective on da

  Input Parameters:
+ da - the distributed array
. dir - Cartesian direction, either DM_X, DM_Y, or DM_Z
- gp - global grid point number in this direction

  Output Parameter:
. comm - new communicator

  Level: advanced

  Notes:
  All processors that share the DMDA must call this with the same gp value

  After use, comm should be freed with MPI_Comm_free()

  This routine is particularly useful to compute boundary conditions
  or other application-specific calculations that require manipulating
  sets of data throughout a logical plane of grid points.

  Not supported from Fortran

@*/
PetscErrorCode DMDAGetProcessorSubset(DM da,DMDirection dir,PetscInt gp,MPI_Comm *comm)
{
  MPI_Group   group,subgroup;
  PetscInt    i,ict,flag,*owners,xs,xm,ys,ym,zs,zm;
  PetscMPIInt size,*ranks = NULL;
  DM_DA       *dd = (DM_DA*)da->data;

  flag = 0;
  DMDAGetCorners(da,&xs,&ys,&zs,&xm,&ym,&zm);
  MPI_Comm_size(PetscObjectComm((PetscObject)da),&size);

  /* flag is 1 on the processes whose owned portion of the grid contains point gp in direction dir */
  if (dir == DM_Z) {
    if (gp >= zs && gp < zs+zm) flag = 1;
  } else if (dir == DM_Y) {
    if (gp >= ys && gp < ys+ym) flag = 1;
  } else if (dir == DM_X) {
    if (gp >= xs && gp < xs+xm) flag = 1;
  } else SETERRQ(PetscObjectComm((PetscObject)da),PETSC_ERR_ARG_OUTOFRANGE,"Invalid direction");

  /* gather the flags from all processes and collect the ranks that own the point */
  PetscMalloc2(size,&owners,size,&ranks);
  MPI_Allgather(&flag,1,MPIU_INT,owners,1,MPIU_INT,PetscObjectComm((PetscObject)da));
  ict  = 0;
  PetscInfo(da,"DMDAGetProcessorSubset: dim=%D, direction=%d, procs: ",da->dim,(int)dir);
  for (i=0; i<size; i++) {
    if (owners[i]) {
      ranks[ict] = i; ict++;
      PetscInfo(da,"%D ",i);
    }
  }
  PetscInfo(da,"\n");
  MPI_Comm_group(PetscObjectComm((PetscObject)da),&group);
  MPI_Group_incl(group,ict,ranks,&subgroup);
  MPI_Comm_create(PetscObjectComm((PetscObject)da),subgroup,comm);
  MPI_Group_free(&subgroup);
  MPI_Group_free(&group);
  PetscFree2(owners,ranks);
  return 0;
}
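
/*
  Usage sketch (illustrative; not part of the PETSc source): obtain the communicator of the
  processes that own the plane z = 4 of a 3d DMDA and sum a local value over it.  Because
  MPI_Comm_create() returns MPI_COMM_NULL on processes outside the chosen group, the new
  communicator must be tested before use.  "da" and "localSum" are assumed to exist; error
  checking is omitted.

    MPI_Comm    zcomm;
    PetscScalar planeSum;

    DMDAGetProcessorSubset(da,DM_Z,4,&zcomm);
    if (zcomm != MPI_COMM_NULL) {
      MPIU_Allreduce(&localSum,&planeSum,1,MPIU_SCALAR,MPIU_SUM,zcomm);
      MPI_Comm_free(&zcomm);
    }
*/
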
/*@C
  DMDAGetProcessorSubsets - Returns communicators consisting only of the
  processors in a DMDA adjacent in a particular dimension,
  corresponding to a logical plane in a 3D grid or a line in a 2D grid.

  Collective on da

  Input Parameters:
+ da - the distributed array
- dir - Cartesian direction, either DM_X, DM_Y, or DM_Z

  Output Parameter:
. subcomm - new communicator

  Level: advanced

  Notes:
  This routine is useful for distributing one-dimensional data in a tensor product grid.

  After use, subcomm should be freed with MPI_Comm_free()

  Not supported from Fortran

@*/
PetscErrorCode DMDAGetProcessorSubsets(DM da, DMDirection dir, MPI_Comm *subcomm)
{
  MPI_Comm    comm;
  MPI_Group   group, subgroup;
  PetscInt    subgroupSize = 0;
  PetscInt    *firstPoints;
  PetscMPIInt size, *subgroupRanks = NULL;
  PetscInt    xs, xm, ys, ym, zs, zm, firstPoint, p;

  PetscObjectGetComm((PetscObject)da,&comm);
  DMDAGetCorners(da, &xs, &ys, &zs, &xm, &ym, &zm);
  MPI_Comm_size(comm, &size);
  /* processes whose owned subdomain starts at the same index in direction dir end up in the same subcommunicator */
  if (dir == DM_Z) {
    firstPoint = zs;
  } else if (dir == DM_Y) {
    firstPoint = ys;
  } else if (dir == DM_X) {
    firstPoint = xs;
  } else SETERRQ(comm,PETSC_ERR_ARG_OUTOFRANGE,"Invalid direction");

  PetscMalloc2(size, &firstPoints, size, &subgroupRanks);
  MPI_Allgather(&firstPoint, 1, MPIU_INT, firstPoints, 1, MPIU_INT, comm);
  PetscInfo(da,"DMDAGetProcessorSubsets: dim=%D, direction=%d, procs: ",da->dim,(int)dir);
  for (p = 0; p < size; ++p) {
    if (firstPoints[p] == firstPoint) {
      subgroupRanks[subgroupSize++] = p;
      PetscInfo(da, "%D ", p);
    }
  }
  PetscInfo(da, "\n");
  MPI_Comm_group(comm, &group);
  MPI_Group_incl(group, subgroupSize, subgroupRanks, &subgroup);
  MPI_Comm_create(comm, subgroup, subcomm);
  MPI_Group_free(&subgroup);
  MPI_Group_free(&group);
  PetscFree2(firstPoints, subgroupRanks);
  return 0;
}
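
/*
  Usage sketch (illustrative; not part of the PETSc source): on every process, build the
  communicator of all processes that own the same range of z indices and average a local
  value across that logical plane.  Unlike DMDAGetProcessorSubset(), every process receives
  a valid subcommunicator here.  "da" and "localValue" are assumed to exist; error checking
  is omitted.

    MPI_Comm    planeComm;
    PetscScalar planeAvg;
    PetscMPIInt planeSize;

    DMDAGetProcessorSubsets(da,DM_Z,&planeComm);
    MPI_Comm_size(planeComm,&planeSize);
    MPIU_Allreduce(&localValue,&planeAvg,1,MPIU_SCALAR,MPIU_SUM,planeComm);
    planeAvg /= planeSize;
    MPI_Comm_free(&planeComm);
*/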