/*
 * Copyright (c) 2003, 2007-11 Matteo Frigo
 * Copyright (c) 2003, 2007-11 Massachusetts Institute of Technology
 * Copyright (c) 2012 Michael Pippig
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 */

/* Distributed transposes using a sequence of carefully scheduled
   pairwise exchanges.  This has the advantage that it can be done
   in-place, or out-of-place while preserving the input, using buffer
   space proportional to the local size divided by the number of
   processes (i.e. to the total array size divided by the number of
   processes squared). */
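/* For example, transposing a 1024 x 1024 array over 16 processes
   stores 1024*1024/16 = 65536 elements per process, while the
   exchange buffer used below holds only 65536/16 = 4096 of them. */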

#include "mpi-transpose.h"
#include <string.h>

typedef struct {
     solver super;
     int preserve_input; /* preserve input even if DESTROY_INPUT was passed */
} S;

typedef struct {
     plan_mpi_transpose super;

     plan *cld1, *cld2, *cld2rest, *cld3;
     INT rest_Ioff, rest_Ooff;

     int n_pes, my_pe, *sched;
     INT *send_block_sizes, *send_block_offsets;
     INT *recv_block_sizes, *recv_block_offsets;
     MPI_Comm comm;
     int preserve_input;
} P;

static void transpose_chunks(int *sched, int n_pes, int my_pe,
                             INT *sbs, INT *sbo, INT *rbs, INT *rbo,
                             MPI_Comm comm,
                             R *I, R *O)
{
     if (sched) {
          int i;

          /* TODO: explore non-synchronous send/recv? */

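          /* Tag scheme: this process sends with tag
             (my_pe * n_pes + pe), which is exactly the tag that
             process pe posts for its matching receive, so the
             pairwise exchanges pair up unambiguously; the & 0xffff
             mask merely keeps tags in a modest range when n_pes
             is large. */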
          if (I == O) {
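               /* one buffer of sbs[0] elements suffices: XM(block)
                  makes block 0 at least as large as any other block,
                  so sbs[0] bounds every send-block size */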
               R *buf = (R*) MALLOC(sizeof(R) * sbs[0], BUFFERS);

               for (i = 0; i < n_pes; ++i) {
                    int pe = sched[i];
                    if (my_pe == pe) {
                         if (rbo[pe] != sbo[pe])
                              memmove(O + rbo[pe], O + sbo[pe],
                                      sbs[pe] * sizeof(R));
                    }
                    else {
                         memcpy(buf, O + sbo[pe], sbs[pe] * sizeof(R));
                         MPI_Sendrecv(buf, (int) (sbs[pe]), FFTW_MPI_TYPE,
                                      pe, (my_pe * n_pes + pe) & 0xffff,
                                      O + rbo[pe], (int) (rbs[pe]),
                                      FFTW_MPI_TYPE,
                                      pe, (pe * n_pes + my_pe) & 0xffff,
                                      comm, MPI_STATUS_IGNORE);
                    }
               }

               X(ifree)(buf);
          }
          else { /* I != O */
               for (i = 0; i < n_pes; ++i) {
                    int pe = sched[i];
                    if (my_pe == pe)
                         memcpy(O + rbo[pe], I + sbo[pe], sbs[pe] * sizeof(R));
                    else
                         MPI_Sendrecv(I + sbo[pe], (int) (sbs[pe]),
                                      FFTW_MPI_TYPE,
                                      pe, (my_pe * n_pes + pe) & 0xffff,
                                      O + rbo[pe], (int) (rbs[pe]),
                                      FFTW_MPI_TYPE,
                                      pe, (pe * n_pes + my_pe) & 0xffff,
                                      comm, MPI_STATUS_IGNORE);
               }
          }
     }
}

/* Transpose locally to get contiguous chunks; this may take two
   transposes if the block sizes are unequal (3 subplans, two of
   which operate on disjoint data). */
static void apply_pretranspose(
     const P *ego, R *I, R *O
     )
{
     plan_rdft *cld2, *cld2rest, *cld3;

     cld3 = (plan_rdft *) ego->cld3;
     if (cld3) /* cld3 is non-NULL only when the pretranspose is in
                  place (I == O here), so this transposes I in place */
          cld3->apply(ego->cld3, O, O);
     /* else the input either is already transposed (TRANSPOSED_IN)
        or can be gathered into chunks by cld2/cld2rest alone */

     cld2 = (plan_rdft *) ego->cld2;
     cld2->apply(ego->cld2, I, O);
     cld2rest = (plan_rdft *) ego->cld2rest;
     if (cld2rest) {
          cld2rest->apply(ego->cld2rest,
                          I + ego->rest_Ioff, O + ego->rest_Ooff);
     }
}

static void apply(const plan *ego_, R *I, R *O)
{
     const P *ego = (const P *) ego_;
     plan_rdft *cld1 = (plan_rdft *) ego->cld1;

     if (cld1) {
          /* transpose locally to get contiguous chunks */
          apply_pretranspose(ego, I, O);

          if (ego->preserve_input) I = O;

          /* transpose chunks globally */
          transpose_chunks(ego->sched, ego->n_pes, ego->my_pe,
                           ego->send_block_sizes, ego->send_block_offsets,
                           ego->recv_block_sizes, ego->recv_block_offsets,
                           ego->comm, O, I);

          /* transpose locally to get non-transposed output */
          cld1->apply(ego->cld1, I, O);
     } /* else TRANSPOSED_OUT is true and user wants O transposed */
     else if (ego->preserve_input) {
          /* transpose locally to get contiguous chunks */
          apply_pretranspose(ego, I, O);

          /* transpose chunks globally */
          transpose_chunks(ego->sched, ego->n_pes, ego->my_pe,
                           ego->send_block_sizes, ego->send_block_offsets,
                           ego->recv_block_sizes, ego->recv_block_offsets,
                           ego->comm, O, O);
     }
     else {
          /* transpose locally to get contiguous chunks */
          apply_pretranspose(ego, I, I);

          /* transpose chunks globally */
          transpose_chunks(ego->sched, ego->n_pes, ego->my_pe,
                           ego->send_block_sizes, ego->send_block_offsets,
                           ego->recv_block_sizes, ego->recv_block_offsets,
                           ego->comm, I, O);
     }
}

static int applicable(const S *ego, const problem *p_,
                      const planner *plnr)
{
     const problem_mpi_transpose *p = (const problem_mpi_transpose *) p_;
     /* Note: this is *not* UGLY for out-of-place, destroy-input plans;
        the planner often prefers transpose-pairwise to transpose-alltoall,
        at least with LAM MPI on my machine. */
     return (1
             && (!ego->preserve_input || (!NO_DESTROY_INPUTP(plnr)
                                          && p->I != p->O))
             && ONLY_TRANSPOSEDP(p->flags));
}

static void awake(plan *ego_, enum wakefulness wakefulness)
{
     P *ego = (P *) ego_;
     X(plan_awake)(ego->cld1, wakefulness);
     X(plan_awake)(ego->cld2, wakefulness);
     X(plan_awake)(ego->cld2rest, wakefulness);
     X(plan_awake)(ego->cld3, wakefulness);
}

static void destroy(plan *ego_)
{
     P *ego = (P *) ego_;
     X(ifree0)(ego->sched);
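     /* sbo, rbs, and rbo point into the same allocation as
        send_block_sizes, so one free releases all four arrays */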
     X(ifree0)(ego->send_block_sizes);
     MPI_Comm_free(&ego->comm);
     X(plan_destroy_internal)(ego->cld3);
     X(plan_destroy_internal)(ego->cld2rest);
     X(plan_destroy_internal)(ego->cld2);
     X(plan_destroy_internal)(ego->cld1);
}

static void print(const plan *ego_, printer *p)
{
     const P *ego = (const P *) ego_;
     p->print(p, "(mpi-transpose-pairwise-transposed%s%(%p%)%(%p%)%(%p%)%(%p%))",
              ego->preserve_input == 2 ? "/p" : "",
              ego->cld1, ego->cld2, ego->cld2rest, ego->cld3);
}

/* Given a process which_pe and a number of processes npes, fills
   the array sched[npes] with a sequence of processes to communicate
   with for a deadlock-free, optimum-overlap all-to-all communication.
   (All processes must call this routine to get their own schedules.)
   The schedule can be re-ordered arbitrarily as long as all processes
   apply the same permutation to their schedules.

   The algorithm here is based upon the one described in:
       J. A. M. Schreuder, "Constructing timetables for sport
       competitions," Mathematical Programming Study 13, pp. 58-67 (1980).
   In a sport competition, you have N teams and want every team to
   play every other team in as short a time as possible (maximum overlap
   between games).  This timetabling problem is therefore identical
   to that of an all-to-all communications problem.  In our case, there
   is one wrinkle: as part of the schedule, the process must do
   some data transfer with itself (local data movement), analogous
   to a requirement that each team "play itself" in addition to other
   teams.  With this wrinkle, it turns out that an optimal timetable
   (N parallel games) can be constructed for any N, not just for even
   N as in the original problem described by Schreuder.
*/
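/* For instance, working through the code below by hand for npes = 4
   gives the round-by-round schedules

        pe 0:  0 3 2 1
        pe 1:  1 2 3 0
        pe 2:  2 1 0 3
        pe 3:  3 0 1 2

   Reading down the columns, every round pairs all processes off,
   with the self-exchange placed in round 0 when npes is even:
   (0,3)(1,2), then (0,2)(1,3), then (0,1)(2,3). */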
static void fill1_comm_sched(int *sched, int which_pe, int npes)
{
     int pe, i, n, s = 0;
     A(which_pe >= 0 && which_pe < npes);
     if (npes % 2 == 0) {
          n = npes;
          sched[s++] = which_pe;
     }
     else
          n = npes + 1;
     for (pe = 0; pe < n - 1; ++pe) {
          if (npes % 2 == 0) {
               if (pe == which_pe) sched[s++] = npes - 1;
               else if (npes - 1 == which_pe) sched[s++] = pe;
          }
          else if (pe == which_pe) sched[s++] = pe;

          if (pe != which_pe && which_pe < n - 1) {
               i = (pe - which_pe + (n - 1)) % (n - 1);
               if (i < n/2)
                    sched[s++] = (pe + i) % (n - 1);

               i = (which_pe - pe + (n - 1)) % (n - 1);
               if (i < n/2)
                    sched[s++] = (pe - i + (n - 1)) % (n - 1);
          }
     }
     A(s == npes);
}

/* Sort the communication schedule sched for npes so that the schedule
   on process sortpe is ascending or descending (!ascending).  This is
   necessary to allow in-place transposes when the problem does not
   divide equally among the processes.  In this case there is one
   process where the incoming blocks are bigger/smaller than the
   outgoing blocks and thus have to be received in
   descending/ascending order, respectively, to avoid overwriting data
   before it is sent. */
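/* How it works: fill1_comm_sched gives sortpe's own partner sequence,
   so sortsched[i] is the peer that sortpe talks to in round i; moving
   round i to position sortsched[i] (ascending) or npes - 1 -
   sortsched[i] (descending) makes sortpe's partner sequence monotonic.
   Every process applies this same permutation of rounds, which the
   schedule explicitly permits. */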
static void sort1_comm_sched(int *sched, int npes, int sortpe, int ascending)
{
     int *sortsched, i;
     sortsched = (int *) MALLOC(npes * sizeof(int) * 2, OTHER);
     fill1_comm_sched(sortsched, sortpe, npes);
     if (ascending)
          for (i = 0; i < npes; ++i)
               sortsched[npes + sortsched[i]] = sched[i];
     else
          for (i = 0; i < npes; ++i)
               sortsched[2*npes - 1 - sortsched[i]] = sched[i];
     for (i = 0; i < npes; ++i)
          sched[i] = sortsched[npes + i];
     X(ifree)(sortsched);
}

/* make the plans to do the pre-MPI transpositions (shared with
   transpose-alltoall-transposed) */
int XM(mkplans_pretranspose)(const problem_mpi_transpose *p, planner *plnr,
                             R *I, R *O, int my_pe,
                             plan **cld2, plan **cld2rest, plan **cld3,
                             INT *rest_Ioff, INT *rest_Ooff)
{
     INT vn = p->vn;
     INT b = XM(block)(p->nx, p->block, my_pe);
     INT bt = p->tblock;
     INT nyb = p->ny / bt; /* number of equal-sized blocks */
     INT nyr = p->ny - nyb * bt; /* leftover rows after equal blocks */

     *cld2 = *cld2rest = *cld3 = NULL;
     *rest_Ioff = *rest_Ooff = 0;

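     /* Two cases: if the input is not already transposed and either
        the blocks divide p->ny evenly or we may work out of place,
        a single rank-3 gather (plus a rank-2 plan for the leftover
        columns) produces the contiguous send chunks directly.
        Otherwise we go via the transposed ny x b x vn layout: a
        rank-4 plan plus a rest plan form the chunks, preceded, when
        the input is not transposed, by an in-place transpose (cld3)
        of I. */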
     if (!(p->flags & TRANSPOSED_IN) && (nyr == 0 || I != O)) {
          INT ny = p->ny * vn;
          bt *= vn;
          *cld2 = X(mkplan_f_d)(plnr,
                                X(mkproblem_rdft_0_d)(X(mktensor_3d)
                                                      (nyb, bt, b * bt,
                                                       b, ny, bt,
                                                       bt, 1, 1),
                                                      I, O),
                                0, 0, NO_SLOW);
          if (!*cld2) goto nada;

          if (nyr > 0) {
               *rest_Ioff = nyb * bt;
               *rest_Ooff = nyb * b * bt;
               bt = nyr * vn;
               *cld2rest = X(mkplan_f_d)(plnr,
                                         X(mkproblem_rdft_0_d)(X(mktensor_2d)
                                                               (b, ny, bt,
                                                                bt, 1, 1),
                                                               I + *rest_Ioff,
                                                               O + *rest_Ooff),
                                         0, 0, NO_SLOW);
               if (!*cld2rest) goto nada;
          }
     }
     else {
          *cld2 = X(mkplan_f_d)(plnr,
                                X(mkproblem_rdft_0_d)(
                                     X(mktensor_4d)
                                     (nyb, b * bt * vn, b * bt * vn,
                                      b, vn, bt * vn,
                                      bt, b * vn, vn,
                                      vn, 1, 1),
                                     I, O),
                                0, 0, NO_SLOW);
          if (!*cld2) goto nada;

          *rest_Ioff = *rest_Ooff = nyb * bt * b * vn;
          *cld2rest = X(mkplan_f_d)(plnr,
                                    X(mkproblem_rdft_0_d)(
                                         X(mktensor_3d)
                                         (b, vn, nyr * vn,
                                          nyr, b * vn, vn,
                                          vn, 1, 1),
                                         I + *rest_Ioff, O + *rest_Ooff),
                                    0, 0, NO_SLOW);
          if (!*cld2rest) goto nada;

          if (!(p->flags & TRANSPOSED_IN)) {
               *cld3 = X(mkplan_f_d)(plnr,
                                     X(mkproblem_rdft_0_d)(
                                          X(mktensor_3d)
                                          (p->ny, vn, b * vn,
                                           b, p->ny * vn, vn,
                                           vn, 1, 1),
                                          I, I),
                                     0, 0, NO_SLOW);
               if (!*cld3) goto nada;
          }
     }

     return 1;

 nada:
     X(plan_destroy_internal)(*cld3);
     X(plan_destroy_internal)(*cld2rest);
     X(plan_destroy_internal)(*cld2);
     *cld2 = *cld2rest = *cld3 = NULL;
     return 0;
}

static plan *mkplan(const solver *ego_, const problem *p_, planner *plnr)
{
     const S *ego = (const S *) ego_;
     const problem_mpi_transpose *p;
     P *pln;
     plan *cld1 = 0, *cld2 = 0, *cld2rest = 0, *cld3 = 0;
     INT b, bt, vn, rest_Ioff, rest_Ooff;
     INT *sbs, *sbo, *rbs, *rbo;
     int pe, my_pe, n_pes, sort_pe = -1, ascending = 1;
     R *I, *O;
     static const plan_adt padt = {
          XM(transpose_solve), awake, print, destroy
     };

     UNUSED(ego);

     if (!applicable(ego, p_, plnr))
          return (plan *) 0;

     p = (const problem_mpi_transpose *) p_;
     vn = p->vn;
     I = p->I; O = p->O;

     MPI_Comm_rank(p->comm, &my_pe);
     MPI_Comm_size(p->comm, &n_pes);

     bt = XM(block)(p->ny, p->tblock, my_pe);

     /* if the input must be preserved, the post-transpose cld1
        reads its input from O rather than from p->I */
     if (ego->preserve_input || NO_DESTROY_INPUTP(plnr)) I = p->O;

     if (!(p->flags & TRANSPOSED_OUT)) { /* nx x bt x vn -> bt x nx x vn */
          cld1 = X(mkplan_f_d)(plnr,
                               X(mkproblem_rdft_0_d)(X(mktensor_3d)
                                                     (bt, vn, p->nx * vn,
                                                      p->nx, bt * vn, vn,
                                                      vn, 1, 1),
                                                     I, O = p->O),
                               0, 0, NO_SLOW);
          if (XM(any_true)(!cld1, p->comm)) goto nada;
     }
     else {
          if (ego->preserve_input || NO_DESTROY_INPUTP(plnr))
               O = p->O;
          else
               O = p->I;
     }

     if (XM(any_true)(!XM(mkplans_pretranspose)(p, plnr, p->I, O, my_pe,
                                                &cld2, &cld2rest, &cld3,
                                                &rest_Ioff, &rest_Ooff),
                      p->comm)) goto nada;

     pln = MKPLAN_MPI_TRANSPOSE(P, &padt, apply);

     pln->cld1 = cld1;
     pln->cld2 = cld2;
     pln->cld2rest = cld2rest;
     pln->rest_Ioff = rest_Ioff;
     pln->rest_Ooff = rest_Ooff;
     pln->cld3 = cld3;
     /* 2 = explicitly requested by the solver (printed as "/p"),
        1 = imposed by the planner's NO_DESTROY_INPUT flag */
     pln->preserve_input = ego->preserve_input ? 2 : NO_DESTROY_INPUTP(plnr);

     MPI_Comm_dup(p->comm, &pln->comm);

     n_pes = (int) X(imax)(XM(num_blocks)(p->nx, p->block),
                           XM(num_blocks)(p->ny, p->tblock));
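     /* the number of communication rounds is the larger of the two
        block counts; ranks beyond it hold no data and get a NULL
        schedule below */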

     /* Compute sizes/offsets of blocks to exchange between processors */
     sbs = (INT *) MALLOC(4 * n_pes * sizeof(INT), PLANS);
     sbo = sbs + n_pes;
     rbs = sbo + n_pes;
     rbo = rbs + n_pes;
     b = XM(block)(p->nx, p->block, my_pe);
     bt = XM(block)(p->ny, p->tblock, my_pe);
     for (pe = 0; pe < n_pes; ++pe) {
          INT db, dbt; /* destination block sizes */
          db = XM(block)(p->nx, p->block, pe);
          dbt = XM(block)(p->ny, p->tblock, pe);

          sbs[pe] = b * dbt * vn;
          sbo[pe] = pe * (b * p->tblock) * vn;
          rbs[pe] = db * bt * vn;
          rbo[pe] = pe * (p->block * bt) * vn;

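          /* cross-multiplied test for db / p->block != dbt / p->tblock:
             detects the one process whose send and receive blocks
             differ in size, without dividing */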
          if (db * dbt > 0 && db * p->tblock != p->block * dbt) {
               A(sort_pe == -1); /* only one process should need sorting */
               sort_pe = pe;
               ascending = db * p->tblock > p->block * dbt;
          }
     }
     pln->n_pes = n_pes;
     pln->my_pe = my_pe;
     pln->send_block_sizes = sbs;
     pln->send_block_offsets = sbo;
     pln->recv_block_sizes = rbs;
     pln->recv_block_offsets = rbo;

     if (my_pe >= n_pes) {
          pln->sched = 0; /* this process is not doing anything */
     }
     else {
          pln->sched = (int *) MALLOC(n_pes * sizeof(int), PLANS);
          fill1_comm_sched(pln->sched, my_pe, n_pes);
          if (sort_pe >= 0)
               sort1_comm_sched(pln->sched, n_pes, sort_pe, ascending);
     }

     X(ops_zero)(&pln->super.super.ops);
     if (cld1) X(ops_add2)(&cld1->ops, &pln->super.super.ops);
     if (cld2) X(ops_add2)(&cld2->ops, &pln->super.super.ops);
     if (cld2rest) X(ops_add2)(&cld2rest->ops, &pln->super.super.ops);
     if (cld3) X(ops_add2)(&cld3->ops, &pln->super.super.ops);
     /* FIXME: should MPI operations be counted in "other" somehow? */

     return &(pln->super.super);

 nada:
     X(plan_destroy_internal)(cld3);
     X(plan_destroy_internal)(cld2rest);
     X(plan_destroy_internal)(cld2);
     X(plan_destroy_internal)(cld1);
     return (plan *) 0;
}

static solver *mksolver(int preserve_input)
{
     static const solver_adt sadt = { PROBLEM_MPI_TRANSPOSE, mkplan, 0 };
     S *slv = MKSOLVER(S, &sadt);
     slv->preserve_input = preserve_input;
     return &(slv->super);
}

void XM(transpose_pairwise_transposed_register)(planner *p)
{
     int preserve_input;
     for (preserve_input = 0; preserve_input <= 1; ++preserve_input)
          REGISTER_SOLVER(p, mksolver(preserve_input));
}