root/ompi/mpi/java/java/Intracomm.java


DEFINITIONS

This source file includes the following definitions.
  1. clone
  2. dup
  3. iDup
  4. dupWithInfo
  5. split
  6. split
  7. splitType
  8. splitType
  9. create
  10. create
  11. createGroup
  12. createGroup
  13. createCart
  14. createCart
  15. createGraph
  16. createGraph
  17. createDistGraph
  18. createDistGraph
  19. createDistGraph
  20. createDistGraphAdjacent
  21. createDistGraphAdjacent
  22. createDistGraphAdjacent
  23. scan
  24. scan
  25. scan
  26. iScan
  27. iScan
  28. iScan
  29. exScan
  30. exScan
  31. exScan
  32. iExScan
  33. iExScan
  34. iExScan
  35. openPort
  36. openPort
  37. openPort
  38. closePort
  39. closePort_jni
  40. accept
  41. accept
  42. accept
  43. connect
  44. connect
  45. connect
  46. publishName
  47. publishName
  48. publishName
  49. unpublishName
  50. unpublishName
  51. unpublishName
  52. lookupName
  53. lookupName
  54. lookupName
  55. spawn
  56. spawn
  57. spawnMultiple
  58. spawnMultiple

   1 /*
   2  * Copyright (c) 2004-2007 The Trustees of Indiana University and Indiana
   3  *                         University Research and Technology
   4  *                         Corporation.  All rights reserved.
   5  * Copyright (c) 2004-2005 The University of Tennessee and The University
   6  *                         of Tennessee Research Foundation.  All rights
   7  *                         reserved.
   8  * Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
   9  *                         University of Stuttgart.  All rights reserved.
  10  * Copyright (c) 2004-2005 The Regents of the University of California.
  11  *                         All rights reserved.
  12  * Copyright (c) 2015      Los Alamos National Security, LLC. All rights
  13  *                         reserved.
  14  * Copyright (c) 2018      FUJITSU LIMITED.  All rights reserved.
  15  * $COPYRIGHT$
  16  *
  17  * Additional copyrights may follow
  18  *
  19  * $HEADER$
  20  *
  21  *
  22  * This file is almost a complete re-write for Open MPI compared to the
  23  * original mpiJava package. Its license and copyright are listed below.
  24  * See <path to ompi/mpi/java/README> for more information.
  25  *
  26  *
  27  *  Licensed under the Apache License, Version 2.0 (the "License");
  28  *  you may not use this file except in compliance with the License.
  29  *  You may obtain a copy of the License at
  30  *
  31  *     http://www.apache.org/licenses/LICENSE-2.0
  32  *
  33  *  Unless required by applicable law or agreed to in writing, software
  34  *  distributed under the License is distributed on an "AS IS" BASIS,
  35  *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  36  *  See the License for the specific language governing permissions and
  37  *  limitations under the License.
  38  *
  39  *
  40  * File         : Intracomm.java
  41  * Author       : Sang Lim, Xinying Li, Bryan Carpenter
  42  * Created      : Thu Apr  9 12:22:15 1998
  43  * Revision     : $Revision: 1.14 $
  44  * Updated      : $Date: 2002/12/16 15:25:13 $
  45  * Copyright: Northeast Parallel Architectures Center
  46  *            at Syracuse University 1998
  47  *
  48  *
  49  *
  50  * IMPLEMENTATION DETAILS
  51  *
  52  * All methods whose buffers can be either direct or non-direct take a
  53  * companion boolean argument 'db' which is true if the buffer is direct.
  54  * For example, if the buffer argument is recvBuf, the companion
  55  * argument is 'rdb', indicating whether the receive buffer is direct.
  56  *
  57  * Checking whether a buffer is direct is faster in Java than in C.
  58  */
  59 package mpi;
  60 
  61 import java.nio.*;
  62 import static mpi.MPI.assertDirectBuffer;
  63 
  64 /**
  65  * This class represents an intracommunicator.
  66  */
  67 public class Intracomm extends Comm
  68 {
  69         protected Intracomm()
  70         {
  71         }
  72 
  73         protected Intracomm(long handle)
  74         {
  75                 super(handle);
  76         }
  77 
  78         protected Intracomm(long[] commRequest)
  79         {
  80                 super(commRequest);
  81         }
  82 
  83         /**
  84          * Duplicates this communicator.
  85          * <p>Java binding of {@code MPI_COMM_DUP}.
  86          * <p>It is recommended to use {@link #dup} instead of {@link #clone}
  87          * because the latter cannot throw an {@link mpi.MPIException}.
  88          * @return copy of this communicator
  89          */
  90         @Override public Intracomm clone()
  91         {
  92                 try
  93                 {
  94                         return dup();
  95                 }
  96                 catch(MPIException e)
  97                 {
  98                         throw new RuntimeException(e.getMessage());
  99                 }
 100         }
 101 
 102         /**
 103          * Duplicates this communicator.
 104          * <p>Java binding of {@code MPI_COMM_DUP}.
 105          * @return copy of this communicator
 106          * @throws MPIException Signals that an MPI exception of some sort has occurred.
 107          */
 108         @Override public Intracomm dup() throws MPIException
 109         {
 110                 MPI.check();
 111                 return new Intracomm(dup(handle));
 112         }
 113 
 114         /**
 115          * Duplicates this communicator.
 116          * <p>Java binding of {@code MPI_COMM_IDUP}.
 117          * <p>The new communicator can't be used before the operation completes.
 118          * The request object must be obtained by calling {@link #getRequest}.
 119          * @return copy of this communicator
 120          * @throws MPIException Signals that an MPI exception of some sort has occurred.
 121          */
 122         @Override public Intracomm iDup() throws MPIException
 123         {
 124                 MPI.check();
 125                 return new Intracomm(iDup(handle));
 126         }
 127 
 128         /**
 129          * Duplicates this communicator with the info object used in the call.
 130          * <p>Java binding of {@code MPI_COMM_DUP_WITH_INFO}.
 131          * @param info  info object to associate with the new communicator
 132          * @return copy of this communicator
 133          * @throws MPIException Signals that an MPI exception of some sort has occurred.
 134          */
 135         @Override public Intracomm dupWithInfo(Info info) throws MPIException
 136         {
 137             MPI.check();
 138             return new Intracomm(dupWithInfo(handle, info.handle));
 139         }
 140 
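	/*
	 * Illustrative usage sketch (not part of the original source), assuming
	 * MPI.Init(args) has already been called and that Request.waitFor() and
	 * Comm.free() are available as in the rest of these bindings:
	 *
	 *   Intracomm copy  = MPI.COMM_WORLD.dup();    // blocking duplicate
	 *   Intracomm icopy = MPI.COMM_WORLD.iDup();   // nonblocking duplicate
	 *   icopy.getRequest().waitFor();              // must complete before use
	 *   copy.free();
	 *   icopy.free();
	 */
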
 141         /**
 142          * Partition the group associated with this communicator and create
 143          * a new communicator within each subgroup.
 144          * <p>Java binding of the MPI operation {@code MPI_COMM_SPLIT}.
 145          * @param colour control of subset assignment
 146          * @param key    control of rank assignment
 147          * @return new communicator
 148          * @throws MPIException Signals that an MPI exception of some sort has occurred.
 149          */
 150         public final Intracomm split(int colour, int key) throws MPIException
 151         {
 152                 MPI.check();
 153                 return new Intracomm(split(handle, colour, key));
 154         }
 155 
 156         private native long split(long comm, int colour, int key) throws MPIException;
 157 
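	/*
	 * Illustrative sketch (not part of the original source): split
	 * MPI.COMM_WORLD into two subcommunicators by rank parity, keeping
	 * the original rank order within each half.
	 *
	 *   int rank = MPI.COMM_WORLD.getRank();
	 *   Intracomm half = MPI.COMM_WORLD.split(rank % 2, rank);
	 */
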
 158         /**
 159          * Partition the group associated with this communicator and create
 160          * a new communicator within each subgroup.
 161          * <p>Java binding of the MPI operation {@code MPI_COMM_SPLIT_TYPE}.
 162          * @param splitType     type of processes to be grouped together
 163          * @param key       control of rank assignment
 164          * @param info          info argument
 165          * @return new communicator
 166          * @throws MPIException Signals that an MPI exception of some sort has occurred.
 167          */
 168         public final Intracomm splitType(int splitType, int key, Info info) throws MPIException
 169         {
 170                 MPI.check();
 171                 return new Intracomm(splitType(handle, splitType, key, info.handle));
 172         }
 173 
 174         private native long splitType(long comm, int splitType, int key, long info) throws MPIException;
 175 
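	/*
	 * Illustrative sketch (not part of the original source), assuming the
	 * MPI.COMM_TYPE_SHARED constant is available: create one communicator
	 * per shared-memory node, preserving rank order within each node.
	 *
	 *   Intracomm node = MPI.COMM_WORLD.splitType(
	 *           MPI.COMM_TYPE_SHARED, MPI.COMM_WORLD.getRank(), Info.NULL);
	 */
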
 176         /**
 177          * Create a new communicator.
 178          * <p>Java binding of the MPI operation {@code MPI_COMM_CREATE}.
 179          * @param group group which is a subset of the group of this communicator
 180          * @return new communicator
 181          * @throws MPIException Signals that an MPI exception of some sort has occurred.
 182          */
 183         public final Intracomm create(Group group) throws MPIException
 184         {
 185                 MPI.check();
 186                 return new Intracomm(create(handle, group.handle));
 187         }
 188 
 189         private native long create(long comm, long group);
 190 
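	/*
	 * Illustrative sketch (not part of the original source): build a new
	 * communicator from the first two ranks of MPI.COMM_WORLD, assuming
	 * Comm.getGroup() and Group.incl() behave as in the Group class.
	 *
	 *   Group sub = MPI.COMM_WORLD.getGroup().incl(new int[] {0, 1});
	 *   Intracomm pair = MPI.COMM_WORLD.create(sub);  // meaningful on ranks 0 and 1 only
	 */
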
 191         /**
 192          * Create a new intracommunicator for the given group.
 193          * <p>Java binding of the MPI operation {@code MPI_COMM_CREATE_GROUP}.
 194          * @param group group which is a subset of the group of this communicator
 195          * @param tag   an integer tag
 196          * @return new communicator
 197          * @throws MPIException Signals that an MPI exception of some sort has occurred.
 198          */
 199         public final Intracomm createGroup(Group group, int tag) throws MPIException
 200         {
 201             MPI.check();
 202             return new Intracomm(createGroup(handle, group.handle, tag));
 203         }
 204 
 205         private native long createGroup(long comm, long group, int tag);
 206 
 207         // Topology Constructors
 208 
 209         /**
 210          * Creates a communicator to which the Cartesian topology
 211          * information is attached. The group of the new Cartesian
 212          * topology communicator is a subset of the group of this
 213          * communicator.
 214          * <p>Java binding of the MPI operation {@code MPI_CART_CREATE}.
 215          * <p>The number of dimensions of the Cartesian grid is taken to be the
 216          * size of the {@code dims} argument. The array {@code periods} must
 217          * be the same size.
 218          * @param dims    the number of processes in each dimension
 219          * @param periods {@code true}  if grid is periodic,
 220          *                {@code false} if not, in each dimension
 221          * @param reorder {@code true}  if ranking may be reordered,
 222          *                {@code false} if not
 223          * @return new cartesian topology communicator
 224          * @throws MPIException Signals that an MPI exception of some sort has occurred.
 225          */
 226         public final CartComm createCart(int[] dims, boolean[] periods, boolean reorder)
 227                         throws MPIException
 228         {
 229                 MPI.check();
 230                 return new CartComm(createCart(handle, dims, periods, reorder));
 231         }
 232 
 233         private native long createCart(
 234                         long comm, int[] dims, boolean[] periods, boolean reorder)
 235                                         throws MPIException;
 236 
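	/*
	 * Illustrative sketch (not part of the original source): a 2 x 2
	 * Cartesian grid, periodic in the first dimension only, with rank
	 * reordering allowed (requires at least 4 processes).
	 *
	 *   int[]     dims    = {2, 2};
	 *   boolean[] periods = {true, false};
	 *   CartComm  grid    = MPI.COMM_WORLD.createCart(dims, periods, true);
	 */
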
 237         /**
 238          * Creates a communicator to which the graph topology information is attached.
 239          * <p>Java binding of the MPI operation {@code MPI_GRAPH_CREATE}.
 240          * <p>The number of nodes in the graph, <em>nnodes</em>, is taken
 241          * to be the size of the {@code index} argument.
 242          * @param index   node degrees
 243          * @param edges   graph edges
 244          * @param reorder {@code true} if ranking may be reordered,
 245          *                {@code false} if not
 246          * @return new graph topology communicator
 247          * @throws MPIException Signals that an MPI exception of some sort has occurred.
 248          */
 249         public final GraphComm createGraph(int[] index, int[] edges, boolean reorder)
 250                         throws MPIException
 251         {
 252                 MPI.check();
 253                 return new GraphComm(createGraph(handle, index, edges, reorder));
 254         }
 255 
 256         private native long createGraph(
 257                         long comm, int[] index, int[] edges, boolean reorder)
 258                                         throws MPIException;
 259 
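	/*
	 * Illustrative sketch (not part of the original source): the 4-process
	 * ring 0-1-2-3-0 in MPI_GRAPH_CREATE form, where index[i] holds the
	 * cumulative degree of nodes 0..i and edges lists each node's neighbours.
	 *
	 *   int[] index = {2, 4, 6, 8};
	 *   int[] edges = {1, 3,  0, 2,  1, 3,  0, 2};
	 *   GraphComm ring = MPI.COMM_WORLD.createGraph(index, edges, false);
	 */
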
 260         /**
 261          * Creates a communicator to which the distributed graph topology
 262          * information is attached.
 263          * <p>Java binding of the MPI operation {@code MPI_DIST_GRAPH_CREATE}.
 264          * <p>The number of source nodes is the size of the {@code sources} argument.
 265          * @param sources      source nodes for which this process specifies edges
 266          * @param degrees      number of destinations for each source node
 267          * @param destinations destination nodes for the source nodes
 268          * @param weights      weights for source to destination edges
 269          * @param info         hints on optimization and interpretation of weights
 270          * @param reorder      the process may be reordered (true) or not (false)
 271          * @return communicator with distributed graph topology
 272          * @throws MPIException Signals that an MPI exception of some sort has occurred.
 273          */
 274         public final GraphComm createDistGraph(
 275                         int[] sources, int[] degrees, int[] destinations,
 276                         int[] weights, Info info, boolean reorder)
 277                                         throws MPIException
 278         {
 279                 MPI.check();
 280 
 281                 return new GraphComm(createDistGraph(
 282                                 handle, sources, degrees, destinations,
 283                                 weights, info.handle, reorder, true));
 284         }
 285 
 286         /**
 287          * Creates a communicator to which the distributed graph topology
 288          * information is attached.
 289          * <p>Java binding of the MPI operation {@code MPI_DIST_GRAPH_CREATE}
 290          * using {@code MPI_UNWEIGHTED}.
 291          * <p>The number of source nodes is the size of the {@code sources} argument.
 292          * @param sources      source nodes for which this process specifies edges
 293          * @param degrees      number of destinations for each source node
 294          * @param destinations destination nodes for the source nodes
 295          * @param info         hints on optimization and interpretation of weights
 296          * @param reorder      the process may be reordered (true) or not (false)
 297          * @return communicator with distributed graph topology
 298          * @throws MPIException Signals that an MPI exception of some sort has occurred.
 299          */
 300         public final GraphComm createDistGraph(
 301                         int[] sources, int[] degrees, int[] destinations,
 302                         Info info, boolean reorder)
 303                                         throws MPIException
 304         {
 305                 MPI.check();
 306 
 307                 return new GraphComm(createDistGraph(
 308                                 handle, sources, degrees, destinations,
 309                                 null, info.handle, reorder, false));
 310         }
 311 
 312         private native long createDistGraph(
 313                         long comm, int[] sources, int[] degrees, int[] destinations,
 314                         int[] weights, long info, boolean reorder, boolean weighted)
 315                                         throws MPIException;
 316 
 317 
 318         /**
 319          * Creates a communicator to which the distributed graph topology
 320          * information is attached.
 321          * <p>Java binding of the MPI operation {@code MPI_DIST_GRAPH_CREATE_ADJACENT}.
 322          * <p>The number of source/destination nodes is the size of the
 323          * {@code sources}/{@code destinations} argument.
 324          * @param sources       ranks of processes for which the calling process
 325          *                      is a destination
 326          * @param sourceWeights weights of the edges into the calling process
 327          * @param destinations  ranks of processes for which the calling process
 328          *                      is a source
 329          * @param destWeights   weights of the edges out of the calling process
 330          * @param info          hints on optimization and interpretation of weights
 331          * @param reorder       the process may be reordered (true) or not (false)
 332          * @return communicator with distributed graph topology
 333          * @throws MPIException Signals that an MPI exception of some sort has occurred.
 334          */
 335         public final GraphComm createDistGraphAdjacent(
 336                         int[] sources, int[] sourceWeights,
 337                         int[] destinations, int[] destWeights, Info info, boolean reorder)
 338                                         throws MPIException
 339         {
 340                 MPI.check();
 341 
 342                 return new GraphComm(createDistGraphAdjacent(
 343                                 handle, sources, sourceWeights, destinations,
 344                                 destWeights, info.handle, reorder, true));
 345         }
 346 
 347         /**
 348          * Creates a communicator to which the distributed graph topology
 349          * information is attached.
 350          * <p>Java binding of the MPI operation {@code MPI_DIST_GRAPH_CREATE_ADJACENT}
 351          * using {@code MPI_UNWEIGHTED}.
 352          * <p>The number of source/destination nodes is the size of the
 353          * {@code sources}/{@code destinations} argument.
 354          * @param sources      ranks of processes for which the calling process
 355          *                     is a destination
 356          * @param destinations ranks of processes for which the calling process
 357          *                     is a source
 358          * @param info         hints on optimization and interpretation of weights
 359          * @param reorder      the process may be reordered (true) or not (false)
 360          * @return communicator with distributed graph topology
 361          * @throws MPIException Signals that an MPI exception of some sort has occurred.
 362          */
 363         public final GraphComm createDistGraphAdjacent(
 364                         int[] sources, int[] destinations, Info info, boolean reorder)
 365                                         throws MPIException
 366         {
 367                 MPI.check();
 368 
 369                 return new GraphComm(createDistGraphAdjacent(
 370                                 handle, sources, null, destinations, null,
 371                                 info.handle, reorder, false));
 372         }
 373 
 374         private native long createDistGraphAdjacent(
 375                         long comm, int[] sources, int[] sourceWeights, int[] destinations,
 376                         int[] destWeights, long info, boolean reorder, boolean weighted)
 377                                         throws MPIException;
 378 
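	/*
	 * Illustrative sketch (not part of the original source): a directed
	 * ring in which each process receives from its left neighbour and
	 * sends to its right neighbour, built without edge weights.
	 *
	 *   int rank = MPI.COMM_WORLD.getRank(), size = MPI.COMM_WORLD.getSize();
	 *   int[] sources      = {(rank - 1 + size) % size};
	 *   int[] destinations = {(rank + 1) % size};
	 *   GraphComm ring = MPI.COMM_WORLD.createDistGraphAdjacent(
	 *           sources, destinations, Info.NULL, false);
	 */
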
 379 
 380         /**
 381          * Perform a prefix reduction on data distributed across the group.
 382          * <p>Java binding of the MPI operation {@code MPI_SCAN}.
 383          * @param sendbuf send buffer array
 384          * @param recvbuf receive buffer array
 385          * @param count   number of items in input buffer
 386          * @param type    data type of each item in input buffer
 387          * @param op      reduce operation
 388          * @throws MPIException Signals that an MPI exception of some sort has occurred.
 389          */
 390         public final void scan(Object sendbuf, Object recvbuf,
 391                         int count, Datatype type, Op op)
 392                                         throws MPIException
 393         {
 394                 MPI.check();
 395 
 396                 int sendoff = 0,
 397                                 recvoff = 0;
 398 
 399                 boolean sdb = false,
 400                                 rdb = false;
 401 
 402                 if(sendbuf instanceof Buffer && !(sdb = ((Buffer)sendbuf).isDirect()))
 403                 {
 404                         sendoff = type.getOffset(sendbuf);
 405                         sendbuf = ((Buffer)sendbuf).array();
 406                 }
 407 
 408                 if(recvbuf instanceof Buffer && !(rdb = ((Buffer)recvbuf).isDirect()))
 409                 {
 410                         recvoff = type.getOffset(recvbuf);
 411                         recvbuf = ((Buffer)recvbuf).array();
 412                 }
 413 
 414                 op.setDatatype(type);
 415 
 416                 scan(handle, sendbuf, sdb, sendoff, recvbuf, rdb, recvoff,
 417                                 count, type.handle, type.baseType, op, op.handle);
 418         }
 419 
 420         /**
 421          * Perform a prefix reduction on data distributed across the group.
 422          * <p>Java binding of the MPI operation {@code MPI_SCAN}
 423          * using {@code MPI_IN_PLACE} instead of the send buffer.
 424          * @param recvbuf receive buffer array
 425          * @param count   number of items in input buffer
 426          * @param type    data type of each item in input buffer
 427          * @param op      reduce operation
 428          * @throws MPIException Signals that an MPI exception of some sort has occurred.
 429          */
 430         public final void scan(Object recvbuf, int count, Datatype type, Op op)
 431                         throws MPIException
 432         {
 433                 MPI.check();
 434                 int recvoff = 0;
 435                 boolean rdb = false;
 436 
 437                 if(recvbuf instanceof Buffer && !(rdb = ((Buffer)recvbuf).isDirect()))
 438                 {
 439                         recvoff = type.getOffset(recvbuf);
 440                         recvbuf = ((Buffer)recvbuf).array();
 441                 }
 442 
 443                 op.setDatatype(type);
 444 
 445                 scan(handle, null, false, 0, recvbuf, rdb, recvoff,
 446                                 count, type.handle, type.baseType, op, op.handle);
 447         }
 448 
 449         private native void scan(
 450                         long comm, Object sendbuf, boolean sdb, int sendoff,
 451                         Object recvbuf, boolean rdb, int recvoff, int count,
 452                         long type, int baseType, Op jOp, long hOp) throws MPIException;
 453 
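	/*
	 * Illustrative sketch (not part of the original source): an inclusive
	 * prefix sum over ranks, so recv[0] ends up holding 0 + 1 + ... + rank.
	 *
	 *   int[] send = {MPI.COMM_WORLD.getRank()};
	 *   int[] recv = new int[1];
	 *   MPI.COMM_WORLD.scan(send, recv, 1, MPI.INT, MPI.SUM);
	 */
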
 454         /**
 455          * Perform a prefix reduction on data distributed across the group.
 456          * <p>Java binding of the MPI operation {@code MPI_ISCAN}.
 457          * @param sendbuf send buffer array
 458          * @param recvbuf receive buffer array
 459          * @param count   number of items in input buffer
 460          * @param type    data type of each item in input buffer
 461          * @param op      reduce operation
 462          * @return communication request
 463          * @throws MPIException Signals that an MPI exception of some sort has occurred.
 464          */
 465         public final Request iScan(Buffer sendbuf, Buffer recvbuf,
 466                         int count, Datatype type, Op op)
 467                                         throws MPIException
 468         {
 469                 MPI.check();
 470                 op.setDatatype(type);
 471                 assertDirectBuffer(sendbuf, recvbuf);
 472                 Request req = new Request(iScan(handle, sendbuf, recvbuf, count,
 473                                 type.handle, type.baseType, op, op.handle));
 474                 req.addSendBufRef(sendbuf);
 475                 req.addRecvBufRef(recvbuf);
 476                 return req;
 477         }
 478 
 479         /**
 480          * Perform a prefix reduction on data distributed across the group.
 481          * <p>Java binding of the MPI operation {@code MPI_ISCAN}
 482          * using {@code MPI_IN_PLACE} instead of the send buffer.
 483          * @param buf   send/receive buffer array
 484          * @param count number of items in buffer
 485          * @param type  data type of each item in buffer
 486          * @param op    reduce operation
 487          * @return communication request
 488          * @throws MPIException Signals that an MPI exception of some sort has occurred.
 489          */
 490         public final Request iScan(Buffer buf, int count, Datatype type, Op op)
 491                         throws MPIException
 492         {
 493                 MPI.check();
 494                 op.setDatatype(type);
 495                 assertDirectBuffer(buf);
 496                 Request req = new Request(iScan(
 497                                 handle, null, buf, count,
 498                                 type.handle, type.baseType, op, op.handle));
 499                 req.addSendBufRef(buf);
 500                 return req;
 501         }
 502 
 503         private native long iScan(
 504                         long comm, Buffer sendbuf, Buffer recvbuf, int count,
 505                         long type, int baseType, Op jOp, long hOp) throws MPIException;
 506 
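	/*
	 * Illustrative sketch (not part of the original source), assuming the
	 * MPI.newIntBuffer() helper and Request.waitFor() are available: the
	 * nonblocking variant requires direct buffers, and the request must be
	 * completed before the result is read.
	 *
	 *   IntBuffer send = MPI.newIntBuffer(1).put(0, MPI.COMM_WORLD.getRank());
	 *   IntBuffer recv = MPI.newIntBuffer(1);
	 *   Request req = MPI.COMM_WORLD.iScan(send, recv, 1, MPI.INT, MPI.SUM);
	 *   req.waitFor();
	 */
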
 507         /**
 508          * Perform a prefix reduction on data distributed across the group.
 509          * <p>Java binding of the MPI operation {@code MPI_EXSCAN}.
 510          * @param sendbuf send buffer array
 511          * @param recvbuf receive buffer array
 512          * @param count   number of items in input buffer
 513          * @param type    data type of each item in input buffer
 514          * @param op      reduce operation
 515          * @throws MPIException Signals that an MPI exception of some sort has occurred.
 516          */
 517         public final void exScan(Object sendbuf, Object recvbuf,
 518                         int count, Datatype type, Op op)
 519                                         throws MPIException
 520         {
 521                 MPI.check();
 522 
 523                 int sendoff = 0,
 524                                 recvoff = 0;
 525 
 526                 boolean sdb = false,
 527                                 rdb = false;
 528 
 529                 if(sendbuf instanceof Buffer && !(sdb = ((Buffer)sendbuf).isDirect()))
 530                 {
 531                         sendoff = type.getOffset(sendbuf);
 532                         sendbuf = ((Buffer)sendbuf).array();
 533                 }
 534 
 535                 if(recvbuf instanceof Buffer && !(rdb = ((Buffer)recvbuf).isDirect()))
 536                 {
 537                         recvoff = type.getOffset(recvbuf);
 538                         recvbuf = ((Buffer)recvbuf).array();
 539                 }
 540 
 541                 op.setDatatype(type);
 542 
 543                 exScan(handle, sendbuf, sdb, sendoff, recvbuf, rdb, recvoff,
 544                                 count, type.handle, type.baseType, op, op.handle);
 545         }
 546 
 547         /**
 548          * Perform a prefix reduction on data distributed across the group.
 549          * <p>Java binding of the MPI operation {@code MPI_EXSCAN}
 550          * using {@code MPI_IN_PLACE} instead of the send buffer.
 551          * @param buf   send/receive buffer array
 552          * @param count number of items in input buffer
 553          * @param type  data type of each item in input buffer
 554          * @param op    reduce operation
 555          * @throws MPIException Signals that an MPI exception of some sort has occurred.
 556          */
 557         public final void exScan(Object buf, int count, Datatype type, Op op)
 558                         throws MPIException
 559         {
 560                 MPI.check();
 561                 int off = 0;
 562                 boolean db = false;
 563 
 564                 if(buf instanceof Buffer && !(db = ((Buffer)buf).isDirect()))
 565                 {
 566                         off = type.getOffset(buf);
 567                         buf = ((Buffer)buf).array();
 568                 }
 569 
 570                 op.setDatatype(type);
 571 
 572                 exScan(handle, null, false, 0, buf, db, off, count,
 573                                 type.handle, type.baseType, op, op.handle);
 574         }
 575 
 576         private native void exScan(
 577                         long comm, Object sendbuf, boolean sdb, int sendoff,
 578                         Object recvbuf, boolean rdb, int recvoff, int count,
 579                         long type, int baseType, Op jOp, long hOp) throws MPIException;
 580 
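	/*
	 * Illustrative sketch (not part of the original source): an exclusive
	 * prefix sum; recv[0] becomes 0 + 1 + ... + (rank - 1) and is left
	 * undefined on rank 0.
	 *
	 *   int[] send = {MPI.COMM_WORLD.getRank()};
	 *   int[] recv = new int[1];
	 *   MPI.COMM_WORLD.exScan(send, recv, 1, MPI.INT, MPI.SUM);
	 */
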
 581         /**
 582          * Perform a prefix reduction on data distributed across the group.
 583          * <p>Java binding of the MPI operation {@code MPI_IEXSCAN}.
 584          * @param sendbuf send buffer array
 585          * @param recvbuf receive buffer array
 586          * @param count   number of items in input buffer
 587          * @param type    data type of each item in input buffer
 588          * @param op      reduce operation
 589          * @return communication request
 590          * @throws MPIException Signals that an MPI exception of some sort has occurred.
 591          */
 592         public final Request iExScan(Buffer sendbuf, Buffer recvbuf,
 593                         int count, Datatype type, Op op)
 594                                         throws MPIException
 595         {
 596                 MPI.check();
 597                 op.setDatatype(type);
 598                 assertDirectBuffer(sendbuf, recvbuf);
 599                 Request req = new Request(iExScan(handle, sendbuf, recvbuf, count,
 600                                 type.handle, type.baseType, op, op.handle));
 601                 req.addSendBufRef(sendbuf);
 602                 req.addRecvBufRef(recvbuf);
 603                 return req;
 604         }
 605 
 606         /**
 607          * Perform a prefix reduction on data distributed across the group.
 608          * <p>Java binding of the MPI operation {@code MPI_IEXSCAN}
 609          * using {@code MPI_IN_PLACE} instead of the send buffer.
 610          * @param buf   send/receive buffer array
 611          * @param count number of items in input buffer
 612          * @param type  data type of each item in input buffer
 613          * @param op    reduce operation
 614          * @return communication request
 615          * @throws MPIException Signals that an MPI exception of some sort has occurred.
 616          */
 617         public final Request iExScan(Buffer buf, int count, Datatype type, Op op)
 618                         throws MPIException
 619         {
 620                 MPI.check();
 621                 op.setDatatype(type);
 622                 assertDirectBuffer(buf);
 623                 Request req = new Request(iExScan(
 624                                 handle, null, buf, count,
 625                                 type.handle, type.baseType, op, op.handle));
 626                 req.addRecvBufRef(buf);
 627                 return req;
 628         }
 629 
 630         private native long iExScan(
 631                         long comm, Buffer sendbuf, Buffer recvbuf, int count,
 632                         long type, int baseType, Op jOp, long hOp) throws MPIException;
 633 
 634         /**
 635          * Java binding of {@code MPI_OPEN_PORT} using {@code MPI_INFO_NULL}.
 636          * @return port name
 637          * @throws MPIException Signals that an MPI exception of some sort has occurred.
 638          */
 639         public static String openPort() throws MPIException
 640         {
 641                 MPI.check();
 642                 return openPort(Info.NULL);
 643         }
 644 
 645         /**
 646          * Java binding of {@code MPI_OPEN_PORT}.
 647          * @param info implementation-specific information
 648          * @return port name
 649          * @throws MPIException Signals that an MPI exception of some sort has occurred.
 650          */
 651         public static String openPort(Info info) throws MPIException
 652         {
 653                 MPI.check();
 654                 return openPort(info.handle);
 655         }
 656 
 657         private native static String openPort(long info) throws MPIException;
 658 
 659         /**
 660          * Java binding of {@code MPI_CLOSE_PORT}.
 661          * @param name port name
 662          * @throws MPIException Signals that an MPI exception of some sort has occurred.
 663          */
 664         public static void closePort(String name) throws MPIException
 665         {
 666                 MPI.check();
 667                 closePort_jni(name);
 668         }
 669 
 670         private native static void closePort_jni(String name) throws MPIException;
 671 
 672         /**
 673          * Java binding of {@code MPI_COMM_ACCEPT} using {@code MPI_INFO_NULL}.
 674          * @param port port name
 675          * @param root rank in comm of root node
 676          * @return intercommunicator with client as remote group
 677          * @throws MPIException Signals that an MPI exception of some sort has occurred.
 678          */
 679         public final Intercomm accept(String port, int root) throws MPIException
 680         {
 681                 MPI.check();
 682                 return new Intercomm(accept(handle, port, Info.NULL, root));
 683         }
 684 
 685         /**
 686          * Java binding of {@code MPI_COMM_ACCEPT}.
 687          * @param port port name
 688          * @param info implementation-specific information
 689          * @param root rank in comm of root node
 690          * @return intercommunicator with client as remote group
 691          * @throws MPIException Signals that an MPI exception of some sort has occurred.
 692          */
 693         public final Intercomm accept(String port, Info info, int root)
 694                         throws MPIException
 695         {
 696                 MPI.check();
 697                 return new Intercomm(accept(handle, port, info.handle, root));
 698         }
 699 
 700         private native long accept(long comm, String port, long info, int root)
 701                         throws MPIException;
 702 
 703         /**
 704          * Java binding of {@code MPI_COMM_CONNECT} using {@code MPI_INFO_NULL}.
 705          * @param port port name
 706          * @param root rank in comm of root node
 707          * @return intercommunicator with server as remote group
 708          * @throws MPIException Signals that an MPI exception of some sort has occurred.
 709          */
 710         public final Intercomm connect(String port, int root) throws MPIException
 711         {
 712                 MPI.check();
 713                 return new Intercomm(connect(handle, port, Info.NULL, root));
 714         }
 715 
 716         /**
 717          * Java binding of {@code MPI_COMM_CONNECT}.
 718          * @param port port name
 719          * @param info implementation-specific information
 720          * @param root rank in comm of root node
 721          * @return intercommunicator with server as remote group
 722          * @throws MPIException Signals that an MPI exception of some sort has occurred.
 723          */
 724         public final Intercomm connect(String port, Info info, int root)
 725                         throws MPIException
 726         {
 727                 MPI.check();
 728                 return new Intercomm(connect(handle, port, info.handle, root));
 729         }
 730 
 731         private native long connect(long comm, String port, long info, int root)
 732                         throws MPIException;
 733 
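	/*
	 * Illustrative client/server sketch (not part of the original source),
	 * with two separately started MPI jobs; the port name must reach the
	 * client out of band (e.g. via a file or the name service below).
	 *
	 *   // server
	 *   String port = Intracomm.openPort();
	 *   Intercomm client = MPI.COMM_WORLD.accept(port, 0);
	 *
	 *   // client (given the server's port name)
	 *   Intercomm server = MPI.COMM_WORLD.connect(port, 0);
	 */
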
 734         /**
 735          * Java binding of {@code MPI_PUBLISH_NAME} using {@code MPI_INFO_NULL}.
 736          * @param service service name
 737          * @param port    port name
 738          * @throws MPIException Signals that an MPI exception of some sort has occurred.
 739          */
 740         public static void publishName(String service, String port)
 741                         throws MPIException
 742         {
 743                 MPI.check();
 744                 publishName(service, Info.NULL, port);
 745         }
 746 
 747         /**
 748          * Java binding of {@code MPI_PUBLISH_NAME}.
 749          * @param service service name
 750          * @param info    implementation-specific information
 751          * @param port    port name
 752          * @throws MPIException Signals that an MPI exception of some sort has occurred.
 753          */
 754         public static void publishName(String service, Info info, String port)
 755                         throws MPIException
 756         {
 757                 MPI.check();
 758                 publishName(service, info.handle, port);
 759         }
 760 
 761         private native static void publishName(String service, long info, String port)
 762                         throws MPIException;
 763 
 764         /**
 765          * Java binding of {@code MPI_UNPUBLISH_NAME} using {@code MPI_INFO_NULL}.
 766          * @param service service name
 767          * @param port    port name
 768          * @throws MPIException Signals that an MPI exception of some sort has occurred.
 769          */
 770         public static void unpublishName(String service, String port)
 771                         throws MPIException
 772         {
 773                 MPI.check();
 774                 unpublishName(service, Info.NULL, port);
 775         }
 776 
 777         /**
 778          * Java binding of {@code MPI_UNPUBLISH_NAME}.
 779          * @param service service name
 780          * @param info    implementation-specific information
 781          * @param port    port name
 782          * @throws MPIException Signals that an MPI exception of some sort has occurred.
 783          */
 784         public static void unpublishName(String service, Info info, String port)
 785                         throws MPIException
 786         {
 787                 MPI.check();
 788                 unpublishName(service, info.handle, port);
 789         }
 790 
 791         private native static void unpublishName(String service, long info, String port)
 792                         throws MPIException;
 793 
 794         /**
 795          * Java binding of {@code MPI_LOOKUP_NAME} using {@code MPI_INFO_NULL}.
 796          * @param service service name
 797          * @return port name
 798          * @throws MPIException Signals that an MPI exception of some sort has occurred.
 799          */
 800         public static String lookupName(String service) throws MPIException
 801         {
 802                 MPI.check();
 803                 return lookupName(service, Info.NULL);
 804         }
 805 
 806         /**
 807          * Java binding of {@code MPI_LOOKUP_NAME}.
 808          * @param service service name
 809          * @param info    implementation-specific information
 810          * @return port name
 811          * @throws MPIException Signals that an MPI exception of some sort has occurred.
 812          */
 813         public static String lookupName(String service, Info info) throws MPIException
 814         {
 815                 MPI.check();
 816                 return lookupName(service, info.handle);
 817         }
 818 
 819         private native static String lookupName(String service, long info)
 820                         throws MPIException;
 821 
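	/*
	 * Illustrative sketch (not part of the original source): the name
	 * service lets a client find the server's port by a service name
	 * instead of passing the raw port string around ("ocean" is just a
	 * placeholder service name).
	 *
	 *   // server
	 *   String port = Intracomm.openPort();
	 *   Intracomm.publishName("ocean", port);
	 *
	 *   // client
	 *   String found = Intracomm.lookupName("ocean");
	 *   Intercomm server = MPI.COMM_WORLD.connect(found, 0);
	 *
	 *   // server, when done
	 *   Intracomm.unpublishName("ocean", port);
	 */
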
 822         /**
 823          * Java binding of {@code MPI_COMM_SPAWN}.
 824          * The remote group of the returned intercommunicator will contain the newly spawned processes.
 825          * @param command  name of program to be spawned
 826          * @param argv     arguments to command; if this parameter is null,
 827          *                 {@code MPI_ARGV_NULL} will be used.
 828          * @param maxprocs maximum number of processes to start
 829          * @param info     info object telling the runtime where
 830          *                 and how to start the processes
 831          * @param root     rank of process in which previous arguments are examined
 832          * @param errcodes one code per process; if this parameter is null,
 833          *                 {@code MPI_ERRCODES_IGNORE} will be used.
 834          * @return intercommunicator between original group and the newly spawned group
 835          * @throws MPIException Signals that an MPI exception of some sort has occurred.
 836          */
 837         public final Intercomm spawn(String command, String[] argv, int maxprocs,
 838                         Info info, int root, int[] errcodes)
 839                                         throws MPIException
 840         {
 841                 MPI.check();
 842 
 843                 return new Intercomm(spawn(handle, command, argv, maxprocs,
 844                                 info.handle, root, errcodes));
 845         }
 846 
 847         private native long spawn(long comm, String command, String[] argv,
 848                         int maxprocs, long info, int root, int[] errcodes)
 849                                         throws MPIException;
 850 
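	/*
	 * Illustrative sketch (not part of the original source): start four
	 * copies of a hypothetical "./worker" program; the null argv and
	 * errcodes select MPI_ARGV_NULL and MPI_ERRCODES_IGNORE as documented
	 * above.
	 *
	 *   Intercomm children = MPI.COMM_WORLD.spawn(
	 *           "./worker", null, 4, Info.NULL, 0, null);
	 */
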
 851         /**
 852          * Java binding of {@code MPI_COMM_SPAWN_MULTIPLE}.
 853          * The remote group of the returned intercommunicator will contain the newly spawned processes.
 854          * @param commands programs to be executed
 855          * @param argv     arguments for commands; if this parameter is null,
 856          *                 {@code MPI_ARGVS_NULL} will be used.
 857          * @param maxprocs maximum number of processes to start for each command
 858          * @param info     info objects telling the runtime where
 859          *                 and how to start the processes
 860          * @param root     rank of process in which previous arguments are examined
 861          * @param errcodes one code per process; if this parameter is null,
 862          *                 {@code MPI_ERRCODES_IGNORE} will be used.
 863          * @return intercommunicator between original group and the newly spawned group
 864          * @throws MPIException Signals that an MPI exception of some sort has occurred.
 865          */
 866         public final Intercomm spawnMultiple(
 867                         String[] commands, String[][] argv, int[] maxprocs,
 868                         Info[] info, int root, int[] errcodes)
 869                                         throws MPIException
 870         {
 871                 MPI.check();
 872 
 873                 long hInfo[] = new long[info.length];
 874 
 875                 for(int i = 0; i < info.length; i++)
 876                         hInfo[i] = info[i].handle;
 877 
 878                 return new Intercomm(spawnMultiple(handle, commands, argv, maxprocs,
 879                                 hInfo, root, errcodes));
 880         }
 881 
 882         private native long spawnMultiple(
 883                         long comm, String[] commands, String[][] argv, int[] maxprocs,
 884                         long[] info, int root, int[] errcodes) throws MPIException;
 885 
 886 } // Intracomm
