• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 // (C) Copyright 2005 The Trustees of Indiana University.
2 // (C) Copyright 2006 Douglas Gregor <doug.gregor -at- gmail.com>
3 
4 // Use, modification and distribution is subject to the Boost Software
5 // License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
6 // http://www.boost.org/LICENSE_1_0.txt)
7 
8 //  Authors: Douglas Gregor
9 
10 /** @file documentation.cpp
11  *
12  *  This file contains all of the documentation strings for the
13  *  Boost.MPI Python bindings.
14  */
15 namespace boost { namespace mpi { namespace python {
16 
// Top-level docstring for the boost.mpi Python module itself.
const char* module_docstring =
  "The boost.mpi module contains Python wrappers for Boost.MPI.\n"
  "Boost.MPI is a C++ interface to the Message Passing Interface 1.1,\n"
  "a high-performance message passing library for parallel programming.\n"
  "\n"
  "This module supports the most commonly used subset of MPI 1.1. All\n"
  "communication operations can transmit any Python object that can be\n"
  "pickled and unpickled, along with C++-serialized data types and\n"
  "separation of the structure of a data type from its content.\n"
  "Collectives that have user-supplied functions,\n"
  "such as reduce() or scan(), accept arbitrary Python functions, and\n"
  "all collectives can operate on any serializable or picklable data type.\n"
  "\n"
  "IMPORTANT MODULE DATA\n"
  "  any_source       This constant may be used for the source parameter of\n"
  "                   receive and probe operations to indicate that a\n"
  "                   message may be received from any source.\n"
  "\n"
  "  any_tag          This constant may be used for the tag parameter of\n"
  "                   receive or probe operations to indicate that a send\n"
  "                   with any tag will be matched.\n"
  "\n"
  "  collectives_tag  Returns the reserved tag value used by the Boost.MPI\n"
  "                   implementation for collective operations. Although\n"
  "                   users are not permitted to send or receive messages\n"
  "                   with this tag, it may be useful when monitoring\n"
  "                   communication patterns.\n"
  "\n"
  "  host_rank        If there is a host process, this is the rank of\n"
  "                   that process. Otherwise, this value will be None. MPI\n"
  "                   does not define the meaning of a \"host\" process: \n"
  "                   consult the documentation for your MPI implementation.\n"
  "\n"
  "  io_rank          The rank of a process that can perform input/output\n"
  "                   via the standard facilities. If every process can\n"
  "                   perform I/O using the standard facilities, this value\n"
  "                   will be the same as any_source. If no process can\n"
  "                   perform I/O, this value will be None.\n"
  "\n"
  "  max_tag          The maximum value that may be used for the tag\n"
  "                   parameter of send/receive operations. This value will\n"
  "                   be somewhat smaller than the value of MPI_TAG_UB,\n"
  "                   because the Boost.MPI implementation reserves some\n"
  "                   tags for collective operations.\n"
  "\n"
  "  processor_name   The name of this processor. The actual form of\n"
  "                   the name is unspecified, but may be documented by\n"
  "                   the underlying MPI implementation.\n"
  "\n"
  "  rank             The rank of this process in the \"world\" communicator.\n"
  "\n"
  "  size             The number of processes in the \"world\" communicator.\n"
  "\n"
  "  world            The \"world\" communicator from which all other\n"
  "                   communicators will be derived. This is the equivalent\n"
  "                   of MPI_COMM_WORLD.\n"
  "\n"
  "TRANSMITTING USER-DEFINED DATA\n"
  "  Boost.MPI can transmit user-defined data in several different ways.\n"
  "  Most importantly, it can transmit arbitrary Python objects by pickling\n"
  "  them at the sender and unpickling them at the receiver, allowing\n"
  "  arbitrarily complex Python data structures to interoperate with MPI.\n"
  "\n"
  "  Boost.MPI also supports efficient serialization and transmission of\n"
  "  C++ objects (that have been exposed to Python) through its C++\n"
  "  interface. Any C++ type that provides (de-)serialization routines that\n"
  "  meet the requirements of the Boost.Serialization library is eligible\n"
  "  for this optimization, but the type must be registered in advance. To\n"
  "  register a C++ type, invoke the C++ function:\n"
  "    boost::mpi::python::register_serialized\n"
  "\n"
  "  Finally, Boost.MPI supports separation of the structure of an object\n"
  "  from the data it stores, allowing the two pieces to be transmitted\n"
  "  separately. This \"skeleton/content\" mechanism, described in more\n"
  "  detail in a later section, is a communication optimization suitable\n"
  "  for problems with fixed data structures whose internal data changes\n"
  "  frequently.\n"
  "\n"
  "COLLECTIVES\n"
  "  Boost.MPI supports all of the MPI collectives (scatter, reduce, scan,\n"
  "  broadcast, etc.) for any type of data that can be transmitted with the\n"
  "  point-to-point communication operations. For the MPI collectives that\n"
  "  require a user-specified operation (e.g., reduce and scan), the\n"
  "  operation can be an arbitrary Python function. For instance, one could\n"
  "  concatenate strings with all_reduce:\n\n"
  "    mpi.all_reduce(my_string, lambda x,y: x + y)\n\n"
  "  The following module-level functions implement MPI collectives:\n"
  "    all_gather    Gather the values from all processes.\n"
  "    all_reduce    Combine the results from all processes.\n"
  "    all_to_all    Every process sends data to every other process.\n"
  "    broadcast     Broadcast data from one process to all other processes.\n"
  "    gather        Gather the values from all processes to the root.\n"
  "    reduce        Combine the results from all processes to the root.\n"
  "    scan          Prefix reduction of the values from all processes.\n"
  "    scatter       Scatter the values stored at the root to all processes.\n"
  "\n"
  "SKELETON/CONTENT MECHANISM\n"
  "  Boost.MPI provides a skeleton/content mechanism that allows the\n"
  "  transfer of large data structures to be split into two separate stages,\n"
  "  with the `skeleton' (or, `shape') of the data structure sent first and\n"
  "  the content (or, `data') of the data structure sent later, potentially\n"
  "  several times, so long as the structure has not changed since the\n"
  "  skeleton was transferred. The skeleton/content mechanism can improve\n"
  "  performance when the data structure is large and its shape is fixed,\n"
  "  because while the skeleton requires serialization (it has an unknown\n"
  "  size), the content transfer is fixed-size and can be done without\n"
  "  extra copies.\n"
  "\n"
  "  To use the skeleton/content mechanism from Python, you must first\n"
  "  register the type of your data structure with the skeleton/content\n"
  "  mechanism *from C++*. The registration function is\n"
  "    boost::mpi::python::register_skeleton_and_content\n"
  "  and resides in the <boost/mpi/python.hpp> header.\n"
  "\n"
  "  Once you have registered your C++ data structures, you can extract\n"
  "  the skeleton for an instance of that data structure with skeleton().\n"
  "  The resulting SkeletonProxy can be transmitted via the normal send\n"
  "  routine, e.g.,\n\n"
  "    mpi.world.send(1, 0, skeleton(my_data_structure))\n\n"
  "  SkeletonProxy objects can be received on the other end via recv(),\n"
  "  which stores a newly-created instance of your data structure with the\n"
  "  same `shape' as the sender in its `object' attribute:\n\n"
  "    shape = mpi.world.recv(0, 0)\n"
  "    my_data_structure = shape.object\n\n"
  "  Once the skeleton has been transmitted, the content (accessed via \n"
  "  get_content) can be transmitted in much the same way. Note, however,\n"
  "  that the receiver also specifies get_content(my_data_structure) in its\n"
  "  call to receive:\n\n"
  "    if mpi.rank == 0:\n"
  "      mpi.world.send(1, 0, get_content(my_data_structure))\n"
  "    else:\n"
  "      mpi.world.recv(0, 0, get_content(my_data_structure))\n\n"
  "  Of course, this transmission of content can occur repeatedly, if the\n"
  "  values in the data structure--but not its shape--change.\n"
  "\n"
  "  The skeleton/content mechanism is a structured way to exploit the\n"
  "  interaction between custom-built MPI datatypes and MPI_BOTTOM, to\n"
  "  eliminate extra buffer copies.\n"
  "\n"
  "C++/PYTHON MPI COMPATIBILITY\n"
  "  Boost.MPI is a C++ library whose facilities have been exposed to Python\n"
  "  via the Boost.Python library. Since the Boost.MPI Python bindings are\n"
  "  built directly on top of the C++ library, and nearly every feature of\n"
  "  the C++ library is available in Python, hybrid C++/Python programs using\n"
  "  Boost.MPI can interact, e.g., sending a value from Python but receiving\n"
  "  that value in C++ (or vice versa). However, doing so requires some\n"
  "  care. Because Python objects are dynamically typed, Boost.MPI transfers\n"
  "  type information along with the serialized form of the object, so that\n"
  "  the object can be received even when its type is not known. This\n"
  "  mechanism differs from its C++ counterpart, where the static types of\n"
  "  transmitted values are always known.\n"
  "\n"
  "  The only way to communicate between the C++ and Python views on \n"
  "  Boost.MPI is to traffic entirely in Python objects. For Python, this is\n"
  "  the normal state of affairs, so nothing will change. For C++, this\n"
  "  means sending and receiving values of type boost::python::object, from\n"
  "  the Boost.Python library. For instance, say we want to transmit an\n"
  "  integer value from Python:\n\n"
  "    comm.send(1, 0, 17)\n\n"
  "  In C++, we would receive that value into a Python object and then\n"
  "  `extract' an integer value:\n\n"
  "    boost::python::object value;\n"
  "    comm.recv(0, 0, value);\n"
  "    int int_value = boost::python::extract<int>(value);\n\n"
  "  In the future, Boost.MPI will be extended to allow improved\n"
  "  interoperability with the C++ Boost.MPI and the C MPI bindings.\n"
  ;
186 
/***********************************************************
 * environment documentation                               *
 ***********************************************************/
// Docstring for the MPI-environment initialization routine exposed to Python.
const char* environment_init_docstring =
  "Initialize the MPI environment. Users should not need to call\n"
  "this function directly, because the MPI environment will be\n"
  "automatically initialized when the Boost.MPI module is loaded.\n";

// Docstring for the MPI-environment finalization routine.
const char* environment_finalize_docstring =
  "Finalize (shut down) the MPI environment. Users only need to\n"
  "invoke this function if MPI should be shut down before program\n"
  "termination. Boost.MPI will automatically finalize the MPI\n"
  "environment when the program exits.\n";

// Docstring for the module-level abort function (MPI_Abort on MPI_COMM_WORLD).
const char* environment_abort_docstring =
  "Aborts all MPI processes and returns to the environment. The\n"
  "precise behavior will be defined by the underlying MPI\n"
  "implementation. This is equivalent to a call to MPI_Abort with\n"
  "MPI_COMM_WORLD.\n"
  "errcode is the error code to return from aborted processes.\n";

// Docstring for the initialized() query.
// (Indentation normalized to two spaces to match the sibling definitions.)
const char* environment_initialized_docstring =
  "Determine if the MPI environment has already been initialized.\n";

// Docstring for the finalized() query.
const char* environment_finalized_docstring =
  "Determine if the MPI environment has already been finalized.\n";
213 
/***********************************************************
 * nonblocking documentation                               *
 ***********************************************************/
// Constructor docstring for the RequestList class, which holds a
// sequence of non-blocking communication requests.
const char* request_list_init_docstring=
  "Without arguments, constructs an empty RequestList.\n"
  "With one argument `iterable', copies request objects from this\n"
  "iterable to the new RequestList.\n";

// Docstring for wait_any, the Python wrapper over MPI_Waitany.
const char* nonblocking_wait_any_docstring =
  "Waits until any of the given requests has been completed. It provides\n"
  "functionality equivalent to MPI_Waitany.\n"
  "\n"
  "requests must be a RequestList instance.\n"
  "\n"
  "Returns a triple (value, status, index) consisting of received value\n"
  "(or None), the Status object for the completed request, and its index\n"
  "in the RequestList.\n";

// Docstring for test_any, the non-waiting counterpart (MPI_Testany).
const char* nonblocking_test_any_docstring =
  "Tests if any of the given requests have been completed, but does not wait\n"
  "for completion. It provides functionality equivalent to MPI_Testany.\n"
  "\n"
  "requests must be a RequestList instance.\n"
  "\n"
  "Returns a triple (value, status, index) like wait_any or None if no request\n"
  "is complete.\n";
240 
// Docstring for wait_all, the Python wrapper over MPI_Waitall.
const char* nonblocking_wait_all_docstring =
  "Waits until all of the given requests have been completed. It provides\n"
  "functionality equivalent to MPI_Waitall.\n"
  "\n"
  "requests must be a RequestList instance.\n"
  "\n"
  "If the second parameter `callable' is provided, it is called with each\n"
  "completed request's received value (or None) and its Status object as\n"
  "its arguments. The calls occur in the order given by the `requests' list.\n";
250 
// Docstring for test_all, the Python wrapper over MPI_Testall.
const char* nonblocking_test_all_docstring =
  "Tests if all of the given requests have been completed. It provides\n"
  "functionality equivalent to MPI_Testall.\n"
  "\n"
  "Returns True if all requests have been completed.\n"
  "\n"
  "requests must be a RequestList instance.\n"
  "\n"
  "If the second parameter `callable' is provided, it is called with each\n"
  "completed request's received value (or None) and its Status object as\n"
  "its arguments. The calls occur in the order given by the `requests' list.\n";
262 
// Docstring for wait_some, the Python wrapper over MPI_Waitsome.
const char* nonblocking_wait_some_docstring =
  "Waits until at least one of the given requests has completed. It\n"
  "then completes all of the requests it can, partitioning the input\n"
  "sequence into pending requests followed by completed requests.\n"
  "\n"
  "This routine provides functionality equivalent to MPI_Waitsome.\n"
  "\n"
  "Returns the index of the first completed request.\n"
  "\n"
  "requests must be a RequestList instance.\n"
  "\n"
  "If the second parameter `callable' is provided, it is called with each\n"
  "completed request's received value (or None) and its Status object as\n"
  "its arguments. The calls occur in the order given by the `requests' list.\n";
277 
// Docstring for test_some, the Python wrapper over MPI_Testsome.
const char* nonblocking_test_some_docstring =
  "Tests to see if any of the given requests has completed. It completes\n"
  "all of the requests it can, partitioning the input sequence into pending\n"
  "requests followed by completed requests. This routine is similar to\n"
  "wait_some, but does not wait until any requests have completed.\n"
  "\n"
  "This routine provides functionality equivalent to MPI_Testsome.\n"
  "\n"
  "Returns the index of the first completed request.\n"
  "\n"
  "requests must be a RequestList instance.\n"
  "\n"
  "If the second parameter `callable' is provided, it is called with each\n"
  "completed request's received value (or None) and its Status object as\n"
  "its arguments. The calls occur in the order given by the `requests' list.\n";
293 
/***********************************************************
 * exception documentation                                 *
 ***********************************************************/
// Class docstring for the Python exception raised on MPI errors.
const char* exception_docstring =
  "Instances of this class will be thrown when an MPI error\n"
  "occurs. MPI failures that trigger these exceptions may or may not\n"
  "be recoverable, depending on the underlying MPI implementation.\n"
  "Consult the documentation for your MPI implementation to determine\n"
  "the effect of MPI errors.\n";
303 
// Docstring for the exception's what()/description accessor.
const char* exception_what_docstring =
  "A description of the error that occurred. At present, this refers\n"
  "only to the name of the MPI routine that failed.\n";
307 
// Docstring for the exception's `routine' attribute.
const char* exception_routine_docstring =
  "The name of the MPI routine that reported the error.\n";

// Docstring for the exception's `result_code' attribute.
const char* exception_result_code_docstring =
  "The result code returned from the MPI routine that reported the\n"
  "error.\n";
314 
/***********************************************************
 * collectives documentation                               *
 ***********************************************************/
// Docstring for the all_gather collective (gather followed by broadcast).
const char* all_gather_docstring =
  "all_gather is a collective algorithm that collects the values\n"
  "stored at each process into a tuple of values indexed by the\n"
  "process number they came from. all_gather is (semantically) a\n"
  "gather followed by a broadcast. The same tuple of values is\n"
  "returned to all processes.\n";

// Docstring for the all_reduce collective.
const char* all_reduce_docstring =
  "all_reduce is a collective algorithm that combines the values\n"
  "stored by each process into a single value. The values can be\n"
  "combined arbitrarily, specified via any function. The values\n"
  "a1, a2, .., ap provided by p processors will be combined by the\n"
  "binary function op into the result\n"
  "         op(a1, op(a2, ... op(ap-1,ap)))\n"
  "that will be returned to all processes. This function is the\n"
  "equivalent of calling all_gather() and then applying the built-in\n"
  "reduce() function to the returned sequence. op is assumed to be\n"
  "associative.\n";

// Docstring for the all_to_all collective.
const char* all_to_all_docstring =
  "all_to_all is a collective algorithm that transmits values from\n"
  "every process to every other process. On process i, the jth value\n"
  "of the values sequence is sent to process j and placed in the ith\n"
  "position of the tuple that will be returned from all_to_all.\n";
342 
// Docstring for the broadcast collective.
const char* broadcast_docstring =
  "broadcast is a collective algorithm that transfers a value from an\n"
  "arbitrary root process to every other process that is part of the\n"
  "given communicator (comm). The root parameter must be the same for\n"
  "every process. The value parameter need only be specified at the\n"
  "root. broadcast() returns the same broadcasted value to every process.\n";
349 
// Docstring for the gather collective (values collected at the root).
const char* gather_docstring =
  "gather is a collective algorithm that collects the values\n"
  "stored at each process into a tuple of values at the root\n"
  "process. This tuple is indexed by the process number that the\n"
  "value came from, and will be returned only by the root process.\n"
  "All other processes return None.\n";

// Docstring for the reduce collective (combined value at the root).
const char* reduce_docstring =
  "reduce is a collective algorithm that combines the values\n"
  "stored by each process into a single value at the root. The\n"
  "values can be combined arbitrarily, specified via any function.\n"
  "The values a1, a2, .., ap provided by p processors will be\n"
  "combined by the binary function op into the result\n"
  "     op(a1, op(a2, ... op(ap-1,ap)))\n"
  "that will be returned on the root process. This function is the\n"
  "equivalent of calling gather() to the root and then applying the\n"
  "built-in reduce() function to the returned sequence. All non-root\n"
  "processes return None. op is assumed to be associative.\n";
368 
// Docstring for the scan collective (prefix reduction).
// Fixed: stray Doxygen "@c" markup removed (this text is shown verbatim to
// Python users), and the duplicated "op is assumed to be associative"
// closing sentence dropped.
const char* scan_docstring =
  "scan computes a prefix reduction of values from all processes.\n"
  "It is a collective algorithm that combines the values stored by\n"
  "each process with the values of all processes with a smaller rank.\n"
  "The values can be arbitrarily combined, specified via a binary\n"
  "function op. If each process i provides the value ai, then scan\n"
  "returns op(a1, op(a2, ... op(ai-1, ai))) to the ith process. op is\n"
  "assumed to be associative. This routine is the equivalent of an\n"
  "all_gather(), followed by a built-in reduce() on the first i+1\n"
  "values in the resulting sequence on processor i.\n";
380 
// Docstring for the scatter collective (root distributes one value apiece).
const char* scatter_docstring =
  "scatter is a collective algorithm that scatters the values stored\n"
  "in the root process (as a container with comm.size elements) to\n"
  "all of the processes in the communicator. The values parameter \n"
  "(only significant at the root) is indexed by the process number to\n"
  "which the corresponding value will be sent. The value received by \n"
  "each process is returned from scatter.\n";
388 
/***********************************************************
 * communicator documentation                              *
 ***********************************************************/
// Class docstring for the Communicator wrapper.
const char* communicator_docstring =
 "The Communicator class abstracts a set of communicating\n"
 "processes in MPI. All of the processes that belong to a certain\n"
 "communicator can determine the size of the communicator, their rank\n"
 "within the communicator, and communicate with any other processes\n"
 "in the communicator.\n";

// Docstring for Communicator's default constructor.
const char* communicator_default_constructor_docstring =
  "Build a new Boost.MPI Communicator instance for MPI_COMM_WORLD.\n";

// Docstring for the Communicator.rank property.
const char* communicator_rank_docstring =
  "Returns the rank of the process in the communicator, which will be a\n"
  "value in [0, size).\n";

// Docstring for the Communicator.size property.
const char* communicator_size_docstring =
  "Returns the number of processes in the communicator.\n";
408 
// Docstring for Communicator.send (blocking point-to-point send).
const char* communicator_send_docstring =
  "This routine executes a potentially blocking send with the given\n"
  "tag to the process with rank dest. It can be received by the\n"
  "destination process with a matching recv call. The value will be\n"
  "transmitted in one of several ways:\n"
  "\n"
  "  - For C++ objects registered via register_serialized(), the value\n"
  "    will be serialized and transmitted.\n"
  "\n"
  "  - For SkeletonProxy objects, the skeleton of the object will be\n"
  "    serialized and transmitted.\n"
  "\n"
  "  - For Content objects, the content will be transmitted directly.\n"
  "    This content can be received by a matching recv/irecv call that\n"
  "    provides a suitable `buffer' argument.\n"
  "\n"
  "  - For all other Python objects, the value will be pickled and\n"
  "    transmitted.\n";

// Docstring for Communicator.recv (blocking point-to-point receive).
const char* communicator_recv_docstring =
  "This routine blocks until it receives a message from the process\n"
  "source with the given tag. If the source parameter is not specified,\n"
  "the message can be received from any process. Likewise, if the tag\n"
  "parameter is not specified, a message with any tag can be received.\n"
  "If return_status is True, returns a tuple containing the received\n"
  "object followed by a Status object describing the communication.\n"
  "Otherwise, recv() returns just the received object.\n"
  "\n"
  "When receiving the content of a data type that has been sent separately\n"
  "from its skeleton, user code must provide a value for the `buffer'\n"
  "argument. This value should be the Content object returned from\n"
  "get_content().\n";
441 
// Docstring for Communicator.isend (non-blocking send; returns a Request).
const char* communicator_isend_docstring =
  "This routine executes a nonblocking send with the given\n"
  "tag to the process with rank dest. It can be received by the\n"
  "destination process with a matching recv call. The value will be\n"
  "transmitted in the same way as with send().\n"
  "This routine returns a Request object, which can be used to query\n"
  "when the transmission has completed, wait for its completion, or\n"
  "cancel the transmission.\n";
450 
// Docstring for Communicator.irecv (non-blocking receive; returns a Request).
const char* communicator_irecv_docstring =
  "This routine initiates a non-blocking receive from the process\n"
  "source with the given tag. If the source parameter is not specified,\n"
  "the message can be received from any process. Likewise, if the tag\n"
  "parameter is not specified, a message with any tag can be received.\n"
  "This routine returns a Request object, which can be used to query\n"
  "when the transmission has completed, wait for its completion, or\n"
  "cancel the transmission. The received value will be accessible\n"
  "through the `value' attribute of the Request object once transmission\n"
  "has completed.\n"
  "\n"
  "As with the recv() routine, when receiving the content of a data type\n"
  "that has been sent separately from its skeleton, user code must provide\n"
  "a value for the `buffer' argument. This value should be the Content\n"
  "object returned from get_content().\n";
466 
// Docstring for Communicator.probe (blocking message query).
// Fixed: stray leading space before `const' (only indented declaration in
// the file) removed for consistency with the sibling definitions.
const char* communicator_probe_docstring =
  "This operation waits until a message matching (source, tag)\n"
  "is available to be received. It then returns information about\n"
  "that message. If source is omitted, a message from any process\n"
  "will match. If tag is omitted, a message with any tag will match.\n"
  "The actual source and tag can be retrieved from the returned Status\n"
  "object. To check if a message is available without blocking, use\n"
  "iprobe.\n";
475 
// Docstring for Communicator.iprobe (non-blocking message query).
const char* communicator_iprobe_docstring =
  "This operation determines if a message matching (source, tag) is\n"
  "available to be received. If so, it returns information about that\n"
  "message; otherwise, it returns None. If source is omitted, a message\n"
  "from any process will match. If tag is omitted, a message with any\n"
  "tag will match. The actual source and tag can be retrieved from the\n"
  "returned Status object. To wait for a message to become available, use\n"
  "probe.\n";

// Docstring for Communicator.barrier.
const char* communicator_barrier_docstring =
  "Wait for all processes within a communicator to reach the\n"
  "barrier.\n";

// Docstring for Communicator.split (MPI_Comm_split wrapper).
const char* communicator_split_docstring =
  "Split the communicator into multiple, disjoint communicators\n"
  "each of which is based on a particular color. This is a\n"
  "collective operation that returns a new communicator that is a\n"
  "subgroup of this. This routine is functionally equivalent to\n"
  "MPI_Comm_split.\n\n"
  "color is the color of this process. All processes with the\n"
  "same color value will be placed into the same group.\n\n"
  "If provided, key is a key value that will be used to determine\n"
  "the ordering of processes with the same color in the resulting\n"
  "communicator. If omitted, the key will default to the rank of\n"
  "the process in the current communicator.\n\n"
  "Returns a new Communicator instance containing all of the \n"
  "processes in this communicator that have the same color.\n";
503 
// Docstring for Communicator.abort (MPI_Abort on this communicator's group).
// Fixed: missing sentence-ending period after "MPI_Abort" (compare
// environment_abort_docstring, which punctuates the same sentence).
const char* communicator_abort_docstring =
  "Makes a \"best attempt\" to abort all of the tasks in the group of\n"
  "this communicator. Depending on the underlying MPI\n"
  "implementation, this may either abort the entire program (and\n"
  "possibly return errcode to the environment) or only abort\n"
  "some processes, allowing the others to continue. Consult the\n"
  "documentation for your MPI implementation. This is equivalent to\n"
  "a call to MPI_Abort.\n\n"
  "errcode is the error code to return from aborted processes.\n";
513 
/***********************************************************
 * request documentation                                   *
 ***********************************************************/
// Class docstring for Request (handle for isend/irecv operations).
const char* request_docstring =
  "The Request class contains information about a non-blocking send\n"
  "or receive and will be returned from isend or irecv, respectively.\n"
  "When a Request object represents a completed irecv, the `value' \n"
  "attribute will contain the received value.\n";

// Class docstring for RequestWithValue (internal Request variant).
const char* request_with_value_docstring =
  "This class is an implementation detail. Any call that accepts a\n"
  "Request also accepts a RequestWithValue, and vice versa.\n";

// Docstring for Request.wait.
const char* request_wait_docstring =
  "Wait until the communication associated with this request has\n"
  "completed. For a request that is associated with an isend(), returns\n"
  "a Status object describing the communication. For an irecv()\n"
  "operation, returns the received value by default. However, when\n"
  "return_status=True, a (value, status) pair is returned by a\n"
  "completed irecv request.\n";

// Docstring for Request.test.
const char* request_test_docstring =
  "Determine whether the communication associated with this request\n"
  "has completed successfully. If so, returns the Status object\n"
  "describing the communication (for an isend request) or a tuple\n"
  "containing the received value and a Status object (for an irecv\n"
  "request). Note that once test() returns a Status object, the\n"
  "request has completed and wait() should not be called.\n";

// Docstring for Request.cancel.
const char* request_cancel_docstring =
  "Cancel a pending communication, assuming it has not already been\n"
  "completed.\n";
546 
// Docstring for the Request.value property.
// Fixed: missing "\n" at the end of the first literal, which glued the
// rendered text together as "makes thesent value".
const char* request_value_docstring =
  "If this request originated in an irecv(), this property makes the\n"
  "sent value accessible once the request completes.\n"
  "\n"
  "If no value is available, ValueError is raised.\n";
552 
553 /***********************************************************
554  * skeleton/content documentation                          *
555  ***********************************************************/
556 const char* object_without_skeleton_docstring =
557   "The ObjectWithoutSkeleton class is an exception class used only\n"
558   "when the skeleton() or get_content() function is called with an\n"
559   "object that is not supported by the skeleton/content mechanism.\n"
560   "All C++ types for which skeletons and content can be transmitted\n"
561   "must be registered with the C++ routine:\n"
562   "  boost::mpi::python::register_skeleton_and_content\n";
563 
564 const char* object_without_skeleton_object_docstring =
565   "The object on which skeleton() or get_content() was invoked.\n";
566 
// Class docstring for SkeletonProxy: a send/recv-able stand-in that
// transmits only the structure ("shape") of a registered C++ object,
// leaving the data to be transmitted separately as content.
const char* skeleton_proxy_docstring =
  "The SkeletonProxy class is used to represent the skeleton of an\n"
  "object. The SkeletonProxy can be used as the value parameter of\n"
  "send() or isend() operations, but instead of transmitting the\n"
  "entire object, only its skeleton (\"shape\") will be sent, without\n"
  "the actual data. Its content can then be transmitted, separately.\n"
  "\n"
  "User code cannot generate SkeletonProxy instances directly. To\n"
  "refer to the skeleton of an object, use skeleton(object). Skeletons\n"
  "can also be received with the recv() and irecv() methods.\n"
  "\n"
  "Note that the skeleton/content mechanism can only be used with C++\n"
  "types that have been explicitly registered.\n";
580 
// Docstring for SkeletonProxy.object: the underlying object being proxied.
const char* skeleton_proxy_object_docstring =
    "The actual object whose skeleton is represented by this proxy object.\n";
583 
// Class docstring for the content proxy: the data half of the
// skeleton/content pair, obtained via get_content() and usable with any
// send()/recv() variant.
const char* content_docstring =
  "The content is a proxy class that represents the content of an object,\n"
  "which can be separately sent or received from its skeleton.\n"
  "\n"
  "User code cannot generate content instances directly. Call the\n"
  "get_content() routine to retrieve the content proxy for a particular\n"
  "object. The content instance can be used with any of the send() or\n"
  "recv() variants. Note that get_content() can only be used with C++\n"
  "data types that have been explicitly registered with the Python\n"
  "skeleton/content mechanism.\n";
594 
// Docstring for the skeleton() function.
// Fixes: "Tranmission" -> "Transmission" (typo), and "Use:\b" -> "Use:\n"
// (\b is a backspace escape; a newline was clearly intended before the
// indented routine name on the final line).
const char* skeleton_docstring =
  "The skeleton function retrieves the SkeletonProxy for its object\n"
  "parameter, allowing the transmission of the skeleton (or \"shape\")\n"
  "of the object separately from its data. The skeleton/content mechanism\n"
  "is useful when a large data structure remains structurally the same\n"
  "throughout a computation, but its content (i.e., the values in the\n"
  "structure) changes several times. Transmission of the content part does\n"
  "not require any serialization or unnecessary buffer copies, so it is\n"
  "very efficient for large data structures.\n"
  "\n"
  "Only C++ types that have been explicitly registered with the Boost.MPI\n"
  "Python library can be used with the skeleton/content mechanism. Use:\n"
  "  boost::mpi::python::register_skeleton_and_content\n";
608 
// Docstring for the get_content() function.
// Fixes: "Tranmission" -> "Transmission" (typo), and "Use:\b" -> "Use:\n"
// (\b is a backspace escape; a newline was clearly intended before the
// indented routine name on the final line).
const char* get_content_docstring =
  "The get_content function retrieves the content for its object parameter,\n"
  "allowing the transmission of the data in a data structure separately\n"
  "from its skeleton (or \"shape\"). The skeleton/content mechanism\n"
  "is useful when a large data structure remains structurally the same\n"
  "throughout a computation, but its content (i.e., the values in the\n"
  "structure) changes several times. Transmission of the content part does\n"
  "not require any serialization or unnecessary buffer copies, so it is\n"
  "very efficient for large data structures.\n"
  "\n"
  "Only C++ types that have been explicitly registered with the Boost.MPI\n"
  "Python library can be used with the skeleton/content mechanism. Use:\n"
  "  boost::mpi::python::register_skeleton_and_content\n";
622 
623 /***********************************************************
624  * status documentation                                    *
625  ***********************************************************/
// Class docstring for Status: per-message metadata (source, tag,
// error/cancellation state).
const char* status_docstring =
  "The Status class stores information about a given message, including\n"
  "its source, tag, and whether the message transmission was cancelled\n"
  "or resulted in an error.\n";
630 
// Docstring for the Status.source property.
const char* status_source_docstring = "The source of the incoming message.\n";
633 
// Docstring for the Status.tag property.
const char* status_tag_docstring = "The tag of the incoming message.\n";
636 
// Docstring for the Status.error property.
const char* status_error_docstring = "The error code associated with this transmission.\n";
639 
// Docstring for the Status.cancelled property.
const char* status_cancelled_docstring = "Whether this transmission was cancelled.\n";
642 
643 /***********************************************************
644  * timer documentation                                     *
645  ***********************************************************/
// Class docstring for Timer, the wrapper over MPI's timing facilities.
const char* timer_docstring = "The Timer class is a simple wrapper around the MPI timing facilities.\n";
648 
// Docstring for Timer's default constructor.
const char* timer_default_constructor_docstring = "Initializes the timer. After this call, elapsed == 0.\n";
651 
// Docstring for Timer.restart().
const char* timer_restart_docstring = "Restart the timer, after which elapsed == 0.\n";
654 
// Docstring for the Timer.elapsed property.
const char* timer_elapsed_docstring =
    "The time elapsed since initialization or the last restart(),\nwhichever is more recent.\n";
658 
// Docstring for the Timer.elapsed_min property.
// Fix: the first sentence was missing its terminating period, so the text
// read "...may return\nThis is...".
const char* timer_elapsed_min_docstring =
  "Returns the minimum non-zero value that elapsed may return.\n"
  "This is the resolution of the timer.\n";
662 
// Docstring for the Timer.elapsed_max property.
const char* timer_elapsed_max_docstring =
    "Return an estimate of the maximum possible value of elapsed. Note\nthat this routine may return too high a value on some systems.\n";
666 
// Docstring for the Timer.time_is_global property.
const char* timer_time_is_global_docstring =
    "Determines whether the elapsed time values are global times or\nlocal processor times.\n";
670 
671 } } } // end namespace boost::mpi::python
672