• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
2 
3 Licensed under the Apache License, Version 2.0 (the "License");
4 you may not use this file except in compliance with the License.
5 You may obtain a copy of the License at
6 
7     http://www.apache.org/licenses/LICENSE-2.0
8 
9 Unless required by applicable law or agreed to in writing, software
10 distributed under the License is distributed on an "AS IS" BASIS,
11 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 See the License for the specific language governing permissions and
13 limitations under the License.
14 ==============================================================================*/
15 
16 #ifndef TENSORFLOW_C_EAGER_C_API_H_
17 #define TENSORFLOW_C_EAGER_C_API_H_
18 
19 // C API extensions to experiment with eager execution of kernels.
20 // WARNING: Unlike tensorflow/c/c_api.h, the API here is not guaranteed to be
21 // stable and can change without notice.
22 
23 #include "tensorflow/c/c_api.h"
24 
25 // Macro to control visibility of exported symbols in the shared library (.so,
26 // .dylib, .dll).
27 // This duplicates the TF_EXPORT macro definition in
28 // tensorflow/core/platform/macros.h in order to keep this .h file independent
29 // of any other includes.
30 #ifdef SWIG
31 #define TF_CAPI_EXPORT
32 #else
33 #if defined(_WIN32)
34 #ifdef TF_COMPILE_LIBRARY
35 #define TF_CAPI_EXPORT __declspec(dllexport)
36 #else
37 #define TF_CAPI_EXPORT __declspec(dllimport)
38 #endif  // TF_COMPILE_LIBRARY
39 #else
40 #define TF_CAPI_EXPORT __attribute__((visibility("default")))
41 #endif  // _WIN32
42 #endif  // SWIG
43 
44 #ifdef __cplusplus
45 extern "C" {
46 #endif
47 
48 typedef struct TFE_ContextOptions TFE_ContextOptions;
49 
50 // Return a new options object.
51 TF_CAPI_EXPORT extern TFE_ContextOptions* TFE_NewContextOptions(void);
52 
53 // Set the config in TF_ContextOptions.options.
54 // config should be a serialized tensorflow.ConfigProto proto.
55 // If config was not parsed successfully as a ConfigProto, record the
56 // error information in *status.
57 TF_CAPI_EXPORT extern void TFE_ContextOptionsSetConfig(
58     TFE_ContextOptions* options, const void* proto, size_t proto_len,
59     TF_Status* status);
60 
// Controls how to act when we try to run an operation on a given device but
// some input tensors are not on that device.
// NOTE(review): the numeric values look like part of the stable C ABI — do
// not renumber existing entries; confirm before changing.
typedef enum TFE_ContextDevicePlacementPolicy {
  // Running operations with input tensors on the wrong device will fail.
  TFE_DEVICE_PLACEMENT_EXPLICIT = 0,
  // Copy the tensor to the right device but log a warning.
  TFE_DEVICE_PLACEMENT_WARN = 1,
  // Silently copy the tensor, which has a performance cost since the operation
  // will be blocked till the copy completes. This is the default placement
  // policy.
  TFE_DEVICE_PLACEMENT_SILENT = 2,
  // Placement policy which silently copies int32 tensors but not other dtypes.
  TFE_DEVICE_PLACEMENT_SILENT_FOR_INT32 = 3,
} TFE_ContextDevicePlacementPolicy;
75 
76 // Sets the default execution mode (sync/async). Note that this can be
77 // overridden per thread using TFE_ContextSetAsyncForThread.
78 TF_CAPI_EXPORT extern void TFE_ContextOptionsSetAsync(TFE_ContextOptions*,
79                                                       unsigned char enable);
80 
81 TF_CAPI_EXPORT extern void TFE_ContextOptionsSetDevicePlacementPolicy(
82     TFE_ContextOptions*, TFE_ContextDevicePlacementPolicy);
83 
84 // Destroy an options object.
85 TF_CAPI_EXPORT extern void TFE_DeleteContextOptions(TFE_ContextOptions*);
86 
87 // "Context" under which operations/functions are executed. It encapsulates
88 // things like the available devices, resource manager etc.
89 //
90 // TODO(ashankar): Merge with TF_Session?
91 typedef struct TFE_Context TFE_Context;
92 
93 TF_CAPI_EXPORT extern TFE_Context* TFE_NewContext(
94     const TFE_ContextOptions* opts, TF_Status* status);
95 TF_CAPI_EXPORT extern void TFE_DeleteContext(TFE_Context* ctx);
96 TF_CAPI_EXPORT extern TF_DeviceList* TFE_ContextListDevices(TFE_Context* ctx,
97                                                             TF_Status* status);
98 
99 // Clears the internal caches in the TFE context. Useful when reseeding random
100 // ops.
101 TF_CAPI_EXPORT extern void TFE_ContextClearCaches(TFE_Context* ctx,
102                                                   TF_Status* status);
103 
104 // Sets a thread-local device placement policy. After this call, other calls to
105 // TFE_Execute in the same thread will use the device policy specified here
106 // instead of the device policy used to construct the context. This has no
107 // effect on the device policy used by other program threads.
108 TF_CAPI_EXPORT extern void TFE_ContextSetThreadLocalDevicePlacementPolicy(
109     TFE_Context*, TFE_ContextDevicePlacementPolicy);
110 
111 // Returns the device placement policy to be used by this context in the current
112 // thread.
113 TF_CAPI_EXPORT extern TFE_ContextDevicePlacementPolicy
114 TFE_ContextGetDevicePlacementPolicy(TFE_Context*);
115 
116 // Overrides the execution mode (sync/async) for the current thread.
117 TF_CAPI_EXPORT extern void TFE_ContextSetAsyncForThread(TFE_Context*,
118                                                         unsigned char enable,
119                                                         TF_Status* status);
120 
121 // A tensorflow.ServerDef specifies remote workers (in addition to the current
122 // worker's name). Operations created on this context can then be executed on
123 // any of these remote workers by setting an appropriate device.
124 //
125 // If the following is set, all servers identified by the
126 // ServerDef must be up when the context is created.
127 TF_CAPI_EXPORT extern void TFE_ContextSetServerDef(TFE_Context* ctx,
128                                                    int keep_alive_secs,
129                                                    const void* proto,
130                                                    size_t proto_len,
131                                                    TF_Status* status);
132 
133 // Causes the calling thread to block till all ops dispatched in async mode
134 // have been executed. Note that "execution" here refers to kernel execution /
135 // scheduling of copies, etc. Similar to sync execution, it doesn't guarantee
136 // that lower level device queues (like GPU streams) have been flushed.
137 //
138 // This call may not block for execution of ops enqueued concurrently with this
139 // call.
140 TF_CAPI_EXPORT extern void TFE_ContextAsyncWait(TFE_Context*,
141                                                 TF_Status* status);
142 
143 // When an error happens, any pending operations are discarded and newly issued
144 // ops return an error. This call clears the error state and re-enables
145 // execution of newly issued ops.
146 //
147 // Note that outputs of discarded ops remain in a corrupt state and should not
148 // be used for future calls.
149 // TODO(agarwal): mark the affected handles and raise errors if they are used.
150 TF_CAPI_EXPORT extern void TFE_ContextAsyncClearError(TFE_Context*);
151 
152 // A handle to a tensor on a device.
153 //
154 // Like a TF_Tensor, a TFE_TensorHandle refers to a tensor with a value, shape,
155 // type etc. Unlike a TF_Tensor, a TFE_TensorHandle may refer to such tensors
156 // placed in memory of different devices or remote address spaces.
157 typedef struct TFE_TensorHandle TFE_TensorHandle;
158 
159 TF_CAPI_EXPORT extern TFE_TensorHandle* TFE_NewTensorHandle(TF_Tensor* t,
160                                                             TF_Status* status);
161 // Indicates that the caller will not be using `h` any more.
162 TF_CAPI_EXPORT extern void TFE_DeleteTensorHandle(TFE_TensorHandle* h);
163 TF_CAPI_EXPORT extern TF_DataType TFE_TensorHandleDataType(TFE_TensorHandle* h);
164 // This function will block till the operation that produces `h` has completed.
165 TF_CAPI_EXPORT extern int TFE_TensorHandleNumDims(TFE_TensorHandle* h,
166                                                   TF_Status* status);
167 TF_CAPI_EXPORT extern int64_t TFE_TensorHandleNumElements(TFE_TensorHandle* h,
168                                                           TF_Status* status);
169 // This function will block till the operation that produces `h` has completed.
170 TF_CAPI_EXPORT extern int64_t TFE_TensorHandleDim(TFE_TensorHandle* h,
171                                                   int dim_index,
172                                                   TF_Status* status);
173 
174 // Returns the device of the operation that produced `h`. If `h` was produced by
175 // a copy, returns the destination device of the copy. Note that the returned
176 // device name is not always the device holding the tensor handle's memory. If
177 // you want the latter, use TFE_TensorHandleBackingDeviceName. This function
178 // will block till the operation that produces `h` has completed.
179 TF_CAPI_EXPORT extern const char* TFE_TensorHandleDeviceName(
180     TFE_TensorHandle* h, TF_Status* status);
181 
182 // Returns the name of the device in whose memory `h` resides.
183 //
184 // This function will block till the operation that produces `h` has completed.
185 TF_CAPI_EXPORT extern const char* TFE_TensorHandleBackingDeviceName(
186     TFE_TensorHandle* h, TF_Status* status);
187 
188 // Return a pointer to a new TFE_TensorHandle that shares the underlying tensor
189 // with `h`. On success, `status` is set to OK. On failure, `status` reflects
190 // the error and a nullptr is returned.
191 TF_CAPI_EXPORT extern TFE_TensorHandle* TFE_TensorHandleCopySharingTensor(
192     TFE_TensorHandle* h, TF_Status* status);
193 
194 // This function will block till the operation that produces `h` has
195 // completed. The memory returned might alias the internal memory used by
196 // TensorFlow. Hence, callers should not mutate this memory (for example by
197 // modifying the memory region pointed to by TF_TensorData() on the returned
198 // TF_Tensor).
199 TF_CAPI_EXPORT extern TF_Tensor* TFE_TensorHandleResolve(TFE_TensorHandle* h,
200                                                          TF_Status* status);
201 
202 // Create a new TFE_TensorHandle with the same contents as 'h' but placed
203 // in the memory of the device name 'device_name'.
204 // If source and destination are the same device, then this creates a new handle
205 // that shares the underlying buffer. Otherwise, it currently requires at least
206 // one of the source or destination devices to be CPU (i.e., for the source or
207 // destination tensor to be placed in host memory).
208 // If async execution is enabled, the copy may be enqueued and the call will
209 // return "non-ready" handle. Else, this function returns after the copy has
210 // been done.
211 TF_CAPI_EXPORT extern TFE_TensorHandle* TFE_TensorHandleCopyToDevice(
212     TFE_TensorHandle* h, TFE_Context* ctx, const char* device_name,
213     TF_Status* status);
214 
215 // Debugging/Profiling information for TFE_TensorHandle
216 //
217 // TFE_TensorDebugInfo contains information useful for debugging and
218 // profiling tensors.
219 typedef struct TFE_TensorDebugInfo TFE_TensorDebugInfo;
220 
221 // Retrieves TFE_TensorDebugInfo for `handle`.
222 // If TFE_TensorHandleTensorDebugInfo succeeds, `status` is set to OK and caller
223 // is responsible for deleting returned TFE_TensorDebugInfo.
224 // If TFE_TensorHandleTensorDebugInfo fails, `status` is set to appropriate
225 // error and nullptr is returned. This function can block till the operation
226 // that produces `handle` has completed.
227 TF_CAPI_EXPORT extern TFE_TensorDebugInfo* TFE_TensorHandleTensorDebugInfo(
228     TFE_TensorHandle* handle, TF_Status* status);
229 
230 // Deletes `debug_info`.
231 TF_CAPI_EXPORT extern void TFE_DeleteTensorDebugInfo(
232     TFE_TensorDebugInfo* debug_info);
233 
234 // Returns the number of dimensions used to represent the tensor on its device.
235 // The number of dimensions used to represent the tensor on device can be
236 // different from the number returned by TFE_TensorHandleNumDims.
237 // The return value was current at the time of TFE_TensorDebugInfo creation.
238 TF_CAPI_EXPORT extern int TFE_TensorDebugInfoOnDeviceNumDims(
239     TFE_TensorDebugInfo* debug_info);
240 
241 // Returns the number of elements in dimension `dim_index`.
242 // Tensor representation on device can be transposed from its representation
243 // on host. The data contained in dimension `dim_index` on device
244 // can correspond to the data contained in another dimension in on-host
245 // representation. The dimensions are indexed using the standard TensorFlow
246 // major-to-minor order (slowest varying dimension first),
247 // not the XLA's minor-to-major order.
248 // On-device dimensions can be padded. TFE_TensorDebugInfoOnDeviceDim returns
249 // the number of elements in a dimension after padding.
250 // The return value was current at the time of TFE_TensorDebugInfo creation.
251 TF_CAPI_EXPORT extern int64_t TFE_TensorDebugInfoOnDeviceDim(
252     TFE_TensorDebugInfo* debug_info, int dim_index);
253 
254 // Description of the TensorFlow op to execute.
255 //
256 // Assumes that the provided 'ctx' outlives the returned TFE_Op, i.e.,
257 // TFE_DeleteOp() is called before TFE_DeleteContext().
258 //
259 // Very similar to TF_OperationDescription with some differences:
260 // (1) TF_Output or TFE_TensorHandle* as arguments to TF_AddInput,
261 //     TF_AddInputList
262 // (2) TF_ColocateWith, TF_AddControlInput etc. do not make sense.
263 // (3) Implementation detail: Avoid use of NodeBuilder/NodeDefBuilder since
264 //     the additional sanity checks there seem unnecessary;
265 typedef struct TFE_Op TFE_Op;
266 
267 TF_CAPI_EXPORT extern TFE_Op* TFE_NewOp(TFE_Context* ctx,
268                                         const char* op_or_function_name,
269                                         TF_Status* status);
270 
271 TF_CAPI_EXPORT extern void TFE_DeleteOp(TFE_Op* op);
272 
273 TF_CAPI_EXPORT extern void TFE_OpSetDevice(TFE_Op* op, const char* device_name,
274                                            TF_Status* status);
275 // The returned string remains valid throughout the lifetime of 'op'.
276 TF_CAPI_EXPORT extern const char* TFE_OpGetDevice(TFE_Op* op,
277                                                   TF_Status* status);
278 
279 // When 'enable' is set to 1, and if TensorFlow library is built with XLA
280 // support, a subsequent TFE_Execute() call on `op` will run the op via XLA.
281 //
282 // If the library is not built with XLA support, this call would be a no-op.
283 TF_CAPI_EXPORT extern void TFE_OpSetXLACompilation(TFE_Op* op,
284                                                    unsigned char enable);
285 
286 TF_CAPI_EXPORT extern void TFE_OpAddInput(TFE_Op* op, TFE_TensorHandle* input,
287                                           TF_Status* status);
288 
289 TF_CAPI_EXPORT extern void TFE_OpAddInputList(TFE_Op* op,
290                                               TFE_TensorHandle** inputs,
291                                               int num_inputs,
292                                               TF_Status* status);
293 
294 TF_CAPI_EXPORT extern TF_AttrType TFE_OpGetAttrType(TFE_Op* op,
295                                                     const char* attr_name,
296                                                     unsigned char* is_list,
297                                                     TF_Status* status);
298 // Get an attribute type given an op name; a fusion of TFE_NewOp and
299 // TFE_OpGetAttrType for use from Python without the overhead of the individual
300 // calls and memory management of TFE_Op.
301 TF_CAPI_EXPORT extern TF_AttrType TFE_OpNameGetAttrType(
302     TFE_Context* ctx, const char* op_or_function_name, const char* attr_name,
303     unsigned char* is_list, TF_Status* status);
304 
305 TF_CAPI_EXPORT extern void TFE_OpSetAttrString(TFE_Op* op,
306                                                const char* attr_name,
307                                                const void* value,
308                                                size_t length);
309 TF_CAPI_EXPORT extern void TFE_OpSetAttrInt(TFE_Op* op, const char* attr_name,
310                                             int64_t value);
311 TF_CAPI_EXPORT extern void TFE_OpSetAttrFloat(TFE_Op* op, const char* attr_name,
312                                               float value);
313 TF_CAPI_EXPORT extern void TFE_OpSetAttrBool(TFE_Op* op, const char* attr_name,
314                                              unsigned char value);
315 TF_CAPI_EXPORT extern void TFE_OpSetAttrType(TFE_Op* op, const char* attr_name,
316                                              TF_DataType value);
317 // If the number of dimensions is unknown, `num_dims` must be set to
318 // -1 and `dims` can be null.  If a dimension is unknown, the
319 // corresponding entry in the `dims` array must be -1.
320 TF_CAPI_EXPORT extern void TFE_OpSetAttrShape(TFE_Op* op, const char* attr_name,
321                                               const int64_t* dims,
322                                               const int num_dims,
323                                               TF_Status* out_status);
324 
325 // Sets the attribute attr_name to be a function specified by 'function'.
326 //
327 // TODO(ashankar,iga): Add this functionality to the C API for graph
328 // construction. Perhaps we want an AttrValueMap equivalent in the C API?
329 TF_CAPI_EXPORT extern void TFE_OpSetAttrFunction(TFE_Op* op,
330                                                  const char* attr_name,
331                                                  const TFE_Op* value);
332 
333 TF_CAPI_EXPORT void TFE_OpSetAttrFunctionName(TFE_Op* op, const char* attr_name,
334                                               const char* data, size_t length);
335 
336 TF_CAPI_EXPORT extern void TFE_OpSetAttrTensor(TFE_Op* op,
337                                                const char* attr_name,
338                                                TF_Tensor* tensor,
339                                                TF_Status* status);
340 
341 TF_CAPI_EXPORT extern void TFE_OpSetAttrStringList(TFE_Op* op,
342                                                    const char* attr_name,
343                                                    const void* const* values,
344                                                    const size_t* lengths,
345                                                    int num_values);
346 TF_CAPI_EXPORT extern void TFE_OpSetAttrIntList(TFE_Op* op,
347                                                 const char* attr_name,
348                                                 const int64_t* values,
349                                                 int num_values);
350 TF_CAPI_EXPORT extern void TFE_OpSetAttrFloatList(TFE_Op* op,
351                                                   const char* attr_name,
352                                                   const float* values,
353                                                   int num_values);
354 TF_CAPI_EXPORT extern void TFE_OpSetAttrBoolList(TFE_Op* op,
355                                                  const char* attr_name,
356                                                  const unsigned char* values,
357                                                  int num_values);
358 TF_CAPI_EXPORT extern void TFE_OpSetAttrTypeList(TFE_Op* op,
359                                                  const char* attr_name,
360                                                  const TF_DataType* values,
361                                                  int num_values);
362 TF_CAPI_EXPORT extern void TFE_OpSetAttrShapeList(
363     TFE_Op* op, const char* attr_name, const int64_t** dims,
364     const int* num_dims, int num_values, TF_Status* out_status);
365 TF_CAPI_EXPORT extern void TFE_OpSetAttrFunctionList(TFE_Op* op,
366                                                      const char* attr_name,
367                                                      const TFE_Op** value,
368                                                      int num_values);
369 
370 // Execute the operation defined by 'op' and return handles to computed
371 // tensors in `retvals`.
372 //
373 // 'retvals' must point to a pre-allocated array of TFE_TensorHandle* and
374 // '*num_retvals' should be set to the size of this array. It is an error if
375 // the size of 'retvals' is less than the number of outputs. This call sets
376 // *num_retvals to the number of outputs.
377 //
378 // If async execution is enabled, the call may simply enqueue the execution
379 // and return "non-ready" handles in `retvals`. Note that any handles contained
380 // in 'op' should not be mutated till the kernel execution actually finishes.
381 //
382 // For sync execution, if any of the inputs to `op` are not ready, this call
383 // will block till they become ready and then return when the kernel execution
384 // is done.
385 // TODO(agarwal): change num_retvals to int from int*.
386 TF_CAPI_EXPORT extern void TFE_Execute(TFE_Op* op, TFE_TensorHandle** retvals,
387                                        int* num_retvals, TF_Status* status);
388 
389 // Add a function (serialized FunctionDef protocol buffer) to ctx so
390 // that it can be invoked using TFE_Execute.
391 TF_CAPI_EXPORT extern void TFE_ContextAddFunctionDef(
392     TFE_Context* ctx, const char* serialized_function_def, size_t size,
393     TF_Status* status);
394 
395 // Adds a function (created from TF_GraphToFunction or
396 // TF_FunctionImportFunctionDef) to the context, allowing it to be executed with
397 // TFE_Execute by creating an op with the same name as the function.
398 TF_CAPI_EXPORT extern void TFE_ContextAddFunction(TFE_Context* ctx,
399                                                   TF_Function* function,
400                                                   TF_Status* status);
401 
402 // Checks whether a function is registered under `name`.
403 TF_CAPI_EXPORT unsigned char TFE_ContextHasFunction(TFE_Context* ctx,
404                                                     const char* name);
405 
406 // Enables tracing of RunMetadata on the ops executed from this context.
407 TF_CAPI_EXPORT extern void TFE_ContextEnableRunMetadata(TFE_Context* ctx);
408 
409 // Disables tracing of RunMetadata on the ops executed from this context.
410 TF_CAPI_EXPORT extern void TFE_ContextDisableRunMetadata(TFE_Context* ctx);
411 
412 // Populates the passed-in buffer with a serialized RunMetadata protocol buffer
413 // containing any run metadata information accumulated so far and clears this
414 // information.
415 // If async mode is enabled, this call blocks till all currently pending ops are
416 // done.
417 TF_CAPI_EXPORT extern void TFE_ContextExportRunMetadata(TFE_Context* ctx,
418                                                         TF_Buffer* buf,
419                                                         TF_Status* status);
420 
421 // Some TF ops need a step container to be set to limit the lifetime of some
422 // resources (mostly TensorArray and Stack, used in while loop gradients in
423 // graph mode). Calling this on a context tells it to start a step.
424 TF_CAPI_EXPORT extern void TFE_ContextStartStep(TFE_Context* ctx);
425 
426 // Ends a step. When there is no active step (that is, every started step has
427 // been ended) step containers will be cleared. Note: it is not safe to call
428 // TFE_ContextEndStep while ops which rely on the step container may be running.
429 TF_CAPI_EXPORT extern void TFE_ContextEndStep(TFE_Context* ctx);
430 
431 #ifdef __cplusplus
432 } /* end extern "C" */
433 #endif
434 
435 #ifdef __cplusplus
436 // A workaround to ease conversion to and from numpy objects and
437 // TFE_TensorHandle's.
438 //
439 // TODO(ashankar): Figure out an alternative scheme that precludes the need for
440 // these API-boundary breaking methods.
441 namespace tensorflow {
442 class Tensor;
443 }  // namespace tensorflow
444 
445 const tensorflow::Tensor* TFE_TensorHandleUnderlyingTensorInHostMemory(
446     TFE_TensorHandle* h, TF_Status* status);
447 TFE_TensorHandle* TFE_NewTensorHandle(const tensorflow::Tensor& t);
448 #endif
449 
450 #endif  // TENSORFLOW_C_EAGER_C_API_H_
451