/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
15 
#ifndef TENSORFLOW_PYTHON_EAGER_PYWRAP_TFE_H_
#define TENSORFLOW_PYTHON_EAGER_PYWRAP_TFE_H_

// Place `<locale>` before <Python.h> to avoid build failure in macOS.
#include <locale>

// The empty line above is on purpose as otherwise clang-format will
// automatically move <Python.h> before <locale>.
#include <Python.h>

#include "tensorflow/c/eager/c_api.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/gtl/inlined_vector.h"
29 
30 typedef tensorflow::gtl::InlinedVector<TFE_TensorHandle*, 4>
31     TFE_InputTensorHandles;
32 typedef tensorflow::gtl::InlinedVector<TFE_TensorHandle*, 2>
33     TFE_OutputTensorHandles;
34 
35 // Execute a TensorFlow operation.
36 //
37 // 'device_name': Name of the device on which to execute the operation, or NULL
38 //                for automatic selection.
39 // 'op_name': Name of the TensorFlow op to execute.
40 // 'inputs': An array of TFE_TensorHandle*'s of size 'num_inputs'. These tensors
41 //           will be provided as input to the operation.
42 // 'attrs': A Python tuple alternating names and attr values.
43 // 'outputs': A pointer to a TFE_OutputTensorHandles in which outputs will
44 //            placed. On success, its elements will be filled in and the
45 //            caller takes ownership of each returned TFE_TensorHandle.
46 //            'outputs' MUST be sized to be at least as large as the number
47 //            of tensors produced by the operation and will be resized to
48 //            the actual number of tensors produced.
49 void TFE_Py_Execute(TFE_Context* ctx, const char* device_name,
50                     const char* op_name, TFE_InputTensorHandles* inputs,
51                     PyObject* attrs, TFE_OutputTensorHandles* outputs,
52                     TF_Status* out_status);
53 
54 // Registers e as the Exception class for handling not ok Status. Returns
55 // Py_None if registration succeeds, else throws a TypeError and returns NULL.
56 //
57 // This function is not thread-safe.
58 PyObject* TFE_Py_RegisterExceptionClass(PyObject* e);
59 
60 // Registers e as the type of the ResourceVariable class.
61 // Returns Py_None if registration succeeds, else throws a TypeError and returns
62 // NULL.
63 //
64 // This function is not thread-safe.
65 PyObject* TFE_Py_RegisterResourceVariableType(PyObject* e);
66 
67 // Registers e as the VSpace to use.
68 // `vspace` must be a imperative_grad.py:VSpace named tuple.
69 PyObject* TFE_Py_RegisterVSpace(PyObject* e);
70 
71 // Registers e as the Exception to be raised when the conditions of
72 // TFE_Py_FastPathExecute_C have not been met. When this exception is set, it
73 // is a signal to the calling code that it should fall back to the safer (and
74 // more complete) code path.
75 //
76 // This function is not thread-safe.
77 PyObject* TFE_Py_RegisterFallbackExceptionClass(PyObject* e);
78 
79 // Registers e as the gradient_function.
80 // The registered function takes
81 // (op_name, attrs, num_inputs, inputs, outputs, output_gradients) and returns
82 // the input gradients. This function will not correctly be able to generate
83 // gradients for functional ops - the gradients for those ops are calculated
84 // through a different codepath (see function.py for additional information).
85 //
86 // This function is not thread-safe.
87 PyObject* TFE_Py_RegisterGradientFunction(PyObject* e);
88 
89 // Returns 0 if 'status' is TF_OK. Otherwise, raises an exception (using
90 // `exception` if not nullptr, else using the class registered via
91 // TFE_Py_RegisterExceptionClass), and returns -1.
92 int MaybeRaiseExceptionFromTFStatus(TF_Status* status, PyObject* exception);
93 
94 // Returns 0 if 'status' is ok. Otherwise, raises an exception (using
95 // `exception` if not nullptr, else using the class registered via
96 // TFE_Py_RegisterExceptionClass), and returns -1.
97 int MaybeRaiseExceptionFromStatus(const tensorflow::Status& status,
98                                   PyObject* exception);
99 
100 // Returns the string associated with the passed-in python object.
101 const char* TFE_GetPythonString(PyObject* o);
102 
103 // Returns a unique id on each call.
104 int64_t get_uid();
105 
106 // Wraps the output of get_uid as a Python Long object. Ownership is passed to
107 // the caller.
108 PyObject* TFE_Py_UID();
109 
110 // Deleter for Context objects, called from the Capsule that owns it.
111 void TFE_DeleteContextCapsule(PyObject* context);
112 
113 // Returns true if o is an instance of EagerTensor, but not a subclass. Else
114 // returns false.
115 bool EagerTensor_CheckExact(const PyObject* o);
116 
117 // Helper function to construct a new EagerTensor from a TFE_TensorHandle.
118 PyObject* EagerTensorFromHandle(TFE_TensorHandle* handle);
119 
120 // Extracts the handle inside EagerTensor object `o`. Returns nullptr on error.
121 TFE_TensorHandle* EagerTensor_Handle(const PyObject* o);
122 
123 // Creates the `EagerTensor` class by subclassing `base_class` and returns the
124 // newly created type, or nullptr on error.
125 PyObject* TFE_Py_InitEagerTensor(PyObject* base_class);
126 
127 // Sets `profiler` as the current profiler to receive callbacks about events
128 // on eager tensors. Currently, the only reported event is creation.
129 // `profiler` is expected to have a `created(self, eager_tensor)` method that
130 // takes the created tensor as its single argument.
131 // Previous profiler, if any, is unset and will not receive any more
132 // callbacks.
133 // To unset the profiler, pass Py_None as the value of `profiler`.
134 PyObject* TFE_Py_SetEagerTensorProfiler(PyObject* profiler);
135 
136 // Creates a new tape and adds it to the active set. `persistent` and
137 // `watch_accessed_variables` must be `PyBool_Type` (`Py_True` or `Py_False`).
138 PyObject* TFE_Py_TapeSetNew(PyObject* persistent,
139                             PyObject* watch_accessed_variables);
140 
141 // Removes the passed tape from the set of active tapes.
142 void TFE_Py_TapeSetRemove(PyObject* tape);
143 
144 // Adds the passed tape to the set of active tapes.
145 void TFE_Py_TapeSetAdd(PyObject* tape);
146 
147 // Returns true if the tape stack is empty.
148 PyObject* TFE_Py_TapeSetIsEmpty();
149 
150 PyObject* TFE_Py_TapeSetShouldRecord(PyObject* tensors);
151 void TFE_Py_TapeWatch(PyObject* tape, PyObject* tensor);
152 void TFE_Py_TapeSetDeleteTrace(tensorflow::int64 tensor_id);
153 
154 // Stops any gradient recording on the current thread.
155 void TFE_Py_TapeSetStopOnThread();
156 
157 // Restarts gradient recording on the current thread.
158 void TFE_Py_TapeSetRestartOnThread();
159 
160 // Records an operation in the gradient tape stack.type is a string for the
161 // operation type, used in the backprop code. output_tensors should be a list of
162 // python ops.Tensor objects. input_tensor_ids should be a list of python
163 // integers with the ids of the input tensors of the recorded
164 // operation. backward_function should be the function to be called during
165 // backprop to, given the gradients of the output tensors, produce the gradients
166 // of the input tensors.
167 void TFE_Py_TapeSetRecordOperation(PyObject* op_type, PyObject* output_tensors,
168                                    PyObject* input_tensor_ids,
169                                    PyObject* backward_function);
170 
171 // Notifies all tapes that a variable has been accessed.
172 void TFE_Py_TapeVariableAccessed(PyObject* variable);
173 
174 // Watches the given variable object on the given tape.
175 void TFE_Py_TapeWatchVariable(PyObject* tape, PyObject* variable);
176 
177 // Computes a gradient based on information recorded on the tape.`tape` must
178 // have been produced by TFE_Py_NewTape. `target` and `sources` must be python
179 // lists of Tensor objects. `output_gradients` is either None or a python list
180 // of either Tensor or None, and if not None should have the same length as
181 // target.
182 PyObject* TFE_Py_TapeGradient(PyObject* tape, PyObject* target,
183                               PyObject* sources, PyObject* output_gradients,
184                               PyObject* unconnected_gradients,
185                               TF_Status* status);
186 
187 // Execute a tensorflow operation assuming that all provided inputs are
188 // correctly formatted (i.e. EagerTensors). If it doesn't find EagerTensors,
189 // it will simply fail with a NotImplementedError.
190 //
191 // The first PyObject* is unused.
192 // The "args" PyObject* is meant to be a tuple with the following structure:
193 //  Item 1: The TFE Context
194 //  Item 2: device_name: Name of the device on which to execute the operation,
195 //          or NULL for automatic selection.
196 //  Item 3: op_name: Name of the TensorFlow op to execute.
197 //  Item 4: name: An optional name for the operation.
198 //  Item 5: List representing all callbacks to execute after successful
199 //  op execute.
200 //  Item 6 onwards: inputs - This is a list of inputs followed by a list of
201 //        attrs. It is not necessary for type attrs to be present.
202 //
203 // This is named _C since there doesn't seem to be any way to make it visible
204 // in the SWIG interface without renaming due to the use of the %native
205 // directive.
206 PyObject* TFE_Py_FastPathExecute_C(PyObject*, PyObject* args);
207 
208 // Record the gradient for a given op.
209 PyObject* TFE_Py_RecordGradient(PyObject* op_name, PyObject* inputs,
210                                 PyObject* attrs, PyObject* results,
211                                 PyObject* name);
212 
213 // Returns all variables watched by the given tape in the order those variables
214 // were created.
215 PyObject* TFE_Py_TapeWatchedVariables(PyObject* tape);
216 
217 // Returns an EagerTensor of dimension [len(`tensors`)] containing
218 // the `slice_dim`'th dimension of each tensor in `tensors`. In other words,
219 // TFE_Py_TensorShapeSlice takes a slice of dimensions of tensors in
220 // `tensors`. For example, if `tensors` contains tensors of with shapes
221 // [1, 2, 3], [4, 5], [6, 7, 8, 9], TFE_Py_TensorShapeSlice called with
222 // `slice_dim` equal to 1 will return [2, 5, 7].
223 // On error, returns nullptr and sets python exception.
224 // REQUIRES: `tensors` is a python list/tuple of EagerTensors
225 // REQUIRES: `slice_dim` is non-negative and smaller than the rank of all
226 //   tensors in `tensors`.
227 PyObject* TFE_Py_TensorShapeSlice(PyObject* tensors, int slice_dim);
228 
229 // Returns the shape of this tensor's on-device representation.
230 // The shape is represented as a Python tuple of integers.
231 PyObject* TFE_Py_TensorShapeOnDevice(PyObject* tensor);
232 
233 // Encodes the object as a tuple that is meant to be used as part of the key
234 // for the defun function cache.  If `include_tensor_ranks_only` is true,
235 // then the encoding only stores tensor ranks, and the key is
236 // agnostic to dimension sizes.  Otherwise, full tensor shape encodings are
237 // returned.
238 PyObject* TFE_Py_EncodeArg(PyObject*, bool include_tensor_ranks_only);
239 
240 void TFE_Py_EnableInteractivePythonLogging();
241 
242 #endif  // TENSORFLOW_PYTHON_EAGER_PYWRAP_TFE_H_
243