/* Copyright 2021 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

// This file is MACHINE GENERATED! Do not edit.

#include "tensorflow/c/experimental/ops/math_ops.h"

#include "tensorflow/c/eager/abstract_context.h"
#include "tensorflow/c/eager/abstract_tensor_handle.h"
#include "tensorflow/c/eager/tracing_utils.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/platform/errors.h"

using tensorflow::tracing::MaybeSetOpName;

namespace tensorflow {
namespace ops {

// Op: Mul()
// Summary: Returns x * y element-wise.
//
// Description:
//   *NOTE*: `Multiply` supports broadcasting. More about broadcasting
//   [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
Status Mul(AbstractContext* ctx, AbstractTensorHandle* const x,
           AbstractTensorHandle* const y, AbstractTensorHandle** z,
           const char* name, const char* raw_device_name) {
  AbstractOperationPtr op_ptr(ctx->CreateOperation());
  TF_RETURN_IF_ERROR(op_ptr->Reset("Mul", raw_device_name));
  TF_RETURN_IF_ERROR(MaybeSetOpName(op_ptr.get(), name));
  TF_RETURN_IF_ERROR(op_ptr->AddInput(x));
  TF_RETURN_IF_ERROR(op_ptr->AddInput(y));
  int num_retvals = 1;
  return op_ptr->Execute(absl::MakeSpan(z, 1), &num_retvals);
}
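
// A minimal usage sketch of the calling convention shared by every op in this
// file, assuming `ctx` is a valid AbstractContext* and `x` and `y` are
// existing AbstractTensorHandle* inputs (null `name` and `raw_device_name`
// are assumed to be accepted as "use the defaults"):
//
//   AbstractTensorHandle* z = nullptr;
//   Status s = Mul(ctx, x, y, &z, /*name=*/nullptr,
//                  /*raw_device_name=*/nullptr);
//   if (!s.ok()) return s;  // On success, z holds the element-wise product.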

// Op: Conj()
// Summary: Returns the complex conjugate of a complex number.
//
// Description:
//   Given a tensor `input` of complex numbers, this operation returns a tensor
//   of complex numbers that are the complex conjugate of each element in
//   `input`. The complex numbers in `input` must be of the form \\(a + bj\\),
//   where *a* is the real part and *b* is the imaginary part.
//
//   The complex conjugate returned by this operation is of the form \\(a -
//   bj\\).
//
//   For example:
//
//   ```
//   # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j]
//   tf.conj(input) ==> [-2.25 - 4.75j, 3.25 - 5.75j]
//   ```
Status Conj(AbstractContext* ctx, AbstractTensorHandle* const input,
            AbstractTensorHandle** output, const char* name,
            const char* raw_device_name) {
  AbstractOperationPtr op_ptr(ctx->CreateOperation());
  TF_RETURN_IF_ERROR(op_ptr->Reset("Conj", raw_device_name));
  TF_RETURN_IF_ERROR(MaybeSetOpName(op_ptr.get(), name));
  TF_RETURN_IF_ERROR(op_ptr->AddInput(input));
  int num_retvals = 1;
  return op_ptr->Execute(absl::MakeSpan(output, 1), &num_retvals);
}

// Op: AddV2()
// Summary: Returns x + y element-wise.
//
// Description:
//   *NOTE*: `Add` supports broadcasting. `AddN` does not. More about
//   broadcasting
//   [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
Status AddV2(AbstractContext* ctx, AbstractTensorHandle* const x,
             AbstractTensorHandle* const y, AbstractTensorHandle** z,
             const char* name, const char* raw_device_name) {
  AbstractOperationPtr op_ptr(ctx->CreateOperation());
  TF_RETURN_IF_ERROR(op_ptr->Reset("AddV2", raw_device_name));
  TF_RETURN_IF_ERROR(MaybeSetOpName(op_ptr.get(), name));
  TF_RETURN_IF_ERROR(op_ptr->AddInput(x));
  TF_RETURN_IF_ERROR(op_ptr->AddInput(y));
  int num_retvals = 1;
  return op_ptr->Execute(absl::MakeSpan(z, 1), &num_retvals);
}

// Op: MatMul()
// Summary: Multiply the matrix "a" by the matrix "b".
//
// Description:
//   The inputs must be two-dimensional matrices and the inner dimension of
//   "a" (after being transposed if transpose_a is true) must match the
//   outer dimension of "b" (after being transposed if transpose_b is
//   true).
//
//   *Note*: The default kernel implementation for MatMul on GPUs uses
//   cuBLAS.
Status MatMul(AbstractContext* ctx, AbstractTensorHandle* const a,
              AbstractTensorHandle* const b, AbstractTensorHandle** product,
              bool transpose_a, bool transpose_b, const char* name,
              const char* raw_device_name) {
  AbstractOperationPtr op_ptr(ctx->CreateOperation());
  TF_RETURN_IF_ERROR(op_ptr->Reset("MatMul", raw_device_name));
  TF_RETURN_IF_ERROR(MaybeSetOpName(op_ptr.get(), name));
  TF_RETURN_IF_ERROR(op_ptr->AddInput(a));
  TF_RETURN_IF_ERROR(op_ptr->AddInput(b));
  TF_RETURN_IF_ERROR(op_ptr->SetAttrBool("transpose_a", transpose_a));
  TF_RETURN_IF_ERROR(op_ptr->SetAttrBool("transpose_b", transpose_b));
  int num_retvals = 1;
  return op_ptr->Execute(absl::MakeSpan(product, 1), &num_retvals);
}
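
// A hedged sketch of a MatMul call with its boolean attributes, assuming `a`
// and `b` are valid handles to 2-D matrices created elsewhere; this computes
// a^T * b by transposing the first operand only:
//
//   AbstractTensorHandle* product = nullptr;
//   TF_RETURN_IF_ERROR(MatMul(ctx, a, b, &product,
//                             /*transpose_a=*/true, /*transpose_b=*/false,
//                             /*name=*/nullptr, /*raw_device_name=*/nullptr));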

// Op: Neg()
// Summary: Computes numerical negative value element-wise.
//
// Description:
//   I.e., \\(y = -x\\).
Status Neg(AbstractContext* ctx, AbstractTensorHandle* const x,
           AbstractTensorHandle** y, const char* name,
           const char* raw_device_name) {
  AbstractOperationPtr op_ptr(ctx->CreateOperation());
  TF_RETURN_IF_ERROR(op_ptr->Reset("Neg", raw_device_name));
  TF_RETURN_IF_ERROR(MaybeSetOpName(op_ptr.get(), name));
  TF_RETURN_IF_ERROR(op_ptr->AddInput(x));
  int num_retvals = 1;
  return op_ptr->Execute(absl::MakeSpan(y, 1), &num_retvals);
}

// Op: Sum()
// Summary: Computes the sum of elements across dimensions of a tensor.
//
// Description:
//   Reduces `input` along the dimensions given in `axis`. Unless
//   `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry
//   in `axis`. If `keep_dims` is true, the reduced dimensions are retained with
//   length 1.
Status Sum(AbstractContext* ctx, AbstractTensorHandle* const input,
           AbstractTensorHandle* const reduction_indices,
           AbstractTensorHandle** output, bool keep_dims, const char* name,
           const char* raw_device_name) {
  AbstractOperationPtr op_ptr(ctx->CreateOperation());
  TF_RETURN_IF_ERROR(op_ptr->Reset("Sum", raw_device_name));
  TF_RETURN_IF_ERROR(MaybeSetOpName(op_ptr.get(), name));
  TF_RETURN_IF_ERROR(op_ptr->AddInput(input));
  TF_RETURN_IF_ERROR(op_ptr->AddInput(reduction_indices));
  TF_RETURN_IF_ERROR(op_ptr->SetAttrBool("keep_dims", keep_dims));
  int num_retvals = 1;
  return op_ptr->Execute(absl::MakeSpan(output, 1), &num_retvals);
}
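
// A hedged reduction sketch: assuming `input` has shape [2, 3] and `axis` is
// an int32 handle holding [1] (both created elsewhere), the result has shape
// [2] here, or [2, 1] if keep_dims were true:
//
//   AbstractTensorHandle* total = nullptr;
//   TF_RETURN_IF_ERROR(Sum(ctx, input, axis, &total, /*keep_dims=*/false,
//                          /*name=*/nullptr, /*raw_device_name=*/nullptr));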

// Op: Sub()
// Summary: Returns x - y element-wise.
//
// Description:
//   *NOTE*: `Subtract` supports broadcasting. More about broadcasting
//   [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
Status Sub(AbstractContext* ctx, AbstractTensorHandle* const x,
           AbstractTensorHandle* const y, AbstractTensorHandle** z,
           const char* name, const char* raw_device_name) {
  AbstractOperationPtr op_ptr(ctx->CreateOperation());
  TF_RETURN_IF_ERROR(op_ptr->Reset("Sub", raw_device_name));
  TF_RETURN_IF_ERROR(MaybeSetOpName(op_ptr.get(), name));
  TF_RETURN_IF_ERROR(op_ptr->AddInput(x));
  TF_RETURN_IF_ERROR(op_ptr->AddInput(y));
  int num_retvals = 1;
  return op_ptr->Execute(absl::MakeSpan(z, 1), &num_retvals);
}

// Op: Div()
// Summary: Returns x / y element-wise.
//
// Description:
//   *NOTE*: `Div` supports broadcasting. More about broadcasting
//   [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
Status Div(AbstractContext* ctx, AbstractTensorHandle* const x,
           AbstractTensorHandle* const y, AbstractTensorHandle** z,
           const char* name, const char* raw_device_name) {
  AbstractOperationPtr op_ptr(ctx->CreateOperation());
  TF_RETURN_IF_ERROR(op_ptr->Reset("Div", raw_device_name));
  TF_RETURN_IF_ERROR(MaybeSetOpName(op_ptr.get(), name));
  TF_RETURN_IF_ERROR(op_ptr->AddInput(x));
  TF_RETURN_IF_ERROR(op_ptr->AddInput(y));
  int num_retvals = 1;
  return op_ptr->Execute(absl::MakeSpan(z, 1), &num_retvals);
}

// Op: DivNoNan()
// Summary: Returns 0 if the denominator is zero.
//
// Description:
//   *NOTE*: `DivNoNan` supports broadcasting. More about broadcasting
//   [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
Status DivNoNan(AbstractContext* ctx, AbstractTensorHandle* const x,
                AbstractTensorHandle* const y, AbstractTensorHandle** z,
                const char* name, const char* raw_device_name) {
  AbstractOperationPtr op_ptr(ctx->CreateOperation());
  TF_RETURN_IF_ERROR(op_ptr->Reset("DivNoNan", raw_device_name));
  TF_RETURN_IF_ERROR(MaybeSetOpName(op_ptr.get(), name));
  TF_RETURN_IF_ERROR(op_ptr->AddInput(x));
  TF_RETURN_IF_ERROR(op_ptr->AddInput(y));
  int num_retvals = 1;
  return op_ptr->Execute(absl::MakeSpan(z, 1), &num_retvals);
}
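
// A small worked example of the semantics: wherever `y` is zero, the quotient
// is returned as zero instead of Inf or NaN.
//
//   ```
//   # tensor 'x' is [2., 4., 1.], tensor 'y' is [1., 0., 0.]
//   DivNoNan(x, y) ==> [2., 0., 0.]
//   ```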

// Op: Exp()
// Summary: Computes exponential of x element-wise.  \\(y = e^x\\).
//
// Description:
//   This function computes the exponential of every element in the input
//   tensor, i.e. `exp(x)` or `e^(x)`, where `x` is the input tensor. `e`
//   denotes Euler's number and is approximately equal to 2.718281. The output
//   is positive for any real input.
//
//   ```python
//   x = tf.constant(2.0)
//   tf.math.exp(x) ==> 7.389056
//
//   x = tf.constant([2.0, 8.0])
//   tf.math.exp(x) ==> array([7.389056, 2980.958], dtype=float32)
//   ```
//
//   For complex numbers, the exponential value is calculated as follows:
//
//   ```
//   e^(x+iy) = e^x * e^iy = e^x * (cos y + i sin y)
//   ```
//
//   Let's consider the complex number 1+1j as an example:
//   e^1 * (cos 1 + i sin 1) = 2.7182818284590 * (0.54030230586+0.8414709848j)
//
//   ```python
//   x = tf.constant(1 + 1j)
//   tf.math.exp(x) ==> 1.4686939399158851+2.2873552871788423j
//   ```
Status Exp(AbstractContext* ctx, AbstractTensorHandle* const x,
           AbstractTensorHandle** y, const char* name,
           const char* raw_device_name) {
  AbstractOperationPtr op_ptr(ctx->CreateOperation());
  TF_RETURN_IF_ERROR(op_ptr->Reset("Exp", raw_device_name));
  TF_RETURN_IF_ERROR(MaybeSetOpName(op_ptr.get(), name));
  TF_RETURN_IF_ERROR(op_ptr->AddInput(x));
  int num_retvals = 1;
  return op_ptr->Execute(absl::MakeSpan(y, 1), &num_retvals);
}

// Op: Sqrt()
// Summary: Computes square root of x element-wise.
//
// Description:
//   I.e., \\(y = \sqrt{x} = x^{1/2}\\).
Status Sqrt(AbstractContext* ctx, AbstractTensorHandle* const x,
            AbstractTensorHandle** y, const char* name,
            const char* raw_device_name) {
  AbstractOperationPtr op_ptr(ctx->CreateOperation());
  TF_RETURN_IF_ERROR(op_ptr->Reset("Sqrt", raw_device_name));
  TF_RETURN_IF_ERROR(MaybeSetOpName(op_ptr.get(), name));
  TF_RETURN_IF_ERROR(op_ptr->AddInput(x));
  int num_retvals = 1;
  return op_ptr->Execute(absl::MakeSpan(y, 1), &num_retvals);
}

// Op: SqrtGrad()
// Summary: Computes the gradient for the sqrt of `x` wrt its input.
//
// Description:
//   Specifically, `grad = dy * 0.5 / y`, where `y = sqrt(x)`, and `dy`
//   is the corresponding input gradient.
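//
//   The 0.5 / y factor is \\(\partial y/\partial x\\) for \\(y = \sqrt{x}\\),
//   namely \\(1/(2\sqrt{x}) = 0.5/y\\); the chain rule then multiplies it by
//   the incoming gradient `dy`.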
Status SqrtGrad(AbstractContext* ctx, AbstractTensorHandle* const y,
                AbstractTensorHandle* const dy, AbstractTensorHandle** z,
                const char* name, const char* raw_device_name) {
  AbstractOperationPtr op_ptr(ctx->CreateOperation());
  TF_RETURN_IF_ERROR(op_ptr->Reset("SqrtGrad", raw_device_name));
  TF_RETURN_IF_ERROR(MaybeSetOpName(op_ptr.get(), name));
  TF_RETURN_IF_ERROR(op_ptr->AddInput(y));
  TF_RETURN_IF_ERROR(op_ptr->AddInput(dy));
  int num_retvals = 1;
  return op_ptr->Execute(absl::MakeSpan(z, 1), &num_retvals);
}

// Op: Log1p()
// Summary: Computes natural logarithm of (1 + x) element-wise.
//
// Description:
//   I.e., \\(y = \log_e (1 + x)\\).
//
//   Example:
//
//   ```python
//   x = tf.constant([0, 0.5, 1, 5])
//   tf.math.log1p(x) ==> [0., 0.4054651, 0.6931472, 1.7917595]
//   ```
Status Log1p(AbstractContext* ctx, AbstractTensorHandle* const x,
             AbstractTensorHandle** y, const char* name,
             const char* raw_device_name) {
  AbstractOperationPtr op_ptr(ctx->CreateOperation());
  TF_RETURN_IF_ERROR(op_ptr->Reset("Log1p", raw_device_name));
  TF_RETURN_IF_ERROR(MaybeSetOpName(op_ptr.get(), name));
  TF_RETURN_IF_ERROR(op_ptr->AddInput(x));
  int num_retvals = 1;
  return op_ptr->Execute(absl::MakeSpan(y, 1), &num_retvals);
}

}  // namespace ops
}  // namespace tensorflow
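
// A hedged end-to-end sketch composing the ops above to compute (a * b) + c,
// assuming `ctx`, `a`, `b`, and `c` are valid handles created elsewhere and
// that AbstractTensorHandle is reference counted via Unref():
//
//   AbstractTensorHandle* ab = nullptr;
//   TF_RETURN_IF_ERROR(tensorflow::ops::Mul(ctx, a, b, &ab, "ab", nullptr));
//   AbstractTensorHandle* out = nullptr;
//   Status s = tensorflow::ops::AddV2(ctx, ab, c, &out, "out", nullptr);
//   ab->Unref();  // Release the intermediate product once it is consumed.
//   TF_RETURN_IF_ERROR(s);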