1 /* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
2
3 Licensed under the Apache License, Version 2.0 (the "License");
4 you may not use this file except in compliance with the License.
5 You may obtain a copy of the License at
6
7 http://www.apache.org/licenses/LICENSE-2.0
8
9 Unless required by applicable law or agreed to in writing, software
10 distributed under the License is distributed on an "AS IS" BASIS,
11 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 See the License for the specific language governing permissions and
13 limitations under the License.
14 ==============================================================================*/
15
#include "tensorflow/c/eager/c_api_unified_experimental.h"

#include <cstring>
#include <memory>
#include <vector>

#include "tensorflow/c/eager/c_api.h"
#include "tensorflow/c/eager/c_api_experimental.h"
#include "tensorflow/c/eager/c_api_test_util.h"
#include "tensorflow/c/tf_datatype.h"
#include "tensorflow/c/tf_status.h"
#include "tensorflow/c/tf_status_helper.h"
#include "tensorflow/c/tf_tensor.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/test.h"
30
31 using tensorflow::Status;
32 using tensorflow::string;
33 using tensorflow::TF_StatusPtr;
34
35 namespace tensorflow {
36 namespace {
37
// Test fixture for the unified C API tests. The tests are parameterized on:
// - a string representing the tracing implementation: "mlir" or "graphdef".
// - a boolean that when true enables TFRT as the execution engine.
class UnifiedCAPI
    : public ::testing::TestWithParam<std::tuple<const char*, bool>> {
 protected:
  // Registers the tracing implementation selected by the test parameter.
  // Uses CHECK_EQ (not ASSERT) so a failed registration aborts the process:
  // continuing with the wrong tracer would make every test meaningless.
  void SetUp() override {
    TF_StatusPtr status(TF_NewStatus());
    TF_SetTracingImplementation(std::get<0>(GetParam()), status.get());
    Status s = StatusFromTF_Status(status.get());
    CHECK_EQ(errors::OK, s.code()) << s.error_message();
  }
};
51
// Executes a single eager "Add" through the unified C API and verifies that
// 2.0 + 2.0 == 4.0.
TEST_P(UnifiedCAPI, TestBasicEager) {
  std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> status(
      TF_NewStatus(), TF_DeleteStatus);
  TFE_ContextOptions* opts = TFE_NewContextOptions();
  TFE_ContextOptionsSetTfrt(opts, std::get<1>(GetParam()));
  TF_ExecutionContext* ctx = TF_NewEagerExecutionContext(opts, status.get());
  ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
  TFE_DeleteContextOptions(opts);

  // Build an abstract input tensor wrapping the eager scalar 2.0f.
  TFE_Context* eager_ctx = TF_ExecutionContextGetTFEContext(ctx, status.get());
  ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
  TFE_TensorHandle* t = TestScalarTensorHandle(eager_ctx, 2.0f);
  TF_AbstractTensor* at =
      TF_CreateAbstractTensorFromEagerTensor(t, status.get());
  ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());

  // Build an abstract "Add" operation.
  auto* op = TF_NewAbstractOp(ctx);
  TF_AbstractOpSetOpType(op, "Add", status.get());
  ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());

  // Build inputs and outputs; both inputs alias the same abstract tensor.
  TF_AbstractTensor* inputs[2] = {at, at};
  TF_OutputList* o = TF_NewOutputList();
  TF_OutputListSetNumOutputs(o, 1, status.get());
  ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());

  // Execute.
  TF_ExecuteOperation(op, 2, inputs, o, status.get());
  ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());

  // Clean up operation and inputs.
  TF_DeleteAbstractOp(op);
  TF_DeleteAbstractTensor(at);

  // Verify the results.
  ASSERT_EQ(1, TF_OutputListNumOutputs(o));
  TF_AbstractTensor* result = TF_OutputListGet(o, 0);
  TFE_TensorHandle* result_t =
      TF_AbstractTensorGetEagerTensor(result, status.get());
  ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
  TF_Tensor* result_tensor = TFE_TensorHandleResolve(result_t, status.get());
  // Check the resolve status before dereferencing the tensor data; other
  // tests in this file (e.g. TestBasicGraph) already do so.
  ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
  float* result_value = static_cast<float*>(TF_TensorData(result_tensor));
  EXPECT_EQ(*result_value, 4.0);

  TF_DeleteTensor(result_tensor);
  TF_DeleteAbstractTensor(result);
  TF_DeleteOutputList(o);
  TF_DeleteExecutionContext(ctx);
}
105
106 // MatMul Test
TEST_P(UnifiedCAPI,TestBasicEagerMatMul)107 TEST_P(UnifiedCAPI, TestBasicEagerMatMul) {
108 std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> status(
109 TF_NewStatus(), TF_DeleteStatus);
110 TFE_ContextOptions* opts = TFE_NewContextOptions();
111 TF_ExecutionContext* ctx = TF_NewEagerExecutionContext(opts, status.get());
112 ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
113 TFE_DeleteContextOptions(opts);
114
115 ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
116
117 /* Want to test simple MatMul example:
118 [[0,0], * [[0,0], = [[0,0],
119 [0,0]] [0,0]] [0,0]]
120 */
121
122 // Build an abstract input tensor.
123 int64_t dims[] = {2, 2}; // Matrices will be 2 x 2
124 int num_dims = sizeof(dims) / sizeof(dims[0]);
125
126 float vals[] = {0.0f, 0.0f, 0.0f, 0.0f};
127 TFE_Context* eager_ctx = TF_ExecutionContextGetTFEContext(ctx, status.get());
128 TFE_TensorHandle* t =
129 TestMatrixTensorHandleWithInput(eager_ctx, vals, dims, num_dims);
130
131 TF_AbstractTensor* at = TF_CreateAbstractTensorFromEagerTensor(
132 t, status.get()); // get abstract tensor
133
134 ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
135
136 // Build an abstract operation.
137 auto* op = TF_NewAbstractOp(ctx);
138 TF_AbstractOpSetOpType(op, "MatMul", status.get());
139 ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
140
141 // Build inputs and outputs.
142 TF_AbstractTensor* inputs[2] = {at, at};
143 TF_OutputList* o = TF_NewOutputList();
144 TF_OutputListSetNumOutputs(o, 1, status.get());
145 ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
146
147 // Execute.
148 TF_ExecuteOperation(op, 2, inputs, o, status.get());
149 ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
150
151 // Clean up operation and inputs.
152 TF_DeleteAbstractOp(op);
153 TF_DeleteAbstractTensor(at);
154
155 // Verify the results.
156 ASSERT_EQ(1, TF_OutputListNumOutputs(o));
157 TF_AbstractTensor* result = TF_OutputListGet(o, 0);
158 TFE_TensorHandle* result_t =
159 TF_AbstractTensorGetEagerTensor(result, status.get());
160 ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
161 TF_Tensor* result_tensor = TFE_TensorHandleResolve(result_t, status.get());
162
163 // Copy Tensor data into an array.
164 float result_data[4] = {0};
165 memcpy(&result_data[0], TF_TensorData(result_tensor),
166 TF_TensorByteSize(result_tensor));
167
168 int data_len = 4; // length of result_data
169 for (int i = 0; i < data_len; i++) {
170 EXPECT_EQ(result_data[i], 0);
171 }
172
173 TF_DeleteTensor(result_tensor);
174 TF_DeleteAbstractTensor(result);
175 TF_DeleteOutputList(o);
176 TF_DeleteExecutionContext(ctx);
177 }
178
179 // MatMul Test 2
TEST_P(UnifiedCAPI,TestBasicEagerMatMul2)180 TEST_P(UnifiedCAPI, TestBasicEagerMatMul2) {
181 std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> status(
182 TF_NewStatus(), TF_DeleteStatus);
183 TFE_ContextOptions* opts = TFE_NewContextOptions();
184 TF_ExecutionContext* ctx = TF_NewEagerExecutionContext(opts, status.get());
185 ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
186 TFE_DeleteContextOptions(opts);
187
188 ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
189
190 /* Want to test simple MatMul example with abstract tensors:
191 [[1,2], * [[5,6], = [[19,22],
192 [3,4]] [7,8]] [43,50]]
193 */
194
195 // Build 1st Matrix.
196 int64_t dims[] = {2, 2}; // Matrices will be 2 x 2
197 int num_dims = sizeof(dims) / sizeof(dims[0]);
198
199 float vals1[] = {1.0f, 2.0f, 3.0f, 4.0f};
200 TFE_Context* eager_ctx = TF_ExecutionContextGetTFEContext(ctx, status.get());
201 TFE_TensorHandle* t1 =
202 TestMatrixTensorHandleWithInput(eager_ctx, vals1, dims, num_dims);
203
204 TF_AbstractTensor* at1 = TF_CreateAbstractTensorFromEagerTensor(
205 t1, status.get()); // get abstract tensor
206 ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
207
208 // Build 2nd Matrix.
209 float vals2[] = {5.0f, 6.0f, 7.0f, 8.0f};
210 TFE_TensorHandle* t2 =
211 TestMatrixTensorHandleWithInput(eager_ctx, vals2, dims, num_dims);
212
213 TF_AbstractTensor* at2 = TF_CreateAbstractTensorFromEagerTensor(
214 t2, status.get()); // get abstract tensor
215 ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
216
217 // Build an abstract operation.
218 auto* op = TF_NewAbstractOp(ctx);
219 TF_AbstractOpSetOpType(op, "MatMul", status.get());
220 ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
221
222 // Build inputs and outputs.
223 TF_AbstractTensor* inputs[2] = {at1, at2};
224 TF_OutputList* o = TF_NewOutputList();
225 TF_OutputListSetNumOutputs(o, 1, status.get());
226 ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
227
228 // Execute.
229 TF_ExecuteOperation(op, 2, inputs, o, status.get());
230 ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
231
232 // Clean up operation and inputs.
233 TF_DeleteAbstractOp(op);
234 TF_DeleteAbstractTensor(at1);
235 TF_DeleteAbstractTensor(at2);
236
237 // Verify the results.
238 ASSERT_EQ(1, TF_OutputListNumOutputs(o));
239 TF_AbstractTensor* result = TF_OutputListGet(o, 0);
240 TFE_TensorHandle* result_t =
241 TF_AbstractTensorGetEagerTensor(result, status.get());
242 ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
243
244 TF_Tensor* result_tensor = TFE_TensorHandleResolve(result_t, status.get());
245
246 // Copy Tensor data into array.
247 float result_data[4] = {0};
248 memcpy(&result_data[0], TF_TensorData(result_tensor),
249 TF_TensorByteSize(result_tensor));
250
251 // Build expected result & verify.
252 float e_vals[] = {19.0f, 22.0f, 43.0f, 50.0f};
253
254 int data_len = 4; // length of e_vals
255 for (int i = 0; i < data_len; i++) {
256 EXPECT_EQ(result_data[i], e_vals[i]);
257 }
258
259 TF_DeleteTensor(result_tensor);
260 TF_DeleteAbstractTensor(result);
261 TF_DeleteOutputList(o);
262 TF_DeleteExecutionContext(ctx);
263 }
264
265 // MatAdd
TEST_P(UnifiedCAPI,TestBasicEagerMatAdd)266 TEST_P(UnifiedCAPI, TestBasicEagerMatAdd) {
267 std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> status(
268 TF_NewStatus(), TF_DeleteStatus);
269 TFE_ContextOptions* opts = TFE_NewContextOptions();
270 TF_ExecutionContext* ctx = TF_NewEagerExecutionContext(opts, status.get());
271 ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
272 TFE_DeleteContextOptions(opts);
273
274 ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
275
276 /* Want to test simple MatAdd example with abstract tensors:
277 [[1,2] , + [[5,6], = [[6,8],
278 [3,4] ] [7,8] ] [10,12]]
279 */
280
281 // Build 1st Matrix.
282 int64_t dims[] = {2, 2}; // Matrices will be 2 x 2
283 int num_dims = sizeof(dims) / sizeof(dims[0]);
284
285 float vals1[] = {1.0f, 2.0f, 3.0f, 4.0f};
286 TFE_Context* eager_ctx = TF_ExecutionContextGetTFEContext(ctx, status.get());
287 TFE_TensorHandle* t1 =
288 TestMatrixTensorHandleWithInput(eager_ctx, vals1, dims, num_dims);
289
290 TF_AbstractTensor* at1 = TF_CreateAbstractTensorFromEagerTensor(
291 t1, status.get()); // get abstract tensor
292 ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
293
294 // Build 2nd Matrix.
295 float vals2[] = {5.0f, 6.0f, 7.0f, 8.0f};
296 TFE_TensorHandle* t2 =
297 TestMatrixTensorHandleWithInput(eager_ctx, vals2, dims, num_dims);
298
299 TF_AbstractTensor* at2 = TF_CreateAbstractTensorFromEagerTensor(
300 t2, status.get()); // get abstract tensor
301 ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
302
303 // Build an abstract operation.
304 auto* op = TF_NewAbstractOp(ctx);
305 TF_AbstractOpSetOpType(op, "Add", status.get());
306 ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
307
308 // Build inputs and outputs.
309 TF_AbstractTensor* inputs[2] = {at1, at2};
310 TF_OutputList* o = TF_NewOutputList();
311 TF_OutputListSetNumOutputs(o, 1, status.get());
312 ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
313
314 // Execute.
315 TF_ExecuteOperation(op, 2, inputs, o, status.get());
316 ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
317
318 // Clean up operation and inputs.
319 TF_DeleteAbstractOp(op);
320 TF_DeleteAbstractTensor(at1);
321 TF_DeleteAbstractTensor(at2);
322
323 // Verify the results.
324 ASSERT_EQ(1, TF_OutputListNumOutputs(o));
325 TF_AbstractTensor* result = TF_OutputListGet(o, 0);
326 TFE_TensorHandle* result_t =
327 TF_AbstractTensorGetEagerTensor(result, status.get());
328 ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
329
330 TF_Tensor* result_tensor = TFE_TensorHandleResolve(result_t, status.get());
331
332 // Copy Tensor data into array.
333 float result_data[4] = {0};
334 memcpy(&result_data[0], TF_TensorData(result_tensor),
335 TF_TensorByteSize(result_tensor));
336
337 // Build expected result & verify.
338 float e_vals[] = {6.0f, 8.0f, 10.0f, 12.0f};
339
340 int data_len = 4; // length of e_vals
341 for (int i = 0; i < data_len; i++) {
342 EXPECT_EQ(result_data[i], e_vals[i]);
343 }
344
345 TF_DeleteTensor(result_tensor);
346 TF_DeleteAbstractTensor(result);
347 TF_DeleteOutputList(o);
348 TF_DeleteExecutionContext(ctx);
349 }
350
// Traces a "double" function (x + x) into a graph, registers it with an
// eager context, then runs it on the scalar 2.0f and expects 4.0.
TEST_P(UnifiedCAPI, TestBasicGraph) {
  std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> status(
      TF_NewStatus(), TF_DeleteStatus);

  // Start a new function / execution context.
  string fn_name = "double";
  TF_ExecutionContext* graph_ctx =
      TF_CreateFunction(fn_name.c_str(), status.get());
  ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());

  // Add one float parameter; {-1, nullptr} requests an unknown shape.
  auto* placeholder_t =
      TF_AddFunctionParameter(graph_ctx, TF_FLOAT, {-1, nullptr}, status.get());
  ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());

  // Build an abstract operation.
  auto* add_op = TF_NewAbstractOp(graph_ctx);
  TF_AbstractOpSetOpType(add_op, "Add", status.get());
  ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
  TF_AbstractOpSetOpName(add_op, "my_add", status.get());
  ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());

  // Build inputs and outputs; both inputs alias the single parameter.
  TF_AbstractTensor* inputs[2] = {placeholder_t, placeholder_t};
  TF_OutputList* add_outputs = TF_NewOutputList();
  TF_OutputListSetNumOutputs(add_outputs, 1, status.get());
  ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());

  // Execute. In a graph context this traces a node rather than computing.
  TF_ExecuteOperation(add_op, 2, inputs, add_outputs, status.get());
  ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());

  // Clean up operation and inputs.
  TF_DeleteAbstractOp(add_op);

  TF_AbstractFunction* func =
      TF_FinalizeFunction(graph_ctx, add_outputs, status.get());
  ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
  // Note: TF_OutputList does not own the underlying AbstractTensors, those
  // need to be deleted explicitly.
  TF_DeleteAbstractTensor(TF_OutputListGet(add_outputs, 0));

  // Build eager context.
  TFE_ContextOptions* opts = TFE_NewContextOptions();
  TFE_ContextOptionsSetTfrt(opts, std::get<1>(GetParam()));
  TF_ExecutionContext* eager_execution_ctx =
      TF_NewEagerExecutionContext(opts, status.get());
  ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
  TFE_DeleteContextOptions(opts);

  // Make the traced function callable from the eager context.
  TF_ExecutionContextRegisterFunction(eager_execution_ctx, func, status.get());
  ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());

  // Build the abstract op to run the function.
  TF_AbstractOp* fn_op = TF_NewAbstractOp(eager_execution_ctx);
  TF_AbstractOpSetOpType(fn_op, fn_name.c_str(), status.get());
  ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());

  // Build an abstract input tensor.
  TFE_Context* eager_ctx =
      TF_ExecutionContextGetTFEContext(eager_execution_ctx, status.get());
  ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
  TFE_TensorHandle* input_eager = TestScalarTensorHandle(eager_ctx, 2.0f);
  TF_AbstractTensor* input_t =
      TF_CreateAbstractTensorFromEagerTensor(input_eager, status.get());
  ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());

  // Run the traced function; add_outputs is reused to receive the result.
  TF_ExecuteOperation(fn_op, 1, &input_t, add_outputs, status.get());
  ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());

  ASSERT_EQ(1, TF_OutputListNumOutputs(add_outputs));
  TF_AbstractTensor* final_result = TF_OutputListGet(add_outputs, 0);
  TFE_TensorHandle* final =
      TF_AbstractTensorGetEagerTensor(final_result, status.get());
  ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
  TF_Tensor* f_t = TFE_TensorHandleResolve(final, status.get());
  ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
  float* f_value = static_cast<float*>(TF_TensorData(f_t));
  ASSERT_EQ(*f_value, 4.0);  // double(2.0) == 4.0

  TF_DeleteOutputList(add_outputs);
  TF_DeleteAbstractOp(fn_op);
  TF_DeleteAbstractTensor(input_t);
  TF_DeleteAbstractTensor(final_result);
  TF_DeleteAbstractTensor(placeholder_t);
  TF_DeleteTensor(f_t);
  TF_DeleteAbstractFunction(func);

  TF_DeleteExecutionContext(eager_execution_ctx);
}
440
441 // Graph Tracing for MatMul
TEST_P(UnifiedCAPI,TestBasicGraphMatMul)442 TEST_P(UnifiedCAPI, TestBasicGraphMatMul) {
443 std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> status(
444 TF_NewStatus(), TF_DeleteStatus);
445
446 // Start a new function / execution context.
447 string fn_name = "matrix_multiply";
448 TF_ExecutionContext* graph_ctx =
449 TF_CreateFunction(fn_name.c_str(), status.get());
450 ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
451
452 auto* placeholder_t =
453 TF_AddFunctionParameter(graph_ctx, TF_FLOAT, {-1, nullptr}, status.get());
454 ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
455
456 // Build an abstract operation.
457 auto* matmul_op = TF_NewAbstractOp(graph_ctx);
458 TF_AbstractOpSetOpType(matmul_op, "MatMul", status.get());
459 ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
460 TF_AbstractOpSetOpName(matmul_op, "my_matmul", status.get());
461 ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
462
463 // Build inputs and outputs.
464 TF_AbstractTensor* inputs[2] = {placeholder_t, placeholder_t};
465 TF_OutputList* mm_outputs = TF_NewOutputList();
466 TF_OutputListSetNumOutputs(mm_outputs, 1, status.get());
467 ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
468
469 // Execute.
470 TF_ExecuteOperation(matmul_op, 2, inputs, mm_outputs, status.get());
471 ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
472
473 // Clean up operation and inputs.
474 TF_DeleteAbstractOp(matmul_op);
475
476 TF_AbstractFunction* func =
477 TF_FinalizeFunction(graph_ctx, mm_outputs, status.get());
478 ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
479
480 /* Now that the graph is built, test graph implementation on matmul example:
481 [[1,1] , * [[1,1] , = [[2,2],
482 [1,1]] [1,1]] [2,2]]
483 */
484
485 // Build eager context.
486 TFE_ContextOptions* opts = TFE_NewContextOptions();
487 TF_ExecutionContext* eager_execution_ctx =
488 TF_NewEagerExecutionContext(opts, status.get());
489 ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
490 TFE_DeleteContextOptions(opts);
491
492 TF_ExecutionContextRegisterFunction(eager_execution_ctx, func, status.get());
493 ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
494
495 // Build the abstract op to run the function.
496 TF_AbstractOp* fn_op = TF_NewAbstractOp(eager_execution_ctx);
497 TF_AbstractOpSetOpType(fn_op, fn_name.c_str(), status.get());
498 ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
499
500 // Build an abstract input tensor.
501 TFE_Context* eager_ctx =
502 TF_ExecutionContextGetTFEContext(eager_execution_ctx, status.get());
503
504 float vals[] = {1.0f, 1.0f, 1.0f, 1.0f};
505 int64_t dims[] = {2, 2}; // Matrices will be 2 x 2
506 int num_dims = sizeof(dims) / sizeof(dims[0]);
507
508 TFE_TensorHandle* input_eager =
509 TestMatrixTensorHandleWithInput(eager_ctx, vals, dims, num_dims);
510 TF_AbstractTensor* input_t =
511 TF_CreateAbstractTensorFromEagerTensor(input_eager, status.get());
512 ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
513
514 TF_OutputListSetNumOutputs(mm_outputs, 1, status.get());
515 ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
516 TF_ExecuteOperation(fn_op, 1, &input_t, mm_outputs, status.get());
517 ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
518
519 ASSERT_EQ(1, TF_OutputListNumOutputs(mm_outputs));
520 TF_AbstractTensor* final_result = TF_OutputListGet(mm_outputs, 0);
521 TFE_TensorHandle* final =
522 TF_AbstractTensorGetEagerTensor(final_result, status.get());
523 ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
524 TF_Tensor* f_t = TFE_TensorHandleResolve(final, status.get());
525 ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
526
527 float result_data[4] = {0};
528 memcpy(&result_data[0], TF_TensorData(f_t), TF_TensorByteSize(f_t));
529
530 int data_len = 4;
531 for (int i = 0; i < data_len; i++) {
532 ASSERT_EQ(result_data[i], 2.0f);
533 }
534
535 TF_DeleteAbstractTensor(final_result);
536 TF_DeleteOutputList(mm_outputs);
537 TF_DeleteAbstractTensor(placeholder_t);
538 TF_DeleteAbstractOp(fn_op);
539 TF_DeleteAbstractTensor(input_t);
540 TF_DeleteTensor(f_t);
541 TF_DeleteAbstractFunction(func);
542
543 TF_DeleteExecutionContext(eager_execution_ctx);
544 }
545
// Traces a function with two outputs (a + b, b + b), then executes it eagerly
// with a = 2.0, b = 3.0 and expects the results 5.0 and 6.0.
TEST_P(UnifiedCAPI, TestMultiOutputGraph) {
  std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> status(
      TF_NewStatus(), TF_DeleteStatus);
  TF_Status* s = status.get();  // shorthand; `status` retains ownership

  // Start a new function / execution context.
  string fn_name = "two_adds";
  TF_ExecutionContext* graph_ctx = TF_CreateFunction(fn_name.c_str(), s);
  ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);

  // Two float parameters; {-1, nullptr} requests an unknown shape.
  auto* arg0 = TF_AddFunctionParameter(graph_ctx, TF_FLOAT, {-1, nullptr}, s);
  ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
  auto* arg1 = TF_AddFunctionParameter(graph_ctx, TF_FLOAT, {-1, nullptr}, s);
  ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);

  // Create a first "Add" computing `arg0 + arg1`.
  TF_AbstractTensor* add_output1;
  {
    // Build an abstract operation, inputs and output.
    auto* add_op = TF_NewAbstractOp(graph_ctx);
    TF_AbstractOpSetOpType(add_op, "Add", s);
    ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
    TF_AbstractOpSetOpName(add_op, "my_add", s);
    ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
    TF_AbstractTensor* inputs[2] = {arg0, arg1};
    TF_OutputList* add_outputs = TF_NewOutputList();
    TF_OutputListSetNumOutputs(add_outputs, 1, status.get());
    ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
    // Trace the operation now (create a node in the graph).
    TF_ExecuteOperation(add_op, 2, inputs, add_outputs, s);
    ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
    TF_DeleteAbstractOp(add_op);
    // Extract the resulting tensor. The list does not own it, so deleting
    // the list here does not invalidate add_output1.
    add_output1 = TF_OutputListGet(add_outputs, 0);
    TF_DeleteOutputList(add_outputs);
  }

  // Same with a second "Add" computing `arg1 + arg1`.
  TF_AbstractTensor* add_output2;
  {
    // Build an abstract operation, inputs and output.
    auto* add_op = TF_NewAbstractOp(graph_ctx);
    TF_AbstractOpSetOpType(add_op, "Add", s);
    ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
    TF_AbstractOpSetOpName(add_op, "my_add", s);
    ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
    TF_AbstractTensor* inputs[2] = {arg1, arg1};
    TF_OutputList* add_outputs = TF_NewOutputList();
    TF_OutputListSetNumOutputs(add_outputs, 1, status.get());
    ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
    // Trace the operation now (create a node in the graph).
    TF_ExecuteOperation(add_op, 2, inputs, add_outputs, s);
    ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
    TF_DeleteAbstractOp(add_op);
    // Extract the resulting tensor.
    add_output2 = TF_OutputListGet(add_outputs, 0);
    TF_DeleteOutputList(add_outputs);
  }

  // The parameters are no longer needed once tracing referenced them.
  TF_DeleteAbstractTensor(arg0);
  TF_DeleteAbstractTensor(arg1);

  // Finalize the function by providing the returned values.
  TF_AbstractFunction* func;
  {
    // We want to return the output of both add operations, create a new list
    // and populate it.
    TF_OutputList* func_outputs = TF_NewOutputList();
    TF_OutputListPushBack(func_outputs, add_output1, s);
    ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
    TF_OutputListPushBack(func_outputs, add_output2, s);
    ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
    func = TF_FinalizeFunction(graph_ctx, func_outputs, s);
    ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
    TF_DeleteAbstractTensor(add_output1);
    TF_DeleteAbstractTensor(add_output2);
    TF_DeleteOutputList(func_outputs);
  }

  /**
   * We traced so far this function:
   *
   *   def two_adds(a, b):
   *     my_add1 = a + b
   *     my_add2 = b + b
   *     return my_add1, my_add2
   *
   * Now we will execute this function with an eager context:
   *
   *   output1, output2 = two_adds(2.0, 3.0)
   *
   * and check that we got 5.0 and 6.0 as results.
   */

  // Build eager context.
  TFE_ContextOptions* opts = TFE_NewContextOptions();
  TFE_ContextOptionsSetTfrt(opts, std::get<1>(GetParam()));
  TF_ExecutionContext* eager_execution_ctx =
      TF_NewEagerExecutionContext(opts, s);
  ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
  TFE_DeleteContextOptions(opts);

  // Make the traced function callable from the eager context.
  TF_ExecutionContextRegisterFunction(eager_execution_ctx, func, s);
  ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);

  // Build the abstract op to run the function.
  TF_AbstractOp* fn_op = TF_NewAbstractOp(eager_execution_ctx);
  TF_AbstractOpSetOpType(fn_op, fn_name.c_str(), s);
  ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);

  // Build two abstract input tensors as function arguments.
  std::vector<TF_AbstractTensor*> func_args;
  {
    TFE_Context* eager_ctx =
        TF_ExecutionContextGetTFEContext(eager_execution_ctx, status.get());
    ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
    TFE_TensorHandle* input_eager = TestScalarTensorHandle(eager_ctx, 2.0f);
    func_args.push_back(TF_CreateAbstractTensorFromEagerTensor(input_eager, s));
    ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
    input_eager = TestScalarTensorHandle(eager_ctx, 3.0f);
    func_args.push_back(TF_CreateAbstractTensorFromEagerTensor(input_eager, s));
    ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
  }

  // Run the traced function and collect its two outputs.
  TF_OutputList* func_outputs = TF_NewOutputList();
  TF_OutputListSetNumOutputs(func_outputs, 2, s);
  ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
  TF_ExecuteOperation(fn_op, func_args.size(), func_args.data(), func_outputs,
                      s);
  ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
  TF_DeleteAbstractOp(fn_op);
  for (TF_AbstractTensor* t : func_args) TF_DeleteAbstractTensor(t);

  // Resolve both outputs into host floats.
  ASSERT_EQ(2, TF_OutputListNumOutputs(func_outputs));
  float results[2];
  for (int idx = 0; idx < 2; ++idx) {
    TF_AbstractTensor* result = TF_OutputListGet(func_outputs, idx);
    TFE_TensorHandle* handle = TF_AbstractTensorGetEagerTensor(result, s);
    ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
    TF_Tensor* f_t = TFE_TensorHandleResolve(handle, s);
    ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
    results[idx] = *static_cast<float*>(TF_TensorData(f_t));
    TF_DeleteTensor(f_t);
  }
  ASSERT_EQ(results[0], 5.0);  // 2.0 + 3.0
  ASSERT_EQ(results[1], 6.0);  // 3.0 + 3.0

  // The list does not own the output tensors; delete them explicitly.
  for (int idx = 0; idx < 2; ++idx) {
    TF_AbstractTensor* result = TF_OutputListGet(func_outputs, idx);
    TF_DeleteAbstractTensor(result);
  }
  TF_DeleteOutputList(func_outputs);
  TF_DeleteExecutionContext(eager_execution_ctx);
  TF_DeleteAbstractFunction(func);
}
701
TEST_P(UnifiedCAPI,TestMultiOutputGraphMatMul)702 TEST_P(UnifiedCAPI, TestMultiOutputGraphMatMul) {
703 std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> status(
704 TF_NewStatus(), TF_DeleteStatus);
705 TF_Status* s = status.get();
706
707 // Start a new function / execution context.
708 string fn_name = "two_adds_and_matmul";
709 TF_ExecutionContext* graph_ctx = TF_CreateFunction(fn_name.c_str(), s);
710 ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
711
712 auto* arg0 = TF_AddFunctionParameter(graph_ctx, TF_FLOAT, {-1, nullptr}, s);
713 ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
714 auto* arg1 = TF_AddFunctionParameter(graph_ctx, TF_FLOAT, {-1, nullptr}, s);
715 ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
716
717 // Create a first "Add" computing `arg0 + arg1`.
718 TF_AbstractTensor* add_output1;
719 {
720 // Build an abstract operation, inputs and output.
721 auto* add_op = TF_NewAbstractOp(graph_ctx);
722 TF_AbstractOpSetOpType(add_op, "Add", s);
723 ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
724 TF_AbstractOpSetOpName(add_op, "my_add1", s);
725 ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
726 TF_AbstractTensor* inputs[2] = {arg0, arg1};
727 TF_OutputList* add_outputs = TF_NewOutputList();
728 TF_OutputListSetNumOutputs(add_outputs, 1, status.get());
729 ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
730
731 // Trace the operation now (create a node in the graph).
732 TF_ExecuteOperation(add_op, 2, inputs, add_outputs, s);
733 ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
734 TF_DeleteAbstractOp(add_op);
735
736 // Extract the resulting tensor.
737 add_output1 = TF_OutputListGet(add_outputs, 0);
738 TF_DeleteOutputList(add_outputs);
739 }
740
741 // Same with a second "Add" computing `arg1 + arg1`.
742 TF_AbstractTensor* add_output2;
743 {
744 // Build an abstract operation, inputs and output.
745 auto* add_op = TF_NewAbstractOp(graph_ctx);
746 TF_AbstractOpSetOpType(add_op, "Add", s);
747 ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
748 TF_AbstractOpSetOpName(add_op, "my_add2", s);
749 ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
750 TF_AbstractTensor* inputs[2] = {arg1, arg1};
751 TF_OutputList* add_outputs = TF_NewOutputList();
752 TF_OutputListSetNumOutputs(add_outputs, 1, status.get());
753 ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
754
755 // Trace the operation now (create a node in the graph).
756 TF_ExecuteOperation(add_op, 2, inputs, add_outputs, s);
757 ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
758 TF_DeleteAbstractOp(add_op);
759
760 // Extract the resulting tensor.
761 add_output2 = TF_OutputListGet(add_outputs, 0);
762 TF_DeleteOutputList(add_outputs);
763 }
764
765 // 3rd Output will be Matrix Multiplication of add_output1 and add_output2
766 TF_AbstractTensor* mm_output;
767 {
768 // Build an abstract operation, inputs and output.
769 auto* mm_op = TF_NewAbstractOp(graph_ctx);
770 TF_AbstractOpSetOpType(mm_op, "MatMul", s);
771 ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
772 TF_AbstractOpSetOpName(mm_op, "mm", s);
773 ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
774 TF_AbstractTensor* inputs[2] = {add_output1, add_output2};
775 TF_OutputList* mm_outputs = TF_NewOutputList();
776 TF_OutputListSetNumOutputs(mm_outputs, 1, status.get());
777 ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
778
779 // Trace the operation now (create a node in the graph).
780 TF_ExecuteOperation(mm_op, 2, inputs, mm_outputs, s);
781 ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
782 TF_DeleteAbstractOp(mm_op);
783
784 // Extract the resulting tensor.
785 mm_output = TF_OutputListGet(mm_outputs, 0);
786 TF_DeleteOutputList(mm_outputs);
787 }
788
789 // Finalize the function by providing the returned values.
790 TF_AbstractFunction* func;
791 {
792 // We want to return the output of both add operations and MatMul operation,
793 // create a new list and populate it.
794 TF_OutputList* func_outputs = TF_NewOutputList();
795 TF_OutputListPushBack(func_outputs, add_output1, s);
796 ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
797 TF_OutputListPushBack(func_outputs, add_output2, s);
798 ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
799 TF_OutputListPushBack(func_outputs, mm_output, s);
800 ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
801 func = TF_FinalizeFunction(graph_ctx, func_outputs, s);
802 ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
803 TF_DeleteOutputList(func_outputs);
804 }
805
806 /**
807 * We traced so far this function:
808 *
809 * def two_adds_and_mm(A, B):
810 * my_add1 = A + B
811 * my_add2 = B + B
812 * mm = tf.MatMul(my_add1,my_add2)
813 * return my_add1, my_add2, mm
814 *
815 * Now we will execute this function with an eager context:
816 *
817 * A =[[0, 1],[1, 0]]
818 * B =[[1, 0],[0, 1]]
819 *
820 * output1, output2, output3 = two_adds_and_mm(A, B)
821 *
822 * We expect outputs:
823 *
824 * output1 = [[1, 1],[1, 1]]
825 * output2 = [[2, 0],[0, 2]]
826 * output3 = [[2, 2],[2, 2]]
827 *
828 */
829
830 // Build eager context.
831 TFE_ContextOptions* opts = TFE_NewContextOptions();
832 TF_ExecutionContext* eager_execution_ctx =
833 TF_NewEagerExecutionContext(opts, s);
834 ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
835 TFE_DeleteContextOptions(opts);
836
837 TF_ExecutionContextRegisterFunction(eager_execution_ctx, func, s);
838 ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
839
840 // Build the abstract op to run the function.
841 TF_AbstractOp* fn_op = TF_NewAbstractOp(eager_execution_ctx);
842 TF_AbstractOpSetOpType(fn_op, fn_name.c_str(), s);
843 ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
844
845 // Build two abstract input tensors as function arguments.
846 std::vector<TF_AbstractTensor*> func_args;
847 {
848 TFE_Context* eager_ctx =
849 TF_ExecutionContextGetTFEContext(eager_execution_ctx, s);
850
851 // 1st Arg
852 float vals1[] = {0.0f, 1.0f, 1.0f, 0.0f};
853 int64_t dims[] = {2, 2}; // Matrices will be 2 x 2
854 int num_dims = sizeof(dims) / sizeof(dims[0]);
855
856 TFE_TensorHandle* input_eager =
857 TestMatrixTensorHandleWithInput(eager_ctx, vals1, dims, num_dims);
858 func_args.push_back(TF_CreateAbstractTensorFromEagerTensor(input_eager, s));
859 ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
860
861 // 2nd Arg
862 float vals2[] = {1.0f, 0.0f, 0.0f, 1.0f};
863 input_eager =
864 TestMatrixTensorHandleWithInput(eager_ctx, vals2, dims, num_dims);
865 func_args.push_back(TF_CreateAbstractTensorFromEagerTensor(input_eager, s));
866 ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
867 }
868
869 TF_OutputList* func_outputs = TF_NewOutputList();
870 TF_OutputListSetNumOutputs(func_outputs, 3, s);
871 ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
872 TF_ExecuteOperation(fn_op, func_args.size(), func_args.data(), func_outputs,
873 s);
874 ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
875 TF_DeleteAbstractOp(fn_op);
876 for (TF_AbstractTensor* t : func_args) TF_DeleteAbstractTensor(t);
877
878 ASSERT_EQ(3, TF_OutputListNumOutputs(func_outputs));
879
880 float expected_outputs[3][4] = {{1.0f, 1.0f, 1.0f, 1.0f},
881 {2.0f, 0.0f, 0.0f, 2.0f},
882 {2.0f, 2.0f, 2.0f, 2.0f}};
883
884 float result_data[4];
885 for (int idx = 0; idx < 3; ++idx) {
886 TF_AbstractTensor* result = TF_OutputListGet(func_outputs, idx);
887 TFE_TensorHandle* handle = TF_AbstractTensorGetEagerTensor(result, s);
888 ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
889 TF_Tensor* f_t = TFE_TensorHandleResolve(handle, s);
890 ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
891
892 memcpy(&result_data[0], TF_TensorData(f_t), TF_TensorByteSize(f_t));
893
894 // Verify results for each output
895 for (int j = 0; j < 4; j++) {
896 ASSERT_EQ(result_data[j], expected_outputs[idx][j]);
897 }
898
899 TF_DeleteTensor(f_t);
900 }
901
902 // Free memory associated with add and MatMul outputs
903 for (int idx = 0; idx < 3; ++idx) {
904 TF_AbstractTensor* result = TF_OutputListGet(func_outputs, idx);
905 TF_DeleteAbstractTensor(result);
906 }
907
908 TF_DeleteOutputList(func_outputs);
909 TF_DeleteExecutionContext(eager_execution_ctx);
910 TF_DeleteAbstractFunction(func);
911 }
912
TEST_P(UnifiedCAPI, TF_ExecutionContextToFunctionWithEagerContextRaises) {
  // Finalizing a function only makes sense on a tracing context; doing so on
  // an eager context must fail with INVALID_ARGUMENT and yield no function.
  std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> s(TF_NewStatus(),
                                                           TF_DeleteStatus);
  TFE_ContextOptions* options = TFE_NewContextOptions();
  TFE_ContextOptionsSetTfrt(options, std::get<1>(GetParam()));
  TF_ExecutionContext* eager_ctx =
      TF_NewEagerExecutionContext(options, s.get());
  ASSERT_EQ(TF_OK, TF_GetCode(s.get())) << TF_Message(s.get());
  TFE_DeleteContextOptions(options);

  // Attempt the invalid finalization.
  TF_AbstractFunction* func = TF_FinalizeFunction(eager_ctx, nullptr, s.get());
  ASSERT_EQ(nullptr, func);
  ASSERT_EQ(TF_INVALID_ARGUMENT, TF_GetCode(s.get()));
  TF_DeleteExecutionContext(eager_ctx);
}
927
TEST_P(UnifiedCAPI, TF_AbstractOpSetOpTypeAfterFinishingOpBuildingRaises) {
  // Once an abstract op's type and name have been set, setting the type
  // again is a precondition violation and must be reported as such.
  std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> s(TF_NewStatus(),
                                                           TF_DeleteStatus);
  TF_ExecutionContext* tracing_ctx = TF_CreateFunction("some_func", s.get());
  ASSERT_EQ(TF_OK, TF_GetCode(s.get())) << TF_Message(s.get());

  // Fully build a placeholder op: type and name each set once, successfully.
  TF_AbstractOp* ph_op = TF_NewAbstractOp(tracing_ctx);
  TF_AbstractOpSetOpType(ph_op, "Placeholder", s.get());
  ASSERT_EQ(TF_OK, TF_GetCode(s.get())) << TF_Message(s.get());
  TF_AbstractOpSetOpName(ph_op, "my_ph", s.get());
  ASSERT_EQ(TF_OK, TF_GetCode(s.get())) << TF_Message(s.get());

  // A second SetOpType on the finished op must fail.
  TF_AbstractOpSetOpType(ph_op, "Placeholder", s.get());
  ASSERT_EQ(TF_FAILED_PRECONDITION, TF_GetCode(s.get()));

  TF_DeleteAbstractOp(ph_op);
  TF_DeleteExecutionContext(tracing_ctx);
}
948
TEST_P(UnifiedCAPI, TF_AbstractOpSetOpNameAfterFinishingOpBuildingRaises) {
  // Mirror of the SetOpType test above: once the op is fully built, setting
  // the op *name* a second time must fail with FAILED_PRECONDITION.
  std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> s(TF_NewStatus(),
                                                           TF_DeleteStatus);
  TF_ExecutionContext* tracing_ctx = TF_CreateFunction("some_func", s.get());
  ASSERT_EQ(TF_OK, TF_GetCode(s.get())) << TF_Message(s.get());

  // Fully build a placeholder op: type and name each set once, successfully.
  TF_AbstractOp* ph_op = TF_NewAbstractOp(tracing_ctx);
  TF_AbstractOpSetOpType(ph_op, "Placeholder", s.get());
  ASSERT_EQ(TF_OK, TF_GetCode(s.get())) << TF_Message(s.get());
  TF_AbstractOpSetOpName(ph_op, "my_ph", s.get());
  ASSERT_EQ(TF_OK, TF_GetCode(s.get())) << TF_Message(s.get());

  // A second SetOpName on the finished op must fail.
  TF_AbstractOpSetOpName(ph_op, "my_ph", s.get());
  ASSERT_EQ(TF_FAILED_PRECONDITION, TF_GetCode(s.get()));

  TF_DeleteAbstractOp(ph_op);
  TF_DeleteExecutionContext(tracing_ctx);
}
969
TEST_P(UnifiedCAPI, TF_AbstractTensorGetEagerTensorOnGraphTensorRaises) {
  // Asking a graph (tracing) tensor for its eager tensor is invalid and must
  // report INVALID_ARGUMENT.
  std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> status(
      TF_NewStatus(), TF_DeleteStatus);
  TF_ExecutionContext* graph_ctx = TF_CreateFunction("some_func", status.get());
  ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());

  // Add a placeholder to the graph.
  auto placeholder_t =
      TF_AddFunctionParameter(graph_ctx, TF_FLOAT, {-1, nullptr}, status.get());
  // Verify parameter creation succeeded (consistent with the sibling tests),
  // so the INVALID_ARGUMENT below can only come from the accessor under test.
  ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());

  // This should fail: the tensor belongs to a tracing context, not eager.
  TF_AbstractTensorGetEagerTensor(placeholder_t, status.get());
  ASSERT_EQ(TF_INVALID_ARGUMENT, TF_GetCode(status.get()));

  TF_DeleteAbstractTensor(placeholder_t);
  TF_DeleteExecutionContext(graph_ctx);
}
985
TEST_P(UnifiedCAPI, TF_ExecutionContextGetTFEContextFromFunctionContextRaises) {
  // A tracing (function-building) context has no underlying TFE_Context;
  // requesting one must fail with INVALID_ARGUMENT.
  std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> s(TF_NewStatus(),
                                                           TF_DeleteStatus);
  TF_ExecutionContext* tracing_ctx = TF_CreateFunction("some_func", s.get());
  ASSERT_EQ(TF_OK, TF_GetCode(s.get())) << TF_Message(s.get());

  // Attempt the invalid query.
  TF_ExecutionContextGetTFEContext(tracing_ctx, s.get());
  ASSERT_EQ(TF_INVALID_ARGUMENT, TF_GetCode(s.get()));

  TF_DeleteExecutionContext(tracing_ctx);
}
997
998 // The above tests are run for a combination of:
999 // - graphdef and MLIR tracing engine
1000 // - Using TFRT as an execution runtime (true == enable TFRT)
#ifdef PLATFORM_GOOGLE
// Internal builds: exercise both tracing engines with TFRT both on and off.
INSTANTIATE_TEST_SUITE_P(Tracing, UnifiedCAPI,
                         ::testing::Combine(::testing::Values("graphdef",
                                                              "mlir"),
                         ::testing::Values(true, false)));
#else
// Open-source builds: TFRT is unavailable, so only run with TFRT disabled.
INSTANTIATE_TEST_SUITE_P(Tracing, UnifiedCAPI,
                         ::testing::Combine(::testing::Values("graphdef",
                                                              "mlir"),
                         ::testing::Values(false)));
#endif
1012
1013 } // namespace
1014 } // namespace tensorflow
1015