/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/c/c_api_internal.h"
#include "tensorflow/c/eager/abstract_function.h"
#include "tensorflow/c/tf_tensor_internal.h"
#include "tensorflow/core/common_runtime/eager/context.h"
#include "tensorflow/core/common_runtime/eager/eager_operation.h"
#include "tensorflow/core/common_runtime/eager/execute.h"
#include "tensorflow/core/common_runtime/eager/placement_utils.h"
#include "tensorflow/core/common_runtime/eager/tensor_handle.h"
#include "tensorflow/core/platform/errors.h"

namespace {

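// A handle is treated as CPU-resident when its device is null or the device
// reports no GPU device info.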
bool IsCPU(tensorflow::Device* d) {
  return d == nullptr || d->tensorflow_gpu_device_info() == nullptr;
}

}  // namespace

namespace tensorflow {

// TODO(b/152902651): This should not depend on EagerContext. This can be
// resolved by storing ctx->HostCPU() in the TensorHandle class.
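// Materializes this handle's tensor on the host CPU and returns it as an
// AbstractTensorInterface. Remote handles are first copied to the local host;
// local handles on non-CPU devices are copied and cached as a host-side
// mirror. Packed handles are not supported.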
AbstractTensorInterface* TensorHandle::Resolve(Status* status) {
  *status = WaitUnknownDevice();
  if (!status->ok()) {
    return nullptr;
  }
  if (Type() == REMOTE) {
    const tensorflow::Tensor* t = nullptr;
    TensorHandle* h_cpu = nullptr;
    *status = EagerCopyToDevice(this, ctx_, &ctx_->Executor(), ctx_->HostCPU(),
                                false, &h_cpu);
    if (!status->ok()) {
      return nullptr;
    }
    *status = h_cpu->Tensor(&t);
    if (!status->ok()) {
      h_cpu->Unref();
      return nullptr;
    }
    // TODO(b/153052876): Change TF_TensorFromTensor to just return an
    // AbstractTensorInterface
    TF_Tensor* tf_tensor = TF_TensorFromTensor(*t, status);
    AbstractTensorInterface* retval = tf_tensor->tensor;
    h_cpu->Unref();
    delete tf_tensor;
    return retval;
  } else if (Type() == LOCAL) {
    tensorflow::Tensor tensor;
    if (IsCPU(device()) || HasLocalMirror(nullptr)) {
      const tensorflow::Tensor* src = nullptr;
      if (HasLocalMirror(nullptr)) {
        *status = TensorFromDevice(nullptr, &src);
      } else {
        *status = Tensor(&src);
      }
      if (!status->ok()) return nullptr;

      tensor = *src;
    } else {
      *status = CopyToDevice(*ctx_, ctx_->HostCPU(), &tensor);
      if (!status->ok()) return nullptr;

      tensorflow::Tensor mirror = tensor;
      *status = AddLocalMirror(std::move(mirror), nullptr);
      if (!status->ok()) {
        // If a mirror was added since we called HasLocalMirror then drop the
        // newly copied tensor and use the previously added mirror.
        if (status->code() != error::Code::ALREADY_EXISTS) {
          return nullptr;
        }
        const tensorflow::Tensor* src = nullptr;
        *status = TensorFromDevice(nullptr, &src);
        if (!status->ok()) return nullptr;

        tensor = *src;
      }
    }
    // TODO(b/153052876): Change TF_TensorFromTensor to just return an
    // AbstractTensorInterface
    TF_Tensor* tf_tensor = TF_TensorFromTensor(tensor, status);
    AbstractTensorInterface* retval = tf_tensor->tensor;
    delete tf_tensor;
    return retval;
  } else {
    *status = errors::InvalidArgument(
        "Resolve() is not supported on packed TensorHandles.");
    return nullptr;
  }
}

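// Copies `handle` to the device named `device_name`. If either the target
// device or the handle's current device is a custom device, the copy is
// delegated to that custom device; otherwise the handle is copied with
// EagerCopyToDevice.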
ImmediateExecutionTensorHandle* EagerContext::CopyTensorHandleToDevice(
    ImmediateExecutionTensorHandle* handle, const char* device_name,
    Status* status) {
  ImmediateExecutionTensorHandle* result = nullptr;
  Device* device;
  *status = this->FindDeviceFromName(device_name, &device);
  if (!status->ok()) {
    tensorflow::CustomDevice* dev;
    if (custom_device_op_handler_.FindCustomDeviceFromName(device_name, &dev)) {
      *status = dev->CopyTensorToDevice(handle, &result);
      if (status->ok()) {
        return result;
      }
    } else {
      *status =
          tensorflow::errors::InvalidArgument(device_name, " unknown device.");
    }
    return nullptr;
  }
  // Handle tensor handles that currently live on custom devices.
  const char* handle_device_name = handle->DeviceName(status);
  if (!status->ok()) {
    return nullptr;
  }
  tensorflow::CustomDevice* dev;
  if (custom_device_op_handler_.FindCustomDeviceFromName(handle_device_name,
                                                         &dev)) {
    *status = dev->CopyTensorFromDevice(handle, device_name, &result);
    if (status->ok()) {
      return result;
    }
    return nullptr;
  }

  // Handle regular case.
  TensorHandle* input = TensorHandleFromInterface(handle);
  *status =
      EagerCopyToDevice(input, this, &this->Executor(), device, false,
                        reinterpret_cast<tensorflow::TensorHandle**>(&result));
  if (status->ok()) {
    return result;
  }
  return nullptr;
}

// TODO(b/152902651): We unfortunately need to put this EagerContext function
// here due to a circular BUILD dep issue. If we move this to context.cc, then
// we will have the circular dependency of:
//   context -> tensor_handle -> remote_tensor_handle_data -> context
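// Wraps `t` in a TensorHandle placed on the host CPU.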
ImmediateExecutionTensorHandle* EagerContext::CreateLocalHandle(
    AbstractTensorInterface* t) {
  Tensor tensor = TensorFromInterface(t);
  return TensorHandle::CreateLocalHandle(std::move(tensor), /*d=*/HostCPU(),
                                         /*op_device=*/nullptr, this);
}

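// Wraps `t` (which is consumed) in a TensorHandle on the device named
// `d_name`, or on the host CPU if `d_name` is null. Returns nullptr if the
// device cannot be found.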
ImmediateExecutionTensorHandle* EagerContext::CreateLocalHandleFromTFTensor(
    tensorflow::Tensor& t, const char* d_name) {
  // If device name is not specified, create the TensorHandle on host cpu.
  if (d_name == nullptr)
    return TensorHandle::CreateLocalHandle(std::move(t), /*d=*/HostCPU(),
                                           /*op_device=*/nullptr, this);
  Device* d = nullptr;
  auto status = FindDeviceFromName(d_name, &d);
  if (!status.ok()) return nullptr;
  return TensorHandle::CreateLocalHandle(std::move(t), /*d=*/d,
                                         /*op_device=*/nullptr, this);
}

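// No conversion is required here: the handle is already an
// ImmediateExecutionTensorHandle.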
ImmediateExecutionTensorHandle* EagerContext::TFTensorHandleFromInterface(
    ImmediateExecutionTensorHandle* handle) {
  return handle;
}

// TODO(b/152902651): We have to keep this function here since EagerOperation
// depends on EagerContext. Thus, the context build target can't depend on
// EagerOperation.
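// Returns a newly allocated EagerOperation bound to this context.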
ImmediateExecutionOperation* EagerContext::CreateOperation() {
  return new EagerOperation(this);
}

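// Extracts the FunctionDef from `f` and adds it to this context's function
// library.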
Status EagerContext::RegisterFunction(AbstractFunction* f) {
  FunctionDef* fdef;
  TF_RETURN_IF_ERROR(f->GetFunctionDef(&fdef));
  if (!fdef) {
    return errors::InvalidArgument("GetFunctionDef returned nullptr.");
  }
  return AddFunctionDef(*fdef);
}

// TODO(b/152902651): Once we move many execute.cc functions into
// eager_operation.cc we can avoid a circular dependency between them.
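// Waits for inputs whose devices are still unknown, runs the eager placement
// heuristics to pick a device for the op, and then dispatches to EagerExecute.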
Status EagerOperation::Execute(absl::Span<AbstractTensorHandle*> retvals,
                               int* num_retvals) {
  for (ImmediateExecutionTensorHandle* handle : inputs_) {
    if (TensorHandle::classof(handle)) {
      TF_RETURN_IF_ERROR(down_cast<TensorHandle*>(handle)->WaitUnknownDevice());
    }
  }

  // Run eager placement logic.
  class Device* device = absl::get<class Device*>(Device());
  if (device == nullptr) {
    TF_RETURN_IF_ERROR(eager::MaybePinToResourceDevice(&device, *this));
  }
  if (device == nullptr && ctx_.PinSmallOpsToCPU()) {
    bool pin_to_cpu;
    TF_RETURN_IF_ERROR(eager::MaybePinSmallOpsToCpu(
        &pin_to_cpu, Name(), GetInputs(), ctx_.HostCPU()->name()));
    if (pin_to_cpu) {
      device = ctx_.HostCPU();
    }
  }

  if (device != nullptr) {
    SetDevice(device);
  }
  // At this point all inputs and outputs are TensorHandles associated with
  // physical devices.
  tensorflow::TensorHandle** retval_array =
      reinterpret_cast<tensorflow::TensorHandle**>(retvals.data());
  return EagerExecute(this, retval_array, num_retvals);
}

}  //  namespace tensorflow