1 /* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
2
3 Licensed under the Apache License, Version 2.0 (the "License");
4 you may not use this file except in compliance with the License.
5 You may obtain a copy of the License at
6
7 http://www.apache.org/licenses/LICENSE-2.0
8
9 Unless required by applicable law or agreed to in writing, software
10 distributed under the License is distributed on an "AS IS" BASIS,
11 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 See the License for the specific language governing permissions and
13 limitations under the License.
14 ==============================================================================*/
15
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"

#include <algorithm>
#include <functional>
#include <memory>
#include <utility>

#include "tensorflow/compiler/jit/flags.h"
#include "tensorflow/compiler/jit/xla_cluster_util.h"
#include "tensorflow/compiler/tf2xla/type_util.h"
#include "tensorflow/compiler/tf2xla/xla_context.h"
#include "tensorflow/compiler/xla/client/client_library.h"
#include "tensorflow/core/common_runtime/device_factory.h"
#include "tensorflow/core/common_runtime/local_device.h"
#include "tensorflow/core/framework/device_base.h"
#include "tensorflow/core/framework/kernel_def.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/op_def_util.h"
#include "tensorflow/core/platform/mem.h"
#include "tensorflow/core/platform/stream_executor_no_cuda.h"
34
35 namespace tensorflow {
36
// Names of the XLA JIT compilation devices (the "backends" kernels compile
// for). These never appear as placeable TensorFlow devices.
const char* const DEVICE_CPU_XLA_JIT = "XLA_CPU_JIT";
const char* const DEVICE_GPU_XLA_JIT = "XLA_GPU_JIT";
// Names of the XLA device instances that are visible to TensorFlow device
// placement.
const char* const DEVICE_XLA_CPU = "XLA_CPU";
const char* const DEVICE_XLA_GPU = "XLA_GPU";
41
LaunchOpHasKernelForDevice(const DeviceType & device_type)42 static Status LaunchOpHasKernelForDevice(const DeviceType& device_type) {
43 const OpDef* op_def;
44 TF_RETURN_IF_ERROR(OpRegistry::Global()->LookUpOpDef("XlaLaunch", &op_def));
45 NodeDef node_def;
46 node_def.set_name("_XlaLaunch-op");
47 node_def.set_op("XlaLaunch");
48 string kernel_class_name;
49 TF_RETURN_IF_ERROR(FindKernelDef(device_type, node_def, /*KernelDef*/ nullptr,
50 &kernel_class_name));
51 VLOG(1) << "LaunchOpHasKernelForDevice"
52 << " kernel_class_name: " << kernel_class_name;
53 return OkStatus();
54 }
55
// All registry state is populated lazily by the registrar objects, so the
// defaulted special members suffice.
XlaOpRegistry::XlaOpRegistry() = default;
XlaOpRegistry::~XlaOpRegistry() = default;
58
59 // TODO(b/64575122) consider adding more sophisticated definitions of
60 // compatibility if needed by future use cases.
IsCompatible(const OpRegistration & x,const OpRegistration & y)61 /* static */ bool XlaOpRegistry::IsCompatible(const OpRegistration& x,
62 const OpRegistration& y) {
63 if (x.name != y.name) return true;
64 if (x.label != y.label) return true;
65 // The registrations refer to the same Op: ensures they are compatible and
66 // are restricted to different device allowlists.
67 if (x.compilation_only != y.compilation_only) {
68 LOG(WARNING) << "Registrations of " << x.name
69 << " have incompatible compilation_only settings.";
70 return false;
71 }
72 if (x.allow_resource_types != y.allow_resource_types) {
73 LOG(WARNING) << "Registrations of " << x.name
74 << " have incompatible allow_resource_types settings.";
75 return false;
76 }
77 if (x.allow_variant_types != y.allow_variant_types) {
78 LOG(WARNING) << "Registrations of " << x.name
79 << " have incompatible allow_variant_types settings.";
80 return false;
81 }
82 if (x.allow_string_type != y.allow_string_type) {
83 LOG(WARNING) << "Registrations of " << x.name
84 << " have incompatible allow_string_type settings.";
85 return false;
86 }
87 if (!x.has_device_allowlist && !y.has_device_allowlist) {
88 LOG(WARNING) << "Duplicate registrations of " << x.name
89 << "with no device allowlists.";
90 return false;
91 }
92 if (x.has_device_allowlist && y.has_device_allowlist) {
93 for (const auto& device : x.device_allowlist) {
94 if (y.device_allowlist.count(device) != 0) {
95 LOG(WARNING) << "Multiple registrations of " << x.name << " on device "
96 << device;
97 return false;
98 }
99 }
100 }
101 if (x.compile_time_constant_inputs != y.compile_time_constant_inputs) {
102 LOG(WARNING) << "Registrations of " << x.name
103 << " have incompatible compile time constant inputs.";
104 return false;
105 }
106 if (x.is_metadata_op != y.is_metadata_op) {
107 LOG(WARNING) << "Registrations of " << x.name
108 << " have incompatible values for is_metadata_op.";
109 return false;
110 }
111 return true;
112 }
113
RegisterCompilationDevice(const string & device_name,const DeviceRegistration & registration)114 /* static */ void XlaOpRegistry::RegisterCompilationDevice(
115 const string& device_name, const DeviceRegistration& registration) {
116 XlaOpRegistry& registry = Instance();
117 mutex_lock lock(registry.mutex_);
118 auto result =
119 registry.compilation_devices_.emplace(device_name, registration);
120 CHECK(result.second || result.first->second.compilation_device_name ==
121 registration.compilation_device_name);
122 }
123
RegisterBackend(const string & compilation_device_name,absl::Span<const DataType> supported_types,BackendOpFilter op_filter)124 /* static */ void XlaOpRegistry::RegisterBackend(
125 const string& compilation_device_name,
126 absl::Span<const DataType> supported_types, BackendOpFilter op_filter) {
127 XlaOpRegistry& registry = Instance();
128 mutex_lock lock(registry.mutex_);
129 auto result = registry.backends_.emplace(compilation_device_name, Backend());
130 CHECK(result.second) << "Duplicate XLA backend registration "
131 << compilation_device_name;
132 result.first->second.supported_types.insert(supported_types.begin(),
133 supported_types.end());
134 result.first->second.op_filter = op_filter;
135 }
136
IsCompilationDevice(const string & device_name)137 /* static */ bool XlaOpRegistry::IsCompilationDevice(
138 const string& device_name) {
139 XlaOpRegistry& registry = Instance();
140 mutex_lock lock(registry.mutex_);
141 return registry.backends_.find(device_name) != registry.backends_.end();
142 }
143
// Looks up the DeviceRegistration for `device_name`, returning false if none
// exists. On success `*registration` points at registry-owned data.
/* static */ bool XlaOpRegistry::GetCompilationDevice(
    const string& device_name, const DeviceRegistration** registration) {
  XlaOpRegistry& registry = Instance();

  // Lazily register the CPU and GPU JIT devices the first time
  // GetCompilationDevice is called. The function-local static guarantees the
  // lambda runs exactly once, thread-safely; its nullptr result is unused.
  static void* registration_init = [&registry]() {
    MarkForCompilationPassFlags* flags = GetMarkForCompilationPassFlags();
    bool cpu_global_jit = flags->tf_xla_cpu_global_jit;
    VLOG(2) << "tf_xla_cpu_global_jit = " << cpu_global_jit;

    mutex_lock lock(registry.mutex_);
    // Only register a JIT device if the XlaLaunch kernel is linked in for the
    // corresponding plain device.
    if (LaunchOpHasKernelForDevice(DeviceType(DEVICE_CPU)).ok()) {
      DeviceRegistration& registration =
          registry.compilation_devices_[DEVICE_CPU];
      registration.compilation_device_name = DEVICE_CPU_XLA_JIT;
      // CPU autoclustering is opt-in per op unless the global-JIT flag is set.
      registration.autoclustering_policy =
          cpu_global_jit
              ? XlaOpRegistry::AutoclusteringPolicy::kIfEnabledGlobally
              : XlaOpRegistry::AutoclusteringPolicy::kIfExplicitlyRequested;
    }
    if (LaunchOpHasKernelForDevice(DeviceType(DEVICE_GPU)).ok()) {
      DeviceRegistration& registration =
          registry.compilation_devices_[DEVICE_GPU];
      registration.compilation_device_name = DEVICE_GPU_XLA_JIT;
      registration.autoclustering_policy =
          XlaOpRegistry::AutoclusteringPolicy::kIfEnabledGlobally;
    }
    return nullptr;
  }();
  (void)registration_init;

  // The lock is re-acquired here (not held across the init above) because the
  // lambda takes it itself.
  mutex_lock lock(registry.mutex_);
  auto it = registry.compilation_devices_.find(device_name);
  if (it == registry.compilation_devices_.end()) return false;
  *registration = &it->second;
  return true;
}
182
// Registers an OpKernel for every (op, backend) pair permitted by the op's
// registration metadata, its type constraints, and the backend's op filter.
// Idempotent: only the first call does any work.
void XlaOpRegistry::RegisterCompilationKernels() {
  XlaOpRegistry& registry = Instance();
  mutex_lock lock(registry.mutex_);

  if (registry.jit_kernels_registered_) return;
  registry.jit_kernels_registered_ = true;

  OpRegistryInterface* op_registry = OpRegistry::Global();
  // Order of op registration:
  // The goal is to allow the co-existence of backend-specific kernels and
  // generic kernels. To achieve this, we enforce the following order of
  // registrations for one op:
  // 1. Process op registrations with device allowlists:
  //    this pass registers backend-specific kernels for this op.
  // 2. Process op registrations without device allowlists:
  //    this pass registers the kernels for all the other supported backends.
  for (auto& ops : registry.ops_) {
    const string& op_name = ops.first;
    std::vector<std::unique_ptr<OpRegistration>>& op_registrations = ops.second;
    // Partition the op registrations so that the ones with device allowlists
    // precede the ones without a device allowlist.
    std::partition(op_registrations.begin(), op_registrations.end(),
                   [](const std::unique_ptr<OpRegistration>& op_reg) {
                     return op_reg->has_device_allowlist;
                   });

    // Collect the set of backends claimed by registrations with device
    // allowlists. The registration without an allowlist will register a
    // generic kernel for all other backends not in this set.
    std::unordered_set<string> allowlisted_backend;
    for (auto& op_registration : op_registrations) {
      if (op_registration->has_device_allowlist) {
        allowlisted_backend.insert(op_registration->device_allowlist.begin(),
                                   op_registration->device_allowlist.end());
      }
    }

    for (auto& op_registration : op_registrations) {
      const OpDef* op_def;
      Status lookup_status = op_registry->LookUpOpDef(op_name, &op_def);
      if (!lookup_status.ok()) {
        // Dump the full registry before crashing to make the failure
        // debuggable.
        LOG(ERROR) << lookup_status.error_message();
        XLA_LOG_LINES(
            ERROR,
            "Ops registered: \n" +
                dynamic_cast<OpRegistry*>(op_registry)->DebugString(true));
      }
      TF_CHECK_OK(lookup_status);

      // Names of the op's "type"/"list(type)" attributes; these are the only
      // attributes a KernelDef type constraint may reference.
      std::unordered_set<string> type_attrs;
      for (const OpDef::AttrDef& attr_def : op_def->attr()) {
        if (attr_def.type() == "type" || attr_def.type() == "list(type)") {
          type_attrs.insert(attr_def.name());
        }
      }

      // Checks there are no type constraints referring to unknown attributes.
      for (const auto& constraint : op_registration->type_constraints) {
        if (type_attrs.find(constraint.first) == type_attrs.end()) {
          LOG(FATAL) << "Unknown type attribute " << constraint.first
                     << " in XLA op registration for " << op_name;
        }
      }

      for (auto& backend : registry.backends_) {
        // If the operator has a device allowlist, only register on allowlisted
        // devices.
        if (op_registration->has_device_allowlist &&
            op_registration->device_allowlist.find(backend.first) ==
                op_registration->device_allowlist.end()) {
          continue;
        }

        // If the operator does NOT have a device allowlist, skip all devices
        // that have already been claimed by allowlisted registrations.
        if (!op_registration->has_device_allowlist &&
            allowlisted_backend.find(backend.first) !=
                allowlisted_backend.end()) {
          continue;
        }

        std::unique_ptr<KernelDef> kdef(new KernelDef);
        kdef->set_op(op_registration->name);
        kdef->set_device_type(backend.first);
        kdef->set_label(op_registration->label);

        // Constrain each type attribute to the intersection of:
        // a) the types supported by the backend, and
        // b) the types allowed by the OpDef, and
        // c) the type constraints.
        bool unsatisfiable_type_constraint = false;
        for (const string& type_attr : type_attrs) {
          KernelDef::AttrConstraint* attr_constraint = kdef->add_constraint();
          attr_constraint->set_name(type_attr);
          auto* allowed_values =
              attr_constraint->mutable_allowed_values()->mutable_list();

          const OpDef::AttrDef& op_def_attr = *FindAttr(type_attr, *op_def);
          const auto* op_def_allowed_types =
              op_def_attr.has_allowed_values()
                  ? &op_def_attr.allowed_values().list().type()
                  : nullptr;
          auto constraint_it =
              op_registration->type_constraints.find(type_attr);
          const std::set<DataType>* type_constraints =
              constraint_it != op_registration->type_constraints.end()
                  ? &constraint_it->second
                  : nullptr;
          for (DataType dtype : backend.second.supported_types) {
            // Filter out types that aren't allowed by the OpDef.
            if (op_def_allowed_types != nullptr &&
                std::find(op_def_allowed_types->begin(),
                          op_def_allowed_types->end(),
                          dtype) == op_def_allowed_types->end()) {
              continue;
            }
            // Filter out types based on the type constraints.
            if (type_constraints != nullptr &&
                type_constraints->find(dtype) == type_constraints->end()) {
              continue;
            }
            // Passed all the filters, this type is allowed.
            allowed_values->add_type(dtype);
          }
          // Resource/variant/string types are added unconditionally when the
          // registration opts in; backends need not list them as supported.
          if (op_registration->allow_resource_types) {
            allowed_values->add_type(DT_RESOURCE);
          }
          if (op_registration->allow_variant_types) {
            allowed_values->add_type(DT_VARIANT);
          }
          if (op_registration->allow_string_type) {
            allowed_values->add_type(DT_STRING);
          }
          // Don't build KernelDefs that have unsatisfiable type constraints.
          if (allowed_values->type().empty()) {
            unsatisfiable_type_constraint = true;
            break;
          }
        }
        if (unsatisfiable_type_constraint) continue;

        // Give the backend a final veto over this kernel.
        if (backend.second.op_filter != nullptr &&
            !backend.second.op_filter(kdef.get())) {
          continue;
        }
        VLOG(2) << "XLA op registration: device: " << backend.first
                << " op: " << op_name;
        registry.kernel_registrars_.emplace_back(
            new kernel_factory::OpKernelRegistrar(
                new KernelDef(*kdef), "XlaJitOp", op_registration->factory));
        backend.second.kernel_defs.push_back(std::move(kdef));
      }
    }
  }
}
338
DeviceKernels(const string & compilation_device_name,bool include_compilation_only_kernels)339 std::vector<const KernelDef*> XlaOpRegistry::DeviceKernels(
340 const string& compilation_device_name,
341 bool include_compilation_only_kernels) {
342 // Ensure compilation kernels registered.
343 RegisterCompilationKernels();
344 std::vector<const KernelDef*> kernels;
345 XlaOpRegistry& registry = Instance();
346 mutex_lock lock(registry.mutex_);
347 auto it = registry.backends_.find(compilation_device_name);
348 CHECK(it != registry.backends_.end())
349 << "Unknown backend " << compilation_device_name;
350 for (const std::unique_ptr<KernelDef>& k : it->second.kernel_defs) {
351 auto op_iter = registry.ops_.find(k->op());
352 CHECK(op_iter != registry.ops_.end() && !op_iter->second.empty());
353 // The test in IsCompatible ensures that if there are multiple matching
354 // registrations for this op name, they all have the same value of
355 // compilation_only, so only the first match needs to be tested.
356 if (include_compilation_only_kernels ||
357 !op_iter->second.front()->compilation_only) {
358 kernels.push_back(k.get());
359 }
360 }
361 return kernels;
362 }
363
GetAllRegisteredOps()364 /*static*/ std::vector<string> XlaOpRegistry::GetAllRegisteredOps() {
365 std::vector<string> ops;
366 XlaOpRegistry& registry = Instance();
367 mutex_lock lock(registry.mutex_);
368 for (const auto& pair : registry.ops_) {
369 ops.push_back(pair.first);
370 }
371 std::sort(ops.begin(), ops.end());
372 return ops;
373 }
374
375 /*static*/ const std::unordered_set<std::string>*
CompileTimeConstantInputArgNames(const string & op)376 XlaOpRegistry::CompileTimeConstantInputArgNames(const string& op) {
377 XlaOpRegistry& registry = Instance();
378 mutex_lock lock(registry.mutex_);
379 auto it = registry.ops_.find(op);
380 static auto empty_set = new std::unordered_set<std::string>;
381 if (it == registry.ops_.end() || it->second.empty()) {
382 return empty_set;
383 } else {
384 return &it->second.front()->compile_time_constant_inputs;
385 }
386 }
387
CompileTimeConstantInputs(const NodeDef & node_def,const OpKernel * op_kernel,const OpDef * op_def,std::vector<int> * result)388 /* static */ Status XlaOpRegistry::CompileTimeConstantInputs(
389 const NodeDef& node_def, const OpKernel* op_kernel, const OpDef* op_def,
390 std::vector<int>* result) {
391 result->clear();
392
393 DCHECK(op_def != nullptr || op_kernel != nullptr);
394
395 std::unordered_set<string> compile_time_constant_inputs_from_attr;
396 std::vector<string> compile_time_constant_inputs_vect_from_attr;
397
398 const std::unordered_set<string>* compile_time_constant_inputs;
399
400 if (TryGetNodeAttr(node_def, kXlaCompileTimeConstantInputsAttr,
401 &compile_time_constant_inputs_vect_from_attr)) {
402 absl::c_copy(compile_time_constant_inputs_vect_from_attr,
403 std::inserter(compile_time_constant_inputs_from_attr,
404 compile_time_constant_inputs_from_attr.end()));
405 compile_time_constant_inputs = &compile_time_constant_inputs_from_attr;
406 } else {
407 compile_time_constant_inputs =
408 CompileTimeConstantInputArgNames(node_def.op());
409 if (compile_time_constant_inputs->empty()) {
410 return OkStatus();
411 }
412 }
413
414 VLOG(3) << "For operation "
415 << (op_def != nullptr ? op_def->name() : op_kernel->name())
416 << " required constants are: "
417 << absl::StrJoin(*compile_time_constant_inputs, ", ");
418
419 for (const string& input : *compile_time_constant_inputs) {
420 if (op_def) {
421 NameRangeMap input_name_ranges;
422 TF_RETURN_IF_ERROR(
423 NameRangesForNode(node_def, *op_def, &input_name_ranges, nullptr));
424 auto name_range = input_name_ranges.find(input);
425 if (name_range == input_name_ranges.end()) {
426 continue;
427 }
428
429 for (int i = name_range->second.first; i < name_range->second.second;
430 i++) {
431 result->push_back(i);
432 }
433 } else {
434 int start, stop;
435 TF_CHECK_OK(op_kernel->InputRange(input, &start, &stop));
436 for (int i = start; i < stop; ++i) {
437 result->push_back(i);
438 }
439 }
440 }
441
442 absl::c_sort(*result);
443 return OkStatus();
444 }
445
IsMetadataOp(const string & op)446 /*static*/ bool XlaOpRegistry::IsMetadataOp(const string& op) {
447 XlaOpRegistry& registry = Instance();
448 mutex_lock lock(registry.mutex_);
449 auto it = registry.ops_.find(op);
450 if (it == registry.ops_.end() || it->second.empty()) {
451 return false;
452 }
453
454 // The test in IsCompatible ensures that if there are multiple matching
455 // registrations for this op name, they all have the same value of
456 // is_metadata_op, so only the first match is returned.
457 return it->second.front()->is_metadata_op;
458 }
459
BackendNames()460 std::vector<string> XlaOpRegistry::BackendNames() {
461 std::vector<string> names;
462 XlaOpRegistry& registry = Instance();
463 mutex_lock lock(registry.mutex_);
464 for (const auto& backend_pair : registry.backends_) {
465 names.push_back(backend_pair.first);
466 }
467 return names;
468 }
469
IsBackendRegistered(const string & name)470 bool XlaOpRegistry::IsBackendRegistered(const string& name) {
471 XlaOpRegistry& registry = Instance();
472 mutex_lock lock(registry.mutex_);
473 return registry.backends_.find(name) != registry.backends_.end();
474 }
475
// Returns the process-wide registry singleton. Heap-allocated and never
// deleted on purpose, to avoid static-destruction-order issues at shutdown.
XlaOpRegistry& XlaOpRegistry::Instance() {
  static XlaOpRegistry* r = new XlaOpRegistry;
  return *r;
}
480
XlaOpRegistrationBuilder(absl::string_view name)481 XlaOpRegistrationBuilder::XlaOpRegistrationBuilder(absl::string_view name) {
482 registration_.reset(new XlaOpRegistry::OpRegistration);
483 registration_->name = string(name);
484 }
485
Name(absl::string_view name)486 XlaOpRegistrationBuilder XlaOpRegistrationBuilder::Name(
487 absl::string_view name) {
488 XlaOpRegistrationBuilder registration(name);
489 return registration;
490 }
491
Device(absl::Span<const absl::string_view> devices)492 XlaOpRegistrationBuilder& XlaOpRegistrationBuilder::Device(
493 absl::Span<const absl::string_view> devices) {
494 registration_->has_device_allowlist = true;
495 for (absl::string_view device : devices) {
496 registration_->device_allowlist.emplace(device);
497 }
498 return *this;
499 }
500
Device(absl::string_view device)501 XlaOpRegistrationBuilder& XlaOpRegistrationBuilder::Device(
502 absl::string_view device) {
503 registration_->has_device_allowlist = true;
504 registration_->device_allowlist.emplace(device);
505 return *this;
506 }
507
// Marks the op as usable during compilation only (no runtime kernel is
// expected for it).
XlaOpRegistrationBuilder& XlaOpRegistrationBuilder::CompilationOnly() {
  registration_->compilation_only = true;
  return *this;
}
512
// Allows DT_RESOURCE in the op's type attributes, in addition to the
// backend-supported types.
XlaOpRegistrationBuilder& XlaOpRegistrationBuilder::AllowResourceTypes() {
  registration_->allow_resource_types = true;
  return *this;
}
517
// Allows DT_VARIANT in the op's type attributes, in addition to the
// backend-supported types.
XlaOpRegistrationBuilder& XlaOpRegistrationBuilder::AllowVariantTypes() {
  registration_->allow_variant_types = true;
  return *this;
}
522
// Allows DT_STRING in the op's type attributes, in addition to the
// backend-supported types.
XlaOpRegistrationBuilder& XlaOpRegistrationBuilder::AllowStringType() {
  registration_->allow_string_type = true;
  return *this;
}
527
TypeConstraint(absl::string_view attr_name,DataType allowed)528 XlaOpRegistrationBuilder& XlaOpRegistrationBuilder::TypeConstraint(
529 absl::string_view attr_name, DataType allowed) {
530 std::set<DataType>& types =
531 registration_->type_constraints[string(attr_name)];
532 types.insert(allowed);
533 return *this;
534 }
535
TypeConstraint(absl::string_view attr_name,absl::Span<const DataType> allowed)536 XlaOpRegistrationBuilder& XlaOpRegistrationBuilder::TypeConstraint(
537 absl::string_view attr_name, absl::Span<const DataType> allowed) {
538 std::set<DataType>& types =
539 registration_->type_constraints[string(attr_name)];
540 for (DataType t : allowed) {
541 types.insert(t);
542 }
543 return *this;
544 }
545
// Declares that the input argument named `input_name` must be a compile-time
// constant.
XlaOpRegistrationBuilder& XlaOpRegistrationBuilder::CompileTimeConstantInput(
    absl::string_view input_name) {
  registration_->compile_time_constant_inputs.emplace(input_name);
  return *this;
}
551
// Marks this op as a metadata op (see XlaOpRegistry::IsMetadataOp).
XlaOpRegistrationBuilder& XlaOpRegistrationBuilder::IsMetadataOp() {
  registration_->is_metadata_op = true;
  return *this;
}
556
Label(std::string label)557 XlaOpRegistrationBuilder& XlaOpRegistrationBuilder::Label(std::string label) {
558 registration_->label = label;
559 return *this;
560 }
561
Build(XlaOpRegistry::Factory factory)562 std::unique_ptr<XlaOpRegistry::OpRegistration> XlaOpRegistrationBuilder::Build(
563 XlaOpRegistry::Factory factory) {
564 registration_->factory = factory;
565 return std::move(registration_);
566 }
567
XlaOpRegistrar(std::unique_ptr<XlaOpRegistry::OpRegistration> registration)568 XlaOpRegistrar::XlaOpRegistrar(
569 std::unique_ptr<XlaOpRegistry::OpRegistration> registration) {
570 XlaOpRegistry& registry = XlaOpRegistry::Instance();
571 mutex_lock lock(registry.mutex_);
572 auto& existing_ops = registry.ops_[registration->name];
573 for (auto& existing : existing_ops) {
574 if (!XlaOpRegistry::IsCompatible(*existing, *registration)) {
575 LOG(FATAL)
576 << "XLA op registration " << registration->name
577 << " is incompatible with existing registration of the same name.";
578 }
579 }
580 existing_ops.emplace_back(std::move(registration));
581 }
582
XlaBackendRegistrar(absl::string_view name,absl::Span<const DataType> types,XlaOpRegistry::BackendOpFilter op_filter)583 XlaBackendRegistrar::XlaBackendRegistrar(
584 absl::string_view name, absl::Span<const DataType> types,
585 XlaOpRegistry::BackendOpFilter op_filter) {
586 XlaOpRegistry& registry = XlaOpRegistry::Instance();
587 registry.RegisterBackend(string(name), types, op_filter);
588
589 AddSymbolicExecutionDevice(name);
590 }
591
592 } // namespace tensorflow
593