/external/tensorflow/tensorflow/compiler/xla/service/ |
D | hlo_liveness_analysis.cc |
    55   index_tree.ForEachElement([&](const ShapeIndex& shape_index, bool live) { in ForEachLiveIndex() argument
    57   func(shape_index); in ForEachLiveIndex()
    67   const ShapeIndex& shape_index, in MarkLiveAtIndex() argument
    77   if (it->second.element(shape_index) == false) { in MarkLiveAtIndex()
    79   *it->second.mutable_element(shape_index) = true; in MarkLiveAtIndex()
    81   << " shape_index: " << shape_index.ToString(); in MarkLiveAtIndex()
    99   [&](const Shape& sub_shape, const ShapeIndex& shape_index) { in MarkLiveAtAllIndices() argument
    100  if (it->second.element(shape_index) == false) { in MarkLiveAtAllIndices()
    102  *it->second.mutable_element(shape_index) = true; in MarkLiveAtAllIndices()
    104  << " shape_index: " << shape_index.ToString(); in MarkLiveAtAllIndices()
    [all …]
|
D | hlo_liveness_analysis.h | 52 const ShapeIndex& shape_index) const;
|
D | instruction_fusion.cc |
    176  [&output_rank](const Shape& subshape, const ShapeIndex& shape_index) { in EffectivelyAtMostUnary() argument
    275  [&size](const Shape& subshape, const ShapeIndex& shape_index) { in ComputeGloballyUnfusible() argument
|
D | layout_assignment.cc |
    1313  [&](const Shape& subshape, const ShapeIndex& shape_index) { in PropagateOperandConstraint() argument
    1331  user, shape_index)); in PropagateOperandConstraint()
    1350  user->shape(), [&](const Shape& subshape, const ShapeIndex& shape_index) { in PropagateOperandConstraint() argument
    1360  shape_index)); in PropagateOperandConstraint()
|
D | buffer_assignment.h | 378 const HloInstruction* hlo, const ShapeIndex& shape_index) const;
|
/external/tensorflow/tensorflow/compiler/xla/ |
D | literal.h |
    78   absl::Span<const NativeT> data(const ShapeIndex& shape_index = {}) const;
    83   const ShapeIndex& shape_index = {}) const;
    88   const void* untyped_data(const ShapeIndex& shape_index = {}) const;
    89   int64 size_bytes(const ShapeIndex& shape_index = {}) const;
    114  const ShapeIndex& shape_index) const;
    128  const ShapeIndex& shape_index = {}) const;
    132  const ShapeIndex& shape_index = {}) const;
    142  int64 sparse_element_number, const ShapeIndex& shape_index = {}) const;
    150  const ShapeIndex& shape_index = {}) const;
    267  const ShapeIndex& shape_index = {}) const;
    [all …]
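    These literal.h accessors all take a ShapeIndex that picks out one sub-array of a (possibly nested)
    tuple literal, with the empty index {} meaning the whole literal. Below is a minimal, hedged sketch of
    the indexed accessors in use; it assumes the literal.h / literal_util.h headers from this same tree and
    a build that links the xla::Literal targets, and is illustrative rather than an excerpt.

      // Sketch only: assumes xla::Literal and xla::LiteralUtil from this tree.
      #include <iostream>

      #include "tensorflow/compiler/xla/literal.h"
      #include "tensorflow/compiler/xla/literal_util.h"

      int main() {
        // Build a two-element tuple literal (f32[3], f32[2]).
        xla::Literal a = xla::LiteralUtil::CreateR1<float>({1.0f, 2.0f, 3.0f});
        xla::Literal b = xla::LiteralUtil::CreateR1<float>({4.0f, 5.0f});
        xla::Literal tuple = xla::LiteralUtil::MakeTuple({&a, &b});

        // A ShapeIndex of {0} selects the first tuple element; {} is the root.
        auto first = tuple.data<float>(/*shape_index=*/{0});
        std::cout << "element {0}: " << first.size() << " floats, "
                  << tuple.size_bytes({0}) << " bytes\n";
        return 0;
      }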
|
D | literal.cc |
    199  const ShapeIndex& shape_index) const { in sparse_indices()
    200  return piece(shape_index).sparse_indices(); in sparse_indices()
    204  const ShapeIndex& shape_index) { in sparse_indices() argument
    205  return piece(shape_index).sparse_indices(); in sparse_indices()
    579  const ShapeIndex& shape_index) const { in Relayout()
    582  Shape* subshape = ShapeUtil::GetMutableSubshape(&new_shape, shape_index); in Relayout()
    789  const ShapeIndex& shape_index) const { in GetAsString()
    790  const Shape& subshape = ShapeUtil::GetSubshape(shape(), shape_index); in GetAsString()
    794  return Get<bool>(multi_index, shape_index) ? "true" : "false"; in GetAsString()
    796  return StrCat(Get<int8>(multi_index, shape_index)); in GetAsString()
    [all …]
|
D | shape_tree.h |
    416  ShapeIndex shape_index = node->data.first; in InitChildren() local
    417  shape_index.push_back(0); in InitChildren()
    427  shape_index[shape_index.size() - 1] = i; in InitChildren()
    433  nodes_.emplace_back(shape_index, init_value); in InitChildren()
    452  ShapeIndex shape_index = node->data.first; in InitChildren() local
    453  shape_index.push_back(0); in InitChildren()
    463  shape_index[shape_index.size() - 1] = i; in InitChildren()
    469  nodes_.emplace_back(shape_index); in InitChildren()
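    The InitChildren code above is what gives every sub-shape of a ShapeTree its own ShapeIndex key:
    the parent's index is copied, a 0 is appended, and the trailing entry is overwritten for each child.
    A hedged sketch of how such a tree is then indexed and traversed, assuming xla::ShapeTree and
    xla::ShapeUtil from this tree:

      // Sketch only: assumes xla::ShapeTree and xla::ShapeUtil from this tree.
      #include <iostream>

      #include "tensorflow/compiler/xla/shape_tree.h"
      #include "tensorflow/compiler/xla/shape_util.h"

      int main() {
        // A tuple shape (f32[4], (f32[2], pred[])) yields one ShapeTree node per
        // sub-shape, keyed {}, {0}, {1}, {1,0}, {1,1}.
        xla::Shape shape = xla::ShapeUtil::MakeTupleShape(
            {xla::ShapeUtil::MakeShape(xla::F32, {4}),
             xla::ShapeUtil::MakeTupleShape(
                 {xla::ShapeUtil::MakeShape(xla::F32, {2}),
                  xla::ShapeUtil::MakeShape(xla::PRED, {})})});

        xla::ShapeTree<bool> live(shape, /*init_value=*/false);
        *live.mutable_element({1, 0}) = true;  // mark one leaf

        live.ForEachElement([](const xla::ShapeIndex& index, bool value) {
          std::cout << index.ToString() << " -> " << (value ? "live" : "dead") << "\n";
        });
        return 0;
      }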
|
D | shape_layout.cc |
    71   ShapeIndexView shape_index) { in ResetLayout() argument
    73   *ShapeUtil::GetMutableSubshape(&shape_, shape_index)->mutable_layout() = in ResetLayout()
|
D | shape_util.h |
    123  ShapeIndexView(const ShapeIndex& shape_index, int64 offset = 0)
    124  : indices_(shape_index.data() + offset, shape_index.size() - offset) {
    125  CHECK_LE(offset, shape_index.size());
    164  std::ostream& operator<<(std::ostream& out, const ShapeIndex& shape_index);
    165  std::ostream& operator<<(std::ostream& out, const ShapeIndexView& shape_index);
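    For reference, the ShapeIndexView constructor shown above wraps a suffix of an existing ShapeIndex
    without copying, and the two operator<< overloads stream the ToString() form. A small sketch,
    assuming shape_util.h from this tree:

      // Sketch only: assumes xla::ShapeIndex / xla::ShapeIndexView from shape_util.h.
      #include <iostream>

      #include "tensorflow/compiler/xla/shape_util.h"

      int main() {
        xla::ShapeIndex index({1, 0, 2});      // path into a nested tuple shape
        xla::ShapeIndexView suffix(index, 1);  // non-owning view over {0, 2}

        // Both stream via the operator<< overloads declared above.
        std::cout << index << " suffix: " << suffix << "\n";
        return 0;
      }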
|
D | literal_util.cc |
    64   [&](const Shape& subshape, const ShapeIndex& shape_index) { in ConvertType() argument
    68   auto src = literal.data<FromNativeT>(shape_index); in ConvertType()
    69   auto dest = result.data<ToNativeT>(shape_index); in ConvertType()
    75   /*dest_shape_index=*/shape_index, in ConvertType()
    76   /*src_shape_index=*/shape_index)); in ConvertType()
|
D | shape_layout.h | 77 void ResetLayout(const Layout& layout, ShapeIndexView shape_index);
|
D | shape_util.cc |
    68   std::ostream& operator<<(std::ostream& out, const ShapeIndex& shape_index) { in operator <<() argument
    69   out << shape_index.ToString(); in operator <<()
    73   std::ostream& operator<<(std::ostream& out, const ShapeIndexView& shape_index) { in operator <<() argument
    74   out << shape_index.ToString(); in operator <<()
|
D | literal_comparison.cc |
    741  const ShapeIndex& shape_index) { in NearHelper() argument
    749  ShapeIndex element_index = shape_index; in NearHelper()
    766  if (!return_status.ok() && shape_index.empty()) { in NearHelper()
|
/external/tensorflow/tensorflow/compiler/xla/service/gpu/ |
D | hlo_to_ir_bindings.cc |
    154  ShapeIndexView shape_index, in GetTypedIrValue() argument
    157  ShapeUtil::GetSubshape(hlo.shape(), shape_index), module_); in GetTypedIrValue()
    178  ShapeIndexView shape_index) { in BindHloToIrValue() argument
    182  llvm::Value* typed_ir_value = GetTypedIrValue(hlo, shape_index, ir_value); in BindHloToIrValue()
    188  *(base_ptrs_[&hlo].mutable_element(shape_index)) = typed_ir_value; in BindHloToIrValue()
    215  const ShapeIndex& shape_index) { in GetIrArray() argument
    216  llvm::Value* base_ptr = GetBasePointer(hlo, shape_index); in GetIrArray()
    218  << "Buffer not assigned for shape_index " << shape_index.ToString() in GetIrArray()
    221  ShapeUtil::GetSubshape(hlo.shape(), shape_index)); in GetIrArray()
    222  alias_analysis_.AddAliasingInformationToIrArray(hlo, &ir_array, shape_index); in GetIrArray()
|
D | hlo_to_ir_bindings.h |
    54   ShapeIndexView shape_index = {});
    74   ShapeIndexView shape_index = {}) const {
    77   return it->second.element(shape_index);
    89   const ShapeIndex& shape_index = {});
    100  ShapeIndexView shape_index,
|
D | ir_emitter.h |
    124  const ShapeIndex& shape_index = {}) {
    125  return bindings_.GetIrArray(inst, consumer, shape_index);
|
D | ir_emitter_unnested.cc |
    1214  ShapeIndex shape_index = in HandleSort() local
    1221  keys_shape, ShapeUtil::GetSubshape(sort->shape(), shape_index))); in HandleSort()
    1225  auto destination_buffer = GetAllocationSlice(*sort, shape_index); in HandleSort()
    1329  ShapeIndex shape_index = in HandleSort() local
    1331  values_arrays.push_back(GetIrArray(*sort, *sort, shape_index)); in HandleSort()
|
/external/tensorflow/tensorflow/compiler/xrt/kernels/ |
D | xrt_execute_op.cc |
    259  xla::ShapeIndex shape_index; in DoWork() local
    260  shape_index.push_back(i); in DoWork()
    264  output_tuple, shape_index, &suballocation, in DoWork()
|
D | xrt_state_ops.h |
    337  xla::ShapeIndex shape_index; in Compute() local
    339  shape_index.push_back(subtuple_info.vec<int32>()(i)); in Compute()
    358  ctx, XRTTupleAllocation::MakeSubBuffer(allocation, shape_index, in Compute()
|
/external/tensorflow/tensorflow/compiler/xla/python/ |
D | xrt.cc |
    267  auto shape_index = tensorflow::ops::Placeholder(root, tensorflow::DT_INT32); in DestructureXrtAllocationTuple() local
    268  auto subtuple = tensorflow::ops::XRTSubTuple(root, base_handle, shape_index); in DestructureXrtAllocationTuple()
    277  inputs.insert({shape_index, {i}}); in DestructureXrtAllocationTuple()
|
/external/tensorflow/tensorflow/core/protobuf/tpu/ |
D | dynamic_padding.proto | 14 int32 shape_index = 2; field
|
/external/tensorflow/tensorflow/compiler/xla/tests/ |
D | hlo_test_base.h |
    282  ShapeIndexView shape_index) { in ForceResultLayout() argument
    285  ->ResetLayout(layout, shape_index); in ForceResultLayout()
|
/external/tensorflow/tensorflow/python/compiler/tensorrt/test/ |
D | tf_trt_integration_test_base.py |
    281  for shape_index in range(len(inputs_data)):
    285  self._GetFeedDict(inputs_data, shape_index))
    286  output_len = len(params.expected_output_dims[shape_index])
    290  list(params.expected_output_dims[shape_index][i]),
|
/external/tensorflow/tensorflow/core/grappler/costs/ |
D | graph_properties.cc |
    1340  int shape_index = IsSwitch(node) ? 0 : i; in UpdateOutputShapesUsingAnnotatedInformation() local
    1341  if (shape_index >= output_size) { in UpdateOutputShapesUsingAnnotatedInformation()
    1351  attr.at(kOutputShapes).list().shape(shape_index); in UpdateOutputShapesUsingAnnotatedInformation()
|