Searched refs:Layout (Results 1 – 25 of 1636) sorted by relevance


/external/tensorflow/tensorflow/compiler/xla/
layout_test.cc
35 EXPECT_EQ(Layout().ToString(), "{}"); in TEST_F()
36 EXPECT_EQ(Layout({4, 5, 6}).ToString(), "{4,5,6}"); in TEST_F()
37 EXPECT_EQ(Layout({4, 5, 6}).ToString(), "{4,5,6}"); in TEST_F()
39 Layout({3, 2, 1, 0}, {}, {Tile({42, 123}), Tile({4, 5})}).ToString(), in TEST_F()
41 EXPECT_EQ(Layout({1, 0}, {}, {Tile({2, 55})}) in TEST_F()
45 EXPECT_EQ(Layout({3, 2, 1, 0}, {}, {Tile({42, 123}), Tile({4, 5})}) in TEST_F()
49 EXPECT_EQ(Layout({1, 0}, {}, {Tile({-2, 55})}) in TEST_F()
64 oss << Layout({0, 1, 2}); in TEST_F()
70 EXPECT_EQ(Layout(), Layout()); in TEST_F()
72 EXPECT_EQ(Layout(empty_dims), Layout(empty_dims)); in TEST_F()
[all …]
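
Note: the excerpt above exercises xla::Layout's string form. A minimal standalone sketch of what those assertions check, assuming an XLA/TensorFlow build where the header is available (an illustration, not the real test file):

  #include "tensorflow/compiler/xla/layout.h"
  #include <cassert>

  void CheckLayoutToString() {
    // An empty layout prints as "{}".
    assert(xla::Layout().ToString() == "{}");
    // Otherwise the minor-to-major dimension order is listed inside braces.
    assert(xla::Layout({4, 5, 6}).ToString() == "{4,5,6}");
  }
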
layout_util.h
38 static Layout MakeLayout(absl::Span<const int64_t> minor_to_major,
45 static Layout MakeLayoutFromMajorToMinor(
50 static Layout MakeDescendingLayout(int64_t rank);
54 static Layout MakeAscendingLayout(int64_t rank);
57 static Layout GetDefaultLayoutForShape(const Shape& shape);
60 static Layout GetDefaultLayoutForRank(int64_t rank);
61 static Layout GetDefaultLayoutForR2();
62 static Layout GetDefaultLayoutForR3();
63 static Layout GetDefaultLayoutForR4();
84 static Status ValidateLayoutForShape(const Layout& layout,
[all …]
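
Note: these are static factory helpers on xla::LayoutUtil. A hedged usage sketch built only from the declarations above (the trailing default parameters of MakeLayout are an assumption):

  #include "tensorflow/compiler/xla/layout_util.h"

  void MakeSomeLayouts() {
    // Explicit minor-to-major order {1, 0} (column-major for rank 2).
    xla::Layout explicit_layout = xla::LayoutUtil::MakeLayout({1, 0});
    // MakeDescendingLayout(3) yields {2, 1, 0}, the default row-major layout.
    xla::Layout descending = xla::LayoutUtil::MakeDescendingLayout(3);
    // MakeAscendingLayout(3) yields {0, 1, 2}.
    xla::Layout ascending = xla::LayoutUtil::MakeAscendingLayout(3);
    (void)explicit_layout; (void)descending; (void)ascending;
  }
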
/external/pytorch/aten/src/ATen/native/xnnpack/
MaxPooling.cpp
59 input.size(Layout::Activation4D::height), in use_max_pool2d()
60 parameters.kernel[Layout::Parameter::height], in use_max_pool2d()
61 parameters.padding[Layout::Parameter::height], in use_max_pool2d()
62 parameters.stride[Layout::Parameter::height], in use_max_pool2d()
63 parameters.dilation[Layout::Parameter::height], in use_max_pool2d()
66 input.size(Layout::Activation4D::width), in use_max_pool2d()
67 parameters.kernel[Layout::Parameter::width], in use_max_pool2d()
68 parameters.padding[Layout::Parameter::width], in use_max_pool2d()
69 parameters.stride[Layout::Parameter::width], in use_max_pool2d()
70 parameters.dilation[Layout::Parameter::width], in use_max_pool2d()
[all …]
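
Note: here Layout::Activation4D and Layout::Parameter are compile-time index constants into NCHW activation sizes and {height, width} parameter arrays. A self-contained sketch of that convention as implied by the call sites above (the index values follow the usual NCHW ordering and are an assumption, not a quote of the ATen header):

  #include <cstdint>
  #include <vector>

  namespace Layout {
  struct Activation4D {
    static constexpr size_t batch = 0, channels = 1, height = 2, width = 3;
  };
  struct Parameter {
    static constexpr size_t height = 0, width = 1;
  };
  }  // namespace Layout

  // Pick the H extent of an NCHW size array, as use_max_pool2d() does above.
  std::int64_t HeightOf(const std::vector<std::int64_t>& nchw_sizes) {
    return nchw_sizes[Layout::Activation4D::height];
  }
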
Convolution.cpp
41 (weight.size(Layout::Filter::height) > 0) && in available()
42 (weight.size(Layout::Filter::width) > 0) && in available()
47 ((transposed ? (weight.size(Layout::Filter::input) == in available()
49 : (weight.size(Layout::Filter::output) == ((*bias_sizes_opt)[0]))))) in available()
52 (padding[Layout::Parameter::height] >= 0) && in available()
53 (padding[Layout::Parameter::width] >= 0) && in available()
55 (stride[Layout::Parameter::height] > 0) && in available()
56 (stride[Layout::Parameter::width] > 0) && in available()
58 (dilation[Layout::Parameter::height] > 0) && in available()
59 (dilation[Layout::Parameter::width] > 0) && in available()
[all …]
/external/tensorflow/tensorflow/lite/delegates/gpu/common/
shape.cc
30 template <Layout T>
38 template <Layout T>
46 template <Layout T>
78 std::string ToString(Layout layout) { in ToString()
80 case Layout::SCALAR: in ToString()
82 case Layout::LINEAR: in ToString()
84 case Layout::HW: in ToString()
86 case Layout::HWD: in ToString()
88 case Layout::CHW: in ToString()
90 case Layout::HWC: in ToString()
[all …]
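
Note: ToString maps the delegate's Layout enum values to printable names. A self-contained approximation of the switch the excerpt is from (the exact strings returned by the real function are not quoted here and may differ in casing):

  #include <string>

  enum class Layout { UNKNOWN, SCALAR, LINEAR, HW, HWD, CHW, HWC };

  std::string ToString(Layout layout) {
    switch (layout) {
      case Layout::SCALAR: return "scalar";
      case Layout::LINEAR: return "linear";
      case Layout::HW:     return "hw";
      case Layout::HWD:    return "hwd";
      case Layout::CHW:    return "chw";
      case Layout::HWC:    return "hwc";
      default:             return "unknown";
    }
  }
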
shape.h
47 enum class Layout { enum
66 std::string ToString(Layout l);
69 template <Layout T>
73 int Size(Layout layout);
76 template <Layout T>
80 Axis GetAxis(Layout layout, int32_t index);
83 template <Layout T>
87 int GetAxisIndex(Layout layout, Axis axis);
90 template <Layout T>
94 bool HasAxis(Layout layout, Axis axis);
[all …]
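
Note: the header pairs each Layout value with an ordered set of axes, queried through Size/GetAxis/GetAxisIndex/HasAxis. A self-contained sketch of what a GetAxisIndex-style helper does for one layout (a hypothetical mini-version; the real templates cover every Layout value):

  #include <array>

  enum class Axis { HEIGHT, WIDTH, CHANNELS };

  constexpr std::array<Axis, 3> kHWCAxes = {Axis::HEIGHT, Axis::WIDTH,
                                            Axis::CHANNELS};

  // Position of an axis within the HWC layout, or -1 when absent.
  constexpr int GetAxisIndex(Axis axis) {
    for (int i = 0; i < static_cast<int>(kHWCAxes.size()); ++i) {
      if (kHWCAxes[i] == axis) return i;
    }
    return -1;
  }

  static_assert(GetAxisIndex(Axis::CHANNELS) == 2, "C is the last HWC axis");
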
/external/tensorflow/tensorflow/dtensor/mlir/expansions/
in_top_k_spmd_expander.cc
36 StatusOr<Layout> GetSuggestedPredictionsLayout(const Layout& layout) { in GetSuggestedPredictionsLayout()
40 layout_specs[1].set_sharding_spec(Layout::kUnshardedDim); in GetSuggestedPredictionsLayout()
42 return Layout::GetLayout(layout_specs, layout.mesh()); in GetSuggestedPredictionsLayout()
47 StatusOr<Layout> MatchBatchDim(const Layout& layout, in MatchBatchDim()
48 const Layout& other_layout) { in MatchBatchDim()
55 return Layout::GetLayout(layout_specs, layout.mesh()); in MatchBatchDim()
64 TF_ASSIGN_OR_RETURN(const Layout predictions_layout, in ExpandOp()
67 TF_ASSIGN_OR_RETURN(const Layout targets_layout, in ExpandOp()
70 TF_ASSIGN_OR_RETURN(const Layout precision_layout, in ExpandOp()
75 Layout new_predictions_layout = predictions_layout; in ExpandOp()
[all …]
matmul_spmd_expander.cc
59 TF_ASSIGN_OR_RETURN(const Layout left_layout, in ExpandOp()
61 TF_ASSIGN_OR_RETURN(const Layout right_layout, in ExpandOp()
63 TF_ASSIGN_OR_RETURN(const Layout output_layout, in ExpandOp()
69 Layout layout_after_matmul; in ExpandOp()
86 if (Layout::IsShardedDimension(reduce_dim)) { in ExpandOp()
102 StatusOr<Layout> MatMulSPMDExpander::OutputLayoutAndReducedDims( in OutputLayoutAndReducedDims()
105 absl::optional<Layout>* left, absl::optional<Layout>* right) { in OutputLayoutAndReducedDims()
107 Layout left_layout; in OutputLayoutAndReducedDims()
108 Layout right_layout; in OutputLayoutAndReducedDims()
113 Layout batch_layout; in OutputLayoutAndReducedDims()
[all …]
meta_spmd_expander.h
33 StatusOr<llvm::DenseMap<int, Layout>> ComputeLayoutForward(
35 const llvm::DenseMap<int, Layout>& input_layouts) override;
37 StatusOr<llvm::DenseMap<int, Layout>> ComputeLayoutBackward(
39 const llvm::DenseMap<int, Layout>& output_layouts) override;
46 StatusOr<llvm::DenseMap<int, Layout>> ComputeLayoutForward(
48 const llvm::DenseMap<int, Layout>& input_layouts) override;
50 StatusOr<llvm::DenseMap<int, Layout>> ComputeLayoutBackward(
52 const llvm::DenseMap<int, Layout>& output_layouts) override;
59 StatusOr<llvm::DenseMap<int, Layout>> ComputeLayoutForward(
61 const llvm::DenseMap<int, Layout>& input_layouts) override;
[all …]
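
Note: every DTensor SPMD expander declares this pair of hooks: given known layouts for some operands (keyed by index), ComputeLayoutForward proposes output layouts and ComputeLayoutBackward proposes input layouts. A skeletal sketch of the pattern with a hypothetical subclass (the real SPMDExpanderBase also requires an ExpandOp override; headers omitted):

  // Returning an empty map means "no layout preference", exactly as the
  // trivial expanders further down in these results do.
  class MyOpSPMDExpander : public SPMDExpanderBase {
   public:
    StatusOr<llvm::DenseMap<int, Layout>> ComputeLayoutForward(
        mlir::Operation* op,
        const llvm::DenseMap<int, Layout>& input_layouts) override {
      return llvm::DenseMap<int, Layout>();  // no preferred output layouts
    }

    StatusOr<llvm::DenseMap<int, Layout>> ComputeLayoutBackward(
        mlir::Operation* op,
        const llvm::DenseMap<int, Layout>& output_layouts) override {
      return llvm::DenseMap<int, Layout>();  // no preferred input layouts
    }
  };
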
split_spmd_expander.cc
39 StatusOr<Layout> MergeLayoutsForSplitOutput( in MergeLayoutsForSplitOutput()
40 int64_t split_dim, const llvm::DenseMap<int, Layout>& layouts) { in MergeLayoutsForSplitOutput()
42 const Layout& first_layout = layouts.begin()->getSecond(); in MergeLayoutsForSplitOutput()
50 const Layout& output_layout = it->getSecond(); in MergeLayoutsForSplitOutput()
52 if (Layout::IsShardedDimension(output_layout.dim(dim).sharding_spec()) && in MergeLayoutsForSplitOutput()
53 Layout::IsShardedDimension(sharding_specs[dim].sharding_spec()) && in MergeLayoutsForSplitOutput()
56 sharding_specs[dim].set_sharding_spec(Layout::kUnshardedDim); in MergeLayoutsForSplitOutput()
61 sharding_specs[split_dim].set_sharding_spec(Layout::kUnshardedDim); in MergeLayoutsForSplitOutput()
62 return Layout::GetLayout(sharding_specs, first_layout.mesh()); in MergeLayoutsForSplitOutput()
86 TF_ASSIGN_OR_RETURN(const Layout input_layout, in ExpandOp()
[all …]
softmax_spmd_expander.h
31 StatusOr<llvm::DenseMap<int, Layout>> ComputeLayoutForward(
33 const llvm::DenseMap<int, Layout>& input_layouts) override;
35 StatusOr<llvm::DenseMap<int, Layout>> ComputeLayoutBackward(
37 const llvm::DenseMap<int, Layout>& output_layouts) override;
45 StatusOr<llvm::DenseMap<int, Layout>> ComputeLayoutForward(
47 const llvm::DenseMap<int, Layout>& input_layouts) override;
49 StatusOr<llvm::DenseMap<int, Layout>> ComputeLayoutBackward(
51 const llvm::DenseMap<int, Layout>& output_layouts) override;
57 StatusOr<Layout> MaybeRelayoutInputs(mlir::Operation* op, bool is_sparse,
58 const Layout& features_layout,
[all …]
meta_spmd_expander.cc
68 StatusOr<llvm::DenseMap<int, Layout>> LayoutsFromPackedTensor( in LayoutsFromPackedTensor()
69 int axis, const Layout& packed_layout, size_t num_unpacked_tensors) { in LayoutsFromPackedTensor()
73 const Layout unpacked_layout = in LayoutsFromPackedTensor()
75 llvm::DenseMap<int, Layout> layouts(num_unpacked_tensors); in LayoutsFromPackedTensor()
84 StatusOr<llvm::DenseMap<int, Layout>> LayoutFromUnpackedTensors( in LayoutFromUnpackedTensors()
85 int axis, const llvm::DenseMap<int, Layout>& unpacked_layouts) { in LayoutFromUnpackedTensors()
86 if (unpacked_layouts.empty()) return llvm::DenseMap<int, Layout>(); in LayoutFromUnpackedTensors()
89 const Layout& first_layout = it->getSecond(); in LayoutFromUnpackedTensors()
102 inferred_packed_layout_specs.push_back(Layout::kUnshardedDim); in LayoutFromUnpackedTensors()
108 std::string dimension = Layout::kUnshardedDim; in LayoutFromUnpackedTensors()
[all …]
gather_spmd_expander.cc
86 Layout::kUnshardedDim); in ExpandOp()
105 if (!Layout::IsUnshardedDimension(params_layout->sharding_spec(axis))) { in ExpandOp()
109 Layout::FromProto(tgt_params_layout).ValueOrDie())); in ExpandOp()
154 Layout::FromProto(tgt_indices_layout).ValueOrDie())); in ExpandOp()
167 StatusOr<llvm::DenseMap<int, Layout>>
169 mlir::Operation* op, const llvm::DenseMap<int, Layout>& input_layouts) { in ComputeLayoutForward()
176 absl::optional<Layout> params_layout; in ComputeLayoutForward()
179 absl::optional<Layout> indices_layout; in ComputeLayoutForward()
201 !Layout::IsUnshardedDimension(params_layout->sharding_spec(i))) in ComputeLayoutForward()
205 auto add_mesh_dim_if = [&](const absl::optional<Layout>& input_layout, in ComputeLayoutForward()
[all …]
scatter_spmd_expander.cc
33 StatusOr<Layout> GetOutputLayout(const absl::optional<Layout>& tensor_layout, in GetOutputLayout()
35 const absl::optional<Layout>& updates_layout, in GetOutputLayout()
48 output_specs[i].set_sharding_spec(Layout::kUnshardedDim); in GetOutputLayout()
55 if (Layout::IsShardedSpec(output_specs[i])) in GetOutputLayout()
65 if (Layout::IsUnshardedSpec(output_specs[i]) && in GetOutputLayout()
66 Layout::IsShardedSpec(update_spec) && in GetOutputLayout()
72 return Layout::GetLayout(output_specs, mesh); in GetOutputLayout()
109 Layout::ReplicatedOnMesh(indices_layout->mesh(), in TensorScatterOpExpand()
118 Layout pre_output_layout, in TensorScatterOpExpand()
123 updates_specs[0].set_sharding_spec(Layout::kUnshardedDim); in TensorScatterOpExpand()
[all …]
bias_add_spmd_expander.cc
40 int get_c_dimension_idx(const Layout& layout, llvm::StringRef data_format) { in get_c_dimension_idx()
62 TF_ASSIGN_OR_RETURN(Layout input_layout, in ExpandOp()
67 TF_ASSIGN_OR_RETURN(const Layout bias_layout, in ExpandOp()
82 const Layout new_input_layout, in ExpandOp()
83 Layout::GetLayout(input_new_specs, input_layout.mesh())); in ExpandOp()
97 TF_ASSIGN_OR_RETURN(const Layout new_bias_layout, in ExpandOp()
98 Layout::GetLayout(bias_new_specs, bias_layout.mesh())); in ExpandOp()
116 StatusOr<llvm::DenseMap<int, Layout>> BiasAddExpander::ComputeLayoutForward( in ComputeLayoutForward()
117 mlir::Operation* op, const llvm::DenseMap<int, Layout>& input_layouts) { in ComputeLayoutForward()
120 return llvm::DenseMap<int, Layout>(); in ComputeLayoutForward()
[all …]
trivial_spmd_expander.h
32 StatusOr<llvm::DenseMap<int, Layout>> ComputeLayoutForward( in ComputeLayoutForward()
34 const llvm::DenseMap<int, Layout>& input_layouts) override { in ComputeLayoutForward()
35 return llvm::DenseMap<int, Layout>(); in ComputeLayoutForward()
38 StatusOr<llvm::DenseMap<int, Layout>> ComputeLayoutBackward( in ComputeLayoutBackward()
40 const llvm::DenseMap<int, Layout>& output_layouts) override { in ComputeLayoutBackward()
41 return llvm::DenseMap<int, Layout>(); in ComputeLayoutBackward()
49 StatusOr<llvm::DenseMap<int, Layout>> ComputeLayoutForward( in ComputeLayoutForward()
51 const llvm::DenseMap<int, Layout>& input_layouts) override { in ComputeLayoutForward()
52 return llvm::DenseMap<int, Layout>(); in ComputeLayoutForward()
55 StatusOr<llvm::DenseMap<int, Layout>> ComputeLayoutBackward( in ComputeLayoutBackward()
[all …]
slice_spmd_expander.h
29 StatusOr<llvm::DenseMap<int, Layout>> ComputeLayoutForward(
31 const llvm::DenseMap<int, Layout>& input_layouts) override;
33 StatusOr<llvm::DenseMap<int, Layout>> ComputeLayoutBackward(
35 const llvm::DenseMap<int, Layout>& output_layouts) override;
42 StatusOr<llvm::DenseMap<int, Layout>> ComputeLayoutForward(
44 const llvm::DenseMap<int, Layout>& input_layouts) override;
46 StatusOr<llvm::DenseMap<int, Layout>> ComputeLayoutBackward(
48 const llvm::DenseMap<int, Layout>& output_layouts) override;
55 StatusOr<llvm::DenseMap<int, Layout>> ComputeLayoutForward(
57 const llvm::DenseMap<int, Layout>& input_layouts) override;
[all …]
segmentation_spmd_expander.cc
33 StatusOr<llvm::DenseMap<int, Layout>>
35 mlir::Operation* op, const llvm::DenseMap<int, Layout>& input_layouts) { in ComputeLayoutForward()
45 return llvm::DenseMap<int, Layout>( in ComputeLayoutForward()
52 return llvm::DenseMap<int, Layout>( in ComputeLayoutForward()
53 {{0, Layout::ReplicatedOnMesh(mesh, output_rank)}}); in ComputeLayoutForward()
56 StatusOr<llvm::DenseMap<int, Layout>>
58 mlir::Operation* op, const llvm::DenseMap<int, Layout>& output_layouts) { in ComputeLayoutBackward()
62 Layout segment_ids_layout = in ComputeLayoutBackward()
63 Layout::ReplicatedOnMesh(mesh, ValueRank(op->getOperand(1))); in ComputeLayoutBackward()
64 Layout num_segments_layout = Layout::ReplicatedOnMesh(mesh, /*rank=*/0); in ComputeLayoutBackward()
[all …]
broadcast_to_spmd_expander.cc
40 TF_ASSIGN_OR_RETURN(const Layout shape_layout, in ExpandOp()
48 TF_ASSIGN_OR_RETURN(const Layout input_layout, in ExpandOp()
50 TF_ASSIGN_OR_RETURN(const Layout output_layout, in ExpandOp()
75 if (output_layout_dim != Layout::kUnshardedDim) { in ExpandOp()
86 const Layout all_to_all_input_layout = in ExpandOp()
87 Layout::ReplicatedOnMesh(mesh, input_layout.rank()); in ExpandOp()
100 if (output_layout.sharding_spec(i) != Layout::kUnshardedDim) in ExpandOp()
123 StatusOr<llvm::DenseMap<int, Layout>>
125 mlir::Operation* op, const llvm::DenseMap<int, Layout>& input_layouts) { in ComputeLayoutForward()
128 return llvm::DenseMap<int, Layout>(); in ComputeLayoutForward()
[all …]
dataparallel_spmd_expander.cc
50 bool AllReplicated(const std::vector<Layout>& layouts) { in AllReplicated()
58 bool AllBatchParallel(const std::vector<Layout>& layouts, in AllBatchParallel()
66 bool SameBatchRank(const std::vector<Layout>& layouts, in SameBatchRank()
79 bool AnyLayoutExist(const llvm::DenseMap<int, Layout>& layouts, in AnyLayoutExist()
94 StatusOr<Layout> MergeBatchLayouts( in MergeBatchLayouts()
95 const llvm::DenseMap<int, Layout>& layouts, in MergeBatchLayouts()
107 std::vector<std::string> merged_specs(batch_rank, Layout::kUnshardedDim); in MergeBatchLayouts()
116 if (spec != Layout::kUnshardedDim) { in MergeBatchLayouts()
123 merged_specs[i] = Layout::kUnshardedDim; in MergeBatchLayouts()
132 spec = Layout::kUnshardedDim; in MergeBatchLayouts()
[all …]
/external/eigen/unsupported/test/
cxx11_tensor_block_eval.cpp
37 template <int Layout, int NumDims>
51 DSizes<Index, NumDims> strides = Eigen::internal::strides<Layout>(dims); in RandomBlock()
61 template <int Layout, int NumDims>
64 using BlockMapper = internal::TensorBlockMapper<NumDims, Layout, Index>; in SkewedInnerBlock()
75 auto strides = internal::strides<Layout>(dims); in SkewedInnerBlock()
80 if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) { in SkewedInnerBlock()
122 template <typename T, int NumDims, int Layout, typename Expression,
140 Tensor<T, NumDims, Layout> block(block_params.desc.dimensions()); in VerifyBlockEvaluator()
154 Tensor<T, NumDims, Layout> dst(dst_dims); in VerifyBlockEvaluator()
157 block_params.desc.template AddDestinationBuffer<Layout>( in VerifyBlockEvaluator()
[all …]
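
Note: in these Eigen tests, Layout is a template parameter selecting ColMajor or RowMajor storage. A small runnable example of the same parameter on Eigen::Tensor (assumes the unsupported Eigen Tensor module is on the include path):

  #include <unsupported/Eigen/CXX11/Tensor>
  #include <iostream>

  int main() {
    // Same shape, two storage orders: ColMajor makes the first index
    // fastest-varying in memory, RowMajor makes the last index fastest.
    Eigen::Tensor<float, 2, Eigen::ColMajor> col(2, 3);
    Eigen::Tensor<float, 2, Eigen::RowMajor> row(2, 3);
    col.setZero();
    row.setZero();
    std::cout << col.size() << " " << row.size() << "\n";  // 6 6
    return 0;
  }
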
/external/tensorflow/tensorflow/dtensor/tests/
tensor_layout_test.cc
64 Layout BatchLayout() { in BatchLayout()
65 return Layout::FromString("sharding_specs:x,batch, mesh:|x=4,batch=8|*TPU") in BatchLayout()
77 Layout layout = Layout::Empty(); in TEST_F()
81 EqualsProto(Layout::FromString(layout_str).ValueOrDie().ToProto())); in TEST_F()
85 Layout layout = BatchLayout(); in TEST_F()
89 EqualsProto(Layout::FromString(layout_str).ValueOrDie().ToProto())); in TEST_F()
93 std::string layout_str = "sharding_specs:x," + string(Layout::kUnshardedDim) + in TEST_F()
95 EXPECT_EQ(layout_str, Layout::FromString(layout_str)->ToString()); in TEST_F()
101 EXPECT_EQ(layout_str, Layout::FromString(layout_str)->ToString()); in TEST_F()
111 EXPECT_EQ(exp_layout_str, Layout::FromString(layout_str)->ToString()); in TEST_F()
[all …]
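
Note: the DTensor tests round-trip layouts through their string form. A hedged snippet mirroring the literals used above (headers omitted; the mesh string is abbreviated exactly as in the excerpt, so treat the full grammar as an assumption):

  // "sharding_specs:<spec>,..., mesh:<mesh spec>" round-trips through
  // Layout::FromString()/ToString(), as the EXPECT_EQs above assert.
  auto layout_or = Layout::FromString(
      "sharding_specs:x,batch, mesh:|x=4,batch=8|*TPU");
  if (layout_or.ok()) {
    const Layout layout = layout_or.ValueOrDie();
    const std::string str = layout.ToString();  // same string back
  }
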
/external/pigweed/pw_allocator/public/pw_allocator/
layout.h
56 class Layout {
58 constexpr Layout() : Layout(0) {} in Layout() function
59 constexpr explicit Layout(size_t size) in Layout() function
60 : Layout(size, alignof(std::max_align_t)) {} in Layout()
61 constexpr Layout(size_t size, size_t alignment) in Layout() function
66 static constexpr std::enable_if_t<!std::is_array_v<T>, Layout> Of() { in Of()
67 return Layout(sizeof(T), alignof(T)); in Of()
72 static constexpr std::enable_if_t<internal::is_bounded_array_v<T>, Layout>
74 return Layout(sizeof(T), alignof(std::remove_extent_t<T>)); in Of()
79 static constexpr std::enable_if_t<internal::is_unbounded_array_v<T>, Layout>
[all …]
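
Note: pw::allocator::Layout pairs a size with an alignment, and Layout::Of<T>() derives both from a type. A usage sketch based on the constructors shown above:

  #include "pw_allocator/layout.h"

  struct Widget {
    alignas(16) float values[4];
  };

  void MakeLayouts() {
    // Size only: alignment defaults to alignof(std::max_align_t).
    constexpr pw::allocator::Layout bytes(64);
    // Explicit size and alignment.
    constexpr pw::allocator::Layout aligned(64, 16);
    // Derived from a type: sizeof(Widget) and alignof(Widget).
    constexpr auto of_widget = pw::allocator::Layout::Of<Widget>();
    (void)bytes; (void)aligned; (void)of_widget;
  }
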
/external/pytorch/aten/src/ATen/native/vulkan/ops/
Pool.cpp
28 self_arg.size(Layout::Activation4D::batch), in adaptive_avg_pool2d()
29 self_arg.size(Layout::Activation4D::channels), in adaptive_avg_pool2d()
30 output_size[Layout::Activation4D::batch], in adaptive_avg_pool2d()
31 output_size[Layout::Activation4D::channels], in adaptive_avg_pool2d()
117 input_size[Layout::Activation4D::height], in pool2d()
118 kernel[Layout::Parameter::height], in pool2d()
119 padding[Layout::Parameter::height], in pool2d()
120 stride[Layout::Parameter::height], in pool2d()
121 dilation[Layout::Parameter::height], in pool2d()
125 input_size[Layout::Activation4D::width], in pool2d()
[all …]
/external/pytorch/c10/core/
Layout.h
10 enum class Layout : int8_t { enum
22 constexpr auto kStrided = Layout::Strided;
23 constexpr auto kSparse = Layout::Sparse;
24 constexpr auto kSparseCsr = Layout::SparseCsr;
25 constexpr auto kMkldnn = Layout::Mkldnn;
26 constexpr auto kSparseCsc = Layout::SparseCsc;
27 constexpr auto kSparseBsr = Layout::SparseBsr;
28 constexpr auto kSparseBsc = Layout::SparseBsc;
29 constexpr auto kJagged = Layout::Jagged;
31 inline Layout layout_from_backend(Backend backend) { in layout_from_backend()
[all …]
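
Note: c10::Layout is the enum behind a PyTorch tensor's layout, and the kStrided/kSparse/... constants above are aliases for its values. A small runnable example through the public libtorch API:

  #include <torch/torch.h>
  #include <iostream>

  int main() {
    // Dense tensors default to the strided layout.
    torch::Tensor dense = torch::ones({2, 2});
    std::cout << (dense.layout() == torch::kStrided) << "\n";  // 1

    // Converting to sparse COO changes the layout enum value.
    torch::Tensor sparse = dense.to_sparse();
    std::cout << (sparse.layout() == torch::kSparse) << "\n";  // 1
    return 0;
  }
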
