
Searched refs: split_dimension (Results 1 – 25 of 67) sorted by relevance


/external/tensorflow/tensorflow/core/ops/
tpu_cross_replica_ops.cc:44 int split_dimension; in __anon385994aa0102() local
70 TF_RETURN_IF_ERROR(c->GetAttr("split_dimension", &split_dimension)); in __anon385994aa0102()
71 if (split_dimension < 0 || split_dimension >= rank) { in __anon385994aa0102()
72 return errors::InvalidArgument("split_dimension ", split_dimension, in __anon385994aa0102()
77 !c->ValueKnown(c->Dim(input, split_dimension))) { in __anon385994aa0102()
90 if (i == split_dimension) { in __anon385994aa0102()
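
The guard at lines 70–72 simply range-checks the attribute, and line 77 bails out of inference when the size of that axis is unknown. A minimal Python restatement of the range check (the function name is illustrative, not the TF shape-inference API):

    def check_split_dimension(rank, split_dimension):
        # split_dimension must name a real axis of the input, mirroring
        # `if (split_dimension < 0 || split_dimension >= rank)`.
        if split_dimension < 0 or split_dimension >= rank:
            raise ValueError(
                f"split_dimension {split_dimension} out of range for rank {rank}")

    check_split_dimension(rank=3, split_dimension=1)  # ok
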
array_ops.cc:575 DimensionHandle split_dimension; in __anon847f0b680a02() local
578 0, c->Rank(input), &split_dimension)); in __anon847f0b680a02()
581 if (!c->ValueKnown(split_dimension)) { in __anon847f0b680a02()
588 int64_t split_dim = c->Value(split_dimension); in __anon847f0b680a02()
611 DimensionHandle split_dimension; in __anon847f0b680b02() local
614 2, c->Rank(input), &split_dimension)); in __anon847f0b680b02()
628 } else if (size_splits == nullptr && c->ValueKnown(split_dimension)) { in __anon847f0b680b02()
634 c->Value(split_dimension), in __anon847f0b680b02()
638 } else if (size_splits == nullptr && !c->ValueKnown(split_dimension)) { in __anon847f0b680b02()
649 int64_t split_dim = c->Value(split_dimension); in __anon847f0b680b02()
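
The Split/SplitV shape functions above only produce concrete output shapes once c->ValueKnown(split_dimension) holds; otherwise the outputs stay unknown. A rough Python model of that logic (helper and shape encoding are assumptions, with None standing in for an unknown value):

    def infer_split_shapes(input_shape, split_dimension, num_split):
        # Unknown split axis: nothing useful can be inferred.
        if split_dimension is None:
            return [[None] * len(input_shape)] * num_split
        out = list(input_shape)
        if out[split_dimension] is not None:
            # Each of the num_split outputs receives an equal slice.
            out[split_dimension] //= num_split
        return [list(out) for _ in range(num_split)]

    print(infer_split_shapes([6, 4], split_dimension=0, num_split=3))
    # [[2, 4], [2, 4], [2, 4]]
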
/external/ComputeLibrary/arm_compute/runtime/
IScheduler.h:77 …Hints(unsigned int split_dimension, StrategyHint strategy = StrategyHint::STATIC, int threshold = …
78 : _split_dimension(split_dimension), _strategy(strategy), _threshold(threshold) in _split_dimension() argument
87 Hints &set_split_dimension(unsigned int split_dimension) in set_split_dimension() argument
89 _split_dimension = split_dimension; in set_split_dimension()
96 unsigned int split_dimension() const in split_dimension() function
229 …std::size_t adjust_num_of_windows(const Window &window, std::size_t split_dimension, std::size_t i…
/external/ComputeLibrary/src/runtime/
IScheduler.cpp:62 if(hints.split_dimension() == IScheduler::split_dimensions_all) in schedule_common()
103 const unsigned int num_iterations = max_window.num_iterations(hints.split_dimension()); in schedule_common()
143 …num_windows = adjust_num_of_windows(max_window, hints.split_dimension(), num_windows, *kernel, cpu… in schedule_common()
151 Window win = max_window.split_window(hints.split_dimension(), t, num_windows); in schedule_common()
178 std::size_t IScheduler::adjust_num_of_windows(const Window &window, std::size_t split_dimension, st… in adjust_num_of_windows() argument
181 if(window.num_iterations(split_dimension) < init_num_windows ) in adjust_num_of_windows()
191 …suitable dimension to split the workload. Recommended: %zu recommended_split_dim", split_dimension, in adjust_num_of_windows()
198 if((window.num_iterations(split_dimension) / kernel.get_mws(cpu_info, t)) >= t) in adjust_num_of_windows()
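
schedule_common caps the window count by the number of iterations along the hinted split dimension (line 103), and adjust_num_of_windows then shrinks it so each window keeps at least the kernel's minimum workload size, get_mws (line 198). A hedged Python sketch of that arithmetic, with mws passed in as a plain number:

    def plan_windows(num_iterations, num_threads, mws):
        # Never create more windows than iterations along the split dimension.
        num_windows = min(num_iterations, num_threads)
        # Shrink until every window carries at least `mws` iterations,
        # loosely mirroring the adjust_num_of_windows() clamp.
        while num_windows > 1 and num_iterations // num_windows < mws:
            num_windows -= 1
        return num_windows

    print(plan_windows(num_iterations=100, num_threads=16, mws=10))  # 10
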
/external/tensorflow/tensorflow/compiler/xla/experimental/xla_sharding/
xla_sharding.py:170 def split(cls, tensor, split_dimension, num_devices, input_shape=None): argument
190 if (shape[split_dimension] is not None and
191 shape[split_dimension] < num_devices):
194 (shape, split_dimension, num_devices))
197 tile_assignment_dims[split_dimension] = num_devices
349 split_dimension, argument
364 return Sharding.split(tensor, split_dimension, num_devices,
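
Lines 190–194 reject a statically known dimension smaller than num_devices, and the module-level split() at lines 349/364 forwards to Sharding.split. A hedged usage sketch (TPU context omitted; import path taken from the result above):

    import tensorflow as tf
    from tensorflow.compiler.xla.experimental.xla_sharding import xla_sharding

    x = tf.ones([8, 16])
    # Annotate x to be tiled 4 ways along dimension 1, so each device
    # would hold an [8, 4] shard; shape[1] < num_devices would raise.
    x = xla_sharding.split(x, split_dimension=1, num_devices=4)
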
/external/ComputeLibrary/src/core/helpers/
WindowHelpers.cpp:244 size_t split_dimension = Window::DimY; in calculate_squashed_or_max_window() local
264 split_dimension = Window::DimX; in calculate_squashed_or_max_window()
283 return std::make_pair(win, split_dimension); in calculate_squashed_or_max_window()
293 size_t split_dimension = Window::DimY; in calculate_squashed_or_max_window() local
309 split_dimension = Window::DimX; in calculate_squashed_or_max_window()
325 return std::make_pair(win, split_dimension); in calculate_squashed_or_max_window()
/external/tensorflow/tensorflow/core/api_def/base_api/
api_def_AllToAll.pbtxt:37 name: "split_dimension"
52 `split_dimension` and send to the other replicas given group_assignment. After
62 split_dimension=1
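
The api_def describes AllToAll's data movement: every replica splits its operand into split_count blocks along split_dimension, exchanges blocks within its group_assignment group, then concatenates what it receives along concat_dimension. A single-process numpy emulation of one replica group (a stand-in for the real collective, not TF code):

    import numpy as np

    def all_to_all(per_replica, split_dimension, concat_dimension):
        n = len(per_replica)  # split_count equals the group size
        # Block j of replica i is sent to replica j...
        blocks = [np.split(x, n, axis=split_dimension) for x in per_replica]
        # ...and each replica concatenates its received blocks,
        # ordered by sending replica, along concat_dimension.
        return [np.concatenate([blocks[i][j] for i in range(n)],
                               axis=concat_dimension)
                for j in range(n)]

    a, b = np.zeros((2, 2)), np.ones((2, 2))
    out = all_to_all([a, b], split_dimension=1, concat_dimension=0)
    print(out[0].shape)  # (4, 1): columns split apart, rows stacked up
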
/external/tensorflow/tensorflow/core/ops/compat/ops_history_v2/
AllToAll.pbtxt:30 name: "split_dimension"
83 name: "split_dimension"
136 name: "split_dimension"
/external/ComputeLibrary/src/runtime/CPP/
SingleThreadScheduler.cpp:42 if(hints.split_dimension() != IScheduler::split_dimensions_all) in schedule()
44 const unsigned int num_iterations = max_window.num_iterations(hints.split_dimension()); in schedule()
/external/ComputeLibrary/src/cpu/operators/
CpuAdd.cpp:53 …const auto split_dimension = static_cast<kernels::CpuAddKernel *>(_kernel.get())->get_split_dimens… in run() local
55 NEScheduler::get().schedule_op(_kernel.get(), split_dimension, _kernel->window(), tensors); in run()
CpuSub.cpp:53 …const auto split_dimension = static_cast<kernels::CpuSubKernel *>(_kernel.get())->get_split_dimens… in run() local
55 NEScheduler::get().schedule_op(_kernel.get(), split_dimension, _kernel->window(), tensors); in run()
CpuActivation.cpp:53 …auto split_dimension = static_cast<kernels::CpuActivationKernel *>(_kernel.get())->get_split_dimen… in run() local
54 NEScheduler::get().schedule_op(_kernel.get(), split_dimension, _kernel->window(), tensors); in run()
CpuMul.cpp:57 …auto split_dimension = static_cast<kernels::CpuMulKernel *>(_kernel.get())->get_split_dimension_hi… in run() local
58 NEScheduler::get().schedule_op(_kernel.get(), split_dimension, _kernel->window(), tensors); in run()
/external/tensorflow/tensorflow/python/tpu/ops/
tpu_ops.py:42 split_dimension, argument
68 split_dimension=split_dimension,
83 split_dimension=op.get_attr("concat_dimension"),
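
Note that the gradient at line 83 feeds the forward op's concat_dimension back in as split_dimension: the backward pass is the inverse exchange, so the two roles swap. A hedged call sketch (x and the TPU runtime context are assumed):

    from tensorflow.python.tpu.ops import tpu_ops

    # Two replicas in one group exchange halves of x: each splits
    # along dim 1 and concatenates the received halves along dim 0.
    y = tpu_ops.all_to_all(x,
                           group_assignment=[[0, 1]],
                           concat_dimension=0,
                           split_dimension=1,
                           split_count=2)
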
/external/tensorflow/tensorflow/core/ops/compat/ops_history_v1/
AllToAll.pbtxt:30 name: "split_dimension"
83 name: "split_dimension"
/external/tensorflow/tensorflow/python/tpu/
tpu_test.py:179 split_dimension=0,
189 split_dimension=0,
200 split_dimension=0,
210 split_dimension=0,
/external/ComputeLibrary/src/runtime/OMP/
OMPScheduler.cpp:63 const unsigned int num_iterations = max_window.num_iterations(hints.split_dimension()); in schedule_op()
80 Window win = max_window.split_window(hints.split_dimension(), t, num_windows); in schedule_op()
/external/tensorflow/tensorflow/compiler/mlir/tensorflow/utils/
xla_sharding_util.cc:46 const int split_dimension, in CreateSplitOp() argument
55 mlir::DenseElementsAttr::get(split_dim_type, split_dimension); in CreateSplitOp()
65 if (input_type.getShape()[split_dimension] == in CreateSplitOp()
70 if (shape[split_dimension] % num_split != 0) { in CreateSplitOp()
76 split_dimension, num_split)); in CreateSplitOp()
79 shape[split_dimension] = shape[split_dimension] / num_split; in CreateSplitOp()
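
Both this helper and the DTensor copy below it guard the same precondition: the dimension being sharded must divide evenly by num_split, and each shard's shape then carries shape[split_dimension] / num_split. The same rule in a few lines of Python (names are illustrative):

    def shard_shape(shape, split_dimension, num_split):
        if shape[split_dimension] % num_split != 0:
            raise ValueError(
                f"dimension {split_dimension} of size {shape[split_dimension]} "
                f"cannot be split into {num_split} equal parts")
        shard = list(shape)
        shard[split_dimension] //= num_split
        return shard

    print(shard_shape([8, 6], split_dimension=1, num_split=3))  # [8, 2]
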
/external/tensorflow/tensorflow/dtensor/mlir/
spmd_expander_common.cc:97 Status CreateSplitOp(const int num_split, const int split_dimension, in CreateSplitOp() argument
104 mlir::DenseElementsAttr::get(split_dim_type, split_dimension); in CreateSplitOp()
114 if (input_type.getShape()[split_dimension] == in CreateSplitOp()
119 if (shape[split_dimension] % num_split != 0) { in CreateSplitOp()
124 split_dimension, num_split) in CreateSplitOp()
128 shape[split_dimension] = shape[split_dimension] / num_split; in CreateSplitOp()
spmd_expander_common.h:65 Status CreateSplitOp(const int num_split, const int split_dimension,
/external/ComputeLibrary/docs/contributor_guide/
implementation_topics.dox:88 const unsigned int num_iterations = max_window.num_iterations(split_dimension);
107 Window win = max_window.split_window(split_dimension, t, info.num_threads);
113 Window win = max_window.split_window(split_dimension, t, info.num_threads);
/external/tensorflow/tensorflow/security/advisory/
tfsa-2021-176.md:18 split_dimension=0,
/external/armnn/src/backends/neon/
NeonInterceptorScheduler.cpp:28 m_RealScheduler.schedule(kernel, hints.split_dimension()); in schedule()
/external/tensorflow/tensorflow/compiler/xla/service/
all_to_all_decomposer.cc:58 int64_t split_dim = *all_to_all->split_dimension(); in ExpandInstruction()
/external/tensorflow/tensorflow/compiler/xla/client/
xla_builder.cc:3077 XlaOp XlaBuilder::AllToAll(XlaOp operand, int64_t split_dimension, in AllToAll() argument
3084 return AllToAllTuple(operand, split_dimension, concat_dimension, in AllToAll()
3087 return AllToAllArray(operand, split_dimension, concat_dimension, split_count, in AllToAll()
3091 XlaOp XlaBuilder::AllToAllArray(XlaOp operand, int64_t split_dimension, in AllToAllArray() argument
3098 ShapeInference::InferAllToAllShape(*operand_shape, split_dimension, in AllToAllArray()
3112 instr.add_dimensions(split_dimension); in AllToAllArray()
3116 if (split_dimension == concat_dimension) { in AllToAllArray()
3121 if (i != split_dimension) { in AllToAllArray()
3134 int64_t dim_after_reshape = i >= split_dimension ? i + 1 : i; in AllToAllArray()
3136 permutation.push_back(split_dimension); in AllToAllArray()
[all …]
