
Searched refs: half_to_float (Results 1 – 25 of 37) sorted by relevance

/external/pytorch/aten/src/ATen/native/mkldnn/
SoftMax.cpp
19 const bool half_to_float) { in mkldnn_softmax() argument
36 const bool half_to_float) { in mkldnn_softmax() argument
38 !half_to_float, in mkldnn_softmax()
/external/executorch/kernels/portable/cpu/test/
scalar_utils_test.cpp
28 template <typename T1, bool half_to_float>
46 promote_type_with_scalar_type<T1, T2, half_to_float>::type>::value; in testOne()
51 scalarType1, scalar_value, half_to_float); in testOne()
54 << " given half_to_float = " << half_to_float << " expected " in testOne()
/external/executorch/kernels/portable/cpu/util/
activation_ops_util.cpp
60 bool half_to_float, in check_log_softmax_args() argument
63 !half_to_float, "half to float conversion is not supported on CPU"); in check_log_softmax_args()
74 bool half_to_float, in check_softmax_args() argument
76 return check_log_softmax_args(in, dim, half_to_float, out); in check_softmax_args()
activation_ops_util.h
23 bool half_to_float,
29 bool half_to_float,
/external/pytorch/aten/src/ATen/native/vulkan/ops/
Softmax.cpp
91 const bool half_to_float) { in softmax_internal() argument
188 const bool half_to_float) { in softmax() argument
189 return softmax_internal(input_arg, dim, half_to_float); in softmax()
195 const bool half_to_float) { in log_softmax() argument
204 return softmax_internal(input_arg, dim, half_to_float).add(epsilon).log(); in log_softmax()
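
An aside on the Vulkan hit above: line 204 derives log_softmax from softmax as log(softmax(x) + epsilon) instead of using a dedicated kernel, with epsilon guarding log() against probabilities that underflow to zero. A minimal sketch of the same identity on plain floats, assuming an illustrative epsilon (the backend's actual value is not shown in the match):

#include <cmath>
#include <cstddef>
#include <vector>

// log_softmax built on softmax, mirroring the Vulkan backend's
// softmax(...).add(epsilon).log() chain: the epsilon keeps log()
// finite when a probability underflows to 0.
std::vector<float> log_softmax_via_softmax(const std::vector<float>& x,
                                           float epsilon = 1e-30f) {
  float max = x[0];
  for (float v : x) max = std::fmax(max, v);  // shift for stability
  std::vector<float> out(x.size());
  float sum = 0.0f;
  for (std::size_t i = 0; i < x.size(); ++i) {
    out[i] = std::exp(x[i] - max);
    sum += out[i];
  }
  for (float& v : out) v = std::log(v / sum + epsilon);
  return out;
}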
/external/pytorch/aten/src/ATen/native/
SoftMax.cpp
41 (const Tensor& input, const int64_t dim, const bool half_to_float) { in TORCH_META_FUNC()
47 if (half_to_float) { in TORCH_META_FUNC()
62 const bool half_to_float) { in TORCH_META_FUNC()
68 if (half_to_float) { in TORCH_META_FUNC()
93 bool half_to_float = grad.scalar_type() != input_dtype; in TORCH_META_FUNC() local
94 if (half_to_float) { in TORCH_META_FUNC()
125 bool half_to_float = grad.scalar_type() != input_dtype; in TORCH_META_FUNC() local
126 if (half_to_float) { in TORCH_META_FUNC()
338 const bool half_to_float, in TORCH_IMPL_FUNC()
340 TORCH_CHECK(!half_to_float, "softmax with half to float conversion is not supported on CPU"); in TORCH_IMPL_FUNC()
[all …]
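
The ATen hits above pin down the contract for half_to_float: the meta functions widen the output dtype when the flag is set (lines 47/68), the backward paths derive it by comparing grad and input dtypes (lines 93/125), and the CPU kernel rejects it outright (line 340). A minimal sketch of exercising the flag through the public ATen C++ API, assuming a CUDA build; on CPU the same true-flag call would trip the TORCH_CHECK quoted above:

#include <ATen/ATen.h>

int main() {
  // Half input, float output fused in one kernel: only the CUDA
  // backend accepts half_to_float == true.
  at::Tensor x = at::randn({4, 8}, at::device(at::kCUDA).dtype(at::kHalf));
  at::Tensor y = at::_softmax(x, /*dim=*/-1, /*half_to_float=*/true);
  // y.scalar_type() == at::kFloat here.

  // On CPU the widening has to happen outside the kernel instead:
  at::Tensor x_cpu = x.to(at::kCPU).to(at::kFloat);
  at::Tensor y_cpu = at::_softmax(x_cpu, /*dim=*/-1, /*half_to_float=*/false);
  return 0;
}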
/external/executorch/kernels/portable/cpu/
scalar_utils.h
83 template <typename T1, typename T2, bool half_to_float = false>
115 half_to_float &&
140 bool half_to_float = false) {
141 if (half_to_float && t == ScalarType::Half) {
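
Lines 140–141 of scalar_utils.h above show the runtime form of the promotion rule: with half_to_float set, a Half scalar type is treated as Float before the usual promotion logic runs. A hypothetical standalone sketch of that rule; promote_for_softmax and the trimmed ScalarType enum are illustrative stand-ins, not the ExecuTorch API:

#include <cassert>

enum class ScalarType { Half, Float, Double };  // trimmed for illustration

// Hypothetical helper mirroring the check on lines 140-141 above:
// with half_to_float set, Half inputs promote to Float up front.
ScalarType promote_for_softmax(ScalarType t, bool half_to_float = false) {
  if (half_to_float && t == ScalarType::Half) {
    return ScalarType::Float;
  }
  return t;
}

int main() {
  assert(promote_for_softmax(ScalarType::Half, true) == ScalarType::Float);
  assert(promote_for_softmax(ScalarType::Half, false) == ScalarType::Half);
  assert(promote_for_softmax(ScalarType::Double, true) == ScalarType::Double);
  return 0;
}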
op_softmax.cpp
26 bool half_to_float, in softmax_out() argument
32 check_softmax_args(in, dim, half_to_float, out), in softmax_out()
op_log_softmax.cpp
26 bool half_to_float, in log_softmax_out() argument
32 check_log_softmax_args(in, dim, half_to_float, out), in log_softmax_out()
/external/pytorch/aten/src/ATen/native/sparse/
ParamUtils.cpp
19 const bool half_to_float, in softmax_sparse_input_preprocessing() argument
23 !half_to_float, in softmax_sparse_input_preprocessing()
ParamUtils.h
12 const bool half_to_float,
SoftMax.cpp
538 const bool half_to_float) { in softmax_sparse_cpu() argument
542 input_, dim_, half_to_float, "softmax"); in softmax_sparse_cpu()
555 const bool half_to_float) { in log_softmax_sparse_cpu() argument
559 input_, dim_, half_to_float, "log_softmax"); in log_softmax_sparse_cpu()
/external/executorch/runtime/core/exec_aten/util/test/
scalar_type_util_test.cpp
179 template <typename T1, bool half_to_float>
194 typename executorch::runtime::promote_types<T1, T2, half_to_float>:: in testOne()
201 scalarType1, scalarType2, half_to_float); in testOne()
204 << " (half to float: " << half_to_float << ')'; in testOne()
/external/pytorch/aten/src/ATen/native/cuda/
SoftMax.cu
820 Tensor host_softmax(const Tensor & input_, const int64_t dim_, const bool half_to_float, const Tens… in host_softmax() argument
821 if (half_to_float) { in host_softmax()
846 if (!half_to_float) { in host_softmax()
931 if (!half_to_float) { in host_softmax()
958 …ckward(const Tensor &grad_, const Tensor &output_, int64_t dim_, bool half_to_float, const Tensor … in host_softmax_backward() argument
982 if (!half_to_float) { in host_softmax_backward()
1037 if (!half_to_float) { in host_softmax_backward()
1070 const bool half_to_float, in TORCH_IMPL_FUNC()
1072 host_softmax<LogSoftMaxForwardEpilogue,true>(input, dim, half_to_float, output); in TORCH_IMPL_FUNC()
1081 bool half_to_float = grad.scalar_type() != input_dtype; in TORCH_IMPL_FUNC() local
[all …]
/external/pytorch/torch/distributed/tensor/parallel/
loss.py
127 def _log_softmax(x, dim, half_to_float, mesh, mesh_dim): argument
129 if half_to_float:
149 if not half_to_float:
161 half_to_float = cast(bool, args[2])
168 res = _log_softmax(x._local_tensor, dim, half_to_float, spec.mesh, mesh_dim)
/external/igt-gpu-tools/lib/
igt_halffloat.c
191 static void half_to_float(const uint16_t *h, float *f, unsigned int num) in half_to_float() function
213 return half_to_float; in resolve_half_to_float()
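
igt_halffloat.c is the one hit where half_to_float is the conversion itself rather than a softmax flag: it decodes an array of IEEE-754 binary16 values to binary32, and the resolve_half_to_float() on line 213 suggests a resolver that can swap in a hardware (e.g. F16C) path. A minimal scalar sketch of the bit-level decode, not the library's implementation:

#include <cstdint>
#include <cstring>

// Decode one IEEE-754 binary16 value to binary32.
// Covers normals, subnormals, signed zero, infinities and NaN.
float half_bits_to_float(std::uint16_t h) {
  std::uint32_t sign = static_cast<std::uint32_t>(h & 0x8000u) << 16;
  std::uint32_t exp = (h >> 10) & 0x1fu;
  std::uint32_t mant = h & 0x3ffu;
  std::uint32_t bits;

  if (exp == 0) {
    if (mant == 0) {
      bits = sign;  // signed zero
    } else {
      // Subnormal: renormalize until the implicit leading bit is set.
      exp = 127 - 15 + 1;
      while (!(mant & 0x400u)) {
        mant <<= 1;
        --exp;
      }
      bits = sign | (exp << 23) | ((mant & 0x3ffu) << 13);
    }
  } else if (exp == 0x1fu) {
    bits = sign | 0x7f800000u | (mant << 13);  // inf / NaN (payload kept)
  } else {
    bits = sign | ((exp - 15 + 127) << 23) | (mant << 13);  // normal
  }

  float f;
  std::memcpy(&f, &bits, sizeof f);
  return f;
}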
/external/executorch/kernels/optimized/cpu/
op_log_softmax.cpp
131 bool half_to_float, in opt_log_softmax_out() argument
137 check_log_softmax_args(self, dim, half_to_float, out), in opt_log_softmax_out()
/external/executorch/kernels/test/
op_softmax_test.cpp
30 bool half_to_float, in op_softmax_out() argument
33 context_, self, dim, half_to_float, out); in op_softmax_out()
op_log_softmax_test.cpp
31 bool half_to_float, in op_log_softmax_out() argument
34 context_, self, dim, half_to_float, out); in op_log_softmax_out()
/external/executorch/backends/apple/mps/runtime/operations/
ActivationOps.mm
122 …ET_CHECK_MSG(!graphNode->half_to_float(), "softmax with half to float conversion is not supported …
140 …ET_CHECK_MSG(!graphNode->half_to_float(), "softmax with half to float conversion is not supported …
/external/pytorch/aten/src/ATen/native/sparse/cuda/
SoftMax.cu
590 const bool half_to_float) { in softmax_sparse_cuda() argument
594 input_, dim_, half_to_float, "softmax"); in softmax_sparse_cuda()
607 const bool half_to_float) { in log_softmax_sparse_cuda() argument
611 input_, dim_, half_to_float, "log_softmax"); in log_softmax_sparse_cuda()
/external/pytorch/aten/src/ATen/native/mps/operations/
SoftMax.mm
40 (const Tensor& input_, const int64_t dim, const bool half_to_float, const Tensor& output) {
41 TORCH_CHECK(!half_to_float, "softmax with half to float conversion is not supported on MPS");
/external/executorch/backends/apple/mps/operators/
activation_ops.py
97 mps_node.mpsnode_union.half_to_float = cast(bool, node.args[2])
/external/executorch/backends/apple/mps/serialization/
mps_graph_schema.py
145 half_to_float: bool = False variable in MPSSoftmax
151 half_to_float: bool = False variable in MPSLogSoftmax
/external/pytorch/test/distributed/_tensor/experimental/
test_register_sharding.py
33 half_to_float: torch.dtype,
