| /external/pytorch/aten/src/ATen/native/mkldnn/ |
| D | SoftMax.cpp |
|   19 const bool half_to_float) { in mkldnn_softmax() argument
|   36 const bool half_to_float) { in mkldnn_softmax() argument
|   38 !half_to_float, in mkldnn_softmax()
|
| /external/executorch/kernels/portable/cpu/test/ |
| D | scalar_utils_test.cpp |
|   28 template <typename T1, bool half_to_float>
|   46 promote_type_with_scalar_type<T1, T2, half_to_float>::type>::value; in testOne()
|   51 scalarType1, scalar_value, half_to_float); in testOne()
|   54 << " given half_to_float = " << half_to_float << " expected " in testOne()
|
| /external/executorch/kernels/portable/cpu/util/ |
| D | activation_ops_util.cpp |
|   60 bool half_to_float, in check_log_softmax_args() argument
|   63 !half_to_float, "half to float conversion is not supported on CPU"); in check_log_softmax_args()
|   74 bool half_to_float, in check_softmax_args() argument
|   76 return check_log_softmax_args(in, dim, half_to_float, out); in check_softmax_args()
|
| D | activation_ops_util.h |
|   23 bool half_to_float,
|   29 bool half_to_float,
|
| /external/pytorch/aten/src/ATen/native/vulkan/ops/ |
| D | Softmax.cpp |
|   91 const bool half_to_float) { in softmax_internal() argument
|   188 const bool half_to_float) { in softmax() argument
|   189 return softmax_internal(input_arg, dim, half_to_float); in softmax()
|   195 const bool half_to_float) { in log_softmax() argument
|   204 return softmax_internal(input_arg, dim, half_to_float).add(epsilon).log(); in log_softmax()
|
| /external/pytorch/aten/src/ATen/native/ |
| D | SoftMax.cpp |
|   41 (const Tensor& input, const int64_t dim, const bool half_to_float) { in TORCH_META_FUNC()
|   47 if (half_to_float) { in TORCH_META_FUNC()
|   62 const bool half_to_float) { in TORCH_META_FUNC()
|   68 if (half_to_float) { in TORCH_META_FUNC()
|   93 bool half_to_float = grad.scalar_type() != input_dtype; in TORCH_META_FUNC() local
|   94 if (half_to_float) { in TORCH_META_FUNC()
|   125 bool half_to_float = grad.scalar_type() != input_dtype; in TORCH_META_FUNC() local
|   126 if (half_to_float) { in TORCH_META_FUNC()
|   338 const bool half_to_float, in TORCH_IMPL_FUNC()
|   340 TORCH_CHECK(!half_to_float, "softmax with half to float conversion is not supported on CPU"); in TORCH_IMPL_FUNC()
|   [all …]
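Taken together, these hits spell out the contract for the flag: the meta functions widen the output dtype to float when half_to_float is set, the backward functions infer it from a grad/input dtype mismatch, and the CPU kernel rejects it outright (the CUDA kernel in SoftMax.cu below accepts it and fuses the cast). A minimal Python sketch of that behavior, assuming the usual aten::_softmax(Tensor self, int dim, bool half_to_float) schema exposed through torch.ops:

    import torch

    x = torch.randn(4, 8, dtype=torch.float16)

    # The CPU kernel rejects the flag (the TORCH_CHECK at SoftMax.cpp:340 above).
    try:
        torch.ops.aten._softmax(x, -1, True)
    except RuntimeError as err:
        print(err)  # "softmax with half to float conversion is not supported on CPU"

    # On CUDA the flag fuses the dtype cast into the kernel:
    # half input in, float32 output out of a single launch.
    if torch.cuda.is_available():
        y = torch.ops.aten._softmax(x.cuda(), -1, True)
        assert y.dtype == torch.float32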
|
| /external/executorch/kernels/portable/cpu/ |
| D | scalar_utils.h |
|   83 template <typename T1, typename T2, bool half_to_float = false>
|   115 half_to_float &&
|   140 bool half_to_float = false) {
|   141 if (half_to_float && t == ScalarType::Half) {
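The runtime helper at scalar_utils.h:140 suggests the promotion rule: when half_to_float is set, a Half scalar type is treated as Float. A hypothetical stand-alone sketch of that rule (the promote() helper below is illustrative, not the ExecuTorch API):

    import torch

    def promote(t: torch.dtype, half_to_float: bool = False) -> torch.dtype:
        # Mirror the check `if (half_to_float && t == ScalarType::Half)` above.
        return torch.float32 if (half_to_float and t == torch.float16) else t

    assert promote(torch.float16, half_to_float=True) == torch.float32
    assert promote(torch.float16) == torch.float16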
|
| D | op_softmax.cpp |
|   26 bool half_to_float, in softmax_out() argument
|   32 check_softmax_args(in, dim, half_to_float, out), in softmax_out()
|
| D | op_log_softmax.cpp |
|   26 bool half_to_float, in log_softmax_out() argument
|   32 check_log_softmax_args(in, dim, half_to_float, out), in log_softmax_out()
|
| /external/pytorch/aten/src/ATen/native/sparse/ |
| D | ParamUtils.cpp |
|   19 const bool half_to_float, in softmax_sparse_input_preprocessing() argument
|   23 !half_to_float, in softmax_sparse_input_preprocessing()
|
| D | ParamUtils.h | 12 const bool half_to_float,
|
| D | SoftMax.cpp |
|   538 const bool half_to_float) { in softmax_sparse_cpu() argument
|   542 input_, dim_, half_to_float, "softmax"); in softmax_sparse_cpu()
|   555 const bool half_to_float) { in log_softmax_sparse_cpu() argument
|   559 input_, dim_, half_to_float, "log_softmax"); in log_softmax_sparse_cpu()
|
| /external/executorch/runtime/core/exec_aten/util/test/ |
| D | scalar_type_util_test.cpp |
|   179 template <typename T1, bool half_to_float>
|   194 typename executorch::runtime::promote_types<T1, T2, half_to_float>:: in testOne()
|   201 scalarType1, scalarType2, half_to_float); in testOne()
|   204 << " (half to float: " << half_to_float << ')'; in testOne()
|
| /external/pytorch/aten/src/ATen/native/cuda/ |
| D | SoftMax.cu |
|   820 Tensor host_softmax(const Tensor & input_, const int64_t dim_, const bool half_to_float, const Tens… in host_softmax() argument
|   821 if (half_to_float) { in host_softmax()
|   846 if (!half_to_float) { in host_softmax()
|   931 if (!half_to_float) { in host_softmax()
|   958 …ckward(const Tensor &grad_, const Tensor &output_, int64_t dim_, bool half_to_float, const Tensor … in host_softmax_backward() argument
|   982 if (!half_to_float) { in host_softmax_backward()
|   1037 if (!half_to_float) { in host_softmax_backward()
|   1070 const bool half_to_float, in TORCH_IMPL_FUNC()
|   1072 host_softmax<LogSoftMaxForwardEpilogue,true>(input, dim, half_to_float, output); in TORCH_IMPL_FUNC()
|   1081 bool half_to_float = grad.scalar_type() != input_dtype; in TORCH_IMPL_FUNC() local
|   [all …]
|
| /external/pytorch/torch/distributed/tensor/parallel/ |
| D | loss.py |
|   127 def _log_softmax(x, dim, half_to_float, mesh, mesh_dim): argument
|   129 if half_to_float:
|   149 if not half_to_float:
|   161 half_to_float = cast(bool, args[2])
|   168 res = _log_softmax(x._local_tensor, dim, half_to_float, spec.mesh, mesh_dim)
|
| /external/igt-gpu-tools/lib/ |
| D | igt_halffloat.c |
|   191 static void half_to_float(const uint16_t *h, float *f, unsigned int num) in half_to_float() function
|   213 return half_to_float; in resolve_half_to_float()
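Unlike the softmax hits above, half_to_float here is a plain IEEE 754 binary16-to-binary32 array conversion, with resolve_half_to_float() apparently picking an implementation at runtime. For reference, a single-value Python equivalent (illustrative only, not the igt code path):

    import struct

    def half_to_float(h: int) -> float:
        # Decode one binary16 value stored as a uint16 into a Python float.
        return struct.unpack('<e', struct.pack('<H', h))[0]

    assert half_to_float(0x3C00) == 1.0  # 0x3C00 encodes 1.0 in binary16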
|
| /external/executorch/kernels/optimized/cpu/ |
| D | op_log_softmax.cpp |
|   131 bool half_to_float, in opt_log_softmax_out() argument
|   137 check_log_softmax_args(self, dim, half_to_float, out), in opt_log_softmax_out()
|
| /external/executorch/kernels/test/ |
| D | op_softmax_test.cpp |
|   30 bool half_to_float, in op_softmax_out() argument
|   33 context_, self, dim, half_to_float, out); in op_softmax_out()
|
| D | op_log_softmax_test.cpp |
|   31 bool half_to_float, in op_log_softmax_out() argument
|   34 context_, self, dim, half_to_float, out); in op_log_softmax_out()
|
| /external/executorch/backends/apple/mps/runtime/operations/ |
| D | ActivationOps.mm |
|   122 …ET_CHECK_MSG(!graphNode->half_to_float(), "softmax with half to float conversion is not supported …
|   140 …ET_CHECK_MSG(!graphNode->half_to_float(), "softmax with half to float conversion is not supported …
|
| /external/pytorch/aten/src/ATen/native/sparse/cuda/ |
| D | SoftMax.cu |
|   590 const bool half_to_float) { in softmax_sparse_cuda() argument
|   594 input_, dim_, half_to_float, "softmax"); in softmax_sparse_cuda()
|   607 const bool half_to_float) { in log_softmax_sparse_cuda() argument
|   611 input_, dim_, half_to_float, "log_softmax"); in log_softmax_sparse_cuda()
|
| /external/pytorch/aten/src/ATen/native/mps/operations/ |
| D | SoftMax.mm |
|   40 (const Tensor& input_, const int64_t dim, const bool half_to_float, const Tensor& output) {
|   41 TORCH_CHECK(!half_to_float, "softmax with half to float conversion is not supported on MPS");
|
| /external/executorch/backends/apple/mps/operators/ |
| D | activation_ops.py | 97 mps_node.mpsnode_union.half_to_float = cast(bool, node.args[2])
|
| /external/executorch/backends/apple/mps/serialization/ |
| D | mps_graph_schema.py |
|   145 half_to_float: bool = False variable in MPSSoftmax
|   151 half_to_float: bool = False variable in MPSLogSoftmax
|
| /external/pytorch/test/distributed/_tensor/experimental/ |
| D | test_register_sharding.py | 33 half_to_float: torch.dtype,
|