
Searched defs:half_to_float (Results 1 – 20 of 20) sorted by relevance

/external/pytorch/aten/src/ATen/native/mkldnn/
SoftMax.cpp 19 const bool half_to_float) { in mkldnn_softmax()
36 const bool half_to_float) { in mkldnn_softmax()
/external/pytorch/aten/src/ATen/native/vulkan/ops/
Softmax.cpp 91 const bool half_to_float) { in softmax_internal()
188 const bool half_to_float) { in softmax()
195 const bool half_to_float) { in log_softmax()
/external/executorch/kernels/portable/cpu/util/
activation_ops_util.cpp 60 bool half_to_float, in check_log_softmax_args()
74 bool half_to_float, in check_softmax_args()
/external/pytorch/aten/src/ATen/native/sparse/
ParamUtils.cpp 19 const bool half_to_float, in softmax_sparse_input_preprocessing()
SoftMax.cpp 538 const bool half_to_float) { in softmax_sparse_cpu()
555 const bool half_to_float) { in log_softmax_sparse_cpu()
/external/executorch/kernels/portable/cpu/
op_softmax.cpp 26 bool half_to_float, in softmax_out()
op_log_softmax.cpp 26 bool half_to_float, in log_softmax_out()
/external/executorch/kernels/optimized/cpu/
op_log_softmax.cpp 131 bool half_to_float, in opt_log_softmax_out()
/external/igt-gpu-tools/lib/
igt_halffloat.c 191 static void half_to_float(const uint16_t *h, float *f, unsigned int num) in half_to_float() function
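
Note: unlike the PyTorch and ExecuTorch hits, where half_to_float is a boolean operator flag, this igt-gpu-tools hit is an actual fp16-to-fp32 conversion routine (its signature converts num values in a batch). The sketch below is only a minimal, illustrative decoder for a single IEEE 754 binary16 value, not the igt_halffloat.c implementation; the function name merely mirrors the search term.

    def half_to_float(h: int) -> float:
        """Decode one IEEE 754 binary16 bit pattern (0..0xFFFF) to a Python float.

        Illustrative sketch only; real implementations typically use table
        lookups or hardware conversion instructions instead of this arithmetic.
        """
        sign = (h >> 15) & 0x1          # 1 sign bit
        exp = (h >> 10) & 0x1F          # 5 exponent bits, bias 15
        frac = h & 0x3FF                # 10 fraction bits

        if exp == 0:                    # zero / subnormal: no implicit leading 1
            value = frac * 2.0 ** -24
        elif exp == 0x1F:               # all-ones exponent: infinity or NaN
            value = float("inf") if frac == 0 else float("nan")
        else:                           # normal: implicit leading 1
            value = (1.0 + frac / 1024.0) * 2.0 ** (exp - 15)
        return -value if sign else value

    assert half_to_float(0x3C00) == 1.0
    assert half_to_float(0xC000) == -2.0
    assert half_to_float(0x7C00) == float("inf")
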
/external/pytorch/torch/distributed/tensor/parallel/
loss.py 127 def _log_softmax(x, dim, half_to_float, mesh, mesh_dim): argument
/external/pytorch/aten/src/ATen/native/
SoftMax.cpp 93 bool half_to_float = grad.scalar_type() != input_dtype; in TORCH_META_FUNC() local
125 bool half_to_float = grad.scalar_type() != input_dtype; in TORCH_META_FUNC() local
/external/executorch/backends/apple/mps/serialization/
mps_graph_schema.py 145 half_to_float: bool = False variable in MPSSoftmax
151 half_to_float: bool = False variable in MPSLogSoftmax
/external/pytorch/aten/src/ATen/native/cuda/
SoftMax.cu 820 Tensor host_softmax(const Tensor & input_, const int64_t dim_, const bool half_to_float, const Tens… in host_softmax()
958 …backward(const Tensor &grad_, const Tensor &output_, int64_t dim_, bool half_to_float, const Tenso… in host_softmax_backward()
1081 bool half_to_float = grad.scalar_type() != input_dtype; in TORCH_IMPL_FUNC() local
1105 bool half_to_float = grad.scalar_type() != input_dtype; in TORCH_IMPL_FUNC() local
/external/executorch/kernels/test/
op_softmax_test.cpp 30 bool half_to_float, in op_softmax_out()
op_log_softmax_test.cpp 31 bool half_to_float, in op_log_softmax_out()
/external/pytorch/aten/src/ATen/native/sparse/cuda/
SoftMax.cu 590 const bool half_to_float) { in softmax_sparse_cuda()
607 const bool half_to_float) { in log_softmax_sparse_cuda()
/external/pytorch/aten/src/ATen/native/nested/
NestedTensorMath.cpp 507 const bool half_to_float) { in softmax_nested()
/external/pytorch/torch/csrc/jit/runtime/static/
generated_ops.cpp 1214 const auto half_to_float = p_node->Input(2).toBool(); in __anon7ebb59397802() local
1752 const auto half_to_float = p_node->Input(2).toBool(); in __anon7ebb5939ad02() local
ops.cpp 2067 auto half_to_float = in_t.scalar_type() == at::ScalarType::Half && in __anon5bf8650b6302() local
/external/pytorch/torch/onnx/
symbolic_opset9.py 2240 def _log_softmax(g: jit_utils.GraphContext, input, dim, half_to_float): argument
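
Across the PyTorch and ExecuTorch hits above, half_to_float is the boolean argument of the internal (log_)softmax operators that says "the input is Half, compute and return the result in Float"; the backward entries in SoftMax.cpp and SoftMax.cu infer it by comparing the gradient's dtype with the recorded input dtype. In the public PyTorch API the same request is expressed through the dtype argument of softmax/log_softmax. The snippet below is a small hedged usage sketch of that public API (assuming a recent PyTorch build), not a call into the internal ops themselves.

    import torch
    import torch.nn.functional as F

    x = torch.randn(4, 8, dtype=torch.float16)

    # Ask for a float32 result from a half-precision input. On backends that
    # support it, this is the request the half_to_float flag in the hits above
    # encodes; otherwise the input is simply cast to float32 up front.
    y = F.softmax(x, dim=-1, dtype=torch.float32)
    z = F.log_softmax(x, dim=-1, dtype=torch.float32)
    print(y.dtype, z.dtype)   # torch.float32 torch.float32
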