
Searched full:argmax (Results 1 – 25 of 416) sorted by relevance


/external/tensorflow/tensorflow/python/kernel_tests/math_ops/
argmax_op_test.py
63 # Check that argmin and argmax match numpy along the primary axis
64 self._testBothArg(math_ops.argmax, x, 0, x.argmax())
70 # Check that argmin and argmax match numpy along the primary axis for
72 self._testBothArg(math_ops.argmax, x, 0, x.argmax())
75 # Check that argmin and argmax match numpy along axis=1 for
78 self._testBothArg(math_ops.argmax, x, 1, x.argmax(axis=1))
89 # Check that argmin and argmax match numpy along all axes
91 self._testBothArg(math_ops.argmax, x, axis, x.argmax(axis))
101 expected_values = x.argmax()
103 ans = math_ops.argmax(x, axis=0, output_type=dtypes.int32)
[all …]
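The tests above assert that tf.math.argmax agrees with numpy's argmax along each axis. A minimal standalone version of the same check (assuming eager-mode TensorFlow 2.x) might look like:

```python
import numpy as np
import tensorflow as tf

x = np.random.rand(3, 4).astype(np.float32)

# tf.math.argmax should match numpy's argmax along the same axis; TF returns
# int64 indices by default, with output_type=tf.int32 as the other option.
assert tf.math.argmax(x, axis=0).numpy().tolist() == x.argmax(axis=0).tolist()
assert tf.math.argmax(x, axis=1, output_type=tf.int32).dtype == tf.int32
```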
/external/tensorflow/tensorflow/core/kernels/
argmax_op.cc
102 errors::InvalidArgument("Argmax and Argmin only support up " in Compute()
116 : public ArgOp<Device, T, Tout, functor::ArgMax<Device, T, Tout> > {
119 : ArgOp<Device, T, Tout, functor::ArgMax<Device, T, Tout> >(context) {} in ArgMaxOp()
131 REGISTER_KERNEL_BUILDER(Name("ArgMax") \
143 REGISTER_KERNEL_BUILDER(Name("ArgMax") \
155 REGISTER_KERNEL_BUILDER(Name("ArgMax") \
161 REGISTER_KERNEL_BUILDER(Name("ArgMax") \
179 void ArgMax<GPUDevice, T, Tout>::Reduce##Dims( \
204 extern template struct ArgMax<GPUDevice, T, int64_t>; \
206 extern template struct ArgMax<GPUDevice, T, int32>; \
[all …]
dilation_ops.h
38 // To avoid storing the argmax values during forward computation, we recompute
39 // the argmax during backward computation, which is the reason why we provide
52 // To avoid storing the argmax values during forward computation, we recompute
53 // the argmax during backward computation, which is the reason why we provide
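The comment in dilation_ops.h describes a storage/compute trade-off: instead of saving argmax indices from the forward pass, the backward pass recomputes them. A minimal numpy sketch of the idea (hypothetical helper names, scalar-output case only):

```python
import numpy as np

def max_forward(x):
    # Forward pass keeps only the max value; no argmax indices are stored.
    return x.max()

def max_backward(x, grad_out):
    # Backward pass recomputes the argmax and routes the incoming gradient
    # to it, trading a second scan of the input for the saved storage.
    grad_in = np.zeros_like(x)
    grad_in[np.unravel_index(np.argmax(x), x.shape)] = grad_out
    return grad_in
```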
/external/tensorflow/tensorflow/core/ops/compat/ops_history_v2/
ArgMax.pbtxt
2 name: "ArgMax"
52 name: "ArgMax"
115 name: "ArgMax"
180 name: "ArgMax"
246 name: "ArgMax"
312 name: "ArgMax"
379 name: "ArgMax"
448 name: "ArgMax"
MaxPoolWithArgmax.pbtxt
12 name: "argmax"
75 name: "argmax"
142 name: "argmax"
211 name: "argmax"
281 name: "argmax"
351 name: "argmax"
MaxPoolGradWithArgmax.pbtxt
12 name: "argmax"
76 name: "argmax"
144 name: "argmax"
214 name: "argmax"
285 name: "argmax"
356 name: "argmax"
MaxPoolGradGradWithArgmax.pbtxt
12 name: "argmax"
80 name: "argmax"
150 name: "argmax"
221 name: "argmax"
292 name: "argmax"
/external/pytorch/aten/src/ATen/native/quantized/cpu/qnnpack/src/
softargmax.c
39 … "failed to create Soft ArgMax operator with %zu channels: number of channels must be non-zero", in pytorch_qnnp_create_softargmax_nc_q8()
46 … "failed to create Soft ArgMax operator with %.7g input scale: scale must be finite and positive", in pytorch_qnnp_create_softargmax_nc_q8()
53 … "failed to create Soft ArgMax operator with %.7g output scale: scale must be finite and positive", in pytorch_qnnp_create_softargmax_nc_q8()
62 …"failed to create Soft ArgMax operator with %.7g output scale: only output scale of 1/256 is suppo… in pytorch_qnnp_create_softargmax_nc_q8()
69 "failed to create Soft ArgMax operator with %" PRIu8 in pytorch_qnnp_create_softargmax_nc_q8()
88 "failed to allocate 256 bytes for Soft ArgMax lookup table"); in pytorch_qnnp_create_softargmax_nc_q8()
/external/executorch/examples/mediatek/executor_runner/llama_runner/
Utils.h
70 static uint64_t argmax(const void* logits_buffer, const size_t vocab_size) { in argmax() function
83 static uint64_t argmax( in argmax() function
89 return argmax<int16_t>(logits_buffer, vocab_size); in argmax()
91 return argmax<__fp16>(logits_buffer, vocab_size); in argmax()
93 return argmax<float>(logits_buffer, vocab_size); in argmax()
97 "Unsupported logits type for argmax: %s", in argmax()
/external/tensorflow/tensorflow/compiler/tests/
argminmax_test.py
15 """Functional tests for ArgMin and ArgMax Ops."""
33 op: argmin or argmax operator to test.
48 # Complex numbers do not support argmin/argmax.
56 math_ops.argmax,
62 math_ops.argmax,
68 math_ops.argmax,
/external/tensorflow/tensorflow/core/ops/compat/ops_history_v1/
ArgMax.pbtxt
2 name: "ArgMax"
52 name: "ArgMax"
115 name: "ArgMax"
180 name: "ArgMax"
246 name: "ArgMax"
312 name: "ArgMax"
MaxPoolWithArgmax.pbtxt
12 name: "argmax"
75 name: "argmax"
142 name: "argmax"
211 name: "argmax"
281 name: "argmax"
351 name: "argmax"
MaxPoolGradWithArgmax.pbtxt
12 name: "argmax"
76 name: "argmax"
144 name: "argmax"
214 name: "argmax"
285 name: "argmax"
356 name: "argmax"
MaxPoolGradGradWithArgmax.pbtxt
12 name: "argmax"
80 name: "argmax"
150 name: "argmax"
221 name: "argmax"
292 name: "argmax"
/external/pytorch/aten/src/ATen/native/cuda/
AdaptiveMaxPooling3d.cu
43 * 4D input, 4D output, 4D argmax x and y
99 int64_t argmax = istartT*isizeH*isizeW + istartH*isizeW + istartW; in adaptivemaxpool() local
109 argmax = (it+istartT)*isizeH*isizeW + (ih+istartH)*isizeW + iw+istartW; in adaptivemaxpool()
115 // Update output and argmax in adaptivemaxpool()
117 *ptr_ind = argmax; in adaptivemaxpool()
157 * each input pixel can only be argmax of one output pixel.
192 // Compute the gradients for the argmax input pixel in adaptivemaxgradinput()
196 int argmax = (*ptr_ind); in adaptivemaxgradinput() local
197 gradInput_d[argmax] += grad_delta; in adaptivemaxgradinput()
268 // Compute the gradients for the argmax input pixel in atomicadaptivemaxgradinput()
[all …]
AdaptiveMaxPooling2d.cu
43 * 4D input, 4D output, 4D argmax x and y
87 int argmax = istartH * isizeW + istartW; in adaptivemaxpool() local
95 argmax = (ih+istartH)*isizeW + iw+istartW; in adaptivemaxpool()
100 // Update output and argmax in adaptivemaxpool()
102 *ptr_ind = argmax; in adaptivemaxpool()
146 int argmax = (*ptr_ind); in adaptivemaxgradinput() local
148 gradInput[argmax] += z; in adaptivemaxgradinput()
193 int argmax = (*ptr_ind); in atomicadaptivemaxgradinput() local
196 gpuAtomicAddNoReturn(&(gradInput[argmax]), z); in atomicadaptivemaxgradinput()
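Both CUDA kernels record a per-output argmax in the forward pass and scatter gradients to exactly those positions in the backward pass (gradInput[argmax] += z). The same behavior is visible from Python (a small sketch, assuming PyTorch):

```python
import torch

pool = torch.nn.AdaptiveMaxPool2d(output_size=(2, 2), return_indices=True)
x = torch.randn(1, 1, 8, 8, requires_grad=True)
out, idx = pool(x)    # idx holds the flattened argmax for each output pixel
out.sum().backward()
# The gradient lands only on the recorded argmax positions; all other
# entries stay zero, matching gradInput[argmax] += z in the kernels above.
print(torch.count_nonzero(x.grad))  # at most 4, one per output element
```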
/external/pytorch/test/torch_np/
test_ndarray_methods.py
221 @parametrize("method", [np.argmax, np.argmin])
304 @parametrize("method", ["argmax", "argmin"])
330 @parametrize("method", ["argmax", "argmin"])
339 "arr_method, np_method", [("argmax", np.argmax), ("argmin", np.argmin)]
342 # make sure both ndarray.argmax/argmin and
343 # numpy.argmax/argmin support out/axis args
355 "arr_method, np_method", [("argmax", np.argmax), ("argmin", np.argmin)]
463 assert_equal(np.argmax(arr), pos) # , err_msg="%r" % arr)
464 assert_equal(arr[np.argmax(arr)], val) # , err_msg="%r" % arr)
469 assert_equal(np.argmax(rarr), rpos, err_msg=f"{rarr!r}")
[all …]
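These tests confirm that both the ndarray method and the module-level function accept out and axis arguments. A quick numpy illustration:

```python
import numpy as np

arr = np.array([[1, 3, 2],
                [9, 0, 4]])
out = np.empty(2, dtype=np.intp)          # out must have the index dtype
np.argmax(arr, axis=1, out=out)           # module-level function
assert (out == arr.argmax(axis=1)).all()  # ndarray method agrees: [1, 0]
```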
/external/tensorflow/tensorflow/tools/compatibility/
README.md
43 Added keyword 'input' to reordered function 'tf.argmax'
46 Old: tf.argmax([[1, 3, 2]], dimension=0)
48 New: tf.argmax(input=[[1, 3, 2]], axis=0)
55 particular, functions that have had reordered arguments like `tf.argmax`
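The report lines quoted from the README show the TF 1.x-to-2.x rename of tf.argmax's dimension keyword to axis. The converted call is ordinary TF 2.x code:

```python
import tensorflow as tf

# TF 1.x (removed): tf.argmax([[1, 3, 2]], dimension=0)
# TF 2.x form, as produced by the upgrade script:
idx = tf.argmax(input=[[1, 3, 2]], axis=0)
print(idx.numpy())  # [0 0 0]: with a single row, every column's max is in row 0
```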
/external/pytorch/test/distributed/_tensor/experimental/
test_register_sharding.py
83 @register_sharding(aten.argmax.default)
104 aten.argmax.default
113 local_y = torch.argmax(x, dim=1, keepdim=True)
114 dist_y = torch.argmax(dist_x, dim=1, keepdim=True)
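The test compares a sharded argmax against the plain local computation; the local call is just torch.argmax with dim and keepdim:

```python
import torch

x = torch.tensor([[1.0, 5.0, 3.0],
                  [4.0, 2.0, 6.0]])
y = torch.argmax(x, dim=1, keepdim=True)  # keepdim keeps dim 1 as size 1
print(y)        # tensor([[1], [2]])
print(y.shape)  # torch.Size([2, 1])
```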
/external/tensorflow/tensorflow/core/api_def/base_api/
api_def_MaxPoolWithArgmax.pbtxt
16 name: "argmax"
43 Whether to include batch dimension in flattened index of `argmax`.
48 The indices in `argmax` are flattened, so that a maximum value at position
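Per the TensorFlow documentation for this op, the truncated sentence continues: a maximum at position [b, y, x, c] flattens to ((b * height + y) * width + x) * channels + c when include_batch_in_index is true. A small worked example:

```python
import tensorflow as tf

x = tf.reshape(tf.range(16.0), [1, 4, 4, 1])  # values 0..15, NHWC
out, argmax = tf.nn.max_pool_with_argmax(
    x, ksize=2, strides=2, padding="VALID", include_batch_in_index=True)
# Each 2x2 window's max (5, 7, 13, 15) sits at flattened index
# ((b*height + y)*width + x)*channels + c, so argmax equals the values here.
print(argmax.numpy().ravel())  # [ 5  7 13 15]
```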
/external/tensorflow/tensorflow/core/tpu/kernels/xla/
index_ops.cc
24 // This registration is needed here because the ArgMax Op is defined in
26 // specific TPU whitelist, but ArgMax does because it has a separate CustomCall
28 REGISTER_XLA_OP(Name("ArgMax")
/external/tensorflow/tensorflow/security/advisory/
tfsa-2021-062.md
15 argmax = tf.constant([], shape=[0], dtype=tf.int64)
20 input=input, grad=grad, argmax=argmax, ksize=ksize, strides=strides,
tfsa-2021-057.md
15 argmax = tf.constant([1], shape=[1], dtype=tf.int64)
20 input=input, grad=grad, argmax=argmax, ksize=ksize, strides=strides,
/external/tensorflow/tensorflow/compiler/xla/client/lib/
arithmetic.cc
160 XlaOp argmax = GetTupleElement(max_argmax, 1); in ArgMinMax() local
162 argmax = ConvertElementType(argmax, output_type); in ArgMinMax()
164 return argmax; in ArgMinMax()
168 XlaOp ArgMax(XlaOp input, PrimitiveType output_type, int axis) { in ArgMax() function
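ArgMinMax computes the max and its index together as a (value, index) tuple and then extracts element 1, converting it to the requested output type. A plain-Python sketch of the same tuple-reduction idea:

```python
from functools import reduce

def arg_max(values):
    # Reduce over (value, index) pairs, keeping the pair with the larger
    # value; the index half of the winning tuple is the argmax, echoing
    # GetTupleElement(max_argmax, 1) above.
    max_argmax = reduce(lambda a, b: a if a[0] >= b[0] else b,
                        ((v, i) for i, v in enumerate(values)))
    return max_argmax[1]

print(arg_max([3, 9, 4, 9]))  # 1: the first occurrence wins ties
```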
/external/tensorflow/tensorflow/core/api_def/java_api/
api_def_ArgMax.pbtxt
2 graph_op_name: "ArgMax"
4 name: "math.ArgMax"
