
Searched refs:batch_idx (Results 1 – 23 of 23) sorted by relevance

/external/tensorflow/tensorflow/core/kernels/
in_topk_op.h
71 for (int batch_idx = 0; batch_idx < num_targets; batch_idx++) {
72 auto target = internal::SubtleMustCopy(targets(batch_idx));
75 !std::isfinite(predictions(batch_idx, target));
79 const T target_prediction = predictions(batch_idx, target);
82 T pred = predictions(batch_idx, class_idx);
92 output(batch_idx) = cannot_say ? false : (more_probable_classes < k_val);
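The excerpt is the core of the InTopK test: count how many classes score strictly higher than the target class, and declare the target "in top k" when that count is below k; a non-finite target prediction short-circuits to false. A minimal standalone sketch of that logic (names are illustrative, not the TensorFlow API):

    #include <cmath>
    #include <vector>

    // Sketch of the per-batch-element InTopK check above: fewer than k
    // classes strictly above the target means "in top k"; a non-finite
    // target prediction is the "cannot_say" case and yields false.
    bool InTopK(const std::vector<float>& predictions, int target, int k) {
      const float target_prediction = predictions[target];
      if (!std::isfinite(target_prediction)) return false;  // cannot_say
      int more_probable_classes = 0;
      for (float pred : predictions)
        if (pred > target_prediction) ++more_probable_classes;
      return more_probable_classes < k;
    }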
gather_functor.h
61 SliceIndex batch_idx = static_cast<SliceIndex>(start / indices_size); in HandleCopies() local
66 while ((batch_idx < batch_idx_end) || in HandleCopies()
67 (batch_idx == batch_idx_end && indices_idx < indices_idx_end)) { in HandleCopies()
69 SliceIndex b_next = batch_idx + 1; in HandleCopies()
70 if ((batch_idx == batch_idx_end && i_next < indices_idx_end) || in HandleCopies()
73 &params(batch_idx, indices(i_next), 0)); in HandleCopies()
74 port::prefetch<port::PREFETCH_HINT_T0>(&out(batch_idx, i_next, 0)); in HandleCopies()
75 b_next = batch_idx; in HandleCopies()
93 out_base + (batch_idx * indices_size + indices_idx) * slice_elems, in HandleCopies()
94 params_base + (batch_idx * static_cast<SliceIndex>(limit) + in HandleCopies()
[all …]
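Stripped of the prefetching and block-copy bookkeeping, HandleCopies computes out(b, i, :) = params(b, indices(i), :). A simplified sketch over row-major buffers (the layout and parameter names are assumptions, not the functor's real signature):

    #include <cstddef>
    #include <vector>

    // Simplified batched gather: copy slice params[b, indices[i], :] into
    // out[b, i, :] for every (batch, index) pair.
    void GatherSlices(const std::vector<float>& params,  // [batch, limit, slice]
                      const std::vector<int>& indices,
                      std::vector<float>& out,           // [batch, |indices|, slice]
                      std::size_t batch, std::size_t limit, std::size_t slice) {
      const std::size_t indices_size = indices.size();
      for (std::size_t batch_idx = 0; batch_idx < batch; ++batch_idx)
        for (std::size_t i = 0; i < indices_size; ++i)
          for (std::size_t e = 0; e < slice; ++e)
            out[(batch_idx * indices_size + i) * slice + e] =
                params[(batch_idx * limit + indices[i]) * slice + e];
    }

The prefetch calls in the real code hide the latency of the next slice's loads; the sketch keeps only the addressing, which is where batch_idx enters.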
gather_functor_batched.h
63 SliceIndex batch_idx = static_cast<SliceIndex>( in HandleCopiesBatched() local
68 SliceIndex batch_offset = batch_idx * indices_size; in HandleCopiesBatched()
72 SliceIndex b_next = batch_idx; in HandleCopiesBatched()
102 &out(batch_idx, outer_idx, indices_idx, 0), in HandleCopiesBatched()
103 &params(batch_idx, outer_idx, static_cast<SliceIndex>(index), 0), in HandleCopiesBatched()
107 out.template chip<0>(batch_idx) in HandleCopiesBatched()
110 params.template chip<0>(batch_idx) in HandleCopiesBatched()
117 batch_idx = b_next; in HandleCopiesBatched()
count_ops.cc
287 int batch_idx = 0; in Compute() local
290 while (idx >= splits_values(batch_idx)) { in Compute()
291 batch_idx++; in Compute()
296 per_batch_counts[batch_idx - 1][value] = 1; in Compute()
298 per_batch_counts[batch_idx - 1][value] += weight_values(idx); in Compute()
300 per_batch_counts[batch_idx - 1][value]++; in Compute()
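The splits walk above assigns each flat value index to a batch: batch_idx advances past every split that is <= idx, and the value is counted in per_batch_counts[batch_idx - 1]. A sketch of just that mapping, assuming well-formed splits (see the advisories under tensorflow/security/advisory further down for what happens otherwise):

    #include <cstdint>
    #include <vector>

    // Maps a flat value index to its batch via the row-splits walk above.
    // Assumes splits.front() == 0 and idx < splits.back(); otherwise the
    // walk underflows batch_idx - 1 or reads past the array (the bugs
    // behind tfsa-2020-015 and tfsa-2020-016).
    int BatchForIndex(const std::vector<int64_t>& splits, int64_t idx) {
      int batch_idx = 0;
      while (idx >= splits[batch_idx]) ++batch_idx;
      return batch_idx - 1;
    }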
parameterized_truncated_normal_op.cc
371 int64 batch_idx = output_idx / samples_per_batch; in operator ()() local
376 T* const output_batch_offset = output_flat + batch_idx; in operator ()()
381 mean = means(means_batch_indices[batch_idx]); in operator ()()
382 stddev = stddevs(stddevs_batch_indices[batch_idx]); in operator ()()
383 minval = minvals(minvals_batch_indices[batch_idx]); in operator ()()
384 maxval = maxvals(maxvals_batch_indices[batch_idx]); in operator ()()
386 mean = means(batch_idx); in operator ()()
387 stddev = stddevs(batch_idx); in operator ()()
388 minval = minvals(batch_idx); in operator ()()
389 maxval = maxvals(batch_idx); in operator ()()
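Samples are laid out as num_batches * samples_per_batch contiguous values, so the owning batch of a flat output index is a plain division; when a distribution parameter was broadcast, an extra index table maps batch_idx back to the parameter's own row. A hedged sketch of that lookup (names and the nullptr convention are assumptions):

    #include <cstdint>
    #include <vector>

    // Broadcast-aware parameter lookup following the kernel above: pass
    // batch_indices == nullptr when the parameter was not broadcast.
    double ParamForSample(const std::vector<double>& param,
                          const std::vector<int64_t>* batch_indices,
                          int64_t output_idx, int64_t samples_per_batch) {
      const int64_t batch_idx = output_idx / samples_per_batch;
      return batch_indices ? param[(*batch_indices)[batch_idx]]
                           : param[batch_idx];
    }

random_binomial_op.cc below applies the same division-plus-indirection pattern to its count and prob parameters.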
random_binomial_op.cc
202 int64 batch_idx = output_idx / samples_per_batch; in operator ()() local
203 U* const output_batch_offset = output_flat + batch_idx; in operator ()()
208 count = counts(counts_batch_indices[batch_idx]); in operator ()()
209 prob = probs(probs_batch_indices[batch_idx]); in operator ()()
211 count = counts(batch_idx); in operator ()()
212 prob = probs(batch_idx); in operator ()()
bincount_op.cc
421 int batch_idx = 0; in Compute() local
431 while (idx >= splits(batch_idx)) { in Compute()
432 batch_idx++; in Compute()
439 out(batch_idx - 1, bin) = T(1); in Compute()
442 out(batch_idx - 1, bin) += value; in Compute()
resource_variable_ops.cc
722 for (int64 batch_idx = 0, dest_idx = 0; batch_idx < batch_size; in AddBatchOffsets() local
723 ++batch_idx) { in AddBatchOffsets()
725 indices_flat(dest_idx++) += batch_offset * batch_idx; in AddBatchOffsets()
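AddBatchOffsets rebases per-batch indices into the flattened parameter tensor: every index belonging to batch batch_idx gains that batch's base offset. A sketch, with the inner per-index loop (elided in the excerpt) reconstructed as an assumption:

    #include <cstdint>
    #include <vector>

    // Rebases per-batch gather indices into a flattened tensor. The inner
    // loop bound indices_per_batch is an assumed detail not shown above.
    void AddBatchOffsets(std::vector<int64_t>& indices, int64_t batch_size,
                         int64_t batch_offset, int64_t indices_per_batch) {
      int64_t dest_idx = 0;
      for (int64_t batch_idx = 0; batch_idx < batch_size; ++batch_idx)
        for (int64_t i = 0; i < indices_per_batch; ++i)
          indices[dest_idx++] += batch_offset * batch_idx;
    }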
/external/tensorflow/tensorflow/lite/toco/graph_transformations/
unroll_batch_matmul.cc
59 for (int batch_idx = 0; batch_idx < batch_size; ++batch_idx) { in SliceInput() local
61 absl::StrCat(base_name, "_b", batch_idx, "/slice_", input_name); in SliceInput()
66 {batch_idx, 0, 0}), in SliceInput()
212 for (int batch_idx = 0; batch_idx < bcast.output_batch_size(); ++batch_idx) { in Run() local
214 absl::StrCat(batch_op->outputs[0], "_b", batch_idx); in Run()
216 ? bcast.x_batch_indices()[batch_idx] in Run()
217 : batch_idx; in Run()
219 ? bcast.y_batch_indices()[batch_idx] in Run()
220 : batch_idx; in Run()
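Both unrollers (this TOCO pass and the MLIR pass below) rewrite one BatchMatMul into a Slice plus MatMul per batch element, routing broadcast inputs through x_batch_indices / y_batch_indices before a final concat. The numeric effect, sketched over plain row-major buffers with the broadcast indirection omitted:

    #include <vector>

    // Unrolled batch matmul: one plain matmul per batch_idx, mirroring the
    // per-batch Slice/MatMul ops the transformation emits.
    void UnrolledBatchMatMul(const std::vector<float>& a,  // [batch, m, k]
                             const std::vector<float>& b,  // [batch, k, n]
                             std::vector<float>& c,        // [batch, m, n]
                             int batch, int m, int k, int n) {
      for (int batch_idx = 0; batch_idx < batch; ++batch_idx) {
        const float* A = a.data() + batch_idx * m * k;
        const float* B = b.data() + batch_idx * k * n;
        float* C = c.data() + batch_idx * m * n;
        for (int i = 0; i < m; ++i)
          for (int j = 0; j < n; ++j) {
            float acc = 0;
            for (int p = 0; p < k; ++p) acc += A[i * k + p] * B[p * n + j];
            C[i * n + j] = acc;
          }
      }
    }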
/external/tensorflow/tensorflow/compiler/mlir/tensorflow/transforms/
unroll_batch_matmul.cc
100 for (int batch_idx = 0; batch_idx < batch_size; ++batch_idx) { in sliceInput() local
104 DenseElementsAttr::get<int64_t>(vector3_type, {batch_idx, 0, 0}); in sliceInput()
162 for (int batch_idx = 0; batch_idx < bcast.output_batch_size(); ++batch_idx) { in createMatMulOps() local
165 lhs_batch_idx = bcast.x_batch_indices()[batch_idx]; in createMatMulOps()
166 rhs_batch_idx = bcast.y_batch_indices()[batch_idx]; in createMatMulOps()
168 lhs_batch_idx = batch_idx; in createMatMulOps()
169 rhs_batch_idx = batch_idx; in createMatMulOps()
/external/tensorflow/tensorflow/core/kernels/sparse/
kernels.cc
91 for (int batch_idx = 0; batch_idx < batch_size; ++batch_idx) { in operator ()() local
92 auto* row_ptr_batch = csr_row_ptr.data() + batch_idx * (num_rows + 1); in operator ()()
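The batched CSR layout keeps every batch's row pointers in one flat array: batch b owns the (num_rows + 1) entries starting at b * (num_rows + 1). A one-line sketch of that addressing:

    #include <cstdint>

    // Start of batch batch_idx's row-pointer window in the shared array.
    int32_t* RowPtrForBatch(int32_t* csr_row_ptr, int batch_idx, int num_rows) {
      return csr_row_ptr + batch_idx * (num_rows + 1);
    }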
sparse_mat_mul_op.cc
187 for (int64 batch_idx = batch_begin; batch_idx < batch_end; in Compute() local
188 ++batch_idx) { in Compute()
191 auto a_ref = GetSparseMatrixRef(*input_matrix_a, rank, batch_idx, in Compute()
193 auto b_ref = GetSparseMatrixRef(*input_matrix_b, rank, batch_idx, in Compute()
199 output_matrices[batch_idx] = a_ref * b_ref; in Compute()
203 batch_ptr_vec(batch_idx + 1) = in Compute()
204 output_matrices[batch_idx].nonZeros(); in Compute()
229 for (int64 batch_idx = batch_begin; batch_idx < batch_end; in Compute() local
230 ++batch_idx) { in Compute()
231 const SparseMatrix& output_matrix = output_matrices[batch_idx]; in Compute()
[all …]
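Each batch multiplies its own pair of sparse operands, and each product's nonzero count is recorded so the packed output's batch pointers can be prefix-summed afterwards. A hedged Eigen sketch, with the kernel's GetSparseMatrixRef plumbing replaced by plain vectors:

    #include <Eigen/Sparse>
    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // One Eigen sparse product per batch_idx, mirroring the loop above;
    // the nonzero counts later become cumulative batch pointers.
    void BatchedSparseMatMul(const std::vector<Eigen::SparseMatrix<float>>& a,
                             const std::vector<Eigen::SparseMatrix<float>>& b,
                             std::vector<Eigen::SparseMatrix<float>>& out,
                             std::vector<std::int64_t>& batch_ptr) {
      for (std::size_t batch_idx = 0; batch_idx < a.size(); ++batch_idx) {
        out[batch_idx] = a[batch_idx] * b[batch_idx];
        batch_ptr[batch_idx + 1] = out[batch_idx].nonZeros();
      }
    }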
csr_sparse_matrix_to_sparse_tensor_op.cc
100 for (int64 batch_idx = batch_begin; batch_idx < batch_end; ++batch_idx) { in Compute() local
101 const int64 csr_batch_offset = batch_ptrs(batch_idx); in Compute()
104 const int64 row_offset = batch_idx * (num_rows + 1) + row_idx; in Compute()
118 indices_flat(indices_offset) = batch_idx; in Compute()
csr_sparse_matrix_to_dense_op.cc
96 for (int64 batch_idx = batch_begin; batch_idx < batch_end; ++batch_idx) { in Compute() local
97 const int64 csr_batch_offset = batch_ptrs(batch_idx); in Compute()
98 const int64 dense_batch_offset = batch_idx * num_rows * num_cols; in Compute()
101 const int64 row_offset = batch_idx * (num_rows + 1) + row_idx; in Compute()
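The two offsets above locate batch batch_idx's slab in the dense output (batch_idx * num_rows * num_cols) and its window in the shared row-pointer array (batch_idx * (num_rows + 1)). A sketch of the expansion for one batch, under the assumption that row pointers are batch-relative and csr_batch_offset indexes the packed column/value arrays:

    #include <vector>

    // Illustrative single-batch CSR-to-dense expansion using the offsets
    // computed in the kernel above.
    void CsrBatchToDense(const std::vector<int>& row_ptrs,  // all batches, flat
                         const std::vector<int>& col_inds,
                         const std::vector<float>& values,
                         std::vector<float>& dense,         // [batch, rows, cols]
                         int batch_idx, int num_rows, int num_cols,
                         int csr_batch_offset) {
      const int dense_batch_offset = batch_idx * num_rows * num_cols;
      for (int row_idx = 0; row_idx < num_rows; ++row_idx) {
        const int row_offset = batch_idx * (num_rows + 1) + row_idx;
        for (int i = row_ptrs[row_offset]; i < row_ptrs[row_offset + 1]; ++i)
          dense[dense_batch_offset + row_idx * num_cols +
                col_inds[csr_batch_offset + i]] = values[csr_batch_offset + i];
      }
    }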
mat_mul_op.cc
307 [&](int64 batch_idx, int64 row_begin, int64 row_end) { in SparseDenseMatMulWithoutTransposedLHS() argument
314 lhs, batch_idx, row_begin, num_shard_rows, &row_ptrs); in SparseDenseMatMulWithoutTransposedLHS()
317 ConstMatrixMap rhs_map(rhs.flat<T>().data() + batch_idx * in SparseDenseMatMulWithoutTransposedLHS()
325 batch_idx * num_lhs_rows * num_rhs_cols + in SparseDenseMatMulWithoutTransposedLHS()
378 [&](int64 batch_idx, int64 row_begin, int64 row_end) { in SparseDenseMatMulWithTransposedLHS() argument
385 lhs, batch_idx, row_begin, num_shard_rows, &row_ptrs); in SparseDenseMatMulWithTransposedLHS()
391 batch_idx * num_rhs_rows * num_rhs_cols + in SparseDenseMatMulWithTransposedLHS()
399 batch_idx * num_lhs_rows * num_rhs_cols, in SparseDenseMatMulWithTransposedLHS()
433 for (int64 batch_idx = batch_begin; batch_idx <= batch_end_inclusive; in HandleBatchAndRowRange() local
434 ++batch_idx) { in HandleBatchAndRowRange()
[all …]
/external/tensorflow/tensorflow/security/advisory/
tfsa-2020-016.md
15 int batch_idx = 0;
17 while (idx >= splits_values(batch_idx)) {
18 batch_idx++;
22 per_batch_counts[batch_idx - 1][value] = 1;
28 However, if the first element of `splits_values` is not 0, `batch_idx` will
tfsa-2020-015.md
14 while (idx >= splits_values(batch_idx)) {
15 batch_idx++;
23 `splits_values` once `batch_idx` grows too large.
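Both advisories trace back to trusting splits_values in the walk quoted above: a first split that is not 0 drives batch_idx - 1 negative, and a last split smaller than the number of values lets the loop read past the array. A hedged sketch of the kind of up-front validation that closes both holes (illustrative, not the actual patch):

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // Require splits to start at 0, end at the number of values, and be
    // non-decreasing, so batch_idx can neither overrun the array nor
    // leave batch_idx - 1 negative.
    bool SplitsAreValid(const std::vector<int64_t>& splits, int64_t num_values) {
      if (splits.empty() || splits.front() != 0 || splits.back() != num_values)
        return false;
      for (std::size_t i = 1; i < splits.size(); ++i)
        if (splits[i] < splits[i - 1]) return false;
      return true;
    }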
/external/tensorflow/tensorflow/core/kernels/image/
non_max_suppression_op.cc
304 void DoNMSPerClass(int batch_idx, int class_idx, const float* boxes_data, in DoNMSPerClass() argument
387 const int batch_idx, int total_size_per_batch, in SelectResultPerBatch() argument
406 final_valid_detections[batch_idx] = max_detections; in SelectResultPerBatch()
473 int batch_idx = idx / num_classes; in BatchedNonMaxSuppressionOp() local
475 DoNMSPerClass(batch_idx, class_idx, in BatchedNonMaxSuppressionOp()
476 boxes_data + boxes_per_batch * batch_idx, in BatchedNonMaxSuppressionOp()
477 scores_data + scores_per_batch * batch_idx, num_boxes, q, in BatchedNonMaxSuppressionOp()
479 result_candidate_vec[batch_idx]); in BatchedNonMaxSuppressionOp()
510 for (int batch_idx = begin; batch_idx < end; ++batch_idx) { in BatchedNonMaxSuppressionOp() local
512 nmsed_boxes[batch_idx], nmsed_scores[batch_idx], in BatchedNonMaxSuppressionOp()
[all …]
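Per-(batch, class) NMS jobs are flattened into a single index range so they can be sharded across threads; division recovers the batch and modulo the class. A tiny sketch of the decode:

    #include <utility>

    // Decodes a flattened job index into (batch_idx, class_idx), as in the
    // sharded loop above.
    std::pair<int, int> DecodeNMSJob(int idx, int num_classes) {
      return {idx / num_classes, idx % num_classes};
    }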
/external/tensorflow/tensorflow/compiler/tf2xla/kernels/
reverse_sequence_op.cc
76 xla::XlaOp batch_idx = xla::Iota( in Compile() local
92 batch_idx = xla::Transpose(batch_idx, {1, 0, 2}); in Compile()
97 xla::ConcatInDim(builder, {batch_idx, reverse_idx}, in Compile()
/external/perfetto/src/trace_processor/containers/
bit_vector_iterators.cc
118 uint32_t batch_idx = set_bit_count_until_i++ % kBatchSize; in ReadSetBitBatch() local
119 batch_[batch_idx] = i; in ReadSetBitBatch()
123 if (PERFETTO_UNLIKELY(batch_idx == kBatchSize - 1)) in ReadSetBitBatch()
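Set-bit positions are staged into a fixed-size batch buffer: the running count modulo kBatchSize picks the slot, and filling the last slot triggers a flush. A sketch with an assumed kBatchSize (the real constant differs):

    #include <array>
    #include <cstdint>
    #include <vector>

    constexpr uint32_t kBatchSize = 1024;  // assumed, for illustration

    // Stages values into a fixed batch and flushes when the batch fills,
    // following the modulo pattern above.
    void StageSetBits(const std::vector<uint32_t>& set_bits,
                      std::array<uint32_t, kBatchSize>& batch) {
      uint32_t count = 0;
      for (uint32_t i : set_bits) {
        const uint32_t batch_idx = count++ % kBatchSize;
        batch[batch_idx] = i;
        if (batch_idx == kBatchSize - 1) {
          // flush `batch` to the consumer here
        }
      }
    }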
/external/mesa3d/src/gallium/drivers/iris/
iris_query.c
68 int batch_idx; member
132 struct iris_batch *batch = &ice->batches[q->batch_idx]; in mark_available()
170 struct iris_batch *batch = &ice->batches[q->batch_idx]; in write_value()
449 q->batch_idx = IRIS_BATCH_COMPUTE; in iris_create_query()
451 q->batch_idx = IRIS_BATCH_RENDER; in iris_create_query()
555 struct iris_batch *batch = &ice->batches[q->batch_idx]; in iris_end_query()
627 struct iris_batch *batch = &ice->batches[q->batch_idx]; in iris_get_query_result()
660 struct iris_batch *batch = &ice->batches[q->batch_idx]; in iris_get_query_result_resource()
/external/tensorflow/tensorflow/stream_executor/
dnn.cc
195 int depth_idx, batch_idx, spatial_idx; in GetDimIndices() local
199 batch_idx = data_dims - 2; in GetDimIndices()
205 batch_idx = data_dims - 1; in GetDimIndices()
211 batch_idx = 0; in GetDimIndices()
218 batch_idx = 0; in GetDimIndices()
226 return std::make_tuple(depth_idx, batch_idx, spatial_idx); in GetDimIndices()
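GetDimIndices reports where the depth, batch, and spatial dimensions sit for a given data layout: batch-major layouts put batch_idx at 0, batch-minor layouts near the end. A hedged sketch with an illustrative enum (the real stream_executor DataLayout values and their orderings may differ):

    // Illustrative layout enum; not the real stream_executor DataLayout.
    enum class Layout { kBatchDepthYX, kBatchYXDepth, kYXDepthBatch };

    // Where the batch dimension sits in a dims vector of length data_dims.
    // The mapping here is an assumption for illustration only.
    int BatchDimIndex(Layout layout, int data_dims) {
      switch (layout) {
        case Layout::kBatchDepthYX:
        case Layout::kBatchYXDepth:
          return 0;
        case Layout::kYXDepthBatch:
          return data_dims - 1;
      }
      return 0;
    }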
/external/tensorflow/tensorflow/python/ops/
ctc_ops.py
495 batch_idx = array_ops.zeros_like(label_states[2:])
496 indices = array_ops.stack([batch_idx, label_states[2:], label_states[1:-1]],
500 batch_idx = array_ops.expand_dims(math_ops.range(batch_size), 1) * [1, 0, 0]
501 indices += array_ops.expand_dims(batch_idx, 1)