
Searched refs:feature_index (Results 1 – 25 of 40) sorted by relevance

/external/tensorflow/tensorflow/compiler/xla/service/
batchnorm_expander.cc
113 HloInstruction* operand, int64 feature_index, in DynamicElementCountPerFeature() argument
120 if (i == feature_index) { in DynamicElementCountPerFeature()
208 int64 feature_index = batch_norm->feature_index(); in HandleBatchNormTraining() local
226 if (i != feature_index) { in HandleBatchNormTraining()
232 add(DynamicElementCountPerFeature(operand, feature_index, add)); in HandleBatchNormTraining()
235 HloInstruction::CreateBroadcast(operand_shape, scale, {feature_index})); in HandleBatchNormTraining()
238 HloInstruction::CreateBroadcast(operand_shape, offset, {feature_index})); in HandleBatchNormTraining()
260 HloInstruction::CreateBroadcast(operand_shape, mean, {feature_index})); in HandleBatchNormTraining()
274 add(HloInstruction::CreateBroadcast(operand_shape, var, {feature_index})); in HandleBatchNormTraining()
334 int64 feature_index = batch_norm->feature_index(); in HandleBatchNormInference() local
[all …]
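The batchnorm_expander.cc matches above count how many elements feed each feature's statistics (every dimension of the operand except feature_index) and broadcast the 1-D scale and offset vectors along feature_index. A minimal standalone sketch of that bookkeeping, using plain C++ containers rather than HLO instructions (the shape representation here is an assumption, not XLA's):

#include <cstdint>
#include <vector>

// Number of elements contributing to each feature's mean/variance: the
// product of all operand dimensions except the feature dimension itself.
int64_t ElementCountPerFeature(const std::vector<int64_t>& operand_dims,
                               int64_t feature_index) {
  int64_t count = 1;
  for (int64_t i = 0; i < static_cast<int64_t>(operand_dims.size()); ++i) {
    if (i == feature_index) continue;  // skip the feature dimension
    count *= operand_dims[i];
  }
  return count;
}

// Example: an NHWC operand {8, 16, 16, 32} with feature_index = 3 gives
// 8 * 16 * 16 = 2048 elements per feature; scale/offset of shape {32} are
// broadcast back to {8, 16, 16, 32} along dimension 3.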
shape_inference.h
91 int64 feature_index);
98 const Shape& variance_shape, int64 feature_index);
106 int64 feature_index);
shape_inference.cc
1168 const Shape& offset_shape, int64 feature_index) { in InferBatchNormTrainingShape() argument
1183 if (feature_index >= operand_shape.rank()) { in InferBatchNormTrainingShape()
1188 feature_index, operand_shape.rank()); in InferBatchNormTrainingShape()
1191 if (feature_index < 0) { in InferBatchNormTrainingShape()
1195 feature_index); in InferBatchNormTrainingShape()
1246 const int64 feature_count = operand_shape.dimensions(feature_index); in InferBatchNormTrainingShape()
1274 const Shape& variance_shape, int64 feature_index) { in InferBatchNormInferenceShape() argument
1289 if (feature_index >= operand_shape.rank()) { in InferBatchNormInferenceShape()
1294 feature_index, operand_shape.rank()); in InferBatchNormInferenceShape()
1297 if (feature_index < 0) { in InferBatchNormInferenceShape()
[all …]
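The shape_inference.cc entries show the validation that both InferBatchNormTrainingShape and InferBatchNormInferenceShape perform: feature_index must be non-negative and smaller than the operand rank, and the feature count is then read from that dimension. A compact sketch of the same checks, with a plain error string standing in for XLA's Status type (an assumption for brevity):

#include <cstdint>
#include <string>
#include <vector>

// Returns an empty string on success, otherwise an error message; on success
// *feature_count is the size of the dimension being normalized.
std::string ValidateFeatureIndex(const std::vector<int64_t>& operand_dims,
                                 int64_t feature_index,
                                 int64_t* feature_count) {
  const int64_t rank = static_cast<int64_t>(operand_dims.size());
  if (feature_index < 0) {
    return "Expected feature_index to be non-negative; got " +
           std::to_string(feature_index);
  }
  if (feature_index >= rank) {
    return "Expected feature_index " + std::to_string(feature_index) +
           " to be smaller than the operand rank " + std::to_string(rank);
  }
  *feature_count = operand_dims[feature_index];
  return "";
}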
hlo_instructions.cc
75 HloInstruction* scale, float epsilon, int64 feature_index) in HloBatchNormInstruction() argument
78 feature_index_(feature_index) { in HloBatchNormInstruction()
88 return feature_index() == casted_other.feature_index() && in IdenticalSlowPath()
102 StrCat("feature_index=", feature_index())}; in ExtraAttributesToStringImpl()
107 HloInstruction* offset, float epsilon, int64 feature_index) in HloBatchNormTrainingInstruction() argument
109 scale, epsilon, feature_index) { in HloBatchNormTrainingInstruction()
120 feature_index()); in CloneWithNewOperandsImpl()
126 float epsilon, int64 feature_index) in HloBatchNormInferenceInstruction() argument
128 scale, epsilon, feature_index) { in HloBatchNormInferenceInstruction()
141 new_operands[4], epsilon(), feature_index()); in CloneWithNewOperandsImpl()
[all …]
hlo_instruction.h
639 HloInstruction* offset, float epsilon, int64 feature_index);
645 float epsilon, int64 feature_index);
651 HloInstruction* grad_output, float epsilon, int64 feature_index);
1378 int64 feature_index() const;
/external/tensorflow/tensorflow/compiler/xla/tests/
batch_normalization_test.cc
373 int64 feature_index; member
381 os << "feature_index=" << p.feature_index << ", "; in operator <<()
408 auto add_testcase = [&](std::vector<int64> bounds, int64 feature_index, in BuildBatchNormTestParams()
410 BatchNormTestParam p{bounds, feature_index, random_value_mean, in BuildBatchNormTestParams()
457 const int64 feature_index = GetParam().feature_index; in XLA_TEST_P() local
459 Product(bounds) / bounds[feature_index]; in XLA_TEST_P()
460 const int64 feature_bound = bounds[feature_index]; in XLA_TEST_P()
468 if (i != feature_index) { in XLA_TEST_P()
503 *ReferenceUtil::Broadcast1DTo4D(mean, bounds, feature_index); in XLA_TEST_P()
504 auto var4D = *ReferenceUtil::Broadcast1DTo4D(var, bounds, feature_index); in XLA_TEST_P()
[all …]
/external/tensorflow/tensorflow/compiler/tf2xla/kernels/
batch_norm_op.cc
53 int feature_index = in Compile() local
63 input, ctx->Input(1), ctx->Input(2), epsilon_, feature_index); in Compile()
92 epsilon_, feature_index); in Compile()
142 const int feature_index = in Compile() local
167 epsilon_, feature_index); in Compile()
175 std::iota(reduction_dims.begin(), reduction_dims.begin() + feature_index, in Compile()
177 std::iota(reduction_dims.begin() + feature_index, reduction_dims.end(), in Compile()
178 feature_index + 1); in Compile()
198 xla::Mul(grad_backprop, xla::Sub(activations, mean, {feature_index})); in Compile()
206 xla::Mul(grad_backprop, xla::Mul(scratch1, scale), {feature_index}); in Compile()
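In batch_norm_op.cc the gradient path reduces over every dimension except feature_index, and the two std::iota calls shown above build exactly that dimension list. A hedged standalone version of the same pattern:

#include <cstdint>
#include <numeric>
#include <vector>

// All dimension indices of a rank-`rank` tensor except `feature_index`,
// in ascending order -- the reduction dimensions for per-feature sums.
std::vector<int64_t> ReductionDimsExceptFeature(int64_t rank,
                                                int64_t feature_index) {
  std::vector<int64_t> reduction_dims(rank - 1);
  // Fill 0, 1, ..., feature_index - 1 ...
  std::iota(reduction_dims.begin(), reduction_dims.begin() + feature_index, 0);
  // ... then feature_index + 1, ..., rank - 1.
  std::iota(reduction_dims.begin() + feature_index, reduction_dims.end(),
            feature_index + 1);
  return reduction_dims;
}

// e.g. rank = 4, feature_index = 3 (NHWC)  -> {0, 1, 2}
//      rank = 4, feature_index = 1 (NCHW)  -> {0, 2, 3}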
/external/harfbuzz_ng/src/
hb-ot-layout.cc
461 unsigned int *feature_index) in hb_ot_layout_table_find_feature() argument
470 if (feature_index) *feature_index = i; in hb_ot_layout_table_find_feature()
475 if (feature_index) *feature_index = HB_OT_LAYOUT_NO_FEATURE_INDEX; in hb_ot_layout_table_find_feature()
544 unsigned int *feature_index) in hb_ot_layout_language_get_required_feature_index() argument
550 feature_index, in hb_ot_layout_language_get_required_feature_index()
564 unsigned int *feature_index, in hb_ot_layout_language_get_required_feature() argument
571 if (feature_index) *feature_index = index; in hb_ot_layout_language_get_required_feature()
623 unsigned int *feature_index) in hb_ot_layout_language_find_feature() argument
634 if (feature_index) *feature_index = f_index; in hb_ot_layout_language_find_feature()
639 if (feature_index) *feature_index = HB_OT_LAYOUT_NO_FEATURE_INDEX; in hb_ot_layout_language_find_feature()
[all …]
hb-ot-layout.h
191 unsigned int *feature_index);
198 unsigned int *feature_index,
225 unsigned int *feature_index);
230 unsigned int feature_index,
302 unsigned int feature_index,
378 unsigned int feature_index,
389 unsigned int feature_index,
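The hb-ot-layout entries are HarfBuzz's OpenType feature lookup: a feature tag is resolved to a numeric feature_index within a script/language system, with HB_OT_LAYOUT_NO_FEATURE_INDEX signalling absence. A rough usage sketch of that public API; the font path and the 'latn'/'liga' tags are placeholders, and the script lookup via hb_ot_layout_table_find_script is assumed from the same header:

#include <cstdio>
#include <hb.h>
#include <hb-ot.h>

int main() {
  // Placeholder font path.
  hb_blob_t *blob = hb_blob_create_from_file("font.ttf");
  hb_face_t *face = hb_face_create(blob, 0);

  unsigned int script_index = 0;
  hb_ot_layout_table_find_script(face, HB_OT_TAG_GSUB,
                                 HB_TAG('l','a','t','n'), &script_index);

  unsigned int feature_index = HB_OT_LAYOUT_NO_FEATURE_INDEX;
  if (hb_ot_layout_language_find_feature(face, HB_OT_TAG_GSUB, script_index,
                                         HB_OT_LAYOUT_DEFAULT_LANGUAGE_INDEX,
                                         HB_TAG('l','i','g','a'),
                                         &feature_index)) {
    printf("'liga' resolved to feature_index %u\n", feature_index);
  } else {
    printf("'liga' not present in GSUB for this script/language\n");
  }

  hb_face_destroy(face);
  hb_blob_destroy(blob);
  return 0;
}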
hb-ot-map.cc
96 unsigned int feature_index, in add_lookups() argument
114 feature_index, in add_lookups()
223 unsigned int feature_index[2]; in compile() local
234 &feature_index[table_index]); in compile()
243 &feature_index[table_index]); in compile()
253 map->index[0] = feature_index[0]; in compile()
254 map->index[1] = feature_index[1]; in compile()
hb-ot-shape-complex-arabic-fallback.hh
50 unsigned int feature_index) in arabic_fallback_synthesize_lookup_single() argument
59 hb_codepoint_t s = shaping_table[u - SHAPING_TABLE_FIRST][feature_index]; in arabic_fallback_synthesize_lookup_single()
176 unsigned int feature_index) in arabic_fallback_synthesize_lookup() argument
178 if (feature_index < 4) in arabic_fallback_synthesize_lookup()
179 return arabic_fallback_synthesize_lookup_single (plan, font, feature_index); in arabic_fallback_synthesize_lookup()
/external/tensorflow/tensorflow/compiler/xla/service/gpu/
cudnn_batchnorm_rewriter.cc
73 HloInstruction* feature_index = in HandleBatchNormInference() local
75 LiteralUtil::CreateR0(batch_norm->feature_index()))); in HandleBatchNormInference()
80 operands.push_back(feature_index); in HandleBatchNormInference()
109 HloInstruction* feature_index = in HandleBatchNormTraining() local
111 LiteralUtil::CreateR0(batch_norm->feature_index()))); in HandleBatchNormTraining()
116 operands.push_back(feature_index); in HandleBatchNormTraining()
178 HloInstruction* feature_index = in HandleBatchNormGrad() local
180 LiteralUtil::CreateR0(batch_norm->feature_index()))); in HandleBatchNormGrad()
198 operands.push_back(feature_index); in HandleBatchNormGrad()
cudnn_batchnorm_thunk.cc
35 MakeDescriptors(const Shape& shape, int64 feature_index) { in MakeDescriptors() argument
51 for (physical_dim = 0; physical_dim != logical_to_physical[feature_index]; in MakeDescriptors()
64 .set_feature_map_count(shape.dimensions(feature_index)) in MakeDescriptors()
82 const BufferAllocation::Slice& variance, float epsilon, int64 feature_index, in CudnnBatchNormForwardInferenceThunk() argument
91 feature_index_(feature_index), in CudnnBatchNormForwardInferenceThunk()
138 float epsilon, int64 feature_index, in CudnnBatchNormForwardTrainingThunk() argument
148 feature_index_(feature_index), in CudnnBatchNormForwardTrainingThunk()
219 int64 feature_index, const BufferAllocation::Slice& output_grad_data, in CudnnBatchNormBackwardThunk() argument
230 feature_index_(feature_index), in CudnnBatchNormBackwardThunk()
gpu_layout_assignment_test.cc
142 auto* feature_index = in TEST_F() local
148 {operand, scale, offset, mean, variance, epsilon, feature_index}, in TEST_F()
212 auto* feature_index = in TEST_F() local
217 batchnorm_shape, {operand, scale, offset, epsilon, feature_index}, in TEST_F()
290 auto* feature_index = in TEST_F() local
298 feature_index}, in TEST_F()
cudnn_batchnorm_thunk.h
54 float epsilon, int64 feature_index,
83 const BufferAllocation::Slice& offset, float epsilon, int64 feature_index,
117 float epsilon, int64 feature_index,
/external/harfbuzz_ng/test/api/
test-ot-name.c
36 unsigned int feature_index; in test_ot_layout_feature_get_name_ids_and_characters() local
50 &feature_index)) in test_ot_layout_feature_get_name_ids_and_characters()
53 if (!hb_ot_layout_feature_get_name_ids (face, HB_OT_TAG_GSUB, feature_index, in test_ot_layout_feature_get_name_ids_and_characters()
64 all_chars = hb_ot_layout_feature_get_characters (face, HB_OT_TAG_GSUB, feature_index, in test_ot_layout_feature_get_name_ids_and_characters()
/external/tensorflow/tensorflow/core/tpu/
tpu_embedding_output_layout_utils.cc
46 for (int feature_index = 0; feature_index < table.num_features(); in AddDefaultEmbeddingOutputLayoutIfNeeded() local
47 ++feature_index) { in AddDefaultEmbeddingOutputLayoutIfNeeded()
53 output_location->set_dim0_offset(feature_index); in AddDefaultEmbeddingOutputLayoutIfNeeded()
/external/tensorflow/tensorflow/contrib/libsvm/kernels/
decode_libsvm_op.cc
70 int64 feature_index; in Compute() local
72 ctx, strings::safe_strto64(piece.substr(0, p), &feature_index), in Compute()
74 OP_REQUIRES(ctx, (feature_index >= 0), in Compute()
76 "Feature index should be >= 0, got ", feature_index)); in Compute()
87 out_indices.emplace_back(std::pair<int64, int64>(i, feature_index)); in Compute()
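decode_libsvm_op.cc splits each index:value token of a LIBSVM line, parses the index with safe_strto64, and rejects negative indices before emitting an (example, feature_index) sparse coordinate. A rough standard-library-only equivalent of that parsing step (the LIBSVM feature format is the only assumption):

#include <cstdint>
#include <sstream>
#include <string>
#include <utility>
#include <vector>

// Parses the feature portion of one LIBSVM line, e.g. "3:0.5 7:1.25", into
// (feature_index, value) pairs. Returns false on a malformed token or a
// negative feature index.
bool ParseLibsvmFeatures(const std::string& line,
                         std::vector<std::pair<int64_t, float>>* out) {
  std::istringstream tokens(line);
  std::string piece;
  while (tokens >> piece) {
    const std::size_t colon = piece.find(':');
    if (colon == std::string::npos) return false;
    int64_t feature_index = 0;
    float value = 0.0f;
    try {
      feature_index = std::stoll(piece.substr(0, colon));
      value = std::stof(piece.substr(colon + 1));
    } catch (...) {
      return false;  // not a number
    }
    if (feature_index < 0) return false;  // "Feature index should be >= 0"
    out->emplace_back(feature_index, value);
  }
  return true;
}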
/external/tensorflow/tensorflow/core/kernels/
sdca_internal.cc
170 const int64 feature_index = (*sparse_features.indices)(k); in ComputeWxAndWeightedExampleNorm() local
175 const float sparse_weight = sparse_weights.nominals(l, feature_index); in ComputeWxAndWeightedExampleNorm()
178 sparse_weights.deltas(l, feature_index) * num_loss_partitions; in ComputeWxAndWeightedExampleNorm()
431 const int64 feature_index = (*sparse_features->indices)(k); in CreateSparseFeatureRepresentation() local
432 if (!weights.SparseIndexValid(i, feature_index)) { in CreateSparseFeatureRepresentation()
518 const int64 feature_index = (*sparse_features.indices)(k); in ComputeSquaredNormPerExample() local
519 if (previous_indices.insert(feature_index).second == false) { in ComputeSquaredNormPerExample()
/external/tensorflow/tensorflow/contrib/boosted_trees/kernels/
quantile_ops.cc
87 int32 GetFeatureDimension(const int32 feature_index, const int64 instance, in GetFeatureDimension() argument
91 return (*indices_list)[feature_index].matrix<int64>()(instance, 1); in GetFeatureDimension()
110 for (int32 feature_index = 0; feature_index < values_list.size(); in QuantizeFeatures() local
111 ++feature_index) { in QuantizeFeatures()
112 const Tensor& values_tensor = values_list[feature_index]; in QuantizeFeatures()
118 context, output_list.allocate(feature_index, in QuantizeFeatures()
124 GetBuckets(feature_index, buckets_list); in QuantizeFeatures()
129 << "Got empty buckets for feature " << feature_index; in QuantizeFeatures()
141 GetFeatureDimension(feature_index, instance, indices_list); in QuantizeFeatures()
/external/tensorflow/tensorflow/contrib/tensor_forest/kernels/
tree_utils.h
229 const int64 feature_index = indices(index, 0); in GetNumSparseFeatures() local
230 if (feature_index == input_index) { in GetNumSparseFeatures()
233 } else if (feature_index < input_index) { in GetNumSparseFeatures()
/external/tensorflow/tensorflow/compiler/xla/client/
xla_builder.h
571 int64 feature_index);
576 int64 feature_index);
581 int64 feature_index);
980 int64 feature_index);
984 int64 feature_index);
988 int64 feature_index);
1884 int64 feature_index);
1899 int64 feature_index);
1913 int64 feature_index);
xla_builder.cc
2047 int64 feature_index) { in BatchNormTraining() argument
2057 operand_shape, scale_shape, offset_shape, feature_index)); in BatchNormTraining()
2061 instr.set_feature_index(feature_index); in BatchNormTraining()
2071 int64 feature_index) { in BatchNormInference() argument
2083 variance_shape, feature_index)); in BatchNormInference()
2087 instr.set_feature_index(feature_index); in BatchNormInference()
2097 int64 feature_index) { in BatchNormGrad() argument
2109 batch_var_shape, grad_output_shape, feature_index)); in BatchNormGrad()
2113 instr.set_feature_index(feature_index); in BatchNormGrad()
3488 int64 feature_index) { in BatchNormTraining() argument
[all …]
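xla_builder.cc wires feature_index through shape inference and into the serialized instruction; from the client side it is simply the last argument of the BatchNorm* builder functions declared in xla_builder.h. A rough sketch of such a call, assuming the client headers and the usual XlaBuilder setup (the include paths and ConsumeValueOrDie error handling are assumptions for this snapshot):

#include "tensorflow/compiler/xla/client/xla_builder.h"
#include "tensorflow/compiler/xla/shape_util.h"

xla::XlaComputation BuildBatchNormTraining() {
  xla::XlaBuilder b("batch_norm_training_example");
  // NHWC operand: the feature (channel) dimension is index 3.
  auto operand = xla::Parameter(
      &b, 0, xla::ShapeUtil::MakeShape(xla::F32, {8, 16, 16, 32}), "operand");
  auto scale = xla::Parameter(
      &b, 1, xla::ShapeUtil::MakeShape(xla::F32, {32}), "scale");
  auto offset = xla::Parameter(
      &b, 2, xla::ShapeUtil::MakeShape(xla::F32, {32}), "offset");
  // Produces a tuple of (normalized output, batch mean, batch variance).
  xla::BatchNormTraining(operand, scale, offset, /*epsilon=*/1e-3f,
                         /*feature_index=*/3);
  return b.Build().ConsumeValueOrDie();
}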
/external/tensorflow/tensorflow/core/util/
tensor_format.h
527 int feature_index = GetTensorFeatureDimIndex(dims, format); in ShapeFromFormat() local
534 dim_sizes[feature_index] = C; in ShapeFromFormat()
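In tensor_format.h, ShapeFromFormat asks GetTensorFeatureDimIndex where the feature (channel) dimension lives for the requested layout and writes C into that slot: index 1 for NCHW, the last index for NHWC. A tiny illustration of the convention (the enum and helper below are stand-ins, not TensorFlow's types):

#include <cstdint>

enum class DataFormat { NHWC, NCHW };  // stand-in for TensorFlow's TensorFormat

// Index of the feature/channel dimension for the given layout.
int FeatureDimIndex(int num_dims, DataFormat format) {
  return format == DataFormat::NHWC ? num_dims - 1 : 1;
}

// Example: a 4-D shape with N=8, spatial 16x16, C=32.
//   NHWC -> {8, 16, 16, 32}  (feature_index = 3)
//   NCHW -> {8, 32, 16, 16}  (feature_index = 1)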
/external/libaom/libaom/av1/encoder/
encodeframe.c
2643 int feature_index = 0; in ml_prune_2pass_split_partition() local
2644 features[feature_index++] = (float)pc_tree_stats->split; in ml_prune_2pass_split_partition()
2645 features[feature_index++] = (float)pc_tree_stats->skip; in ml_prune_2pass_split_partition()
2648 features[feature_index++] = (float)rd_valid; in ml_prune_2pass_split_partition()
2650 features[feature_index++] = (float)pc_tree_stats->sub_block_split[i]; in ml_prune_2pass_split_partition()
2651 features[feature_index++] = (float)pc_tree_stats->sub_block_skip[i]; in ml_prune_2pass_split_partition()
2655 features[feature_index++] = (float)sub_rd_valid; in ml_prune_2pass_split_partition()
2660 features[feature_index++] = rd_ratio; in ml_prune_2pass_split_partition()
2662 assert(feature_index == FEATURE_SIZE); in ml_prune_2pass_split_partition()
2795 int feature_index = 0; in ml_prune_ab_partition() local
[all …]
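The encodeframe.c matches fill a flat feature vector for the partition-pruning model, advancing feature_index once per value and asserting at the end that the count equals FEATURE_SIZE. A schematic version of that pattern (the struct fields and the size constant are placeholders, not libaom's):

#include <cassert>

struct SplitStats {       // placeholder for the pc_tree_stats fields used above
  int split;
  int skip;
  int sub_block_split[4];
  int sub_block_skip[4];
};

constexpr int kFeatureSize = 12;  // placeholder for FEATURE_SIZE

void FillPruningFeatures(const SplitStats& stats, int rd_valid, float rd_ratio,
                         float features[kFeatureSize]) {
  int feature_index = 0;
  features[feature_index++] = static_cast<float>(stats.split);
  features[feature_index++] = static_cast<float>(stats.skip);
  features[feature_index++] = static_cast<float>(rd_valid);
  for (int i = 0; i < 4; ++i) {
    features[feature_index++] = static_cast<float>(stats.sub_block_split[i]);
    features[feature_index++] = static_cast<float>(stats.sub_block_skip[i]);
  }
  features[feature_index++] = rd_ratio;
  // Guards against the feature list and the model input size drifting apart.
  assert(feature_index == kFeatureSize);
}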
