Searched refs:batch_norm (Results 1 – 25 of 46) sorted by relevance

/external/tensorflow/tensorflow/compiler/xla/service/gpu/
cudnn_batchnorm_rewriter.cc
39 Status HandleBatchNormInference(HloInstruction* batch_norm) override;
40 Status HandleBatchNormTraining(HloInstruction* batch_norm) override;
41 Status HandleBatchNormGrad(HloInstruction* batch_norm) override;
50 bool EpsilonInRange(HloInstruction* batch_norm) { in EpsilonInRange() argument
51 return batch_norm->epsilon() >= 1e-5; in EpsilonInRange()
54 Status Visitor::HandleBatchNormInference(HloInstruction* batch_norm) { in HandleBatchNormInference() argument
55 if (batch_norm->operand(0)->shape().element_type() != F32) { in HandleBatchNormInference()
57 << batch_norm->ToString(); in HandleBatchNormInference()
62 if (ShapeUtil::ElementsIn(batch_norm->operand(0)->shape()) == 0) { in HandleBatchNormInference()
66 if (!EpsilonInRange(batch_norm)) { in HandleBatchNormInference()
[all …]
ir_emitter.h
100 Status HandleBatchNormInference(HloInstruction* batch_norm) override;
101 Status HandleBatchNormTraining(HloInstruction* batch_norm) override;
102 Status HandleBatchNormGrad(HloInstruction* batch_norm) override;
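
The GPU rewriter only lowers batch-norm HLOs whose epsilon is at or above cuDNN's minimum of 1e-5 (the EpsilonInRange check above). A minimal NumPy sketch of the inference formula with that guard; the function and constant names here are illustrative, not part of the XLA source:

    import numpy as np

    CUDNN_BN_MIN_EPSILON = 1e-5  # lower bound checked by EpsilonInRange()

    def batch_norm_inference(x, scale, offset, mean, variance, epsilon):
        # Reference formula for the inference op the rewriter hands to cuDNN.
        if epsilon < CUDNN_BN_MIN_EPSILON:
            # The rewriter skips such ops; they are expanded elsewhere instead.
            raise ValueError("epsilon below the cuDNN minimum")
        return (x - mean) / np.sqrt(variance + epsilon) * scale + offset
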
/external/tensorflow/tensorflow/compiler/xla/service/
batchnorm_expander.cc
56 Status HandleBatchNormTraining(HloInstruction* batch_norm) override;
58 Status HandleBatchNormInference(HloInstruction* batch_norm) override;
60 Status HandleBatchNormGrad(HloInstruction* batch_norm) override;
186 HloInstruction* batch_norm) { in HandleBatchNormTraining() argument
194 added_inst->set_metadata(batch_norm->metadata()); in HandleBatchNormTraining()
205 HloInstruction* operand = batch_norm->mutable_operand(0); in HandleBatchNormTraining()
208 int64 feature_index = batch_norm->feature_index(); in HandleBatchNormTraining()
210 HloInstruction* scale = batch_norm->mutable_operand(1); in HandleBatchNormTraining()
211 HloInstruction* offset = batch_norm->mutable_operand(2); in HandleBatchNormTraining()
218 auto epsilon_literal = LiteralUtil::CreateR0(batch_norm->epsilon()); in HandleBatchNormTraining()
[all …]
hlo_element_type_converter_test.cc
111 ::testing::Matcher<const ::xla::HloInstruction*> batch_norm = in TEST_F()
114 op::Tuple(op::Convert(op::GetTupleElement(batch_norm, 0)), in TEST_F()
115 op::Convert(op::GetTupleElement(batch_norm, 1)), in TEST_F()
116 op::Convert(op::GetTupleElement(batch_norm, 2)))); in TEST_F()
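
batchnorm_expander.cc rewrites the batch-norm-training HLO into plain elementwise ops: reduce the operand over every dimension except feature_index, normalize with epsilon, then apply scale and offset. A hedged NumPy sketch of that expansion (names are illustrative):

    import numpy as np

    def expand_batch_norm_training(operand, scale, offset, epsilon, feature_index):
        # Reduce over every dimension except the feature dimension.
        reduce_dims = tuple(d for d in range(operand.ndim) if d != feature_index)
        mean = operand.mean(axis=reduce_dims, keepdims=True)
        var = operand.var(axis=reduce_dims, keepdims=True)
        # Broadcast scale/offset along the feature dimension.
        bshape = [1] * operand.ndim
        bshape[feature_index] = -1
        y = (operand - mean) / np.sqrt(var + epsilon)
        y = y * scale.reshape(bshape) + offset.reshape(bshape)
        # BatchNormTraining produces a (output, batch_mean, batch_var) tuple.
        return y, mean.reshape(-1), var.reshape(-1)
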
/external/tensorflow/tensorflow/contrib/distributions/python/kernel_tests/bijectors/
batch_normalization_test.py
78 batch_norm = BatchNormalization(
81 norm_x = batch_norm.inverse(x)
82 with ops.control_dependencies(batch_norm.batchnorm.updates):
83 moving_mean = array_ops.identity(batch_norm.batchnorm.moving_mean)
84 moving_var = array_ops.identity(batch_norm.batchnorm.moving_variance)
85 denorm_x = batch_norm.forward(array_ops.identity(norm_x))
86 fldj = batch_norm.forward_log_det_jacobian(
89 ildj = batch_norm.inverse_log_det_jacobian(
109 self.assertEqual("batch_normalization", batch_norm.name)
133 expected_batch_var + batch_norm.batchnorm.epsilon))
[all …]
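
batch_normalization_test.py checks the bijector's defining properties: inverse() normalizes a batch, forward() undoes it, and the forward/inverse log-det-Jacobians are negatives of each other. A small NumPy sketch of those identities (the constants are made up for illustration):

    import numpy as np

    rng = np.random.RandomState(0)
    x = rng.randn(16, 4).astype(np.float32)
    gamma, beta, eps = 1.5, 0.3, 1e-3

    # inverse(): normalize the batch, as batch_norm.inverse(x) does.
    mean, var = x.mean(axis=0), x.var(axis=0)
    norm_x = (x - mean) / np.sqrt(var + eps) * gamma + beta

    # forward(): de-normalize; round-trips back to x.
    denorm_x = (norm_x - beta) / gamma * np.sqrt(var + eps) + mean
    assert np.allclose(denorm_x, x, atol=1e-4)

    # Log-det-Jacobian of the normalization per event, and its negation
    # for the forward direction (treating mean/var as constants).
    ildj = np.sum(np.log(gamma) - 0.5 * np.log(var + eps))
    fldj = -ildj
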
/external/tensorflow/tensorflow/contrib/quantize/python/
fold_batch_norms_test.py
39 batch_norm = layers.batch_norm variable
112 node = batch_norm(
129 normalizer_fn=batch_norm,
210 layer1 = batch_norm(
220 normalizer_fn=batch_norm,
225 layer2 = batch_norm(
301 normalizer_fn=batch_norm,
378 node = batch_norm(
394 normalizer_fn=batch_norm,
474 node = batch_norm(
[all …]
common_test.py
33 batch_norm = layers.batch_norm variable
110 normalizer_fn=batch_norm,
quantize_parameterized_test.py
34 batch_norm = layers.batch_norm variable
522 normalizer_fn=batch_norm,
576 normalizer_fn=batch_norm,
635 normalizer_fn=batch_norm,
694 normalizer_fn=batch_norm,
759 normalizer_fn=batch_norm,
770 normalizer_fn=batch_norm,
graph_matcher_test.py
44 [layers.batch_norm], fused=True, is_training=True, trainable=True):
52 normalizer_fn=layers.batch_norm,
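
These quantize tests build slim layers whose normalizer_fn is layers.batch_norm, which fold_batch_norms then folds into the preceding convolution weights. A hedged TF 1.x sketch of that construction (shapes and parameters are illustrative):

    import tensorflow as tf
    from tensorflow.contrib import layers

    inputs = tf.placeholder(tf.float32, [None, 32, 32, 3])
    net = layers.conv2d(
        inputs, 64, [3, 3],
        normalizer_fn=layers.batch_norm,
        normalizer_params={'is_training': True, 'fused': True})
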
/external/tensorflow/tensorflow/core/grappler/optimizers/
remapper.cc
343 const NodeDef* batch_norm, in FindConv2DWithBatchNorm() argument
348 if (!batch_norm || !IsFusedBatchNorm(*batch_norm)) return false; in FindConv2DWithBatchNorm()
351 if (batch_norm->op() == "FusedBatchNormV2" && in FindConv2DWithBatchNorm()
352 !HasDataType(batch_norm, DT_FLOAT, "U")) in FindConv2DWithBatchNorm()
356 const auto& attr = batch_norm->attr(); in FindConv2DWithBatchNorm()
360 if (HasControlFaninOrFanout(ctx.graph_view, batch_norm) || in FindConv2DWithBatchNorm()
361 HasFanouts(ctx.graph_view, batch_norm, 1) || // batch_mean in FindConv2DWithBatchNorm()
362 HasFanouts(ctx.graph_view, batch_norm, 2) || // batch_variance in FindConv2DWithBatchNorm()
363 HasFanouts(ctx.graph_view, batch_norm, 3) || // reserve_space_1 in FindConv2DWithBatchNorm()
364 HasFanouts(ctx.graph_view, batch_norm, 4)) // reserve_space_2 in FindConv2DWithBatchNorm()
[all …]
remapper_test.cc
254 auto batch_norm = ops::FusedBatchNorm(s.WithOpName("batch_norm"), conv, scale, in TEST_F() local
256 auto fetch = ops::Identity(s.WithOpName("fetch"), batch_norm.y); in TEST_F()
331 auto batch_norm = ops::FusedBatchNorm(s.WithOpName("batch_norm"), conv, scale, in TEST_F() local
333 auto relu = ops::Relu(s.WithOpName("relu"), batch_norm.y); in TEST_F()
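
remapper_test.cc builds a Conv2D followed by FusedBatchNorm (and optionally Relu) and checks that the remapper fuses them when only the y output is consumed. A rough TF 1.x Python equivalent of that graph (names and shapes are illustrative):

    import tensorflow as tf

    x = tf.placeholder(tf.float32, [8, 32, 32, 3], name='input')
    w = tf.Variable(tf.random_normal([3, 3, 3, 16]), name='filter')
    conv = tf.nn.conv2d(x, w, strides=[1, 1, 1, 1], padding='SAME')
    scale, offset = tf.ones([16]), tf.zeros([16])
    mean, variance = tf.zeros([16]), tf.ones([16])
    # Inference-mode FusedBatchNorm; only y is used downstream, so the
    # batch_mean/variance outputs have no fanout and fusion is allowed.
    y, _, _ = tf.nn.fused_batch_norm(
        conv, scale, offset, mean=mean, variance=variance, is_training=False)
    relu = tf.nn.relu(y, name='relu')
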
/external/tensorflow/tensorflow/contrib/slim/python/slim/nets/
resnet_v2.py
100 preact = layers.batch_norm(
202 with arg_scope([layers.batch_norm], is_training=is_training):
220 net = layers.batch_norm(
resnet_utils.py
260 normalizer_fn=layers.batch_norm,
262 with arg_scope([layers.batch_norm], **batch_norm_params):
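
resnet_utils.py installs layers.batch_norm as the default normalizer for conv2d via arg_scope, and resnet_v2.py toggles is_training on the same scope. A hedged sketch of that pattern (the parameter values here are placeholders, not necessarily the ones in the files):

    import tensorflow as tf
    from tensorflow.contrib import layers
    from tensorflow.contrib.framework import arg_scope

    inputs = tf.placeholder(tf.float32, [None, 224, 224, 3])
    batch_norm_params = {'decay': 0.997, 'epsilon': 1e-5, 'scale': True}

    with arg_scope([layers.conv2d],
                   normalizer_fn=layers.batch_norm,
                   normalizer_params=batch_norm_params), \
         arg_scope([layers.batch_norm], is_training=True, **batch_norm_params):
      net = layers.conv2d(inputs, 64, [7, 7], stride=2)
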
/external/tensorflow/tensorflow/contrib/layers/python/layers/
layers_test.py
530 normalizer_fn=_layers.batch_norm,
546 normalizer_fn=_layers.batch_norm,
1777 normalizer_fn=_layers.batch_norm,
1795 normalizer_fn=_layers.batch_norm,
1815 _layers.batch_norm(
1822 _layers.batch_norm(inputs)
1829 _layers.batch_norm(inputs, data_format='CHWN')
1836 _layers.batch_norm(inputs, data_format='NHWC')
1843 _layers.batch_norm(inputs, data_format='NCHW')
1852 output = _layers.batch_norm(images, fused=fused)
[all …]
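
layers_test.py also exercises batch_norm's data_format handling: 'NHWC' and 'NCHW' are accepted, anything else (such as 'CHWN') raises. A minimal sketch, using the same import alias as the test:

    import tensorflow as tf
    from tensorflow.contrib.layers.python.layers import layers as _layers

    images_nhwc = tf.placeholder(tf.float32, [None, 32, 32, 3])
    images_nchw = tf.placeholder(tf.float32, [None, 3, 32, 32])
    out_nhwc = _layers.batch_norm(images_nhwc, data_format='NHWC', fused=True)
    out_nchw = _layers.batch_norm(images_nchw, data_format='NCHW', fused=True)
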
/external/tensorflow/tensorflow/python/ops/
batch_norm_benchmark.py
62 batch_norm = (tensor - mean) * math_ops.rsqrt(variance + 0.001)
64 batch_norm *= gamma
65 return batch_norm + beta
/external/tensorflow/tensorflow/contrib/eager/python/examples/revnet/
blocks.py
431 self.batch_norm = tf.keras.layers.BatchNormalization(
446 net = self.batch_norm(net, training=training)
486 self.batch_norm = tf.keras.layers.BatchNormalization(
499 net = self.batch_norm(net, training=training)
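
blocks.py stores a tf.keras.layers.BatchNormalization layer and calls it with an explicit training flag, so inference uses the accumulated moving statistics. A minimal sketch of that usage:

    import tensorflow as tf

    batch_norm = tf.keras.layers.BatchNormalization(axis=-1)
    net = tf.random.normal([8, 16, 16, 32])
    train_out = batch_norm(net, training=True)   # uses batch statistics
    infer_out = batch_norm(net, training=False)  # uses moving statistics
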
/external/tensorflow/tensorflow/tools/api/golden/v2/
tensorflow.estimator.experimental.pbtxt
21 …hidden_units\', \'feature_columns\', \'activation_fn\', \'dropout\', \'batch_norm\'], varargs=None…
tensorflow.estimator.-d-n-n-regressor.pbtxt
24 …', \'dropout\', \'config\', \'warm_start_from\', \'loss_reduction\', \'batch_norm\'], varargs=None…
tensorflow.estimator.-d-n-n-estimator.pbtxt
24 …\', \'activation_fn\', \'dropout\', \'config\', \'warm_start_from\', \'batch_norm\'], varargs=None…
tensorflow.estimator.-d-n-n-linear-combined-regressor.pbtxt
24 …eight_column\', \'config\', \'warm_start_from\', \'loss_reduction\', \'batch_norm\', \'linear_spar…
tensorflow.estimator.-d-n-n-classifier.pbtxt
24 …', \'dropout\', \'config\', \'warm_start_from\', \'loss_reduction\', \'batch_norm\'], varargs=None…
tensorflow.estimator.-d-n-n-linear-combined-classifier.pbtxt
24 …l_vocabulary\', \'config\', \'warm_start_from\', \'loss_reduction\', \'batch_norm\', \'linear_spar…
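
These golden .pbtxt files record that the canned DNN estimators expose a batch_norm argument. A hedged usage sketch (the feature column and layer sizes are made up):

    import tensorflow as tf

    feature_columns = [tf.feature_column.numeric_column('x', shape=[4])]
    classifier = tf.estimator.DNNClassifier(
        hidden_units=[32, 16],
        feature_columns=feature_columns,
        dropout=0.1,
        batch_norm=True)  # batch-normalize each hidden layer
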
/external/tensorflow/tensorflow/core/grappler/costs/
virtual_scheduler_test.cc
269 auto batch_norm = ops::FusedBatchNorm( in CreateGrapplerItemWithBatchNorm() local
272 auto y = batch_norm.y; in CreateGrapplerItemWithBatchNorm()
273 auto batch_mean = batch_norm.batch_mean; in CreateGrapplerItemWithBatchNorm()
274 auto batch_var = batch_norm.batch_variance; in CreateGrapplerItemWithBatchNorm()
1557 auto batch_norm = ops::FusedBatchNorm( in CreateGrapplerItemWithInterDeviceTransfers() local
1560 auto y = batch_norm.y; in CreateGrapplerItemWithInterDeviceTransfers()
1561 auto batch_mean = batch_norm.batch_mean; in CreateGrapplerItemWithInterDeviceTransfers()
1562 auto batch_var = batch_norm.batch_variance; in CreateGrapplerItemWithInterDeviceTransfers()
/external/tensorflow/tensorflow/python/layers/
normalization.py
318 batch_norm = batch_normalization variable
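
normalization.py aliases batch_norm to batch_normalization, the function exposed as tf.layers.batch_normalization. In TF 1.x graphs, the moving-statistic update ops it registers must be run alongside the train op; a brief sketch:

    import tensorflow as tf

    x = tf.placeholder(tf.float32, [None, 128])
    out = tf.layers.batch_normalization(x, training=True)
    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    # The train op is typically grouped with the update ops, e.g.
    # with tf.control_dependencies(update_ops):
    #     train_op = optimizer.minimize(loss)
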
/external/tensorflow/tensorflow/tools/api/golden/v1/
tensorflow.estimator.-d-n-n-estimator.pbtxt
25 …out\', \'input_layer_partitioner\', \'config\', \'warm_start_from\', \'batch_norm\'], varargs=None…
