| /external/tensorflow/tensorflow/compiler/xla/service/ |
| D | batchnorm_expander.cc |
|    50   Status HandleBatchNormTraining(HloInstruction* batch_norm) override;
|    52   Status HandleBatchNormInference(HloInstruction* batch_norm) override;
|    54   Status HandleBatchNormGrad(HloInstruction* batch_norm) override;
|   152   HloInstruction* batch_norm) {  in HandleBatchNormTraining() argument
|   160   added_inst->set_metadata(batch_norm->metadata());  in HandleBatchNormTraining()
|   171   HloInstruction* operand = batch_norm->mutable_operand(0);  in HandleBatchNormTraining()
|   174   int64_t feature_index = batch_norm->feature_index();  in HandleBatchNormTraining()
|   176   HloInstruction* scale = batch_norm->mutable_operand(1);  in HandleBatchNormTraining()
|   177   HloInstruction* offset = batch_norm->mutable_operand(2);  in HandleBatchNormTraining()
|   184   auto epsilon_literal = LiteralUtil::CreateR0(batch_norm->epsilon());  in HandleBatchNormTraining()
|   [all …]
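The expander above rewrites an HLO batch-norm-training op into primitive ops built from the operand, scale, offset, and epsilon it extracts. A minimal NumPy sketch of the computation being expanded (the `feature_index=-1` default and the epsilon value are illustrative, not taken from the pass):

```python
import numpy as np

def batch_norm_training_reference(x, scale, offset, epsilon=1e-3, feature_index=-1):
    # Reduce over every axis except the feature axis to get per-feature statistics.
    reduce_axes = tuple(i for i in range(x.ndim) if i != feature_index % x.ndim)
    mean = x.mean(axis=reduce_axes, keepdims=True)
    var = x.var(axis=reduce_axes, keepdims=True)
    # Normalize, then apply the learned scale (gamma) and offset (beta).
    out = (x - mean) / np.sqrt(var + epsilon) * scale + offset
    # BatchNormTraining returns the output plus the batch statistics.
    return out, mean.squeeze(), var.squeeze()
```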
|
| D | hlo_element_type_converter_test.cc |
|   112   ::testing::Matcher<const ::xla::HloInstruction*> batch_norm =  in TEST_F()
|   115   op::Tuple(op::Convert(op::GetTupleElement(batch_norm, 0)),  in TEST_F()
|   116   op::Convert(op::GetTupleElement(batch_norm, 1)),  in TEST_F()
|   117   op::Convert(op::GetTupleElement(batch_norm, 2))));  in TEST_F()
|
| /external/tensorflow/tensorflow/compiler/mlir/tensorflow/transforms/ |
| D | gpu_fusion.cc |
|    70   auto batch_norm = dyn_cast_or_null<FusedBatchNormV3Op>(relu_input);  in matchAndRewrite() local
|    73   if (!batch_norm) {  in matchAndRewrite()
|    79   batch_norm =  in matchAndRewrite()
|    81   if (batch_norm) {  in matchAndRewrite()
|    85   batch_norm =  in matchAndRewrite()
|    87   if (!batch_norm) return failure();  in matchAndRewrite()
|    91   assert(batch_norm);  in matchAndRewrite()
|    92   if (batch_norm.is_training()) return failure();  in matchAndRewrite()
|    93   if (!batch_norm.y().hasOneUse()) return failure();  in matchAndRewrite()
|    96   OperationState state(batch_norm.getLoc(),  in matchAndRewrite()
|
| /external/pytorch/torch/csrc/api/include/torch/nn/functional/ |
| D | batchnorm.h |
|    13   inline Tensor batch_norm(  in batch_norm() function
|    38   return torch::batch_norm(  in batch_norm()
|    53   /// https://pytorch.org/docs/main/nn.functional.html#torch.nn.functional.batch_norm
|    62   /// F::batch_norm(input, mean, variance,
|    65   inline Tensor batch_norm(
|    70   return detail::batch_norm(
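This C++ functional header forwards to torch::batch_norm; the Python equivalent is torch.nn.functional.batch_norm. A minimal usage sketch in inference mode (tensor shapes are illustrative):

```python
import torch
import torch.nn.functional as F

x = torch.randn(8, 16, 32, 32)   # (N, C, H, W)
running_mean = torch.zeros(16)
running_var = torch.ones(16)
weight = torch.ones(16)          # gamma
bias = torch.zeros(16)           # beta

# Inference-mode call: normalizes with the provided running statistics.
y = F.batch_norm(x, running_mean, running_var, weight=weight, bias=bias,
                 training=False, momentum=0.1, eps=1e-5)
```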
|
| /external/pytorch/torch/ao/nn/intrinsic/modules/ |
| D | fused.py |
|   169   def __init__(self, batch_norm, relu):  argument
|   171   type_before_parametrizations(batch_norm) == BatchNorm2d
|   173   …), f"Incorrect types for input modules{type_before_parametrizations(batch_norm)}{type_before_param…
|   174   super().__init__(batch_norm, relu)
|   181   def __init__(self, batch_norm, relu):  argument
|   183   type_before_parametrizations(batch_norm) == BatchNorm3d
|   185   …), f"Incorrect types for input modules{type_before_parametrizations(batch_norm)}{type_before_param…
|   186   super().__init__(batch_norm, relu)
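These intrinsic containers (BNReLU2d / BNReLU3d) check the module types and then hold a batch norm and a ReLU together so quantization passes can treat the pair as one fused unit; in float mode they behave like the sequential composition. A small sketch of that equivalence, assuming the 2D variant exported from torch.ao.nn.intrinsic:

```python
import torch
import torch.nn as nn
from torch.ao.nn.intrinsic import BNReLU2d

bn = nn.BatchNorm2d(16)
relu = nn.ReLU()
fused = BNReLU2d(bn, relu)   # asserts the input module types, then acts like Sequential

x = torch.randn(4, 16, 8, 8)
# In float mode the fused container computes relu(bn(x)).
torch.testing.assert_close(fused(x), relu(bn(x)))
```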
|
| /external/tensorflow/tensorflow/python/ops/ |
| D | batch_norm_benchmark.py |
|    47   # batch_norm = (tensor - mean) * tf.math.rsqrt(variance + 0.001)
|    49   # batch_norm *= gamma
|    50   # return batch_norm + beta
|    58   batch_norm = (tensor - mean) * math_ops.rsqrt(variance + 0.001)
|    60   batch_norm *= gamma
|    61   return batch_norm + beta
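The benchmark's hand-written formula is the standard inference-time normalization. A quick TensorFlow sketch checking it against the library op tf.nn.batch_normalization (eps=0.001 matches the benchmark; the shapes and statistics are illustrative):

```python
import tensorflow as tf

x = tf.random.normal([8, 16])
mean, variance = tf.zeros([16]), tf.ones([16])
gamma, beta = tf.ones([16]), tf.zeros([16])

# Manual formula, as written in the benchmark.
manual = (x - mean) * tf.math.rsqrt(variance + 0.001) * gamma + beta
# Library op computing the same normalization.
fused = tf.nn.batch_normalization(x, mean, variance, beta, gamma, 0.001)
tf.debugging.assert_near(manual, fused)
```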
|
| /external/pytorch/functorch/op_analysis/ |
| D | gen_data.py |
|    88   if "batch_norm" in op["name"]:
|    89   categorization["batch_norm"] += 1
|    90   op["meta"] = "batch_norm"
|
| /external/pytorch/torch/onnx/ |
| D | symbolic_opset14.py |
|    34   "batch_norm",
|    67   @_onnx_symbolic("aten::batch_norm")
|    69   def batch_norm(  function
|    97   symbolic_helper.check_training_mode(training, "batch_norm")
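This registers the ONNX symbolic for aten::batch_norm at opset 14. A hedged sketch of an export that would exercise it (the module and file name are illustrative):

```python
import torch
import torch.nn as nn

model = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8), nn.ReLU()).eval()
dummy = torch.randn(1, 3, 32, 32)

# Exporting in eval mode lowers aten::batch_norm through the opset-14 symbolic;
# check_training_mode() warns if the node were still marked as training.
torch.onnx.export(model, dummy, "bn_model.onnx", opset_version=14)
```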
|
| /external/pytorch/benchmarks/tensorexpr/ |
| D | pt_engine.py |
|    33   def batch_norm(self, data, mean, var, training):  member in TorchTensorEngine
|    34   return torch.nn.functional.batch_norm(data, mean, var, training=training)
|
| /external/pytorch/torch/csrc/jit/passes/ |
| D | decompose_ops.cpp |
|    36   …"aten::batch_norm(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? runnin…  in isDecomposableNorm()
|   121   …"aten::batch_norm(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? runnin…  in DecomposeOps()
|   140   toGraphFunction(decompose_funcs.get_function("batch_norm")).graph();  in DecomposeOps()
|   199   …def batch_norm(input : Tensor, running_mean : Optional[Tensor], running_var : Optional[Tensor], tr…  in DecomposeOps()
|
| /external/tensorflow/tensorflow/tools/compatibility/ |
| D | reorders_v2.py |
|    40   …'dropout', 'input_layer_partitioner', 'config', 'warm_start_from', 'loss_reduction', 'batch_norm'],
|    41   …ut_layer_partitioner', 'config', 'warm_start_from', 'loss_reduction', 'batch_norm', 'linear_sparse…
|    42   …ut_layer_partitioner', 'config', 'warm_start_from', 'loss_reduction', 'batch_norm', 'linear_sparse…
|    43   …'dropout', 'input_layer_partitioner', 'config', 'warm_start_from', 'loss_reduction', 'batch_norm'],
|
| /external/pytorch/aten/src/ATen/native/vulkan/ops/ |
| D | Batchnorm.cpp |
|    74   Tensor batch_norm(  in batch_norm() function
|    99   m.impl(TORCH_SELECTIVE_NAME("aten::batch_norm"), TORCH_FN(batch_norm));  in TORCH_LIBRARY_IMPL()
|
| /external/pytorch/benchmarks/operator_benchmark/pt/ |
| D | batchnorm_test.py |
|    70   return F.batch_norm(input_one, mean, var, weight, bias, training)
|   121   return F.batch_norm(input_one, mean, var, weight, bias, training)
|
| /external/pytorch/torch/csrc/api/include/torch/nn/options/ |
| D | batchnorm.h |
|    68   /// Options for `torch::nn::functional::batch_norm`.
|    73   /// F::batch_norm(input, mean, variance,
|
| /external/pytorch/torch/jit/ |
| D | _freeze.py |
|   165   assert "batch_norm" in str(frozen_mod.graph)
|   167   assert "batch_norm" not in str(frozen_mod.graph)
|   210   assert "batch_norm" not in str(frozen_mod.graph)
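These assertions come from the freezing docs: after torch.jit.freeze, the default frozen-graph optimizations fold batch_norm into the preceding convolution so the op no longer appears in the graph. A minimal sketch, assuming a simple Conv2d → BatchNorm2d module:

```python
import torch
import torch.nn as nn

mod = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8)).eval()
scripted = torch.jit.script(mod)

# Freezing inlines parameters; the default optimizations fold batch_norm
# into the convolution, so "batch_norm" disappears from the graph text.
frozen = torch.jit.freeze(scripted)
assert "batch_norm" not in str(frozen.graph)
```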
|
| /external/pytorch/torch/testing/_internal/ |
| D | jit_metaprogramming_utils.py |
|   169   ('batch_norm', (S, S),
|   172   ('batch_norm', (0, S, S, S),
|   176   ('batch_norm', (0, S, S, S),
|   180   ('batch_norm', (S, S),
|   184   ('batch_norm', (S, S), (non_differentiable(torch.randn(S)), non_differentiable(torch.ones(S)),
|   187   ('batch_norm', (S, S), (non_differentiable(torch.randn(S)), non_differentiable(torch.ones(S)),
|   190   ('batch_norm', (S, S), (non_differentiable(torch.randn(S)), non_differentiable(torch.ones(S)),
|   193   ('batch_norm', (S, S), (non_differentiable(torch.randn(S)), non_differentiable(torch.ones(S)),
|   196   ('batch_norm', (S, S), (non_differentiable(torch.randn(S)), non_differentiable(torch.ones(S)),
|   199   ('batch_norm', (S, S), (non_differentiable(torch.randn(S)), non_differentiable(torch.ones(S)),
|
| /external/pytorch/torch/utils/ |
| D | mobile_optimizer.py |
|   112   if "batch_norm" in op_name:
|   115   … "saving the module and call torch.utils.mobile_optimizer.optimize_for_mobile to drop batch_norm "
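The warning points users at optimize_for_mobile, which (among other passes) folds conv + batch_norm before the module is saved for the lite interpreter. A hedged usage sketch with an illustrative module and output filename:

```python
import torch
import torch.nn as nn
from torch.utils.mobile_optimizer import optimize_for_mobile

mod = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8), nn.ReLU()).eval()
scripted = torch.jit.script(mod)

# Runs the mobile optimization passes (conv/batch_norm folding included),
# then saves a lite-interpreter model.
optimized = optimize_for_mobile(scripted)
optimized._save_for_lite_interpreter("model.ptl")
```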
|
| /external/executorch/backends/xnnpack/_passes/ |
| D | fuse_batch_norm_with_conv.py |
|    33   # We want to discover a chain of conv -> batch_norm.
|    42   # The single user of conv op must be batch_norm. If not, bail.
|
| /external/executorch/backends/transforms/ |
| D | fuse_batch_norm_with_conv.py |
|    35   # We want to discover a chain of conv -> batch_norm.
|    44   # The single user of conv op must be batch_norm. If not, bail.
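Both of these passes look for a conv whose single user is a batch_norm and then fold the normalization into the conv's parameters. A minimal sketch of the folding algebra such fusions rely on (the helper name and the 2D-conv weight layout are illustrative, not the passes' actual code):

```python
import torch

def fold_bn_into_conv(conv_w, conv_b, bn_rm, bn_rv, bn_w, bn_b, eps=1e-5):
    # Per-output-channel scale: gamma / sqrt(running_var + eps).
    scale = bn_w / torch.sqrt(bn_rv + eps)
    # Scale each output channel of the conv weight (O, I, kH, kW).
    fused_w = conv_w * scale.reshape(-1, 1, 1, 1)
    # Fold the normalization shift into the bias.
    fused_b = (conv_b - bn_rm) * scale + bn_b
    return fused_w, fused_b
```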
|
| /external/pytorch/aten/src/ATen/core/dispatch/ |
| D | backend_fallback_test.cpp |
|    93   Tensor b = batch_norm(a, {}, {}, {}, {}, true, 0.1, 1e-05, false);  in TEST()
|   103   Tensor b = batch_norm(a, {}, {}, {}, {}, true, 0.1, 1e-05, false);  in TEST()
|
| /external/tensorflow/tensorflow/compiler/xla/service/gpu/ |
| D | ir_emitter.h |
|    92   Status HandleBatchNormInference(HloInstruction* batch_norm) override;
|    93   Status HandleBatchNormTraining(HloInstruction* batch_norm) override;
|    94   Status HandleBatchNormGrad(HloInstruction* batch_norm) override;
|
| /external/pytorch/torch/ao/quantization/pt2e/ |
| D | utils.py |
|   174   torch.ops.aten.batch_norm.default,
|   283   if bn_node.target == torch.ops.aten.batch_norm.default:
|   284   # With the new training ir, instead of batch_norm + getitem,
|   285   # we only have the batch_norm node.
|   331   torch.ops.aten.batch_norm.default,
|
| D | export_utils.py |
|   118   return F.batch_norm(
|   129   return F.batch_norm(
|
| /external/pytorch/benchmarks/functional_autograd_benchmark/ |
| D | torchaudio_models.py |
|   187   batch_norm=True,  argument
|   193   self.batch_norm = (
|   194   SequenceWise(nn.BatchNorm1d(input_size)) if batch_norm else None
|   208   if self.batch_norm is not None:
|   209   x = self.batch_norm(x)
|   307   batch_norm=False,
|
| /external/pytorch/ios/TestApp/custom_build/ |
| D | mobilenetv2.yaml |
|     4   - aten::batch_norm
|