/external/pytorch/test/jit/
D | test_remove_mutation.py |
    29  y.add_(2)
    35  FileCheck().check("aten::add_").run(graph)
    53  y.add_(2)
    59  FileCheck().check("aten::add_").run(graph)
    64  x.add_(1)
    65  x.add_(3)
    72  FileCheck().check_not("aten::add_").run(graph)
    77  x.add_(1)
    79  x.add_(3)
    84  FileCheck().check_count("aten::add_", 2).run(graph)
    [all …]

D | test_functional_blocks.py |
    30  z.add_(2)
    53  # z + 1, z.add_(2) considered non functional, z = z * z should be considered functional
    54  FileCheck().check("add").check("add_").check_not("mul").check(
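Both files test the JIT RemoveMutation pass, which rewrites in-place ops such as aten::add_ into functional form when the mutated tensor is provably unaliased, and leaves them in place otherwise (which is what the check("aten::add_") assertions above confirm). A minimal sketch of the removable case, assuming the internal, unstable binding torch._C._jit_pass_remove_mutation:

    import torch
    from torch.testing import FileCheck

    @torch.jit.script
    def f(x):
        y = x + 1   # y is freshly allocated, so mutating it is safe to rewrite
        y.add_(2)
        return y

    torch._C._jit_pass_remove_mutation(f.graph)
    FileCheck().check_not("aten::add_").run(f.graph)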
/external/pytorch/torch/optim/
D | _functional.py |
    67  exp_avg.add_(make_sparse(exp_avg_update_values))
    72  exp_avg_sq.add_(make_sparse(exp_avg_sq_update_values))
    75  numer = exp_avg_update_values.add_(old_exp_avg_values)
    76  exp_avg_sq_update_values.add_(old_exp_avg_sq_values)
    77  denom = exp_avg_sq_update_values.sqrt_().add_(eps)
    84  param.add_(make_sparse(-step_size * numer.div_(denom)))
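These lines are the sparse-gradient path of Adam: only the gradient's nonzero entries update the moment estimates, and make_sparse scatters the results back into sparse tensors. For orientation, a dense analogue of the same step built from the public in-place tensor ops (a sketch with illustrative names, not the actual _functional.py signature):

    import torch

    def dense_adam_step(param, grad, exp_avg, exp_avg_sq,
                        beta1=0.9, beta2=0.999, eps=1e-8, step_size=1e-3):
        exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)               # first moment
        exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)  # second moment
        denom = exp_avg_sq.sqrt().add_(eps)
        param.addcdiv_(exp_avg, denom, value=-step_size)  # param -= step_size * m / (sqrt(v) + eps)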
/external/pytorch/torch/csrc/api/src/nn/modules/
D | _functions.cpp |
    46  scale_first.add_(input_square.select(1, c)); in forward()
    60  scale_current.add_(square_next, 1); in forward()
    65  scale_current.add_(square_previous, -1); in forward()
    73  .add_(ctx->saved_data["k"].toInt()); in forward()
   129  accum_ratio.add_(padded_ratio[c + ctx->saved_data["size"].toInt() - 1]); in backward()
   131  accum_ratio.add_(padded_ratio[c], -1); in backward()
/external/pytorch/test/cpp/jit/
D | test_dce.cpp |
    37  # CHECK: add_ in TEST()
    38  %tot.3 : Tensor = aten::add_(%tot.6, %35, %12) in TEST()
    41  # CHECK: add_ in TEST()
    42  %46 : Tensor = aten::add_(%44, %12, %12) in TEST()
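This test pins down an important property of dead code elimination: an aten::add_ node may not be removed even when its result is unused, because the in-place write is an observable side effect on an aliased tensor. A rough Python-side restatement of the same invariant (a sketch, not the C++ test itself):

    import torch
    from torch.testing import FileCheck

    @torch.jit.script
    def f(x):
        x.add_(1)   # return value unused, but the mutation of x must survive DCE
        return x

    FileCheck().check("aten::add_").run(f.graph)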
/external/pytorch/test/
D | test_fx_reinplace_pass.py |
    20  # into add_()
    37  add = torch.ops.aten.add_.Tensor(clone, 1); add = None
    65  add_1 = torch.ops.aten.add_.Tensor(view, 1); add_1 = None
   128  d.add_(1)
   154  add = torch.ops.aten.add_.Tensor(view_2, 1); add = None
   163  add_1 = torch.ops.aten.add_.Tensor(view_5, view_8); view_8 = add_1 = None
   173  c.add_(1)
   193  add = torch.ops.aten.add_.Tensor(select_1, 1); select_1 = add = None
   233  add = torch.ops.aten.add_.Tensor(select_1, 1); select_1 = add = None
   332  add = torch.ops.aten.add_.Tensor(diagonal, 1); diagonal = add = None
   [all …]

D | test_functionalization.py |
   163  x.add_(1)
   177  x.add_(1)
   178  self.assertRaises(RuntimeError, lambda: y.add_(1))
   179  self.assertRaises(RuntimeError, lambda: z.add_(1))
   279  y.add_(tmp)
   424  z.add_(1)
   459  add = torch.ops.aten.add_.Tensor(view, 1); add = None
   498  x.add_(tmp)
   569  y.add_(1)
   672  y.add_(tmp)
   [all …]
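These two files exercise opposite directions of the same round trip: functionalization replaces aten.add_.Tensor with out-of-place aten.add.Tensor plus explicit copies (and raises RuntimeError where a mutation cannot be represented, as lines 178-179 suggest), while the FX reinplace pass later converts eligible out-of-place adds back into add_. A small sketch of the forward direction, using torch.func.functionalize together with the experimental make_fx tracer:

    import torch
    from torch.func import functionalize
    from torch.fx.experimental.proxy_tensor import make_fx

    def f(x):
        y = x.clone()
        y.add_(1)        # in-place, but only on a local clone
        return y

    gm = make_fx(functionalize(f))(torch.zeros(3))
    print(gm.code)       # traced graph calls aten.add.Tensor, not aten.add_.Tensor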
/external/pytorch/test/fx/
D | test_common_passes.py |
    41  y.add_(1)
    46  x.add_(1)
    53  y.add_(1)
    59  y.add_(1)
/external/rust/android-crates-io/crates/grpcio-sys/grpc/third_party/upb/protos_generator/
D | gen_repeated_fields.cc |
    61  using $0Access::add_$1; in WriteRepeatedFieldUsingAccessors()
    76  using $0Access::add_$1; in WriteRepeatedFieldUsingAccessors()
   107  absl::StatusOr<$0> add_$2(); in WriteRepeatedFieldsInMessageHeader()
   117  bool add_$1($0 val); in WriteRepeatedFieldsInMessageHeader()
   146  absl::StatusOr<$1> $0::add_$2() { in WriteRepeatedMessageAccessor()
   200  bool $0::add_$2($1 val) { in WriteRepeatedStringAccessor()
   245  bool $0::add_$2($1 val) { return $3_add_$4(msg_, val, arena_); } in WriteRepeatedScalarAccessor()
/external/pytorch/aten/src/ATen/functorch/
D | PyTorchOperatorHacks.cpp |
    69  return output.add_(*bias); in linear_hack()
    98  auto log_weight = (pos_weight - 1).mul(target).add_(1); in binary_cross_entropy_with_logits_hack()
    99  …nput).add(log_weight.mul(((-max_val).exp_().add((-input - max_val).exp_())).log_().add_(max_val))); in binary_cross_entropy_with_logits_hack()
   101  …loss = (1 - target).mul(input).add_(max_val).add_((-max_val).exp_().add((-input -max_val).exp_()).… in binary_cross_entropy_with_logits_hack()
   191  b = noise.add(-1).mul_(alpha * a).add_(alpha * a * p); in _dropout_impl()
   200  return multiply<inplace>(input, noise).add_(b); in _dropout_impl()
/external/pytorch/torch/testing/_internal/
D | hop_db.py |
    23  return [x[0].cos().add_(1.) * y0, (x[1] + y1.sin()).cos_().view(x[1].size())]
    76  x.add_(5)
    77  z.add_(5)
    83  x.add_(5)
    84  z.add_(5)
/external/pytorch/torch/nn/modules/
D | _functions.py |
   247  scale_first.add_(input_square.select(1, c))
   257  scale_current.add_(square_next, alpha=1)
   261  scale_current.add_(square_previous, alpha=-1)
   263  ctx.scale.mul_(ctx.alpha / ctx.size).add_(ctx.k)
   302  accum_ratio.add_(paddded_ratio[c + ctx.size - 1])
   306  accum_ratio.add_(paddded_ratio[c], alpha=-1)
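The alpha= keyword in these calls is a fused scale factor: t.add_(u, alpha=s) computes t += s * u in place, which is how the running LRN scale adds the next squared slice and subtracts the one leaving the window (the C++ counterpart in _functions.cpp above passes the same factor positionally). A self-contained illustration:

    import torch

    a = torch.ones(3)
    b = torch.full((3,), 2.0)
    a.add_(b, alpha=-1)   # a += (-1) * b, i.e. an in-place subtraction
    print(a)              # tensor([-1., -1., -1.])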
/external/pytorch/torch/csrc/jit/passes/
D | fuse_relu.cpp |
    33  %add_res = aten::add_(%a, %b, %alpha) in fuseAddReluImpl()
    55  // NB: Patterns that are left out are add_ + relu and add_out + relu in fuseAddReluImpl()
    56  // This is because inplace mutation of the tensor done by add_ will be lost if in fuseAddReluImpl()
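The comment on lines 55-56 is the key design note: the pass fuses only out-of-place add with relu, because aten::add_ writes its result into an existing tensor that other values may alias, and a fused op producing a fresh output would silently drop that write. A short demonstration of the aliasing hazard (plain eager code, not the pass itself):

    import torch

    a = torch.zeros(3)
    view = a[:]                  # aliases a's storage
    out = torch.relu(a.add_(1))  # add_ writes through to `view` as well
    print(view)                  # tensor([1., 1., 1.])
    # A fused add_relu that allocated a new output would leave `a` and
    # `view` at zero, changing observable behavior.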
/external/pytorch/torch/csrc/api/src/optim/
D | adagrad.cpp |
   111  state.sum(state.sum().add_(make_sparse(grad_values.pow(2)))); in step()
   113  const auto std_values = std._values().sqrt_().add_(options.eps()); in step()
   115  p.add_(make_sparse(grad_values / std_values), -clr); in step()
   118  const auto std = state.sum().sqrt().add_(options.eps()); in step()

D | rmsprop.cpp |
   121  grad_avg.mul_(alpha).add_(grad, 1 - alpha); in step()
   124  .add_(options.eps()); in step()
   126  avg = square_avg.sqrt().add_(options.eps()); in step()
   133  p.add_(buf, -options.lr()); in step()
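In the C++ frontend the trailing scalar is the alpha multiplier, so grad_avg.mul_(alpha).add_(grad, 1 - alpha) is the exponential moving average avg <- alpha * avg + (1 - alpha) * grad. The same non-centered, momentum-free RMSprop step in Python, with illustrative names:

    import torch

    def rmsprop_step(p, grad, square_avg, lr=1e-2, alpha=0.99, eps=1e-8):
        # square_avg <- alpha * square_avg + (1 - alpha) * grad**2
        square_avg.mul_(alpha).addcmul_(grad, grad, value=1 - alpha)
        avg = square_avg.sqrt().add_(eps)
        p.addcdiv_(grad, avg, value=-lr)   # p -= lr * grad / avg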
/external/pytorch/c10/core/
D | InferenceMode.h |
    34  // for `inference_tensor.add_(1)` case.
    43  // k.add_(2);
    45  // `k.add_(2)` still need to go through ADInplaceOrView kernel so that it's
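The header comments describe the invariant these snippets reference: a tensor created inside InferenceMode carries no ADInplaceOrView key, so mutating it afterwards from normal mode (inference_tensor.add_(1)) is rejected, while mutating a normal tensor inside the mode (k.add_(2)) still dispatches through ADInplaceOrView so its version counter is bumped. The Python-visible behavior:

    import torch

    with torch.inference_mode():
        t = torch.ones(3)   # an inference tensor
        t.add_(1)           # allowed inside the mode

    try:
        t.add_(1)           # outside the mode: no version counter to bump
    except RuntimeError as e:
        print(e)  # "Inplace update to inference tensor outside InferenceMode ..."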
/external/pytorch/test/cpp/api/
D | inference_mode.cpp |
   465  b.add_(1), in TEST()
   472  c.add_(1), in TEST()
   490  b_el.add_(1), in TEST()
   496  c_el.add_(1), in TEST()
   561  // Testing both copy_ from VariableTypeManual and add_ from generated code. in TEST()
   563  s.add_(t); in TEST()
   564  t.add_(s); in TEST()
   568  s.add_(t); in TEST()
   574  t.add_(s), in TEST()
/external/executorch/backends/mediatek/quantizer/
D | annotator.py |
   125  torch.ops.aten.add_.Scalar,
   126  torch.ops.aten.add_.Tensor,
   219  torch.ops.aten.add_.Scalar,
   220  torch.ops.aten.add_.Tensor,
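The annotator lists both overloads because torch.ops.aten.add_ is an OpOverloadPacket: add_.Tensor takes a tensor operand and add_.Scalar a Python number, and quantization patterns must match each OpOverload individually. For example:

    import torch

    t = torch.zeros(2)
    torch.ops.aten.add_.Scalar(t, 1)               # same as t.add_(1)
    torch.ops.aten.add_.Tensor(t, torch.ones(2))   # same as t.add_(torch.ones(2))
    print(t)                                       # tensor([2., 2.])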
/external/angle/third_party/spirv-tools/src/source/opt/
D | modify_maximal_reconvergence.h |
    34  explicit ModifyMaximalReconvergence(bool add = true) : Pass(), add_(add) {} in Pass()
    48  bool add_; variable

/external/deqp-deps/SPIRV-Tools/source/opt/
D | modify_maximal_reconvergence.h |
    34  explicit ModifyMaximalReconvergence(bool add = true) : Pass(), add_(add) {} in Pass()
    48  bool add_; variable

/external/swiftshader/third_party/SPIRV-Tools/source/opt/
D | modify_maximal_reconvergence.h |
    34  explicit ModifyMaximalReconvergence(bool add = true) : Pass(), add_(add) {} in Pass()
    48  bool add_; variable
/external/pytorch/test/export/
D | test_verifier.py |
    53  node.target = torch.ops.aten.add_.Tensor
    95  node.target = torch.ops.aten.add_.Tensor
   178  self.my_buffer2.add_(1.0)
   201  self.my_buffer2.add_(1.0)

D | test_tools.py |
    19  x.add_(5)
    20  z.add_(5)
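The buffer mutation self.my_buffer2.add_(1.0) is the interesting case here: torch.export functionalizes the module, so the exported graph contains no aten.add_ node and the buffer's new value becomes an extra graph output; conversely, the verifier test injects torch.ops.aten.add_.Tensor as a node target precisely to check that such non-functional ops are flagged. A minimal sketch of the export side:

    import torch

    class M(torch.nn.Module):
        def __init__(self):
            super().__init__()
            self.register_buffer("buf", torch.zeros(2))

        def forward(self, x):
            self.buf.add_(1.0)     # in-place buffer mutation
            return x + self.buf

    ep = torch.export.export(M(), (torch.zeros(2),))
    print(ep.graph)  # functional graph: aten.add, with buf's update as an output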
/external/OpenCL-CTS/test_conformance/subgroups/
D | test_subgroup_extended_types.cpp |
    34  rft.run_impl<T, RED_NU<T, ArithmeticOp::add_>>("sub_group_reduce_add"); in run_scan_reduction_for_type()
    39  error |= rft.run_impl<T, SCIN_NU<T, ArithmeticOp::add_>>( in run_scan_reduction_for_type()
    45  error |= rft.run_impl<T, SCEX_NU<T, ArithmeticOp::add_>>( in run_scan_reduction_for_type()
/external/cronet/tot/third_party/protobuf/src/google/protobuf/compiler/cpp/
D | string_field.cc |
   746  "$deprecated_attr$std::string* ${1$add_$name$$}$();\n" in GenerateAccessorDeclarations()
   747  "$deprecated_attr$void ${1$add_$name$$}$(const std::string& value);\n" in GenerateAccessorDeclarations()
   748  "$deprecated_attr$void ${1$add_$name$$}$(std::string&& value);\n" in GenerateAccessorDeclarations()
   749  "$deprecated_attr$void ${1$add_$name$$}$(const char* value);\n", in GenerateAccessorDeclarations()
   753  "$deprecated_attr$void ${1$add_$name$$}$(StringPiece value);\n", in GenerateAccessorDeclarations()
   757  "$deprecated_attr$void ${1$add_$name$$}$(const $pointer_type$* " in GenerateAccessorDeclarations()
   783  "inline std::string* $classname$::add_$name$() {\n" in GenerateInlineAccessorDefinitions()
   853  "inline void $classname$::add_$name$(const std::string& value) {\n" in GenerateInlineAccessorDefinitions()
   858  "inline void $classname$::add_$name$(std::string&& value) {\n" in GenerateInlineAccessorDefinitions()
   863  "inline void $classname$::add_$name$(const char* value) {\n" in GenerateInlineAccessorDefinitions()
   [all …]