/external/tensorflow/tensorflow/cc/gradients/

math_grad.cc
    44  Output ConjugateHelper(const Scope& scope, const Output& out) {
    47  return Conj(scope, out);
    55  Status AbsGrad(const Scope& scope, const Operation& op,
    59  grad_outputs->push_back(Mul(scope, grad_inputs[0], Sign(scope, op.input(0))));
    60  return scope.status();
    64  Status NegGrad(const Scope& scope, const Operation& op,
    68  grad_outputs->push_back(Neg(scope, grad_inputs[0]));
    69  return scope.status();
    73  Status InvGrad(const Scope& scope, const Operation& op,
    78  internal::ReciprocalGrad(scope, op.output(0), grad_inputs[0]));
    [all …]
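Every gradient in this directory follows the same shape: a free function taking the Scope, the forward Operation, and the incoming gradients, which appends the computed gradients and returns scope.status(), and is then registered under its op name. A minimal sketch, with the AbsGrad body taken from the matches above and the registration macro assumed from tensorflow/cc/framework/grad_op_registry.h (the includes used by math_grad.cc are omitted):

    // d|x|/dx = sign(x), so scale the incoming gradient by Sign(x).
    Status AbsGrad(const Scope& scope, const Operation& op,
                   const std::vector<Output>& grad_inputs,
                   std::vector<Output>* grad_outputs) {
      grad_outputs->push_back(Mul(scope, grad_inputs[0], Sign(scope, op.input(0))));
      return scope.status();
    }
    REGISTER_GRADIENT_OP("Abs", AbsGrad);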
array_grad.cc
    42  Status PackGrad(const Scope& scope, const Operation& op,
    51  auto grad_op = Unstack(scope, grad_inputs[0], N, Unstack::Axis(axis));
    55  return scope.status();
    59  Status UnpackGrad(const Scope& scope, const Operation& op,
    64  grad_outputs->push_back(Stack(scope, grad_inputs, Stack::Axis(axis)));
    65  return scope.status();
    69  Status IdentityGrad(const Scope& scope, const Operation& op,
    72  grad_outputs->push_back(Identity(scope, grad_inputs[0]));
    73  return scope.status();
    77  Status RefIdentityGrad(const Scope& scope, const Operation& op,
    [all …]
nn_grad.cc
    27  Status SoftmaxGrad(const Scope& scope, const Operation& op,
    41  auto dyy = Mul(scope, grad_inputs[0], y);
    42  auto sum = Reshape(scope, Sum(scope, dyy, {1}), {-1, 1});
    43  auto sub = Sub(scope, grad_inputs[0], sum);
    44  auto dx = Mul(scope, sub, y);
    46  return scope.status();
    50  Status LogSoftmaxGrad(const Scope& scope, const Operation& op,
    53  auto softmax = Exp(scope, op.output(0));
    54  auto sum = Sum(scope, grad_inputs[0], {1}, Sum::KeepDims(true));
    55  auto mul = Mul(scope, sum, softmax);
    [all …]
/external/tensorflow/tensorflow/compiler/tf2xla/

functionalize_control_flow_test.cc
    66  Scope scope = Scope::NewRootScope().ExitOnError();
    68  auto x = ops::Placeholder(scope.WithOpName("x"), DT_INT32);
    69  auto y = ops::Placeholder(scope.WithOpName("y"), DT_INT32);
    70  auto less = ops::Less(scope.WithOpName("cond/Less"), y, x);
    71  auto switch_1 = ops::Switch(scope.WithOpName("cond/Switch"), less, less);
    74  ops::Identity(scope.WithOpName("cond/Identity"), switch_1.output_true);
    76  scope.WithOpName("cond").WithControlDependencies(identity_t), 17);
    77  auto switch_2 = ops::Switch(scope.WithOpName("cond/Switch"), y, less);
    78  auto mul = ops::Multiply(scope.WithOpName("cond/Mul"), switch_2.output_true,
    82  ops::Identity(scope.WithOpName("cond/Identity"), switch_1.output_false);
    [all …]
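The test drives the C++ graph-building API: a root Scope owns the graph, WithOpName sets each node's name, and the finished graph is exported as a GraphDef. A rough sketch of that machinery (not the test's full cond graph; NewSubScope is used here instead of spelling the "cond/" prefix by hand, and the placeholder types are only illustrative):

    #include "tensorflow/cc/framework/scope.h"
    #include "tensorflow/cc/ops/standard_ops.h"
    #include "tensorflow/core/framework/graph.pb.h"

    using namespace tensorflow;

    void BuildNamedGraph() {
      Scope root = Scope::NewRootScope().ExitOnError();  // abort on the first failed op
      Scope cond = root.NewSubScope("cond");             // children are named "cond/..."
      auto x = ops::Placeholder(root.WithOpName("x"), DT_INT32);
      auto y = ops::Placeholder(root.WithOpName("y"), DT_INT32);
      auto less = ops::Less(cond.WithOpName("Less"), y, x);  // node name "cond/Less"
      GraphDef gdef;
      TF_CHECK_OK(root.ToGraphDef(&gdef));               // serialize the constructed graph
    }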
/external/llvm/test/CodeGen/PowerPC/

pr17168.ll
    60  …: DIFlagPrototyped, isOptimized: true, unit: !0, scopeLine: 74, file: !1, scope: !5, type: !6, var…
    69  !13 = !DILocalVariable(name: "argc", line: 74, arg: 1, scope: !4, file: !5, type: !8)
    70  !14 = !DILocalVariable(name: "argv", line: 74, arg: 2, scope: !4, file: !5, type: !9)
    71  !15 = !DILocalVariable(name: "niter", line: 76, scope: !4, file: !5, type: !8)
    72  !16 = !DILocalVariable(name: "step", line: 76, scope: !4, file: !5, type: !8)
    73  !17 = !DILocalVariable(name: "n3", line: 76, scope: !4, file: !5, type: !8)
    74  !18 = !DILocalVariable(name: "nthreads", line: 77, scope: !4, file: !5, type: !8)
    75  !19 = !DILocalVariable(name: "navg", line: 78, scope: !4, file: !5, type: !20)
    77  !21 = !DILocalVariable(name: "mflops", line: 78, scope: !4, file: !5, type: !20)
    78  !22 = !DILocalVariable(name: "tmax", line: 80, scope: !4, file: !5, type: !20)
    [all …]
/external/tensorflow/tensorflow/contrib/slim/python/slim/nets/

inception_v3.py
     39  scope=None):
    105  with variable_scope.variable_scope(scope, 'InceptionV3', [inputs]):
    112  net = layers.conv2d(inputs, depth(32), [3, 3], stride=2, scope=end_point)
    118  net = layers.conv2d(net, depth(32), [3, 3], scope=end_point)
    125  net, depth(64), [3, 3], padding='SAME', scope=end_point)
    131  net = layers_lib.max_pool2d(net, [3, 3], stride=2, scope=end_point)
    137  net = layers.conv2d(net, depth(80), [1, 1], scope=end_point)
    143  net = layers.conv2d(net, depth(192), [3, 3], scope=end_point)
    149  net = layers_lib.max_pool2d(net, [3, 3], stride=2, scope=end_point)
    165  net, depth(64), [1, 1], scope='Conv2d_0a_1x1')
    [all …]
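These slim network definitions all use the same convention: the function accepts scope=None, and variable_scope(scope, 'DefaultName', [inputs]) falls back to the default name unless the caller supplies one, while each layer gets its own scope= so variables land under predictable names. A small sketch of the pattern, written against tf 1.x and the public tf.contrib.slim alias rather than the contrib-internal imports used in these files, with made-up layer names and sizes:

    import tensorflow as tf
    import tensorflow.contrib.slim as slim

    def small_block(inputs, scope=None):
      # scope=None -> variables go under the default name 'SmallBlock'.
      with tf.variable_scope(scope, 'SmallBlock', [inputs]):
        net = slim.conv2d(inputs, 32, [3, 3], stride=2, scope='Conv2d_1a_3x3')
        net = slim.max_pool2d(net, [3, 3], stride=2, scope='MaxPool_2a_3x3')
        return net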
inception_v1.py
    35  def inception_v1_base(inputs, final_endpoint='Mixed_5c', scope='InceptionV1'):
    60  with variable_scope.variable_scope(scope, 'InceptionV1', [inputs]):
    67  net = layers.conv2d(inputs, 64, [7, 7], stride=2, scope=end_point)
    72  net = layers_lib.max_pool2d(net, [3, 3], stride=2, scope=end_point)
    77  net = layers.conv2d(net, 64, [1, 1], scope=end_point)
    82  net = layers.conv2d(net, 192, [3, 3], scope=end_point)
    87  net = layers_lib.max_pool2d(net, [3, 3], stride=2, scope=end_point)
    95  branch_0 = layers.conv2d(net, 64, [1, 1], scope='Conv2d_0a_1x1')
    97  branch_1 = layers.conv2d(net, 96, [1, 1], scope='Conv2d_0a_1x1')
    99  branch_1, 128, [3, 3], scope='Conv2d_0b_3x3')
    [all …]
inception_v2.py
     39  scope=None):
     81  with variable_scope.variable_scope(scope, 'InceptionV2', [inputs]):
    109  scope=end_point)
    115  net = layers_lib.max_pool2d(net, [3, 3], scope=end_point, stride=2)
    124  scope=end_point,
    131  net = layers.conv2d(net, depth(192), [3, 3], scope=end_point)
    137  net = layers_lib.max_pool2d(net, [3, 3], scope=end_point, stride=2)
    147  net, depth(64), [1, 1], scope='Conv2d_0a_1x1')
    153  scope='Conv2d_0a_1x1')
    155  branch_1, depth(64), [3, 3], scope='Conv2d_0b_3x3')
    [all …]
vgg.py
     79  scope='vgg_a'):
     98  with variable_scope.variable_scope(scope, 'vgg_a', [inputs]) as sc:
    105  inputs, 1, layers.conv2d, 64, [3, 3], scope='conv1')
    106  net = layers_lib.max_pool2d(net, [2, 2], scope='pool1')
    107  net = layers_lib.repeat(net, 1, layers.conv2d, 128, [3, 3], scope='conv2')
    108  net = layers_lib.max_pool2d(net, [2, 2], scope='pool2')
    109  net = layers_lib.repeat(net, 2, layers.conv2d, 256, [3, 3], scope='conv3')
    110  net = layers_lib.max_pool2d(net, [2, 2], scope='pool3')
    111  net = layers_lib.repeat(net, 2, layers.conv2d, 512, [3, 3], scope='conv4')
    112  net = layers_lib.max_pool2d(net, [2, 2], scope='pool4')
    [all …]
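vgg.py leans on slim's repeat helper: it applies the same layer constructor N times and derives the per-layer variable scopes from the scope argument, so scope='conv3' with two repetitions yields conv3/conv3_1 and conv3/conv3_2. A short sketch of just that idea, using the public tf.contrib.slim alias and an arbitrary input shape:

    import tensorflow as tf
    import tensorflow.contrib.slim as slim

    inputs = tf.placeholder(tf.float32, [None, 224, 224, 3])
    # Two 3x3, 256-filter convolutions named conv3/conv3_1 and conv3/conv3_2.
    net = slim.repeat(inputs, 2, slim.conv2d, 256, [3, 3], scope='conv3')
    net = slim.max_pool2d(net, [2, 2], scope='pool3')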
/external/tensorflow/tensorflow/go/op/

wrappers.go
    101  func WriteImageSummary(scope *Scope, writer tf.Output, step tf.Output, tag tf.Output, tensor tf.Out…
    102  if scope.Err() != nil {
    116  return scope.AddOperation(opspec)
    129  func ImportEvent(scope *Scope, writer tf.Output, event tf.Output) (o *tf.Operation) {
    130  if scope.Err() != nil {
    139  return scope.AddOperation(opspec)
    153  func WriteSummary(scope *Scope, writer tf.Output, step tf.Output, tensor tf.Output, tag tf.Output, …
    154  if scope.Err() != nil {
    163  return scope.AddOperation(opspec)
    186  func CreateSummaryDbWriter(scope *Scope, writer tf.Output, db_uri tf.Output, experiment_name tf.Out…
    [all …]
/external/tensorflow/tensorflow/cc/framework/

gradient_checker_test.cc
    40  Scope scope = Scope::NewRootScope();
    42  auto x = Placeholder(scope, DT_FLOAT, Placeholder::Shape(shape));
    43  auto y = Square(scope, x);
    46  scope, {x}, {shape}, {y}, {shape}, &max_error)));
    51  Scope scope = Scope::NewRootScope();
    53  auto x = Placeholder(scope, DT_DOUBLE, Placeholder::Shape(shape));
    54  auto y = Square(scope, x);
    57  scope, {x}, {shape}, {y}, {shape}, &max_error)));
    62  Scope scope = Scope::NewRootScope();
    64  auto x = Placeholder(scope, DT_COMPLEX64, Placeholder::Shape(shape));
    [all …]
gradients_test.cc
     89  const Scope& scope = expected ? scope_expected_ : scope_test_;
     91  auto x = Const(scope, {{1.0, 2.0}, {3.0, 4.0}});
     92  auto y = Const(scope, {{1.0, 0.0}, {0.0, 1.0}});
     93  auto z = MatMul(scope, x, y);
     94  TF_ASSERT_OK(scope.status());
     99  auto dz = Const(scope, {{1.0, 1.0}, {1.0, 1.0}});
    100  auto dx = MatMul(scope, dz, y, MatMul::TransposeB(true));
    101  auto dy = MatMul(scope, x, dz, MatMul::TransposeA(true));
    104  auto dz = Const(scope, {{1.0, 1.0}, {1.0, 1.0}});
    107  AddSymbolicGradients(scope, {z}, {x, y}, {dz}, &grad_outputs));
    [all …]
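The pattern exercised by both tests: build the forward ops under a Scope, then either check them numerically (ComputeGradientError) or ask AddSymbolicGradients for d(outputs)/d(inputs) given seed gradients. A minimal sketch of the symbolic call outside the test fixture, with error handling collapsed to TF_CHECK_OK:

    #include <vector>

    #include "tensorflow/cc/framework/gradients.h"
    #include "tensorflow/cc/framework/scope.h"
    #include "tensorflow/cc/ops/standard_ops.h"

    using namespace tensorflow;
    using namespace tensorflow::ops;

    void MatMulGradientExample() {
      Scope scope = Scope::NewRootScope();
      auto x = Const(scope, {{1.0, 2.0}, {3.0, 4.0}});
      auto y = Const(scope, {{1.0, 0.0}, {0.0, 1.0}});
      auto z = MatMul(scope, x, y);
      auto dz = Const(scope, {{1.0, 1.0}, {1.0, 1.0}});   // seed gradient for z
      std::vector<Output> grad_outputs;
      // Fills grad_outputs with dz/dx and dz/dy as new nodes in the same graph.
      TF_CHECK_OK(AddSymbolicGradients(scope, {z}, {x, y}, {dz}, &grad_outputs));
    }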
/external/python/cpython2/Lib/compiler/

symbols.py
    220  scope = self.module = self.scopes[node] = ModuleScope()
    221  self.visit(node.node, scope)
    231  scope = FunctionScope(node.name, self.module, self.klass)
    233  scope.nested = 1
    234  self.scopes[node] = scope
    235  self._do_args(scope, node.argnames)
    236  self.visit(node.code, scope)
    237  self.handle_free_vars(scope, parent)
    240  scope = GenExprScope(self.module, self.klass);
    243  scope.nested = 1
    [all …]
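symbols.py is the symbol-table pass of the old Python 2 compiler package: SymbolVisitor walks the AST and records one Scope object (ModuleScope, FunctionScope, GenExprScope, ...) per scope-introducing node in its scopes dict. A rough sketch of driving it directly, Python 2 only and with an illustrative source string, assuming the public compiler.parse/walk helpers:

    # Python 2 only; the compiler package was removed in Python 3.
    from compiler import parse, walk
    from compiler.symbols import SymbolVisitor

    src = "def f(x):\n    y = x + 1\n    return y\n"
    tree = parse(src)
    visitor = SymbolVisitor()
    walk(tree, visitor)

    # visitor.scopes maps AST nodes (Module, Function, ...) to Scope objects.
    for node, scope in visitor.scopes.items():
        print node.__class__.__name__, scope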
/external/tensorflow/tensorflow/contrib/quantize/python/

quantize_parameterized_test.py
     80  scope = 'test/test2' if with_bypass else 'test'
     83  activation_fn=activation_fn, scope=scope)
     93  weights_quant = graph.get_operation_by_name(scope + '/weights_quant/' +
     97  scope + '/weights_quant/AssignMinLast',
     98  scope + '/weights_quant/AssignMaxLast', scope + '/weights/read'
    102  output_op_name = scope + '/weights_quant/delayed_quant/Switch_1'
    104  output_op_name = scope + '/Conv2D'
    109  conv_quant = graph.get_operation_by_name(scope + '/conv_quant/' +
    113  scope + '/conv_quant/AssignMinEma',
    114  scope + '/conv_quant/AssignMaxEma', scope + '/BiasAdd'
    [all …]
fold_batch_norms_test.py
     89  scope = 'test/test2' if with_bypass else 'test'
    100  scope=scope)
    108  folded_mul = g.get_operation_by_name(scope + '/mul_fold')
    111  scope + '/correction_mult',
    112  self._BatchNormMultiplierName(scope, has_scaling, fused_batch_norm)
    114  self._AssertOutputGoesToOps(folded_mul, g, [scope + '/Conv2D_Fold'])
    116  folded_conv = g.get_operation_by_name(scope + '/Conv2D_Fold')
    119  [scope + '/mul_fold', inputs.op.name])
    120  self._AssertOutputGoesToOps(folded_conv, g, [scope + '/post_conv_mul'])
    122  folded_add = g.get_operation_by_name(scope + '/add_fold')
    [all …]
/external/vixl/test/

test-code-generation-scopes.cc
     58  CodeBufferCheckScope scope(&masm, aarch32::kA32InstructionSizeInBytes);
     72  CodeBufferCheckScope scope(&masm, aarch64::kInstructionSize);
     86  CodeBufferCheckScope scope(&masm, 2 * aarch32::kA32InstructionSizeInBytes);
    101  CodeBufferCheckScope scope(&masm, 2 * aarch64::kInstructionSize);
    116  CodeBufferCheckScope scope;
    118  scope.Open(&masm, aarch32::kA32InstructionSizeInBytes);
    132  CodeBufferCheckScope scope;
    134  scope.Open(&masm, aarch64::kInstructionSize);
    148  CodeBufferCheckScope scope(&masm, aarch32::kA32InstructionSizeInBytes);
    150  scope.Close();
    [all …]
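CodeBufferCheckScope reserves room in the assembler's buffer for the stated number of bytes and, in debug builds, checks that the code emitted while the scope is open fits in it; it can be armed in the constructor or later via Open/Close, as the matches above show. A rough sketch against the AArch64 macro assembler (the header path and the Nop stand-in for "one instruction" are assumptions, not taken from the test):

    #include "aarch64/macro-assembler-aarch64.h"

    using namespace vixl;
    using namespace vixl::aarch64;

    void EmitOneInstruction() {
      MacroAssembler masm;
      {
        // Reserve space for exactly one AArch64 instruction (4 bytes).
        CodeBufferCheckScope scope(&masm, kInstructionSize);
        masm.Nop();
      }
      masm.FinalizeCode();
    }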
/external/tensorflow/tensorflow/cc/ops/

const_op.cc
    24  Output ConstHelper(const Scope& scope, const T& value, DataType dtype) {
    25  if (!scope.ok()) return Output();
    28  Graph* graph = scope.graph();
    29  const string unique_name = scope.GetUniqueNameForOp("Const");
    33  scope.UpdateBuilder(&builder);
    34  scope.UpdateStatus(builder.Finalize(graph, &ret));
    35  if (!scope.ok()) return Output();
    37  scope.UpdateStatus(scope.DoShapeInference(ret));
    38  if (!scope.ok()) return Output();
    44  Output Const(const Scope& scope, const Input::Initializer& val) {
    [all …]
const_op.h
    29  Output Const(const Scope& scope, const Input::Initializer& val);
    31  Output ConstFromProto(const Scope& scope, const TensorProto& proto);
    33  NodeBuilder::NodeOut AsNodeOut(const Scope& scope, const Input& inp);
    36  Output Const(const Scope& scope, const Input::Initializer& val) {
    37  auto orig_const_output = Const(scope, val);
    38  if (!scope.ok()) return Output();
    47  return Const(scope, Input::Initializer(t));
    52  auto orig_const = AsNodeOut(scope, orig_const_output);
    53  const auto cast_op_name = scope.GetUniqueNameForOp("Cast");
    58  scope.UpdateBuilder(&cast_builder);
    [all …]
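From the caller's side these helpers are what let Const(scope, ...) accept plain C++ literals and brace-initializer lists, with the templated overload additionally casting the stored tensor to the requested element type. A short usage sketch with arbitrary values:

    #include "tensorflow/cc/framework/scope.h"
    #include "tensorflow/cc/ops/const_op.h"

    using namespace tensorflow;
    using namespace tensorflow::ops;

    void ConstExamples() {
      Scope scope = Scope::NewRootScope();
      auto scalar = Const(scope, 42);                        // DT_INT32 scalar
      auto matrix = Const(scope, {{1.f, 2.f}, {3.f, 4.f}});  // 2x2 DT_FLOAT constant
      auto casted = Const<double>(scope, {1, 2, 3});         // int literals cast to DT_DOUBLE
    }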
/external/llvm/test/CodeGen/ARM/

debug-info-blocks.ll
    115  …(tag: DW_TAG_enumeration_type, line: 248, size: 32, align: 32, file: !160, scope: !0, elements: !3)
    119  …enumeration_type, name: "Mode", line: 79, size: 32, align: 32, file: !160, scope: !0, elements: !7)
    123  …(tag: DW_TAG_enumeration_type, line: 15, size: 32, align: 32, file: !149, scope: !0, elements: !11)
    128  …(tag: DW_TAG_enumeration_type, line: 20, size: 32, align: 32, file: !150, scope: !0, elements: !16)
    133  …(tag: DW_TAG_enumeration_type, line: 14, size: 32, align: 32, file: !151, scope: !0, elements: !21)
    137  … DIFlagPrototyped, isOptimized: false, unit: !0, scopeLine: 609, file: !152, scope: !24, type: !25)
    141  !27 = !DILocalVariable(name: ".block_descriptor", line: 609, arg: 1, flags: DIFlagArtificial, scope…
    142  !28 = !DIDerivedType(tag: DW_TAG_pointer_type, size: 32, scope: !0, baseType: !29)
    143  … name: "__block_literal_14", line: 609, size: 256, align: 32, file: !152, scope: !24, elements: !3…
    145  …DW_TAG_member, name: "__isa", line: 609, size: 32, align: 32, file: !152, scope: !24, baseType: !3…
    [all …]
/external/tensorflow/tensorflow/contrib/gan/python/losses/python/

losses_impl.py
     78  scope=None,
    101  with ops.name_scope(scope, 'generator_wasserstein_loss', (
    102  discriminator_gen_outputs, weights)) as scope:
    107  loss, weights, scope, loss_collection, reduction)
    120  scope=None,
    146  with ops.name_scope(scope, 'discriminator_wasserstein_loss', (
    148  generated_weights)) as scope:
    155  discriminator_gen_outputs, generated_weights, scope,
    158  discriminator_real_outputs, real_weights, scope, loss_collection=None,
    180  scope=None,
    [all …]
/external/libxml2/doc/

search.php
     93  $scope = $HTTP_GET_VARS[ "scope" ];
    100  if ($scope == NULL)
    101  $scope = "any";
    102  $scope = ltrim ($scope);
    103  if ($scope == "")
    104  $scope = "any";
    115  <option value="XML" <?php if ($scope == 'XML') print "selected"?>>XML resources</option>
    116  <option value="XSLT" <?php if ($scope == 'XSLT') print "selected"?>>XSLT resources</option>
    117  <option value="API" <?php if ($scope == 'API') print "selected"?>>Only the APIs</option>
    118  … <option value="XMLAPI" <?php if ($scope == 'XMLAPI') print "selected"?>>Only the XML API</option>
    [all …]
/external/tensorflow/tensorflow/contrib/losses/python/losses/

loss_ops.py
    110  def compute_weighted_loss(losses, weights=1.0, scope=None):
    126  with ops.name_scope(scope, "weighted_loss", [losses, weights]):
    214  def get_losses(scope=None, loss_collection=ops.GraphKeys.LOSSES):
    224  return ops.get_collection(loss_collection, scope)
    228  def get_regularization_losses(scope=None):
    237  return ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES, scope)
    264  def absolute_difference(predictions, labels=None, weights=1.0, scope=None):
    289  with ops.name_scope(scope, "absolute_difference",
    290  [predictions, labels, weights]) as scope:
    295  return compute_weighted_loss(losses, weights, scope=scope)
    [all …]
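Both the contrib.losses and contrib.gan loss functions above share one idiom: scope=None is threaded through ops.name_scope(scope, default_name, values), so a caller-supplied name wins but the ops otherwise land under a sensible default, and the resulting scope is handed on to compute_weighted_loss. A sketch of a custom loss written the same way, using the public tf 1.x API and an illustrative L1 body:

    import tensorflow as tf

    def l1_loss(predictions, labels, weights=1.0, scope=None):
      # scope=None -> ops are named under 'l1_loss'; a caller-supplied
      # string or scope overrides the default name.
      with tf.name_scope(scope, 'l1_loss', [predictions, labels, weights]) as sc:
        losses = tf.abs(predictions - labels)
        return tf.losses.compute_weighted_loss(losses, weights, scope=sc)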
/external/llvm/test/CodeGen/AArch64/

aarch64-2014-08-11-MachineCombinerCrash.ll
    50  … DIFlagPrototyped, isOptimized: true, unit: !0, scopeLine: 141, file: !1, scope: !1, type: !6, var…
    57  !13 = !DILocalVariable(name: "", line: 140, arg: 1, scope: !4, file: !1, type: !8)
    58  !14 = !DILocalVariable(name: "", line: 142, scope: !4, file: !1, type: !15)
    61  !18 = !DILocalVariable(name: "", line: 142, scope: !4, file: !1, type: !15)
    62  !19 = !DILocalVariable(name: "", line: 142, scope: !4, file: !1, type: !15)
    63  !20 = !DILocalVariable(name: "", line: 142, scope: !4, file: !1, type: !15)
    64  !21 = !DILocalVariable(name: "", line: 142, scope: !4, file: !1, type: !15)
    65  !22 = !DILocalVariable(name: "", line: 142, scope: !4, file: !1, type: !15)
    66  !23 = !DILocalVariable(name: "", line: 142, scope: !4, file: !1, type: !15)
    67  !24 = !DILocalVariable(name: "", line: 142, scope: !4, file: !1, type: !15)
    [all …]
/external/llvm/test/Transforms/SampleProfile/

propagate.ll
    219  !6 = distinct !DISubprogram(name: "foo", linkageName: "_Z3fooiil", scope: !1, file: !1, line: 3, ty…
    224  !11 = !DILocalVariable(name: "x", arg: 1, scope: !6, file: !1, line: 3, type: !10)
    226  !13 = !DILocation(line: 3, column: 14, scope: !6)
    227  !14 = !DILocalVariable(name: "y", arg: 2, scope: !6, file: !1, line: 3, type: !10)
    228  !15 = !DILocation(line: 3, column: 21, scope: !6)
    229  !16 = !DILocalVariable(name: "N", arg: 3, scope: !6, file: !1, line: 3, type: !9)
    230  !17 = !DILocation(line: 3, column: 29, scope: !6)
    231  !18 = !DILocation(line: 4, column: 7, scope: !19)
    232  !19 = distinct !DILexicalBlock(scope: !6, file: !1, line: 4, column: 7)
    233  !20 = !DILocation(line: 4, column: 11, scope: !19)
    [all …]
gcc-simple.ll
    151  !4 = distinct !DISubprogram(name: "foo", linkageName: "_Z3fool", scope: !1, file: !1, line: 3, type…
    156  !9 = distinct !DISubprogram(name: "main", scope: !1, file: !1, line: 7, type: !10, isLocal: false, …
    163  !16 = !DILocalVariable(name: "i", arg: 1, scope: !4, file: !1, line: 3, type: !8)
    165  !18 = !DILocation(line: 3, column: 24, scope: !4)
    166  !19 = !DILocation(line: 4, column: 7, scope: !20)
    167  !20 = distinct !DILexicalBlock(scope: !4, file: !1, line: 4, column: 7)
    168  !21 = !DILocation(line: 4, column: 14, scope: !20)
    169  !22 = !DILocation(line: 4, column: 7, scope: !4)
    170  !23 = !DILocation(line: 4, column: 21, scope: !24)
    171  !24 = !DILexicalBlockFile(scope: !20, file: !1, discriminator: 1)
    [all …]