/external/tensorflow/tensorflow/compiler/jit/tests/ |
D | opens2s_gnmt_mixed_precision.pbtxt.gz |
|
D | keras_imagenet_main_graph_mode.pbtxt | 8034 name: "training/SGD/gradients/loss_1/conv1/kernel/Regularizer/Square_grad/Mul" 8101 name: "training/SGD/gradients/loss_1/fc1000/bias/Regularizer/Square_grad/Mul" 8162 name: "training/SGD/gradients/loss_1/fc1000/kernel/Regularizer/Square_grad/Mul" 8274 name: "training/SGD/gradients/loss_1/res2a_branch2a/kernel/Regularizer/Square_grad/Mul" 8560 name: "training/SGD/gradients/loss_1/res2c_branch2a/kernel/Regularizer/Square_grad/Mul" 8636 name: "training/SGD/gradients/loss_1/res2c_branch2b/kernel/Regularizer/Square_grad/Mul" 8712 name: "training/SGD/gradients/loss_1/res2c_branch2c/kernel/Regularizer/Square_grad/Mul" 8788 name: "training/SGD/gradients/loss_1/res3a_branch1/kernel/Regularizer/Square_grad/Mul" 8864 name: "training/SGD/gradients/loss_1/res3a_branch2a/kernel/Regularizer/Square_grad/Mul" 9276 name: "training/SGD/gradients/loss_1/res3d_branch2a/kernel/Regularizer/Square_grad/Mul" [all …]
|
/external/skqp/tools/lua/ |
D | gradients.lua | 24 gradients = {} 35 gradients[i] = {} 37 gradients[i].filename = filename 47 gradients[i].boundsWidth = width 48 gradients[i].boundsHeight = height 50 gradients[i].colorCount = g.colorCount 51 gradients[i].type = g.type 52 gradients[i].tile = g.tile 60 gradients[i].isEvenlySpaced = isEvenlySpaced 68 gradients[i].numHardStops = numHardStops [all …]
|
/external/skia/gn/ |
D | effects.gni | 52 "$_src/shaders/gradients/Sk4fGradientBase.cpp", 53 "$_src/shaders/gradients/Sk4fGradientBase.h", 54 "$_src/shaders/gradients/Sk4fGradientPriv.h", 55 "$_src/shaders/gradients/Sk4fLinearGradient.cpp", 56 "$_src/shaders/gradients/Sk4fLinearGradient.h", 57 "$_src/shaders/gradients/SkGradientShader.cpp", 58 "$_src/shaders/gradients/SkGradientShaderPriv.h", 59 "$_src/shaders/gradients/SkLinearGradient.cpp", 60 "$_src/shaders/gradients/SkLinearGradient.h", 61 "$_src/shaders/gradients/SkRadialGradient.cpp", [all …]
|
/external/tensorflow/tensorflow/python/ops/ |
D | gradients_test.py | 46 from tensorflow.python.ops import gradients 75 w_grad = gradients.gradients(h, w)[0] 88 gw = gradients.gradients(c, [w])[0] 97 gw = gradients.gradients(wx, [w], colocate_gradients_with_ops=True)[0] 111 gw1 = gradients.gradients(z, [w], colocate_gradients_with_ops=True)[0] 114 gw2 = gradients.gradients(z, [w], colocate_gradients_with_ops=False)[0] 130 gw1 = gradients.gradients(z, [w], colocate_gradients_with_ops=True)[0] 133 gw2 = gradients.gradients(z, [w], colocate_gradients_with_ops=False)[0] 147 gz_x = gradients.gradients(z, [x], colocate_gradients_with_ops=True, 162 grads = gradients.gradients(z, [x]) [all …]
|
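All of the matches above exercise tf.gradients, the graph-mode symbolic gradient builder. A minimal sketch of the pattern the test repeats, written against the public tf.compat.v1 API; the variable names and shapes here are illustrative, not taken from the test:

    import tensorflow as tf

    tf.compat.v1.disable_eager_execution()  # graph mode, as in gradients_test.py

    w = tf.compat.v1.get_variable("w", shape=[3, 3], dtype=tf.float32)
    x = tf.compat.v1.placeholder(tf.float32, shape=[3, 3], name="x")
    z = tf.reduce_sum(tf.matmul(w, x))

    # Symbolic gradient of z with respect to w. colocate_gradients_with_ops
    # asks the builder to place each gradient op on the same device as the
    # forward op it differentiates, which is the flag the test toggles.
    gw = tf.compat.v1.gradients(z, [w], colocate_gradients_with_ops=True)[0]

    with tf.compat.v1.Session() as sess:
        sess.run(tf.compat.v1.global_variables_initializer())
        print(sess.run(gw, feed_dict={x: [[1.0] * 3] * 3}))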
/external/skqp/gn/ |
D | effects.gni | 37 "$_src/shaders/gradients/Sk4fGradientBase.cpp", 38 "$_src/shaders/gradients/Sk4fGradientBase.h", 39 "$_src/shaders/gradients/Sk4fGradientPriv.h", 40 "$_src/shaders/gradients/Sk4fLinearGradient.cpp", 41 "$_src/shaders/gradients/Sk4fLinearGradient.h", 42 "$_src/shaders/gradients/SkGradientShader.cpp", 43 "$_src/shaders/gradients/SkGradientShaderPriv.h", 44 "$_src/shaders/gradients/SkLinearGradient.cpp", 45 "$_src/shaders/gradients/SkLinearGradient.h", 46 "$_src/shaders/gradients/SkRadialGradient.cpp", [all …]
|
D | sksl.gni | 48 "$_src/gpu/gradients/GrDualIntervalGradientColorizer.fp", 49 "$_src/gpu/gradients/GrSingleIntervalGradientColorizer.fp", 50 "$_src/gpu/gradients/GrTextureGradientColorizer.fp", 51 "$_src/gpu/gradients/GrUnrolledBinaryGradientColorizer.fp", 52 "$_src/gpu/gradients/GrLinearGradientLayout.fp", 53 "$_src/gpu/gradients/GrRadialGradientLayout.fp", 54 "$_src/gpu/gradients/GrSweepGradientLayout.fp", 55 "$_src/gpu/gradients/GrTwoPointConicalGradientLayout.fp", 56 "$_src/gpu/gradients/GrClampedGradientEffect.fp", 57 "$_src/gpu/gradients/GrTiledGradientEffect.fp",
|
/external/tensorflow/tensorflow/core/api_def/base_api/ |
D | api_def_FakeQuantWithMinMaxVarsPerChannelGradient.pbtxt | 4 name: "gradients" 6 Backpropagated gradients above the FakeQuantWithMinMaxVars operation, 14 same as `gradients`. 21 Backpropagated gradients w.r.t. inputs, shape same as 23 `gradients * (inputs >= min && inputs <= max)`. 29 Backpropagated gradients w.r.t. min parameter, shape `[d]`: 30 `sum_per_d(gradients * (inputs < min))`. 36 Backpropagated gradients w.r.t. max parameter, shape `[d]`: 37 `sum_per_d(gradients * (inputs > max))`. 52 summary: "Compute gradients for a FakeQuantWithMinMaxVarsPerChannel operation."
|
D | api_def_FakeQuantWithMinMaxVarsGradient.pbtxt | 4 name: "gradients" 6 Backpropagated gradients above the FakeQuantWithMinMaxVars operation. 19 Backpropagated gradients w.r.t. inputs: 20 `gradients * (inputs >= min && inputs <= max)`. 26 Backpropagated gradients w.r.t. min parameter: 27 `sum(gradients * (inputs < min))`. 33 Backpropagated gradients w.r.t. max parameter: 34 `sum(gradients * (inputs > max))`. 49 summary: "Compute gradients for a FakeQuantWithMinMaxVars operation."
|
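The gradient formulas quoted in these two api_defs are simple masked products and reductions. A numpy sketch of the per-tensor variant with made-up values (the per-channel op computes the same min/max sums per channel d instead of over the whole tensor):

    import numpy as np

    gradients = np.array([0.1, 0.2, 0.3, 0.4], dtype=np.float32)
    inputs    = np.array([-1.0, 0.2, 0.8, 2.0], dtype=np.float32)
    min_, max_ = 0.0, 1.0

    # w.r.t. inputs: pass the gradient only where the input fell inside
    # the [min, max] quantization range.
    d_inputs = gradients * ((inputs >= min_) & (inputs <= max_))

    # w.r.t. the range endpoints: gradients of clamped inputs are
    # attributed to min (below the range) or max (above the range).
    d_min = np.sum(gradients * (inputs < min_))
    d_max = np.sum(gradients * (inputs > max_))

    print(d_inputs, d_min, d_max)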
D | api_def_EluGrad.pbtxt | 5 name: "gradients" 7 The backpropagated gradients to the corresponding Elu operation. 19 The gradients: `gradients * (outputs + 1)` if outputs < 0, 20 `gradients` otherwise. 23 summary: "Computes gradients for the exponential linear (Elu) operation."
|
D | api_def_SeluGrad.pbtxt | 5 name: "gradients" 7 The backpropagated gradients to the corresponding Selu operation. 19 The gradients: `gradients * (outputs + scale * alpha)` 20 if outputs < 0, `scale * gradients` otherwise. 23 summary: "Computes gradients for the scaled exponential linear (Selu) operation."
|
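Both formulas can be evaluated directly from the forward outputs, which is why these grad ops take outputs rather than the original inputs. A numpy sketch with illustrative values (scale and alpha are the published SELU constants, not read from the op):

    import numpy as np

    gradients = np.array([1.0, 1.0, 1.0], dtype=np.float32)
    outputs   = np.array([-0.5, 0.0, 2.0], dtype=np.float32)  # forward Elu/Selu outputs

    # EluGrad: gradients * (outputs + 1) where outputs < 0, gradients otherwise.
    elu_grad = np.where(outputs < 0, gradients * (outputs + 1.0), gradients)

    # SeluGrad: gradients * (outputs + scale * alpha) where outputs < 0,
    # scale * gradients otherwise.
    scale, alpha = 1.0507009873554805, 1.6732632423543772
    selu_grad = np.where(outputs < 0, gradients * (outputs + scale * alpha),
                         scale * gradients)

    print(elu_grad, selu_grad)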
D | api_def_SparseAccumulatorTakeGradient.pbtxt | 12 Number of gradients required before we return an aggregate. 18 Indices of the average of the accumulated sparse gradients. 24 Values of the average of the accumulated sparse gradients. 30 Shape of the average of the accumulated sparse gradients. 36 The data type of accumulated gradients. Needs to correspond to the type 43 gradients have been accumulated. If the accumulator has already 44 aggregated more than num_required gradients, it will return its 45 average of the accumulated gradients. Also automatically increments
|
D | api_def_FakeQuantWithMinMaxArgsGradient.pbtxt | 4 name: "gradients" 6 Backpropagated gradients above the FakeQuantWithMinMaxArgs operation. 18 Backpropagated gradients below the FakeQuantWithMinMaxArgs operation: 19 `gradients * (inputs >= min && inputs <= max)`. 22 summary: "Compute gradients for a FakeQuantWithMinMaxArgs operation."
|
D | api_def_SoftsignGrad.pbtxt | 5 name: "gradients" 7 The backpropagated gradients to the corresponding softsign operation. 19 The gradients: `gradients / (1 + abs(features)) ** 2`. 22 summary: "Computes softsign gradients for a softsign operation."
|
D | api_def_SoftplusGrad.pbtxt | 5 name: "gradients" 7 The backpropagated gradients to the corresponding softplus operation. 19 The gradients: `gradients / (1 + exp(-features))`. 22 summary: "Computes softplus gradients for a softplus operation."
|
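Both formulas translate directly to array code; the softplus gradient is just the logistic sigmoid of the forward input times the incoming gradient. A numpy sketch with arbitrary example values:

    import numpy as np

    gradients = np.array([1.0, 2.0, 3.0], dtype=np.float32)
    features  = np.array([-2.0, 0.0, 4.0], dtype=np.float32)  # inputs to the forward op

    # SoftsignGrad: gradients / (1 + abs(features)) ** 2
    softsign_grad = gradients / (1.0 + np.abs(features)) ** 2

    # SoftplusGrad: gradients / (1 + exp(-features)) == gradients * sigmoid(features)
    softplus_grad = gradients / (1.0 + np.exp(-features))

    print(softsign_grad, softplus_grad)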
D | api_def_ResourceAccumulatorTakeGradient.pbtxt | 13 Number of gradients required before we return an aggregate. 19 The average of the accumulated gradients. 25 The data type of accumulated gradients. Needs to correspond to the type 32 gradients have been accumulated. If the accumulator has already 33 aggregated more than num_required gradients, it returns the average of 34 the accumulated gradients. Also automatically increments the recorded
|
D | api_def_AccumulatorTakeGradient.pbtxt | 12 Number of gradients required before we return an aggregate. 18 The average of the accumulated gradients. 24 The data type of accumulated gradients. Needs to correspond to the type 31 gradients have been accumulated. If the accumulator has already 32 aggregated more than num_required gradients, it returns the average of 33 the accumulated gradients. Also automatically increments the recorded
|
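These accumulator ops are normally driven through the tf.compat.v1.ConditionalAccumulator wrapper rather than called directly; the sparse variant listed above (api_def_SparseAccumulatorTakeGradient.pbtxt) behaves analogously but returns indices, values and shape. A minimal graph-mode sketch of the take-gradient behaviour described in the summaries:

    import tensorflow as tf

    tf.compat.v1.disable_eager_execution()

    # Accumulates float32 gradients of shape [2]. take_grad blocks until at
    # least num_required gradients have been applied, then returns their
    # average and increments the accumulator's recorded global step.
    accum = tf.compat.v1.ConditionalAccumulator(dtype=tf.float32, shape=[2])
    apply_1 = accum.apply_grad([1.0, 2.0], local_step=0)
    apply_2 = accum.apply_grad([3.0, 4.0], local_step=0)
    take = accum.take_grad(num_required=2)

    with tf.compat.v1.Session() as sess:
        sess.run([apply_1, apply_2])
        print(sess.run(take))  # [2.0, 3.0], the element-wise average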
D | api_def_LeakyReluGrad.pbtxt | 5 name: "gradients" 7 The backpropagated gradients to the corresponding LeakyRelu operation. 20 `gradients * (features > 0) + alpha * gradients * (features <= 0)`. 23 summary: "Computes rectified linear gradients for a LeakyRelu operation."
|
D | api_def_Relu6Grad.pbtxt | 5 name: "gradients" 7 The backpropagated gradients to the corresponding Relu6 operation. 20 The gradients: 21 `gradients * (features > 0) * (features < 6)`. 24 summary: "Computes rectified linear 6 gradients for a Relu6 operation."
|
D | api_def_ReluGrad.pbtxt | 5 name: "gradients" 7 The backpropagated gradients to the corresponding Relu operation. 20 `gradients * (features > 0)`. 23 summary: "Computes rectified linear gradients for a Relu operation."
|
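The three ReLU-family gradients quoted above share one masking pattern over the forward inputs. A numpy sketch with illustrative values (0.2 is TensorFlow's default LeakyRelu alpha; the real op reads it from an attribute):

    import numpy as np

    gradients = np.array([1.0, 1.0, 1.0, 1.0], dtype=np.float32)
    features  = np.array([-2.0, 0.5, 3.0, 7.0], dtype=np.float32)  # forward inputs
    alpha = 0.2

    # ReluGrad: pass the gradient only where the input was positive.
    relu_grad = gradients * (features > 0)

    # Relu6Grad: also zero the gradient where the input was clipped at 6.
    relu6_grad = gradients * (features > 0) * (features < 6)

    # LeakyReluGrad: scale the gradient by alpha on the non-positive side.
    leaky_grad = gradients * (features > 0) + alpha * gradients * (features <= 0)

    print(relu_grad, relu6_grad, leaky_grad)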
/external/tensorflow/tensorflow/python/ops/parallel_for/ |
D | gradients_test.py | 37 from tensorflow.python.ops import gradients as gradient_ops 46 from tensorflow.python.ops.parallel_for import gradients 106 pfor_jacobian = gradients.batch_jacobian(output, inp, use_pfor=True) 107 while_jacobian = gradients.batch_jacobian(output, inp, use_pfor=False) 114 pfor_jacobian = gradients.batch_jacobian(output, inp, use_pfor=True) 115 while_jacobian = gradients.batch_jacobian(output, inp, use_pfor=False) 122 pfor_jacobian = gradients.batch_jacobian(final_state.c, inp, use_pfor=True) 127 gradient_ops.gradients(array_ops.gather(final_state.c, i, axis=1), inp)[0] 135 pfor_jacobian = gradients.batch_jacobian(output, inp, use_pfor=True) 137 pfor_hessian = gradients.batch_jacobian(pfor_jacobian, inp, use_pfor=True) [all …]
|
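batch_jacobian computes one Jacobian per batch element, and use_pfor switches between the vectorized (pfor) and while_loop implementations that the test compares. The same computation is exposed publicly on tf.GradientTape; a sketch with made-up shapes:

    import tensorflow as tf

    x = tf.random.normal([8, 3])       # batch of 8 inputs of size 3
    w = tf.random.normal([3, 5])

    with tf.GradientTape() as tape:
        tape.watch(x)
        y = tf.tanh(tf.matmul(x, w))   # batch of 8 outputs of size 5

    # One 5x3 Jacobian per batch element; experimental_use_pfor=True (the
    # default) is the vectorized path corresponding to use_pfor=True above.
    jac = tape.batch_jacobian(y, x, experimental_use_pfor=True)
    print(jac.shape)                   # (8, 5, 3)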
/external/tensorflow/tensorflow/core/kernels/ |
D | relu_op_functor.h | 49 void operator()(const Device& d, typename TTypes<T>::ConstTensor gradients, in operator() 56 gradients * (features > static_cast<T>(0)).template cast<T>(); in operator() 83 void operator()(const Device& d, typename TTypes<T>::ConstTensor gradients, in operator() 90 backprops.device(d) = gradients * ((features > static_cast<T>(0)) * in operator() 130 void operator()(const Device& d, typename TTypes<T>::ConstTensor gradients, in operator() 134 (features > static_cast<T>(0)).select(gradients, gradients * alpha); in operator() 163 void operator()(const Device& d, typename TTypes<T>::ConstTensor gradients, in operator() 168 .select((activations + static_cast<T>(1)) * gradients, gradients); in operator() 201 void operator()(const Device& d, typename TTypes<T>::ConstTensor gradients, in operator() 208 .select(gradients * (activations + scale_alpha), gradients * scale); in operator()
|
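These Eigen expressions are the kernels behind the *Grad ops listed in the api_defs above, and they can be spot-checked from Python through tf.raw_ops. A small sketch, assuming an eager TensorFlow 2 build:

    import numpy as np
    import tensorflow as tf

    g = tf.constant([1.0, 1.0, 1.0])
    x = tf.constant([-2.0, 0.5, 3.0])   # "features" (forward inputs)
    elu_out = tf.nn.elu(x)              # "activations" (forward outputs)

    relu_grad = tf.raw_ops.ReluGrad(gradients=g, features=x)
    elu_grad = tf.raw_ops.EluGrad(gradients=g, outputs=elu_out)

    # Same math as the functors: mask by (features > 0) for Relu, and
    # (activations + 1) * gradients on the negative side for Elu.
    np.testing.assert_allclose(relu_grad.numpy(), g.numpy() * (x.numpy() > 0))
    np.testing.assert_allclose(
        elu_grad.numpy(),
        np.where(elu_out.numpy() < 0, (elu_out.numpy() + 1.0) * g.numpy(), g.numpy()),
        rtol=1e-6)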
/external/tensorflow/tensorflow/cc/ |
D | BUILD | 29 "framework/gradients.h", 40 "framework/gradients.h", 61 name = "gradients", 63 "framework/gradients.cc", 67 hdrs = ["framework/gradients.h"], 91 ":gradients", 112 ":gradients", 132 ":gradients", 174 srcs = ["gradients/grad_testutil.cc"], 175 hdrs = ["gradients/grad_testutil.h"], [all …]
|
/external/tensorflow/tensorflow/python/compiler/xla/ |
D | jit_test.py | 30 from tensorflow.python.ops import gradients 196 x_grads = gradients.gradients([y_c], [x])[0] 227 grad_a1 = gradients.gradients(a1t, a1, name="GA")[0] 228 grad_a2 = gradients.gradients(a2t, a2, name="GB")[0] 250 grad_a1 = gradients.gradients(a1t, a1, name="GA")[0] 251 grad_a2 = gradients.gradients(a2t, a2, name="GB")[0] 270 g_r = gradients.gradients(r, x, name="GA")[0] 299 g_r = gradients.gradients(r, x, name="GA")[0]
|
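jit_test.py builds gradients of XLA-jit-scoped subgraphs and checks that the gradient ops pick up the compilation markers. A minimal graph-mode sketch of that pattern using the public tf.xla.experimental.jit_scope; names and shapes are illustrative, and whether the cluster is actually compiled depends on the build:

    import tensorflow as tf

    tf.compat.v1.disable_eager_execution()

    x = tf.compat.v1.placeholder(tf.float32, shape=[2], name="x")
    with tf.xla.experimental.jit_scope():   # mark this subgraph for XLA
        y = tf.sin(x) * tf.sin(x)

    # Gradient ops for the jit-scoped forward ops are created here; the test
    # asserts they inherit the XLA compilation attribute.
    g_r = tf.compat.v1.gradients([y], [x], name="GA")[0]

    with tf.compat.v1.Session() as sess:
        print(sess.run(g_r, feed_dict={x: [0.0, 1.0]}))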
/external/tensorflow/tensorflow/compiler/mlir/tensorflow/tests/graphdef2mlir/ |
D | graph-gradient-def.pbtxt | 47 name: "gradients/Shape" 66 name: "gradients/grad_ys_0" 89 name: "gradients/Fill" 91 input: "gradients/Shape" 92 input: "gradients/grad_ys_0" 109 name: "gradients/foo_grad/SymbolicGradient" 112 input: "gradients/Fill"
|
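The node names in that graphdef (gradients/Shape, gradients/grad_ys_0, gradients/Fill, gradients/foo_grad/SymbolicGradient) are what tf.gradients emits under its default "gradients" name scope; the SymbolicGradient node appears because the test differentiates a function call (foo). A small sketch showing the same naming pattern for an ordinary op:

    import tensorflow as tf

    tf.compat.v1.disable_eager_execution()

    g = tf.Graph()
    with g.as_default():
        x = tf.compat.v1.placeholder(tf.float32, shape=[2, 2], name="x")
        y = tf.exp(x, name="foo")
        tf.compat.v1.gradients([y], [x])   # default name scope is "gradients"

    # Gradient ops land under the "gradients/" prefix, e.g. gradients/Shape
    # and gradients/Fill for the seeded grad_ys, plus gradients/foo_grad/...
    print([op.name for op in g.get_operations() if op.name.startswith("gradients/")])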