
Searched refs:grads (Results 1 – 25 of 155) sorted by relevance

/external/tensorflow/tensorflow/python/ops/
gradients_test.py
161 grads = gradients.gradients(z, [x])
162 self.assertTrue(all(x is not None for x in grads))
172 grads = gradients.gradients(z, [x, y])
173 self.assertTrue(all(x is not None for x in grads))
174 self.assertEqual(6.0, grads[0].eval())
182 grads = gradients.gradients(
186 self.assertTrue(all(x is not None for x in grads))
187 self.assertEqual(20.0, grads[0].eval())
188 self.assertEqual(10.0, grads[1].eval())
196 grads = gradients.gradients(
[all …]
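The matches above exercise gradients.gradients, the symbolic API exposed publicly as tf.gradients. As a rough sketch (values are illustrative, not the test's), the same call pattern in TensorFlow 2.x looks like this; tf.gradients is only valid in a graph context, hence the tf.function wrapper:

import tensorflow as tf

@tf.function
def grads_of_product():
    x = tf.constant(2.0)
    y = tf.constant(3.0)
    z = x * y + x
    # One gradient per requested source, in order, as the test asserts.
    dz_dx, dz_dy = tf.gradients(z, [x, y])
    return dz_dx, dz_dy  # dz/dx = y + 1 = 4.0, dz/dy = x = 2.0

print(grads_of_product())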
cudnn_rnn_grad.py
21 def _cudnn_rnn_backward(op, *grads): argument
34 output_backprop=grads[0],
35 output_h_backprop=grads[1],
36 output_c_backprop=grads[2],
73 def _cudnn_rnn_backwardv3(op, *grads): argument
87 output_backprop=grads[0],
88 output_h_backprop=grads[1],
89 output_c_backprop=grads[2],
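_cudnn_rnn_backward receives one upstream gradient per forward output (output, output_h, output_c) through *grads. A minimal sketch of the same convention using the public tf.custom_gradient decorator (the op and numbers here are made up for illustration):

import tensorflow as tf

@tf.custom_gradient
def split_and_scale(x):
    # Two forward outputs, so the grad function is handed two upstream grads,
    # just as the CuDNN RNN backward unpacks grads[0], grads[1], grads[2].
    y0, y1 = 2.0 * x, 3.0 * x

    def grad(dy0, dy1):
        return 2.0 * dy0 + 3.0 * dy1  # one gradient per forward input

    return (y0, y1), grad

x = tf.constant(1.0)
with tf.GradientTape() as tape:
    tape.watch(x)
    a, b = split_and_scale(x)
    loss = a + b
print(tape.gradient(loss, x))  # 5.0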
gradients_util.py
227 def _VerifyGeneratedGradients(grads, op): argument
243 if len(grads) != len(op.inputs):
564 grads = {}
568 _SetGrad(grads, y, grad_y)
586 _SetGrad(grads, y, loop_state.ZerosLikeForExit(y))
596 out_grads = _AggregatedGrads(grads, op, gradient_uid, loop_state,
732 _SetGrad(grads, t_in, in_grad)
737 _UpdatePendingAndEnqueueReady(grads, op, queue, pending_count, loop_state,
742 return [_GetGrad(grads, x, unconnected_gradients) for x in xs]
745 def _HasAnyNotNoneGrads(grads, op): argument
[all …]
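gradients_util.py is the bookkeeping behind tf.gradients: _SetGrad/_GetGrad accumulate per-tensor gradients, and sources the backward pass never reaches are resolved according to unconnected_gradients. The same knob is visible on the public GradientTape API; a small sketch, assuming TF 2.x eager:

import tensorflow as tf

x = tf.Variable(1.0)
unused = tf.Variable(2.0)
with tf.GradientTape(persistent=True) as tape:
    y = 3.0 * x  # `unused` never contributes to y

# Default: unreached sources come back as None.
print(tape.gradient(y, [x, unused]))
# Requesting zeros instead mirrors the unconnected_gradients handling above.
print(tape.gradient(y, [x, unused],
                    unconnected_gradients=tf.UnconnectedGradients.ZERO))
del tape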
/external/tensorflow/tensorflow/java/src/test/java/org/tensorflow/op/core/
GradientsTest.java
47 Gradients grads = Gradients.create(scope, y1, Arrays.asList(x, y0)); in createGradients() local
49 assertNotNull(grads); in createGradients()
50 assertNotNull(grads.dy()); in createGradients()
51 assertEquals(2, grads.dy().size()); in createGradients()
56 sess.runner().feed(x, c).fetch(grads.dy(0)).fetch(grads.dy(1)).run())) { in createGradients()
74 Gradients grads = Gradients.create(scope, Arrays.asList(y0, y1), Arrays.asList(x)); in createGradientsWithSum() local
76 assertNotNull(grads); in createGradientsWithSum()
77 assertNotNull(grads.dy()); in createGradientsWithSum()
78 assertEquals(1, grads.dy().size()); in createGradientsWithSum()
82 new TestUtil.AutoCloseableList<>(sess.runner().feed(x, c).fetch(grads.dy(0)).run())) { in createGradientsWithSum()
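createGradientsWithSum checks that asking for the gradient of several ys with respect to one x yields a single, summed dy. The Python analogue (a sketch, not the Java test itself) shows the same summation with tf.gradients:

import tensorflow as tf

@tf.function
def summed_grad():
    x = tf.constant(3.0)
    y0 = tf.square(x)        # dy0/dx = 2x   = 6
    y1 = tf.square(y0)       # dy1/dx = 4x^3 = 108
    # Several targets against one source: contributions are summed,
    # so a single gradient tensor comes back.
    return tf.gradients([y0, y1], [x])

print(summed_grad())  # [114.0]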
/external/tensorflow/tensorflow/python/debug/lib/
debug_gradients_test.py
59 grads = gradients_impl.gradients(y, [self.u, self.v])
60 self.assertEqual(2, len(grads))
61 u_grad = grads[0]
62 v_grad = grads[1]
90 grads = gradients_impl.gradients(y, [self.u, self.v])
91 self.assertEqual(2, len(grads))
92 u_grad = grads[0]
93 v_grad = grads[1]
201 grads = gradients_impl.gradients(y, [self.u, self.v])
202 self.assertEqual(2, len(grads))
[all …]
/external/tensorflow/tensorflow/core/kernels/image/
crop_and_resize_op.cc
366 const Tensor& grads = context->input(0); in ComputeAsync() local
375 OP_REQUIRES_ASYNC(context, grads.dims() == 4, in ComputeAsync()
377 grads.shape().DebugString()), in ComputeAsync()
379 const int crop_height = grads.dim_size(1); in ComputeAsync()
380 const int crop_width = grads.dim_size(2); in ComputeAsync()
388 context, grads.dim_size(0) == num_boxes, in ComputeAsync()
409 context, grads.dim_size(3) == depth, in ComputeAsync()
432 const Tensor& grads = context->input(0); in ComputeAsync() local
436 context, grads.tensor<float, 4>(), boxes.tensor<float, 2>(), in ComputeAsync()
459 typename TTypes<float, 4>::ConstTensor grads, in operator ()()
[all …]
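This kernel consumes the incoming grads tensor of shape [num_boxes, crop_height, crop_width, depth] and produces the gradient with respect to the input image. From Python it is reached simply by differentiating tf.image.crop_and_resize; a sketch with made-up shapes:

import tensorflow as tf

image = tf.random.normal([1, 8, 8, 3])          # [batch, height, width, depth]
boxes = tf.constant([[0.1, 0.1, 0.9, 0.9]])
box_indices = tf.constant([0])

with tf.GradientTape() as tape:
    tape.watch(image)
    crops = tf.image.crop_and_resize(image, boxes, box_indices, crop_size=[4, 4])
    loss = tf.reduce_sum(crops)

# Backprop hands the [num_boxes, 4, 4, 3] upstream gradient to the kernel
# checked in ComputeAsync(); the result has the original image shape.
print(tape.gradient(loss, image).shape)  # (1, 8, 8, 3)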
crop_and_resize_op_gpu.cu.cc
401 typename TTypes<float, 4>::ConstTensor grads, in operator ()()
410 const int num_boxes = grads.dimension(0); in operator ()()
411 const int crop_height = grads.dimension(1); in operator ()()
412 const int crop_width = grads.dimension(2); in operator ()()
413 const int depth = grads.dimension(3); in operator ()()
441 grads.data(), boxes.data(), box_ind.data(), num_boxes, batch, in operator ()()
452 typename TTypes<float, 4>::ConstTensor grads, in operator ()()
461 const int num_boxes = grads.dimension(0); in operator ()()
462 const int crop_height = grads.dimension(1); in operator ()()
463 const int crop_width = grads.dimension(2); in operator ()()
[all …]
/external/tensorflow/tensorflow/python/training/experimental/
loss_scale.py
85 def update(self, grads): argument
246 def update(self, grads): argument
247 del grads
257 def _is_all_finite(grads): argument
260 math_ops.reduce_all(math_ops.is_finite(g)) for g in grads if g is not None
365 def update(self, grads): argument
367 grads = nest.flatten(grads)
371 def get_is_finite(grads): argument
372 is_finite = _is_all_finite(grads)
378 get_is_finite, args=(grads,))
[all …]
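_is_all_finite is the heart of dynamic loss scaling: the scale is only grown when every gradient is finite. A rough standalone equivalent using public ops (the helper name here is my own):

import tensorflow as tf

def all_grads_finite(grads):
    # True iff every non-None gradient contains only finite values.
    checks = [tf.reduce_all(tf.math.is_finite(g)) for g in grads if g is not None]
    return tf.reduce_all(tf.stack(checks))

grads = [tf.constant([1.0, 2.0]), tf.constant([float("inf")]), None]
print(all_grads_finite(grads))  # False -> a dynamic loss scale would shrink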
loss_scale_optimizer.py
123 grads = [g for g, _ in grads_and_vars]
125 unscaled_grads = self._unscale_grads(grads)
138 def _unscale_grads(self, grads): argument
143 for g in grads
213 grads = [g for g, _ in grads_and_vars]
214 loss_scale_update_op, should_apply_grads = (self._loss_scale.update(grads))
/external/tensorflow/tensorflow/python/keras/mixed_precision/
loss_scale_optimizer.py
64 def _is_all_finite(grads): argument
67 math_ops.reduce_all(math_ops.is_finite(g)) for g in grads if g is not None
212 def update(self, grads): argument
226 grads = nest.flatten(grads)
231 _is_all_finite, args=(grads,))
238 is_finite = _is_all_finite(grads)
542 def get_unscaled_gradients(self, grads): argument
566 for g in grads
578 grads = [g for g, _ in grads_and_vars]
580 unscaled_grads = self.get_unscaled_gradients(grads)
[all …]
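get_unscaled_gradients divides the loss scale back out of gradients computed from a scaled loss. A sketch of the documented TF2 workflow, assuming the tf.keras.mixed_precision.LossScaleOptimizer API this module implements:

import tensorflow as tf

var = tf.Variable(1.0)
opt = tf.keras.mixed_precision.LossScaleOptimizer(tf.keras.optimizers.SGD(1.0))

with tf.GradientTape() as tape:
    loss = var * var
    scaled_loss = opt.get_scaled_loss(loss)        # multiply by the loss scale

scaled_grads = tape.gradient(scaled_loss, [var])
grads = opt.get_unscaled_gradients(scaled_grads)   # divide the scale back out
opt.apply_gradients(zip(grads, [var]))
print(var.numpy())  # 1.0 - 1.0 * 2.0 = -1.0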
/external/tensorflow/tensorflow/python/keras/
optimizer_v1.py
94 grads = backend.gradients(loss, params)
95 if any(g is None for g in grads):
102 grads = [clip_ops.clip_by_norm(g, self.clipnorm) for g in grads]
104 grads = [
106 for g in grads
108 return grads
192 grads = self.get_gradients(loss, params)
204 for p, g, m in zip(params, grads, moments):
266 grads = self.get_gradients(loss, params)
278 for p, g, a in zip(params, grads, accumulators):
[all …]
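optimizer_v1.get_gradients applies optional per-tensor clipping right after backend.gradients. A standalone sketch of that clipping logic with public ops (the function name is mine):

import tensorflow as tf

def clip_gradients(grads, clipnorm=None, clipvalue=None):
    # Mirrors get_gradients: clip each gradient tensor independently.
    if clipnorm is not None:
        grads = [tf.clip_by_norm(g, clipnorm) for g in grads]
    if clipvalue is not None:
        grads = [tf.clip_by_value(g, -clipvalue, clipvalue) for g in grads]
    return grads

grads = [tf.constant([3.0, 4.0])]            # L2 norm 5
print(clip_gradients(grads, clipnorm=1.0))   # rescaled to norm 1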
/external/tensorflow/tensorflow/python/distribute/
custom_training_loop_gradient_test.py
79 grads = tape.gradient(y, x)
80 return grads
106 grads = tape.gradient(y, x)
107 return grads
141 grads = distribution.experimental_local_results(train_step())
142 self.assertLen(grads, distribution.num_replicas_in_sync)
143 self.assertTrue(all(g is not None for g in grads))
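The test asserts one local gradient per replica after strategy.run. A minimal sketch of that pattern, assuming a MirroredStrategy (on a CPU-only machine it has a single replica):

import tensorflow as tf

strategy = tf.distribute.MirroredStrategy()

@tf.function
def train_step():
    def step_fn():
        x = tf.constant(2.0)
        with tf.GradientTape() as tape:
            tape.watch(x)
            y = x * x
        return tape.gradient(y, x)
    return strategy.run(step_fn)

grads = strategy.experimental_local_results(train_step())
print(len(grads) == strategy.num_replicas_in_sync)  # True, one grad per replica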
/external/tensorflow/tensorflow/core/common_runtime/
gradients.cc
102 static Node* AddSymGrad(Graph* g, Node* n, gtl::ArraySlice<NodeOut> grads) { in AddSymGrad() argument
105 CHECK_EQ(num_y, grads.size()); in AddSymGrad()
122 for (const NodeOut& nout : grads) { in AddSymGrad()
234 auto* grads = &iter->second; in BackpropAlongEdge() local
235 grads->push_back(dst_grad); in BackpropAlongEdge()
307 const auto& grads = iter->second; in SumGradients() local
308 if (grads.empty() || dtype == DT_BOOL) { in SumGradients()
313 if (grads.size() == 1) { in SumGradients()
315 return grads[0]; in SumGradients()
321 for (const NodeOut& nout : grads) { in SumGradients()
[all …]
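SumGradients adds up the gradients that flow back along every edge leaving a node, using the single value directly when there is only one. The user-visible effect is that a tensor used in several places receives the sum of the partials; a small sketch:

import tensorflow as tf

x = tf.constant(2.0)
with tf.GradientTape() as tape:
    tape.watch(x)
    y = x * x + 3.0 * x   # x feeds two ops, so two partial grads flow back

# The contributions 2x and 3 are summed, as SumGradients does with AddN.
print(tape.gradient(y, x))  # 7.0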
/external/tensorflow/tensorflow/python/framework/experimental/
unified_api_test.py
92 grads = tape.gradient(result, [a, b])
93 return grads
145 grads = tape.gradient(result, t)
146 return grads
195 grads = tape.gradient(result, a)
196 return grads
241 grads = tape.gradient(result, [a, b])
242 return grads
290 grads = tape.gradient(result, [a, b])
291 return grads
[all …]
/external/tensorflow/tensorflow/compiler/mlir/tensorflow/transforms/
tensor_array_ops_decomposition.cc
189 llvm::StringMap<Value> grads; member
406 sit->getSecond().grads.try_emplace(grad.source().str(), Value()); in HandleTensorArrayGradV3Op()
526 const llvm::SmallDenseMap<int64_t, llvm::SmallVector<string, 4>>& grads, in ChangeFunctionInputSignature() argument
536 auto grad_it = grads.find(argnum); in ChangeFunctionInputSignature()
537 if (grad_it == grads.end()) continue; in ChangeFunctionInputSignature()
546 stat.grads = std::move(grads_map); in ChangeFunctionInputSignature()
561 auto grads = AccessedGradients({body, cond}, module); in HandleWhileOp() local
573 ChangeFunctionInputSignature(body, grads, ta_arg_buffer_type, in HandleWhileOp()
576 ChangeFunctionInputSignature(cond, grads, ta_arg_buffer_type, in HandleWhileOp()
595 for (const string& source : grads[i]) { in HandleWhileOp()
[all …]
/external/tensorflow/tensorflow/python/kernel_tests/array_ops/
gather_nd_op_test.py
259 grads = gradients_impl.gradients([outputs], [inputs], [grad_vals])[0]
262 assert np.array_equal(expected_grads, self.evaluate(grads))
271 grads = gradients_impl.gradients([outputs], [inputs], [grad_vals])[0]
274 self.assertIndexedSlices(grads)
275 self.assertAllEqual(expected_grads, ops.convert_to_tensor(grads))
287 grads = gradients_impl.gradients([outputs], [inputs], [grad_vals])[0]
291 self.assertAllEqual(expected_grads, self.evaluate(grads))
314 grads = gradients_impl.gradients([outputs], [inputs], [grad_vals])[0]
321 self.assertAllEqual(expected_grads, self.evaluate(grads))
333 grads = gradients_impl.gradients([outputs], [inputs], [grad_vals])[0]
[all …]
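The assertions accept the gradient either densely or as tf.IndexedSlices, because the GatherNd gradient can come back sparse for whole-row indices. A sketch of the eager equivalent (the conversion to a dense tensor mirrors the ops.convert_to_tensor call in the test):

import tensorflow as tf

params = tf.Variable([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])
with tf.GradientTape() as tape:
    rows = tf.gather_nd(params, indices=[[2], [0]])        # pick whole rows
    loss = tf.reduce_sum(rows * tf.constant([[1.0, 1.0], [10.0, 10.0]]))

grad = tape.gradient(loss, params)
# Depending on the index shape this may be tf.IndexedSlices rather than a
# dense tensor, hence the assertIndexedSlices / convert_to_tensor pair above.
print(tf.convert_to_tensor(grad))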
/external/tensorflow/tensorflow/python/eager/benchmarks/resnet50/
hvp_test.py
35 grads = grad_tape.gradient(loss, model.trainable_variables)
36 return acc.jvp(grads)
56 grads = grad_tape.gradient(loss, variables)
57 helpers = tf.nest.map_structure(tf.ones_like, grads)
58 transposing = tf.gradients(grads, variables, helpers)
68 grads = inner_tape.gradient(loss, model.trainable_variables)
70 grads, model.trainable_variables, output_gradients=vector)
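hvp_test computes Hessian-vector products either with a forward-mode accumulator or, as in the last lines, by backpropagating through the gradient with output_gradients set to the vector. A compact sketch of the double-backprop variant:

import tensorflow as tf

x = tf.Variable([1.0, 2.0])
vector = tf.constant([1.0, 0.0])

with tf.GradientTape() as outer_tape:
    with tf.GradientTape() as inner_tape:
        loss = tf.reduce_sum(x ** 3)         # Hessian is diag(6x)
    grads = inner_tape.gradient(loss, x)     # 3x^2

# Differentiate the gradient, seeded with `vector`: this yields H @ v.
hvp = outer_tape.gradient(grads, x, output_gradients=vector)
print(hvp)  # [6., 0.]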
/external/tensorflow/tensorflow/python/kernel_tests/data_structures/
dynamic_stitch_op_test.py
165 grads = gradients_impl.gradients(stitched_t, indices + data,
167 self.assertEqual(grads[:3], [None] * 3) # Indices have no gradients
168 for datum, grad in zip(data, self.evaluate(grads[3:])):
268 grads = gradients_impl.gradients(stitched_t, indices + data,
270 self.assertEqual(grads[:3], [None] * 3) # Indices have no gradients
271 for datum, grad in zip(data, self.evaluate(grads[3:])):
305 grads = gradients_impl.gradients(stitched_t, indices + data,
307 self.assertEqual(grads[:3], [None] * 3) # Indices have no gradients
308 for datum, grad in zip(data, self.evaluate(grads[3:])):
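The repeated "Indices have no gradients" assertion follows from dynamic_stitch's integer index inputs being non-differentiable; only the data inputs receive gradients. An eager sketch:

import tensorflow as tf

indices = [tf.constant([0, 2]), tf.constant([1, 3])]
data = [tf.Variable([1.0, 2.0]), tf.Variable([3.0, 4.0])]

with tf.GradientTape() as tape:
    stitched = tf.dynamic_stitch(indices, data)
    loss = tf.reduce_sum(stitched * tf.constant([1.0, 10.0, 100.0, 1000.0]))

grads = tape.gradient(loss, indices + data)
print(grads[:2])   # [None, None]: integer indices are not differentiable
print(grads[2:])   # gradients routed back to each data tensor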
/external/tensorflow/tensorflow/python/eager/
backprop.py
605 def aggregate_indexed_slices_gradients(grads): argument
607 if len(grads) < 1:
609 if len(grads) == 1:
610 return grads[0]
611 grads = [g for g in grads if g is not None]
614 if any(isinstance(g, ops.Tensor) for g in grads):
615 return math_ops.add_n(grads)
620 grads = math_ops._as_indexed_slices_list(grads) # pylint: disable=protected-access
622 grads = [flatten_nested_indexed_slices(x) for x in grads]
625 array_ops.concat([x.values for x in grads], axis=0),
[all …]
/external/tensorflow/tensorflow/python/keras/optimizer_v2/
utils.py
37 grads = [pair[0] for pair in filtered_grads_and_vars]
39 ds_reduce_util.ReduceOp.SUM, grads)
118 grads, variables = zip(*grads_and_vars)
119 clipped_grads, _ = clip_ops.clip_by_global_norm(grads, clipnorm)
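The utils snippet clips with clip_ops.clip_by_global_norm, which rescales the whole gradient list by a single factor and so preserves the update direction, unlike the per-tensor clip_by_norm in optimizer_v1 above. A sketch:

import tensorflow as tf

grads = [tf.constant([3.0, 4.0]), tf.constant([0.0, 12.0])]   # global norm 13
clipped, global_norm = tf.clip_by_global_norm(grads, clip_norm=1.0)

print(global_norm.numpy())            # 13.0
print([g.numpy() for g in clipped])   # every tensor scaled by 1/13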
/external/tensorflow/tensorflow/core/ops/compat/ops_history_v1/
ResizeBilinearGrad.pbtxt
4 name: "grads"
37 name: "grads"
71 name: "grads"
/external/tensorflow/tensorflow/core/ops/compat/ops_history_v2/
ResizeBilinearGrad.pbtxt
4 name: "grads"
37 name: "grads"
71 name: "grads"
ResizeNearestNeighborGrad.pbtxt
4 name: "grads"
40 name: "grads"
83 name: "grads"
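These op histories define the grads input of the resize gradient kernels. They are not usually called directly; differentiating tf.image.resize is expected to route through them. A sketch (the shape check is the only hard claim here):

import tensorflow as tf

image = tf.random.normal([1, 4, 4, 3])
with tf.GradientTape() as tape:
    tape.watch(image)
    resized = tf.image.resize(image, [8, 8], method="bilinear")
    loss = tf.reduce_sum(resized)

# The upstream [1, 8, 8, 3] gradient plays the role of the "grads" input;
# the result matches the original image shape.
print(tape.gradient(loss, image).shape)  # (1, 4, 4, 3)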
/external/tensorflow/tensorflow/python/kernel_tests/sparse_ops/
sparse_xent_op_test.py
53 grads = gradients_impl.gradients([crossent_sum], [logits])[0]
55 return (crossent_sum, grads)
66 grads = gradients_impl.gradients([crossent_sum], [logits])[0]
68 return (crossent_sum, grads)
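The test differentiates the summed sparse softmax cross-entropy with respect to the logits. An eager sketch of the same computation; for a summed loss the gradient is softmax(logits) - one_hot(labels), row by row:

import tensorflow as tf

logits = tf.Variable([[2.0, 0.5, -1.0],
                      [0.0, 1.0, 3.0]])
labels = tf.constant([0, 2])

with tf.GradientTape() as tape:
    losses = tf.nn.sparse_softmax_cross_entropy_with_logits(
        labels=labels, logits=logits)
    crossent_sum = tf.reduce_sum(losses)

print(tape.gradient(crossent_sum, logits))  # softmax(logits) - one_hot(labels)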
/external/tensorflow/tensorflow/python/training/
adadelta_test.py
49 grads = constant_op.constant([grad, grad], dtype=dtype)
67 zip([grads, grads], [var0, var1]))
111 adadelta_opt.apply_gradients(zip([grads, grads], [var0, var1]))
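The Adadelta test feeds the same constant gradient for two variables straight into apply_gradients. A sketch of that pattern with the Keras optimizer rather than the tf.compat.v1 trainer used in the test:

import tensorflow as tf

var0 = tf.Variable([1.0, 2.0])
var1 = tf.Variable([3.0, 4.0])
grads = tf.constant([0.1, 0.1])

opt = tf.keras.optimizers.Adadelta(learning_rate=1.0)
# apply_gradients takes (gradient, variable) pairs; the same constant
# gradient is reused for both variables, exactly as zip([grads, grads], ...) does.
opt.apply_gradients(zip([grads, grads], [var0, var1]))
print(var0.numpy(), var1.numpy())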
