/external/tensorflow/tensorflow/contrib/boosted_trees/python/kernel_tests/ |
D | stats_accumulator_ops_test.py |
    47   num_updates, partition, bucket_ids, grads, hessians = accumulator.flush(
    49   num_updates, partition, bucket_ids, grads, hessians = sess.run(
    50       [num_updates, partition, bucket_ids, grads, hessians])
    52   result = _AccumulatorResultToDict(partition, bucket_ids, grads, hessians)
    75   num_updates, partition, bucket_ids, grads, hessians = accumulator.flush(
    77   num_updates, partition, bucket_ids, grads, hessians = sess.run(
    78       [num_updates, partition, bucket_ids, grads, hessians])
    80   result = _AccumulatorResultToDict(partition, bucket_ids, grads, hessians)
    109  num_updates, partition, feature, grads, hessians = accumulator.flush(
    111  num_updates, partition, feature, grads, hessians = sess.run(
    [all …]
|
/external/tensorflow/tensorflow/python/ops/ |
D | gradients_util.py |
    301  def _VerifyGeneratedGradients(grads, op):  argument
    316    if len(grads) != len(op.inputs):
    318      "inputs %d" % (len(grads), op.node_def, len(op.inputs)))
    626  grads = {}
    630    _SetGrad(grads, y, grad_y)
    648    _SetGrad(grads, y, loop_state.ZerosLikeForExit(y))
    658    out_grads = _AggregatedGrads(grads, op, gradient_uid, loop_state,
    768    _SetGrad(grads, t_in, in_grad)
    773    _UpdatePendingAndEnqueueReady(grads, op, queue, pending_count, loop_state,
    778  return [_GetGrad(grads, x, unconnected_gradients) for x in xs]
    [all …]
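The matches above are the heart of the symbolic backprop pass behind tf.gradients: _SetGrad collects each gradient contribution flowing into a tensor, and _AggregatedGrads sums them before the result is pushed further back. A minimal pure-Python sketch of that accumulate-then-sum pattern; all names here are illustrative, not the module's real internals:

    from collections import defaultdict

    def set_grad(grads, tensor, grad):
        # Record one more gradient contribution flowing into `tensor`.
        grads[tensor].append(grad)

    def aggregated_grad(grads, tensor):
        # Sum all contributions; None if nothing flowed into `tensor`.
        contributions = grads.get(tensor, [])
        return sum(contributions) if contributions else None

    grads = defaultdict(list)
    set_grad(grads, "y", 1.0)            # seed gradient dL/dy
    set_grad(grads, "y", 0.5)            # a second path into y
    print(aggregated_grad(grads, "y"))   # 1.5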
|
D | cudnn_rnn_grad.py |
    25   def _cudnn_rnn_backward(op, *grads):  argument
    38     output_backprop=grads[0],
    39     output_h_backprop=grads[1],
    40     output_c_backprop=grads[2],
    77   def _cudnn_rnn_backwardv3(op, *grads):  argument
    92     output_backprop=grads[0],
    93     output_h_backprop=grads[1],
    94     output_c_backprop=grads[2],
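These matches show the standard shape of a registered gradient function: it receives the forward op plus one incoming gradient per forward output (*grads) and returns one gradient per forward input. A hedged sketch of the registration mechanism, using a made-up op name since the real file registers the CudnnRNN ops:

    import tensorflow as tf

    @tf.RegisterGradient("HypotheticalOp")  # op name is hypothetical
    def _hypothetical_grad(op, *grads):
        # grads[i] is dL/d(op.outputs[i]). For an identity-like op whose
        # input and output counts match, passing them through suffices.
        return list(grads)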
|
D | gradients_test.py |
    159  grads = gradients.gradients(z, [x])
    160  self.assertTrue(all(x is not None for x in grads))
    170  grads = gradients.gradients(z, [x, y])
    171  self.assertTrue(all(x is not None for x in grads))
    172  self.assertEqual(6.0, grads[0].eval())
    180  grads = gradients.gradients(
    184  self.assertTrue(all(x is not None for x in grads))
    185  self.assertEqual(20.0, grads[0].eval())
    186  self.assertEqual(10.0, grads[1].eval())
    194  grads = gradients.gradients(
    [all …]
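A minimal sketch of the public API these tests exercise, written against TF 2.x's v1 compatibility layer since gradients() and Session belong to graph mode:

    import tensorflow.compat.v1 as tf
    tf.disable_eager_execution()

    x = tf.constant(2.0)
    y = tf.constant(3.0)
    z = x * x * y                      # dz/dx = 2xy = 12, dz/dy = x*x = 4

    grads = tf.gradients(z, [x, y])    # one gradient tensor per source
    with tf.Session() as sess:
        print(sess.run(grads))         # [12.0, 4.0]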
|
D | while_v2.py |
    248  def _WhileGrad(op, *grads):  # pylint: disable=invalid-name  argument
    262    grads = [_preprocess_grad(grad, body_out, while_out)
    264             in zip(grads, body_graph.outputs, while_op.outputs)]
    270        body_graph.outputs, body_graph.inputs, grads) if grad is not None])
    303      grads, body_grad_graph, loop_vars, while_op.inputs)
    331    return _get_structured_grad_output(outputs, grads, body_grad_graph)
    423  def _create_grad_func(ys, xs, grads, cond_graph, body_graph, name, while_op,  argument
    443    assert len(ys) == len(grads)
    449    args = [counter, maximum_iterations, total_iters] + list(grads)
    555  def _get_structured_grad_output(outputs, grads, body_grad_graph):  argument
    [all …]
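A short sketch of the user-facing situation _WhileGrad serves: differentiating through a tf.while_loop. Assumes TF 2.x eager mode:

    import tensorflow as tf

    x = tf.constant(2.0)
    with tf.GradientTape() as tape:
        tape.watch(x)
        # y = x ** 3 computed by three loop iterations of y * x
        _, y = tf.while_loop(lambda i, y: i < 3,
                             lambda i, y: (i + 1, y * x),
                             (tf.constant(0), tf.constant(1.0)))
    print(tape.gradient(y, x).numpy())   # dy/dx = 3 * x**2 = 12.0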
|
D | cond_v2.py |
    90   def _IfGrad(op, *grads):  # pylint: disable=invalid-name  argument
    104      true_graph, grads, util.unique_grad_fn_name(true_graph.name))
    106      false_graph, grads, util.unique_grad_fn_name(false_graph.name))
    260  def _grad_fn(func_graph, grads):  argument
    278    assert len(func_graph.outputs) == len(grads)
    281    for y, grad_y in zip(func_graph.outputs, grads):
    309  def _create_grad_func(func_graph, grads, name):  argument
    313      lambda: _grad_fn(func_graph, grads), [], {},
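The matching user-facing case for _IfGrad is differentiating through tf.cond; the gradient follows whichever branch was taken. A sketch assuming TF 2.x eager mode:

    import tensorflow as tf

    x = tf.constant(2.0)
    with tf.GradientTape() as tape:
        tape.watch(x)
        y = tf.cond(x > 0, lambda: x * x, lambda: -x)
    print(tape.gradient(y, x).numpy())   # true branch: d(x*x)/dx = 4.0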
|
/external/tensorflow/tensorflow/java/src/test/java/org/tensorflow/op/core/ |
D | GradientsTest.java |
    47   Gradients grads = Gradients.create(scope, y1, Arrays.asList(x, y0));  in createGradients() local
    49   assertNotNull(grads);  in createGradients()
    50   assertNotNull(grads.dy());  in createGradients()
    51   assertEquals(2, grads.dy().size());  in createGradients()
    56   sess.runner().feed(x, c).fetch(grads.dy(0)).fetch(grads.dy(1)).run())) {  in createGradients()
    74   Gradients grads = Gradients.create(scope, Arrays.asList(y0, y1), Arrays.asList(x));  in createGradientsWithSum() local
    76   assertNotNull(grads);  in createGradientsWithSum()
    77   assertNotNull(grads.dy());  in createGradientsWithSum()
    78   assertEquals(1, grads.dy().size());  in createGradientsWithSum()
    82   new TestUtil.AutoCloseableList<>(sess.runner().feed(x, c).fetch(grads.dy(0)).run())) {  in createGradientsWithSum()
|
/external/tensorflow/tensorflow/core/kernels/ |
D | crop_and_resize_op.cc |
    346  const Tensor& grads = context->input(0);  in ComputeAsync() local
    355  OP_REQUIRES_ASYNC(context, grads.dims() == 4,  in ComputeAsync()
    357  grads.shape().DebugString()),  in ComputeAsync()
    359  const int crop_height = grads.dim_size(1);  in ComputeAsync()
    360  const int crop_width = grads.dim_size(2);  in ComputeAsync()
    368  context, grads.dim_size(0) == num_boxes,  in ComputeAsync()
    389  context, grads.dim_size(3) == depth,  in ComputeAsync()
    402  const Tensor& grads = context->input(0);  in ComputeAsync() local
    406  context->eigen_device<Device>(), grads.tensor<float, 4>(),  in ComputeAsync()
    429  typename TTypes<float, 4>::ConstTensor grads,  in operator ()()
    [all …]
|
D | crop_and_resize_op_gpu.cu.cc |
    388  typename TTypes<float, 4>::ConstTensor grads,  in operator ()()
    397  const int num_boxes = grads.dimension(0);  in operator ()()
    398  const int crop_height = grads.dimension(1);  in operator ()()
    399  const int crop_width = grads.dimension(2);  in operator ()()
    400  const int depth = grads.dimension(3);  in operator ()()
    427  grads.data(), boxes.data(), box_ind.data(), num_boxes, batch,  in operator ()()
    438  typename TTypes<float, 4>::ConstTensor grads,  in operator ()()
    447  const int num_boxes = grads.dimension(0);  in operator ()()
    448  const int crop_height = grads.dimension(1);  in operator ()()
    449  const int crop_width = grads.dimension(2);  in operator ()()
    [all …]
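Both crop_and_resize kernels above consume an incoming grads tensor of shape [num_boxes, crop_height, crop_width, depth] and scatter it back to the input image's shape. A sketch of the same op driven from Python, assuming TF 2.x:

    import tensorflow as tf

    image = tf.random.normal([1, 8, 8, 3])
    boxes = tf.constant([[0.0, 0.0, 1.0, 1.0]])   # one full-image box
    box_indices = tf.constant([0])

    with tf.GradientTape() as tape:
        tape.watch(image)
        crops = tf.image.crop_and_resize(image, boxes, box_indices,
                                         crop_size=[4, 4])
        loss = tf.reduce_sum(crops)

    # The backward kernel maps the [1, 4, 4, 3] crop gradient back onto
    # the image, so the result has the image's shape: [1, 8, 8, 3].
    print(tape.gradient(loss, image).shape)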
|
/external/tensorflow/tensorflow/python/debug/lib/ |
D | debug_gradients_test.py |
    63   grads = gradients_impl.gradients(y, [self.u, self.v])
    64   self.assertEqual(2, len(grads))
    65   u_grad = grads[0]
    66   v_grad = grads[1]
    94   grads = gradients_impl.gradients(y, [self.u, self.v])
    95   self.assertEqual(2, len(grads))
    96   u_grad = grads[0]
    97   v_grad = grads[1]
    205  grads = gradients_impl.gradients(y, [self.u, self.v])
    206  self.assertEqual(2, len(grads))
    [all …]
|
/external/tensorflow/tensorflow/contrib/eager/python/examples/revnet/ |
D | revnet_test.py |
    35   grads, loss = model.compute_gradients(
    38       zip(grads, model.trainable_variables), global_step=global_step)
    79   def _check_grad_angle_combined(self, grads, grads_true):  argument
    93     g1_all = tf.concat(_combine(grads), axis=0)
    105  grads, loss = self.model.compute_gradients(
    108  self.assertTrue(isinstance(grads, list))
    110  self.assertEqual(len(grads), len(vars_))
    111  for grad, var in zip(grads, vars_):
    120  self.assertAllClose(grads, grads_true, rtol=1e-4, atol=1e-4)
    121  self._check_grad_angle_combined(grads, grads_true)
    [all …]
|
D | blocks_test.py |
    119  def _check_grad_angle(self, grads, grads_true, atol=1e0):  argument
    121    for g1, g2 in zip(grads, grads_true):
    155  grads = tape.gradient(y, [x] + vars_, output_gradients=dy)
    156  dx_true, dw_true = grads[0], grads[1:]
    184  grads = tape.gradient(y, [x] + vars_, output_gradients=dy)
    185  dx_true, dw_true = grads[0], grads[1:]
    259  grads = tape.gradient(
    261  dx_true, dw_true = grads[0], grads[1:]
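The output_gradients argument seen above seeds the backward pass with a caller-supplied dy instead of the default ones_like(y), which is how these tests compare a reversible block's manual gradients against the tape's. A minimal sketch, assuming TF 2.x:

    import tensorflow as tf

    x = tf.constant(2.0)
    with tf.GradientTape() as tape:
        tape.watch(x)
        y = x * x

    dy = tf.constant(3.0)                           # upstream gradient seed
    dx = tape.gradient(y, x, output_gradients=dy)
    print(dx.numpy())                               # 3.0 * 2x = 12.0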
|
/external/tensorflow/tensorflow/python/keras/mixed_precision/experimental/ |
D | loss_scale_optimizer.py |
    76   grads = [g for g, _ in grads_and_vars]
    78   scaled_grads = self._scale_grads(grads)
    83   grads = self._optimizer.get_gradients(loss, params)
    84   return self._scale_grads(grads)
    93   def _scale_grads(self, grads):  argument
    95     return [None if g is None else g * loss_scale_reciprocal for g in grads]
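Line 95 above is the unscaling step of loss scaling: gradients computed from a scaled loss are multiplied by 1/loss_scale, with None entries (unconnected variables) passed through. A hedged pure-Python sketch of that one step, with illustrative names:

    def unscale_grads(grads, loss_scale):
        # Undo loss scaling; keep None placeholders untouched.
        inv = 1.0 / loss_scale
        return [None if g is None else g * inv for g in grads]

    print(unscale_grads([2.0, None, 8.0], loss_scale=4.0))  # [0.5, None, 2.0]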
|
/external/tensorflow/tensorflow/python/keras/ |
D | optimizers.py |
    92   grads = K.gradients(loss, params)
    93   if None in grads:
    100  grads = [clip_ops.clip_by_norm(g, self.clipnorm) for g in grads]
    102  grads = [
    104      for g in grads
    106  return grads
    186  grads = self.get_gradients(loss, params)
    198  for p, g, m in zip(params, grads, moments):
    256  grads = self.get_gradients(loss, params)
    267  for p, g, a in zip(params, grads, accumulators):
    [all …]
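Line 100 above applies the Keras clipnorm option; the truncated list comprehension at 102-104 is the matching clipvalue path. A sketch of both clipping modes on a concrete gradient, assuming TF 2.x:

    import tensorflow as tf

    grads = [tf.constant([3.0, 4.0])]                       # L2 norm 5.0
    clipped_norm = [tf.clip_by_norm(g, 1.0) for g in grads]           # rescale
    clipped_value = [tf.clip_by_value(g, -2.0, 2.0) for g in grads]   # clamp

    print(clipped_norm[0].numpy())    # [0.6 0.8]  (norm rescaled to 1.0)
    print(clipped_value[0].numpy())   # [2. 2.]    (elementwise clamp)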
|
/external/tensorflow/tensorflow/core/graph/ |
D | gradients.cc |
    101  static Node* AddSymGrad(Graph* g, Node* n, gtl::ArraySlice<NodeOut> grads) {  in AddSymGrad() argument
    104    CHECK_EQ(num_y, grads.size());  in AddSymGrad()
    121    for (const NodeOut& nout : grads) {  in AddSymGrad()
    230    auto* grads = &iter->second;  in BackpropAlongEdge() local
    231    grads->push_back(dst_grad);  in BackpropAlongEdge()
    298    const auto& grads = iter->second;  in SumGradients() local
    299    if (grads.empty()) {  in SumGradients()
    304    if (grads.size() == 1) {  in SumGradients()
    306      return grads[0];  in SumGradients()
    312    for (const NodeOut& nout : grads) {  in SumGradients()
    [all …]
|
/external/tensorflow/tensorflow/python/kernel_tests/ |
D | gather_nd_op_test.py |
    258  grads = gradients_impl.gradients([outputs], [inputs], [grad_vals])[0]
    261  assert np.array_equal(expected_grads, self.evaluate(grads))
    270  grads = gradients_impl.gradients([outputs], [inputs], [grad_vals])[0]
    273  self.assertIndexedSlices(grads)
    274  self.assertAllEqual(expected_grads, ops.convert_to_tensor(grads).eval())
    286  grads = gradients_impl.gradients([outputs], [inputs], [grad_vals])[0]
    290  self.assertAllEqual(expected_grads, self.evaluate(grads))
    313  grads = gradients_impl.gradients([outputs], [inputs], [grad_vals])[0]
    320  self.assertAllEqual(expected_grads, self.evaluate(grads))
    332  grads = gradients_impl.gradients([outputs], [inputs], [grad_vals])[0]
    [all …]
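These tests check that the gradient of gather_nd scatters the incoming gradient back to the gathered positions (some paths, per line 273, produce IndexedSlices rather than a dense tensor). A sketch of the dense case, assuming TF 2.x:

    import tensorflow as tf

    params = tf.Variable([[1.0, 2.0], [3.0, 4.0]])
    indices = tf.constant([[0, 0], [1, 1]])

    with tf.GradientTape() as tape:
        out = tf.gather_nd(params, indices)    # [1.0, 4.0]
        loss = tf.reduce_sum(out * tf.constant([10.0, 20.0]))

    # Upstream weights land back at the gathered positions, zeros elsewhere.
    print(tape.gradient(loss, params).numpy())   # [[10. 0.] [0. 20.]]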
|
D | dynamic_stitch_op_test.py |
    154  grads = gradients_impl.gradients(stitched_t, indices + data,
    156  self.assertEqual(grads[:3], [None] * 3)  # Indices have no gradients
    157  for datum, grad in zip(data, self.evaluate(grads[3:])):
    257  grads = gradients_impl.gradients(stitched_t, indices + data,
    259  self.assertEqual(grads[:3], [None] * 3)  # Indices have no gradients
    260  for datum, grad in zip(data, self.evaluate(grads[3:])):
    294  grads = gradients_impl.gradients(stitched_t, indices + data,
    296  self.assertEqual(grads[:3], [None] * 3)  # Indices have no gradients
    297  for datum, grad in zip(data, self.evaluate(grads[3:])):
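The assertions above pin down that dynamic_stitch is differentiable in its data inputs but not its integer indices. A sketch of the data-side gradient, assuming TF 2.x:

    import tensorflow as tf

    indices = [tf.constant([0, 2]), tf.constant([1])]
    data = [tf.constant([10.0, 30.0]), tf.constant([20.0])]

    with tf.GradientTape() as tape:
        tape.watch(data)
        stitched = tf.dynamic_stitch(indices, data)   # [10., 20., 30.]
        loss = tf.reduce_sum(stitched * stitched)

    # d(sum(s^2))/ds = 2s, gathered back to each data piece.
    grads = tape.gradient(loss, data)
    print([g.numpy() for g in grads])   # [array([20., 60.]), array([40.])]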
|
D | cond_v2_test.py |
    429  grads = nesting_fn()
    431  return grads, pred_outer, pred_inner
    434  grads, pred_outer, pred_inner = build_graph()
    437  sess.run(grads, {
    442  sess.run(grads, {
    447  sess.run(grads, {
    452  sess.run(grads, {
    492  grads = nesting_fn()
    494  return grads, pred_outer, pred_inner
    497  grads, pred_outer, pred_inner = build_graph()
    [all …]
|
/external/tensorflow/tensorflow/contrib/layers/python/layers/ |
D | rev_block_lib.py |
    69   for grads in zip(*lists_of_grads):
    70     grads = [g for g in grads if g is not None]
    71     if grads:
    72       acc_grads.append(math_ops.add_n(grads))
    321  for idxs, grads in list(zip(f_vars_idxs, f_var_grads)) + list(
    323    for i, grad in zip(idxs, grads):
    563  grads = gradients_impl.gradients(outputs, inputs + variables,
    568    grads = _tuple_with_data_dep(grads)
    570    grads = control_flow_ops.tuple(grads)
    572  grad_inputs = grads[:len(inputs)]
    [all …]
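Lines 69-72 above accumulate several per-variable gradient lists position-wise with add_n, dropping positions where every contribution is None. A self-contained sketch of that loop, assuming TF 2.x:

    import tensorflow as tf

    def accumulate(lists_of_grads):
        # Positionwise sum across gradient lists; all-None positions
        # are skipped, matching the loop in rev_block_lib.py above.
        acc_grads = []
        for grads in zip(*lists_of_grads):
            grads = [g for g in grads if g is not None]
            if grads:
                acc_grads.append(tf.add_n(grads))
        return acc_grads

    print(accumulate([[tf.constant(1.0), None],
                      [tf.constant(2.0), None]]))   # [3.0]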
|
D | rev_block_lib_test.py |
    152  grads = gradients_impl.gradients(loss, wrt)
    156  y_val, yd_val, gd_val, g_val = sess.run([y, y_rev, grads_rev, grads])
    301  for grads in zip(all_grads_val):
    302    current = grads[0]
    303    for g in grads[1:]:
    343  grads = gradients_impl.gradients(out, [inputs] + tvars)
    344  for grad in grads:
    391  grads = gradients_impl.gradients(layer_list[-1], layer_list[0])
    393  sess.run(grads)
|
/external/tensorflow/tensorflow/python/keras/optimizer_v2/ |
D | optimizer_v2.py |
    331  grads = tape.gradient(loss_value, var_list, grad_loss)
    334  grads = [clip_ops.clip_by_norm(g, self.clipnorm) for g in grads]
    336  grads = [
    338      for g in grads
    341  grads_and_vars = list(zip(grads, var_list))
    364  grads = gradients.gradients(loss, params)
    365  if None in grads:
    372  grads = [clip_ops.clip_by_norm(g, self.clipnorm) for g in grads]
    374  grads = [
    376      for g in grads
    [all …]
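The matches above trace the minimize() path: gradients come from a tape, optional clipnorm/clipvalue clipping is applied, and the (grad, var) pairs go to apply_gradients. A compact sketch of the same sequence done by hand, assuming TF 2.x:

    import tensorflow as tf

    var = tf.Variable(3.0)
    opt = tf.keras.optimizers.SGD(learning_rate=0.1)

    with tf.GradientTape() as tape:
        loss = var * var                       # d(loss)/d(var) = 2 * var = 6

    grads = tape.gradient(loss, [var])
    grads = [tf.clip_by_norm(g, 1.0) for g in grads]   # optional clipnorm
    opt.apply_gradients(zip(grads, [var]))
    print(var.numpy())                         # 3.0 - 0.1 * 1.0 = 2.9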
|
D | adadelta_test.py |
    53   grads = constant_op.constant([grad, grad], dtype=dtype)
    71       zip([grads, grads], [var0, var1]))
    100  adadelta_opt.apply_gradients(zip([grads, grads], [var0, var1]))
|
/external/tensorflow/tensorflow/contrib/training/python/training/ |
D | training.py |
    441  grads = optimizer.compute_gradients(
    449    grads = transform_grads_fn(grads)
    454    add_gradients_summaries(grads)
    457  grad_updates = optimizer.apply_gradients(grads, global_step=global_step)
|
/external/tensorflow/tensorflow/python/training/ |
D | adadelta_test.py |
    53   grads = constant_op.constant([grad, grad], dtype=dtype)
    71       zip([grads, grads], [var0, var1]))
    115  adadelta_opt.apply_gradients(zip([grads, grads], [var0, var1]))
|
/external/tensorflow/tensorflow/contrib/slim/python/slim/ |
D | learning.py |
    417  def transform_grads_fn(grads):  argument
    420    grads = multiply_gradients(grads, gradient_multipliers)
    425    grads = clip_gradient_norms(grads, clip_gradient_norm)
    426    return grads
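The slim transform_grads_fn above multiplies selected gradients and then clips their norms, using slim's multiply_gradients and clip_gradient_norms helpers. A hedged sketch that inlines equivalent logic with plain TF ops; the parameter names and defaults are illustrative:

    import tensorflow as tf

    def transform_grads_fn(grads_and_vars, multiplier=0.1, clip_norm=5.0):
        # Scale each gradient, then cap its L2 norm; keep (grad, var)
        # pairing intact and pass None gradients through unchanged.
        out = []
        for g, v in grads_and_vars:
            if g is not None:
                g = tf.clip_by_norm(g * multiplier, clip_norm)
            out.append((g, v))
        return out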
|