/external/tensorflow/tensorflow/python/training/experimental/

D  loss_scaling_gradient_tape.py
     174  grads = replica_context.merge_call(
     181  return grads
     256  def cond(grads, ready_to_update, is_first_iteration):    argument
     258  del grads
     270  def body(grads, ready_to_update, is_first_iteration):    argument
     272  del grads, ready_to_update, is_first_iteration
     286  grads = []  # The unscaled gradients
     295  grads.append(g * math_ops.cast(inv_loss_scale, g.dtype))
     301  grads.append(initial_grad)
     302  return grads
     [all …]
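This file implements loss scaling on top of the gradient tape. A minimal sketch of the pattern it automates, using only the public tf.GradientTape API; the fixed loss-scale value and the variable are illustrative, and the real class also handles distribution and dynamic rescaling:

    import tensorflow as tf

    loss_scale = tf.constant(2.0 ** 15)   # fixed scale, for illustration only
    v = tf.Variable([1.0, 2.0])

    with tf.GradientTape() as tape:
        loss = tf.reduce_sum(v * v)
        scaled_loss = loss * tf.cast(loss_scale, loss.dtype)

    scaled_grads = tape.gradient(scaled_loss, [v])
    inv_loss_scale = 1.0 / loss_scale
    # Mirrors line 295 above: unscale each gradient in its own dtype.
    grads = [g * tf.cast(inv_loss_scale, g.dtype) for g in scaled_grads]
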
D  loss_scale.py
     102  def update(self, grads):    argument
     270  def update(self, grads):    argument
     271  del grads
     281  def _is_all_finite(grads):    argument
     284  math_ops.reduce_all(math_ops.is_finite(g)) for g in grads if g is not None
     399  def update(self, grads):    argument
     401  grads = nest.flatten(grads)
     405  def get_is_finite(grads):    argument
     406  is_finite = _is_all_finite(grads)
     412  get_is_finite, args=(grads,))
     [all …]
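A sketch of the finiteness test at line 284 and the dynamic loss-scale policy it feeds (shrink on inf/NaN, grow after a run of finite steps); growth_interval and multiplier are assumed values, and the control flow is eager-mode Python rather than the graph ops the real class builds:

    import tensorflow as tf

    def is_all_finite(grads):
        # Mirrors line 284: reduce over every non-None gradient.
        checks = [tf.reduce_all(tf.math.is_finite(g)) for g in grads if g is not None]
        return tf.reduce_all(tf.stack(checks))

    def update_loss_scale(scale, num_good_steps, grads,
                          growth_interval=2000, multiplier=2.0):
        # Eager-only control flow; the real class builds graph ops instead.
        if is_all_finite(grads):
            num_good_steps += 1
            if num_good_steps >= growth_interval:
                return scale * multiplier, 0   # grow after enough finite steps
            return scale, num_good_steps
        return scale / multiplier, 0           # shrink and reset on inf/NaN
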
D  loss_scale_optimizer.py
     127  grads = [g for g, _ in grads_and_vars]
     129  unscaled_grads = self._unscale_grads(grads)
     142  def _unscale_grads(self, grads):    argument
     147  for g in grads
     216  grads = [g for g, _ in grads_and_vars]
     217  loss_scale_update_op, should_apply_grads = (self._loss_scale.update(grads))
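The unscaling step at lines 127-147, restated as a standalone helper; loss_scale is passed in explicitly here, whereas the optimizer reads it from its LossScale object:

    import tensorflow as tf

    def unscale_grads_and_vars(grads_and_vars, loss_scale):
        """Split (grad, var) pairs, unscale the grads, and re-pair them."""
        grads = [g for g, _ in grads_and_vars]
        variables = [v for _, v in grads_and_vars]
        inv_scale = 1.0 / loss_scale
        unscaled = [None if g is None else g * tf.cast(inv_scale, g.dtype)
                    for g in grads]
        return list(zip(unscaled, variables))
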
/external/tensorflow/tensorflow/python/keras/distribute/

D  custom_training_loop_optimizer_test.py
      58  grads = values.PerReplica([
      63  def step_fn(grads):    argument
      65  [(grads, v)],
      70  distribution.run(step_fn, args=(grads,)))
      88  grads = ops.convert_to_tensor_v2_with_dispatch([1., 1.])
      90  def step_fn(grads):    argument
      92  [(grads, v)],
      97  distribution.run(step_fn, args=(grads,)))
     110  grads = ops.convert_to_tensor_v2_with_dispatch([1., 1.])
     112  def step_fn(grads):    argument
     [all …]
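The shape of this test, reduced: a step_fn receives gradients as an argument and applies them inside strategy.run. This sketch uses a plain tensor gradient and illustrative values:

    import tensorflow as tf

    strategy = tf.distribute.MirroredStrategy()  # one replica per local device
    with strategy.scope():
        v = tf.Variable([0.0, 0.0])
        optimizer = tf.keras.optimizers.SGD(learning_rate=0.1)

    def step_fn(grads):
        # Inside a replica context, apply_gradients aggregates across replicas
        # by default (experimental_aggregate_gradients=True).
        optimizer.apply_gradients([(grads, v)])

    grads = tf.constant([1.0, 1.0])
    strategy.run(step_fn, args=(grads,))
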
D  custom_training_loop_models_test.py
      82  grads = tape.gradient(loss, model.variables)
      83  return grads
     107  grads = tape.gradient(loss, model.variables)
     108  optimizer.apply_gradients(zip(grads, model.variables))
     145  grads = tape.gradient(loss, model.variables)
     146  optimizer.apply_gradients(zip(grads, model.variables))
     170  grads = tape.gradient(loss, model.variables)
     171  optimizer.apply_gradients(zip(grads, model.variables))
     202  grads = tape.gradient(loss, model.variables)
     203  optimizer.apply_gradients(zip(grads, model.variables))
     [all …]
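The training-step pattern repeated throughout this file, reduced to its core; the model and data here are stand-ins:

    import tensorflow as tf

    model = tf.keras.Sequential([tf.keras.layers.Dense(1)])
    optimizer = tf.keras.optimizers.SGD(0.01)
    x = tf.random.normal([8, 4])
    y = tf.random.normal([8, 1])

    with tf.GradientTape() as tape:
        loss = tf.reduce_mean(tf.square(model(x) - y))
    grads = tape.gradient(loss, model.variables)
    optimizer.apply_gradients(zip(grads, model.variables))
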
/external/tensorflow/tensorflow/python/ops/

D  gradients_test.py
     162  grads = gradients.gradients(z, [x])
     163  self.assertTrue(all(x is not None for x in grads))
     173  grads = gradients.gradients(z, [x, y])
     174  self.assertTrue(all(x is not None for x in grads))
     175  self.assertEqual(6.0, grads[0].eval())
     183  grads = gradients.gradients(
     187  self.assertTrue(all(x is not None for x in grads))
     188  self.assertEqual(20.0, grads[0].eval())
     189  self.assertEqual(10.0, grads[1].eval())
     197  grads = gradients.gradients(
     [all …]
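The graph-mode tf.gradients calls these tests exercise, in self-contained form; with z = x * y the returned grads are dz/dx = y and dz/dy = x:

    import tensorflow.compat.v1 as tf
    tf.disable_eager_execution()

    x = tf.constant(2.0)
    y = tf.constant(3.0)
    z = x * y
    grads = tf.gradients(z, [x, y])
    with tf.Session() as sess:
        print(sess.run(grads))  # [3.0, 2.0]
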
D  cudnn_rnn_grad.py
      25  def _cudnn_rnn_backward(op, *grads):    argument
      38  output_backprop=grads[0],
      39  output_h_backprop=grads[1],
      40  output_c_backprop=grads[2],
      77  def _cudnn_rnn_backwardv3(op, *grads):    argument
      92  output_backprop=grads[0],
      93  output_h_backprop=grads[1],
      94  output_c_backprop=grads[2],
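The registration shape used by _cudnn_rnn_backward: a gradient function receives the forward op plus one incoming gradient per forward output (*grads) and returns one gradient per forward input. A generic sketch for a hypothetical single-output op ("Square2" is an invented name), not the CudnnRNN gradient itself:

    from tensorflow.python.framework import ops

    @ops.RegisterGradient("Square2")  # hypothetical op name, for illustration
    def _square2_grad(op, *grads):
        x = op.inputs[0]
        return [2.0 * x * grads[0]]   # grads[0] is dL/dy for the sole output
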
D  gradients_util.py
     238  def _VerifyGeneratedGradients(grads, op):    argument
     254  if len(grads) != len(op.inputs):
     256  "inputs %d" % (len(grads), op.node_def, len(op.inputs)))
     561  grads = {}
     565  _SetGrad(grads, y, grad_y)
     583  _SetGrad(grads, y, loop_state.ZerosLikeForExit(y))
     593  out_grads = _AggregatedGrads(grads, op, gradient_uid, loop_state,
     721  _SetGrad(grads, t_in, in_grad)
     726  _UpdatePendingAndEnqueueReady(grads, op, queue, pending_count, loop_state,
     731  return [_GetGrad(grads, x, unconnected_gradients) for x in xs]
     [all …]
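A plain-Python sketch of the bookkeeping visible at lines 238-256 and 561-731: partial gradients accumulate in a dict keyed by tensor, and a generated gradient list must match the op's input count. Names are illustrative, not the module's actual helpers:

    def verify_generated_gradients(grads, op):
        # Mirrors _VerifyGeneratedGradients: one gradient per forward input.
        if len(grads) != len(op.inputs):
            raise ValueError(
                "Num gradients %d generated for op %s do not match num "
                "inputs %d" % (len(grads), op.node_def, len(op.inputs)))

    def set_grad(grads, tensor, grad):
        # Partial gradients for the same tensor accumulate until summed.
        grads.setdefault(tensor, []).append(grad)
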
/external/tensorflow/tensorflow/python/keras/integration_test/

D  gradients_test.py
      55  grads = tape.gradient(out, vars_to_grad)
      57  return grads_re, grads
      66  grads_re, grads = self._TestVariablesGradient(test_input, test_model,
      70  grads = self.evaluate(grads)
      71  for g, g_re in zip(grads, grads_re):
      74  grads_re, grads = self._TestVariablesGradient(test_input, test_model,
      78  grads = self.evaluate(grads)
      79  for g, g_re in zip(grads, grads_re):
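What this test compares, in miniature: gradients of a function against the same function wrapped in tf.recompute_grad, which re-runs the forward pass during backprop instead of storing activations; the two should agree:

    import tensorflow as tf

    v = tf.Variable(3.0)

    def f(x):
        return tf.sin(x * v)

    f_re = tf.recompute_grad(f)
    x = tf.constant(1.0)
    with tf.GradientTape(persistent=True) as tape:
        out = f(x)
        out_re = f_re(x)
    grads = tape.gradient(out, [v])
    grads_re = tape.gradient(out_re, [v])  # should match grads
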
D  gradient_checkpoint_test.py
     108  grads = tape.gradient(loss, tr_vars)  # tr_vars
     109  optimizer.apply_gradients(zip(grads, tr_vars))
     110  del grads
     138  grads = tape.gradient(loss, tr_vars)  # tr_vars
     139  optimizer.apply_gradients(zip(grads, tr_vars))
     140  del grads
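The loop body at lines 108-110 as a sketch: compute, apply, then drop the gradient list so its memory can be reclaimed between steps, which is the point of a memory-focused test:

    import tensorflow as tf

    def train_step(model, optimizer, x, y):
        tr_vars = model.trainable_variables
        with tf.GradientTape() as tape:
            loss = tf.reduce_mean(tf.square(model(x) - y))
        grads = tape.gradient(loss, tr_vars)
        optimizer.apply_gradients(zip(grads, tr_vars))
        del grads  # drop references so gradient memory can be reclaimed
        return loss
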
/external/tensorflow/tensorflow/java/src/test/java/org/tensorflow/op/core/

D  GradientsTest.java
      47  Gradients grads = Gradients.create(scope, y1, Arrays.asList(x, y0));    in createGradients() local
      49  assertNotNull(grads);    in createGradients()
      50  assertNotNull(grads.dy());    in createGradients()
      51  assertEquals(2, grads.dy().size());    in createGradients()
      56  sess.runner().feed(x, c).fetch(grads.dy(0)).fetch(grads.dy(1)).run())) {    in createGradients()
      74  Gradients grads = Gradients.create(scope, Arrays.asList(y0, y1), Arrays.asList(x));    in createGradientsWithSum() local
      76  assertNotNull(grads);    in createGradientsWithSum()
      77  assertNotNull(grads.dy());    in createGradientsWithSum()
      78  assertEquals(1, grads.dy().size());    in createGradientsWithSum()
      82  new TestUtil.AutoCloseableList<>(sess.runner().feed(x, c).fetch(grads.dy(0)).run())) {    in createGradientsWithSum()
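The summing behaviour asserted at line 78 has a direct Python analogue: given several ys and one x, tf.gradients returns the gradient of the sum of the ys with respect to x:

    import tensorflow.compat.v1 as tf
    tf.disable_eager_execution()

    x = tf.placeholder(tf.float32)
    y0 = 2.0 * x
    y1 = 3.0 * x
    dy = tf.gradients([y0, y1], [x])  # single entry: d(y0 + y1)/dx
    with tf.Session() as sess:
        print(sess.run(dy, feed_dict={x: 1.0}))  # [5.0]
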
/external/tensorflow/tensorflow/python/debug/lib/

D  debug_gradients_test.py
      63  grads = gradients_impl.gradients(y, [self.u, self.v])
      64  self.assertEqual(2, len(grads))
      65  u_grad = grads[0]
      66  v_grad = grads[1]
      94  grads = gradients_impl.gradients(y, [self.u, self.v])
      95  self.assertEqual(2, len(grads))
      96  u_grad = grads[0]
      97  v_grad = grads[1]
     205  grads = gradients_impl.gradients(y, [self.u, self.v])
     206  self.assertEqual(2, len(grads))
     [all …]
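The setup these tests build on: a single gradients call over two variables yields one entry per variable, unpacked as u_grad and v_grad. A self-contained version with y = u * v:

    import tensorflow.compat.v1 as tf
    tf.disable_eager_execution()

    u = tf.Variable(2.0)
    v = tf.Variable(5.0)
    y = u * v
    grads = tf.gradients(y, [u, v])
    assert len(grads) == 2
    u_grad, v_grad = grads  # dy/du = v, dy/dv = u
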
/external/tensorflow/tensorflow/core/kernels/image/

D  crop_and_resize_op.cc
     364  const Tensor& grads = context->input(0);    in ComputeAsync() local
     373  OP_REQUIRES_ASYNC(context, grads.dims() == 4,    in ComputeAsync()
     375  grads.shape().DebugString()),    in ComputeAsync()
     377  const int crop_height = grads.dim_size(1);    in ComputeAsync()
     378  const int crop_width = grads.dim_size(2);    in ComputeAsync()
     386  context, grads.dim_size(0) == num_boxes,    in ComputeAsync()
     407  context, grads.dim_size(3) == depth,    in ComputeAsync()
     420  const Tensor& grads = context->input(0);    in ComputeAsync() local
     424  context, grads.tensor<float, 4>(), boxes.tensor<float, 2>(),    in ComputeAsync()
     447  typename TTypes<float, 4>::ConstTensor grads,    in operator ()()
     [all …]
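Driving these grad kernels from Python: differentiate tf.image.crop_and_resize with respect to both the image and the boxes. The incoming grads tensor checked above is [num_boxes, crop_height, crop_width, depth], matching the crop output; shapes here are illustrative:

    import tensorflow as tf

    image = tf.random.normal([2, 16, 16, 3])       # [batch, h, w, depth]
    boxes = tf.constant([[0.0, 0.0, 1.0, 1.0],
                         [0.2, 0.2, 0.8, 0.8]])    # normalized y1, x1, y2, x2
    box_indices = tf.constant([0, 1])

    with tf.GradientTape() as tape:
        tape.watch([image, boxes])
        crops = tf.image.crop_and_resize(image, boxes, box_indices,
                                         crop_size=[4, 4])
        loss = tf.reduce_sum(crops)
    image_grad, boxes_grad = tape.gradient(loss, [image, boxes])
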
D  crop_and_resize_op_gpu.cu.cc
     390  typename TTypes<float, 4>::ConstTensor grads,    in operator ()()
     399  const int num_boxes = grads.dimension(0);    in operator ()()
     400  const int crop_height = grads.dimension(1);    in operator ()()
     401  const int crop_width = grads.dimension(2);    in operator ()()
     402  const int depth = grads.dimension(3);    in operator ()()
     430  grads.data(), boxes.data(), box_ind.data(), num_boxes, batch,    in operator ()()
     441  typename TTypes<float, 4>::ConstTensor grads,    in operator ()()
     450  const int num_boxes = grads.dimension(0);    in operator ()()
     451  const int crop_height = grads.dimension(1);    in operator ()()
     452  const int crop_width = grads.dimension(2);    in operator ()()
     [all …]
/external/tensorflow/tensorflow/python/keras/

D  optimizer_v1.py
      99  grads = K.gradients(loss, params)
     100  if any(g is None for g in grads):
     107  grads = [clip_ops.clip_by_norm(g, self.clipnorm) for g in grads]
     109  grads = [
     111  for g in grads
     113  return grads
     197  grads = self.get_gradients(loss, params)
     208  for p, g, m in zip(params, grads, moments):
     267  grads = self.get_gradients(loss, params)
     278  for p, g, a in zip(params, grads, accumulators):
     [all …]
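The clipping applied at lines 107-111, as standalone code: clip each gradient by norm and/or by value before it reaches the update rule:

    import tensorflow as tf

    def clip_gradients(grads, clipnorm=None, clipvalue=None):
        if clipnorm is not None and clipnorm > 0:
            grads = [tf.clip_by_norm(g, clipnorm) for g in grads]
        if clipvalue is not None and clipvalue > 0:
            grads = [tf.clip_by_value(g, -clipvalue, clipvalue) for g in grads]
        return grads
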
/external/tensorflow/tensorflow/python/distribute/

D  custom_training_loop_gradient_test.py
      83  grads = tape.gradient(y, x)
      84  return grads
     110  grads = tape.gradient(y, x)
     111  return grads
     145  grads = distribution.experimental_local_results(train_step())
     146  self.assertLen(grads, distribution.num_replicas_in_sync)
     147  self.assertTrue(all(g is not None for g in grads))
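The assertion pattern at lines 145-147: run a distributed step, unwrap the per-replica result with experimental_local_results, and expect one non-None gradient per replica:

    import tensorflow as tf

    strategy = tf.distribute.MirroredStrategy()

    @tf.function
    def train_step():
        def step_fn():
            x = tf.constant(2.0)
            with tf.GradientTape() as tape:
                tape.watch(x)
                y = x * x
            return tape.gradient(y, x)
        return strategy.run(step_fn)

    grads = strategy.experimental_local_results(train_step())
    assert len(grads) == strategy.num_replicas_in_sync
    assert all(g is not None for g in grads)
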
/external/tensorflow/tensorflow/core/common_runtime/

D  gradients.cc
     102  static Node* AddSymGrad(Graph* g, Node* n, gtl::ArraySlice<NodeOut> grads) {    in AddSymGrad() argument
     105  CHECK_EQ(num_y, grads.size());    in AddSymGrad()
     122  for (const NodeOut& nout : grads) {    in AddSymGrad()
     234  auto* grads = &iter->second;    in BackpropAlongEdge() local
     235  grads->push_back(dst_grad);    in BackpropAlongEdge()
     307  const auto& grads = iter->second;    in SumGradients() local
     308  if (grads.empty() || dtype == DT_BOOL) {    in SumGradients()
     313  if (grads.size() == 1) {    in SumGradients()
     315  return grads[0];    in SumGradients()
     321  for (const NodeOut& nout : grads) {    in SumGradients()
     [all …]
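The SumGradients policy at lines 307-321, restated in Python: no partial gradients means no gradient, a single partial is returned as-is, and multiple partials are summed (AddN in the C++):

    import tensorflow as tf

    def sum_gradients(grads):
        if not grads:
            return None
        if len(grads) == 1:
            return grads[0]
        return tf.add_n(grads)  # the AddN node the C++ version emits
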
/external/tensorflow/tensorflow/python/framework/experimental/

D  unified_api_test.py
      95  grads = tape.gradient(result, [a, b])
      96  return grads
     148  grads = tape.gradient(result, t)
     149  return grads
     198  grads = tape.gradient(result, a)
     199  return grads
     244  grads = tape.gradient(result, [a, b])
     245  return grads
     293  grads = tape.gradient(result, [a, b])
     294  return grads
     [all …]
/external/tensorflow/tensorflow/compiler/mlir/tensorflow/transforms/

D  tensor_array_ops_decomposition.cc
     190  llvm::StringMap<Value> grads;    member
     407  sit->getSecond().grads.try_emplace(grad.source().str(), Value());    in HandleTensorArrayGradV3Op()
     527  const llvm::SmallDenseMap<int64_t, llvm::SmallVector<string, 4>>& grads,    in ChangeFunctionInputSignature() argument
     537  auto grad_it = grads.find(argnum);    in ChangeFunctionInputSignature()
     538  if (grad_it == grads.end()) continue;    in ChangeFunctionInputSignature()
     547  stat.grads = std::move(grads_map);    in ChangeFunctionInputSignature()
     562  auto grads = AccessedGradients({body, cond}, module);    in HandleWhileOp() local
     574  ChangeFunctionInputSignature(body, grads, ta_arg_buffer_type,    in HandleWhileOp()
     577  ChangeFunctionInputSignature(cond, grads, ta_arg_buffer_type,    in HandleWhileOp()
     596  for (const string& source : grads[i]) {    in HandleWhileOp()
     [all …]
/external/tensorflow/tensorflow/python/keras/mixed_precision/

D  loss_scale_optimizer.py
     183  def _is_all_finite(grads):    argument
     186  math_ops.reduce_all(math_ops.is_finite(g)) for g in grads if g is not None
     328  def update(self, grads):    argument
     342  grads = nest.flatten(grads)
     346  def get_is_finite(grads):    argument
     347  is_finite = _is_all_finite(grads)
     353  get_is_finite, args=(grads,))
     359  is_finite = _is_all_finite(grads)
     650  def get_unscaled_gradients(self, grads):    argument
     674  for g in grads
     [all …]
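The public face of this module is tf.keras.mixed_precision.LossScaleOptimizer, whose get_scaled_loss / get_unscaled_gradients bracket the tape exactly as the helpers above do:

    import tensorflow as tf

    optimizer = tf.keras.mixed_precision.LossScaleOptimizer(
        tf.keras.optimizers.SGD(0.1))
    v = tf.Variable([1.0, 2.0])

    with tf.GradientTape() as tape:
        loss = tf.reduce_sum(v * v)
        scaled_loss = optimizer.get_scaled_loss(loss)
    scaled_grads = tape.gradient(scaled_loss, [v])
    grads = optimizer.get_unscaled_gradients(scaled_grads)
    optimizer.apply_gradients(zip(grads, [v]))  # also updates the loss scale
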
/external/tensorflow/tensorflow/python/eager/benchmarks/resnet50/

D  hvp_test.py
      40  grads = grad_tape.gradient(loss, model.trainable_variables)
      41  return acc.jvp(grads)
      61  grads = grad_tape.gradient(loss, variables)
      62  helpers = tf.nest.map_structure(tf.ones_like, grads)
      63  transposing = tf.gradients(grads, variables, helpers)
      73  grads = inner_tape.gradient(loss, model.trainable_variables)
      75  grads, model.trainable_variables, output_gradients=vector)
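The reverse-over-reverse Hessian-vector product at lines 73-75, in self-contained form: an outer tape differentiates the inner gradients, with output_gradients supplying the vector v in H·v (the variable and vector are stand-ins for the model parameters in the benchmark):

    import tensorflow as tf

    w = tf.Variable([1.0, 2.0])
    vector = [tf.constant([1.0, 0.0])]      # the v in H·v

    with tf.GradientTape() as outer_tape:
        with tf.GradientTape() as inner_tape:
            loss = tf.reduce_sum(w * w * w)  # non-trivial Hessian: diag(6w)
        grads = inner_tape.gradient(loss, [w])
    hvp = outer_tape.gradient(grads, [w], output_gradients=vector)
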
/external/tensorflow/tensorflow/python/kernel_tests/

D  gather_nd_op_test.py
     262  grads = gradients_impl.gradients([outputs], [inputs], [grad_vals])[0]
     265  assert np.array_equal(expected_grads, self.evaluate(grads))
     274  grads = gradients_impl.gradients([outputs], [inputs], [grad_vals])[0]
     277  self.assertIndexedSlices(grads)
     278  self.assertAllEqual(expected_grads, ops.convert_to_tensor(grads))
     290  grads = gradients_impl.gradients([outputs], [inputs], [grad_vals])[0]
     294  self.assertAllEqual(expected_grads, self.evaluate(grads))
     317  grads = gradients_impl.gradients([outputs], [inputs], [grad_vals])[0]
     324  self.assertAllEqual(expected_grads, self.evaluate(grads))
     336  grads = gradients_impl.gradients([outputs], [inputs], [grad_vals])[0]
     [all …]
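The gradient path these tests pin down, minimally: differentiate gather_nd through a tape, with output_gradients standing in for the explicit grad_vals the kernel test feeds; the values scatter back to the gathered positions:

    import tensorflow as tf

    inputs = tf.constant([[1.0, 2.0], [3.0, 4.0]])
    indices = tf.constant([[0, 0], [1, 1]])

    with tf.GradientTape() as tape:
        tape.watch(inputs)
        outputs = tf.gather_nd(inputs, indices)
    grads = tape.gradient(outputs, inputs,
                          output_gradients=tf.constant([7.0, 9.0]))
    # grads scatters the incoming values back: [[7., 0.], [0., 9.]]
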
D  dynamic_stitch_op_test.py
     169  grads = gradients_impl.gradients(stitched_t, indices + data,
     171  self.assertEqual(grads[:3], [None] * 3)  # Indices have no gradients
     172  for datum, grad in zip(data, self.evaluate(grads[3:])):
     272  grads = gradients_impl.gradients(stitched_t, indices + data,
     274  self.assertEqual(grads[:3], [None] * 3)  # Indices have no gradients
     275  for datum, grad in zip(data, self.evaluate(grads[3:])):
     309  grads = gradients_impl.gradients(stitched_t, indices + data,
     311  self.assertEqual(grads[:3], [None] * 3)  # Indices have no gradients
     312  for datum, grad in zip(data, self.evaluate(grads[3:])):
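The None-gradient behaviour asserted at lines 171, 274 and 311: integer index tensors are not differentiable, so their slots in the returned list are None while the data tensors get real gradients:

    import tensorflow.compat.v1 as tf
    tf.disable_eager_execution()

    indices = [tf.constant([0, 1]), tf.constant([2, 3])]
    data = [tf.constant([1.0, 2.0]), tf.constant([3.0, 4.0])]
    stitched = tf.dynamic_stitch(indices, data)
    grads = tf.gradients(stitched, indices + data)
    assert grads[:2] == [None, None]          # indices have no gradients
    assert all(g is not None for g in grads[2:])
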
/external/tensorflow/tensorflow/python/eager/

D  backprop.py
     598  def aggregate_indexed_slices_gradients(grads):    argument
     600  if len(grads) < 1:
     602  if len(grads) == 1:
     603  return grads[0]
     604  grads = [g for g in grads if g is not None]
     607  if any(isinstance(g, ops.Tensor) for g in grads):
     608  return math_ops.add_n(grads)
     613  grads = math_ops._as_indexed_slices_list(grads)  # pylint: disable=protected-access
     615  grads = [flatten_nested_indexed_slices(x) for x in grads]
     618  array_ops.concat([x.values for x in grads], axis=0),
     [all …]
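The fast path of aggregate_indexed_slices_gradients, sketched: when every partial gradient is an IndexedSlices, concatenate values and indices rather than densifying; a mixed dense/sparse list falls back to add_n, as at line 608. Names are illustrative:

    import tensorflow as tf

    def aggregate_indexed_slices(grads):
        grads = [g for g in grads if g is not None]
        if not grads:
            return None
        if len(grads) == 1:
            return grads[0]
        if any(isinstance(g, tf.Tensor) for g in grads):
            return tf.add_n(grads)  # mixed dense/sparse: dense sum
        return tf.IndexedSlices(
            values=tf.concat([g.values for g in grads], axis=0),
            indices=tf.concat([g.indices for g in grads], axis=0),
            dense_shape=grads[0].dense_shape)
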
/external/tensorflow/tensorflow/core/ops/compat/ops_history_v2/

D  ResizeBilinearGrad.pbtxt
       4  name: "grads"
      37  name: "grads"
      71  name: "grads"
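A consumer's view of this op history: ResizeBilinearGrad, with its "grads" input, is what backprop invokes when a bilinear resize is differentiated, as in this minimal tape example (assuming the default non-antialiased bilinear path):

    import tensorflow as tf

    image = tf.random.normal([1, 4, 4, 3])
    with tf.GradientTape() as tape:
        tape.watch(image)
        resized = tf.image.resize(image, [8, 8], method="bilinear")
        loss = tf.reduce_sum(resized)
    image_grad = tape.gradient(loss, image)  # backed by ResizeBilinearGrad
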