/external/llvm-project/lldb/source/Symbol/ |
D | VariableList.cpp |
    103  size_t VariableList::AppendVariablesIfUnique(VariableList &var_list) {  in AppendVariablesIfUnique() argument
    104    const size_t initial_size = var_list.GetSize();  in AppendVariablesIfUnique()
    107    var_list.AddVariableIfUnique(*pos);  in AppendVariablesIfUnique()
    108    return var_list.GetSize() - initial_size;  in AppendVariablesIfUnique()
    112      VariableList &var_list,  in AppendVariablesIfUnique() argument
    114    const size_t initial_size = var_list.GetSize();  in AppendVariablesIfUnique()
    121    var_list.AddVariableIfUnique(*pos);  in AppendVariablesIfUnique()
    125    return var_list.GetSize() - initial_size;  in AppendVariablesIfUnique()
    129      VariableList &var_list,  in AppendVariablesWithScope() argument
    131    const size_t initial_size = var_list.GetSize();  in AppendVariablesWithScope()
    [all …]
|
/external/tensorflow/tensorflow/python/training/ |
D | optimizer.py |
    363    def minimize(self, loss, global_step=None, var_list=None,  argument
    408          loss, var_list=var_list, gate_gradients=gate_gradients,
    423    def compute_gradients(self, loss, var_list=None,  argument
    468          if var_list is not None:
    469            tape.watch(var_list)
    478        if var_list is None:
    479          var_list = tape.watched_variables()
    483        grads = tape.gradient(loss_value, var_list, grad_loss)
    484        return list(zip(grads, var_list))
    503      if var_list is None:
    [all …]
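In the TF1 Optimizer, var_list is optional: minimize() forwards it to compute_gradients(), which falls back to the trainable variables (or, under an eager tape, the watched variables) when it is None. A minimal graph-mode sketch that restricts the update to a single variable, assuming the tf.compat.v1 API (the variables w, b and the learning rate are illustrative):

    import tensorflow as tf
    tf.compat.v1.disable_eager_execution()

    w = tf.Variable(3.0, name="w")
    b = tf.Variable(1.0, name="b")
    loss = (w * 2.0 + b - 5.0) ** 2

    opt = tf.compat.v1.train.GradientDescentOptimizer(learning_rate=0.1)
    # Only `w` receives a gradient update; `b` is left out of var_list.
    train_op = opt.minimize(loss, var_list=[w])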
|
D | moving_averages.py |
    403    def apply(self, var_list=None):  argument
    435      if var_list is None:
    436        var_list = variables.trainable_variables()
    437      for v in var_list:
    443      for var in var_list:
    488      for var in var_list:
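ExponentialMovingAverage.apply(var_list) creates one shadow variable per entry and returns the update op; with var_list=None it defaults to the trainable variables (line 436). A small eager-mode sketch, assuming the tf.compat.v1 API:

    import tensorflow as tf

    v = tf.Variable(0.0)
    ema = tf.compat.v1.train.ExponentialMovingAverage(decay=0.99)
    ema.apply(var_list=[v])    # creates and updates the shadow variable for `v`
    shadow = ema.average(v)    # read back the moving average of `v`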
|
/external/tensorflow/tensorflow/python/keras/mixed_precision/ |
D | loss_scale_benchmark.py |
    98       var_list = [
    102      return math_ops.add_n(var_list)
    109        grads = tape.gradient(loss, var_list)
    110        return opt.apply_gradients(zip(grads, var_list))
    116        scaled_grads = tape.gradient(scaled_loss, var_list)
    118        return opt.apply_gradients(zip(grads, var_list))
    122        return opt.minimize(get_loss, var_list)
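The benchmark contrasts a plain step with manual loss scaling: multiply the loss before differentiation, divide the gradients back out, then apply_gradients. A condensed eager-mode sketch of that pattern (the fixed scale of 1024 is an illustrative choice):

    import tensorflow as tf

    var_list = [tf.Variable(1.0), tf.Variable(2.0)]
    opt = tf.keras.optimizers.SGD(0.1)
    loss_scale = 1024.0

    with tf.GradientTape() as tape:
        loss = tf.add_n(var_list)
        scaled_loss = loss * loss_scale
    scaled_grads = tape.gradient(scaled_loss, var_list)
    grads = [g / loss_scale for g in scaled_grads]  # undo the scaling
    opt.apply_gradients(zip(grads, var_list))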
|
/external/tensorflow/tensorflow/python/keras/optimizer_v2/ |
D | optimizer_v2.py |
    474    def _get_gradients(self, tape, loss, var_list, grad_loss=None):  argument
    476      grads = tape.gradient(loss, var_list, grad_loss)
    477      return list(zip(grads, var_list))
    500    def minimize(self, loss, var_list, grad_loss=None, name=None, tape=None):  argument
    532          loss, var_list=var_list, grad_loss=grad_loss, tape=tape)
    535    def _compute_gradients(self, loss, var_list, grad_loss=None, tape=None):  argument
    572        if not callable(var_list):
    573          tape.watch(var_list)
    575        if callable(var_list):
    576          var_list = var_list()
    [all …]
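In Keras OptimizerV2, both loss and var_list may be callables; a callable var_list is invoked only after the loss has been evaluated (lines 575-576), so it can pick up variables that are created lazily on the first forward pass. A minimal sketch (model, input shape, and learning rate are illustrative):

    import tensorflow as tf

    model = tf.keras.Sequential([tf.keras.layers.Dense(1)])
    opt = tf.keras.optimizers.SGD(0.1)
    x = tf.ones((4, 3))

    loss_fn = lambda: tf.reduce_sum(model(x) ** 2)
    # A callable var_list defers the variable lookup until after the first
    # forward pass has created the Dense kernel and bias.
    opt.minimize(loss_fn, var_list=lambda: model.trainable_variables)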
|
D | nadam.py |
    90     def _create_slots(self, var_list):  argument
    91       var_dtype = var_list[0].dtype.base_dtype
    102      for var in var_list:
    105      for var in var_list:
    145    def _prepare(self, var_list):  argument
    148      return super(Nadam, self)._prepare(var_list)
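Nadam, like the Adam-family entries below, overrides _create_slots to allocate per-variable accumulators (the 'm' and 'v' slots) before the first update. A hedged sketch of the same hook in a custom subclass, assuming the OptimizerV2 base class used in this tree (tf.keras.optimizers.Optimizer before the TF 2.11 rewrite); the update rule here is a deliberately simple momentum step, not Nadam's:

    import tensorflow as tf

    class MyOptimizer(tf.keras.optimizers.Optimizer):
        def __init__(self, learning_rate=0.01, name="MyOptimizer", **kwargs):
            super().__init__(name, **kwargs)
            self._set_hyper("learning_rate", learning_rate)

        def _create_slots(self, var_list):
            # One 'm' and one 'v' slot per variable, zero-initialized,
            # mirroring the two loops in the Nadam/Adam sources above.
            for var in var_list:
                self.add_slot(var, "m")
            for var in var_list:
                self.add_slot(var, "v")

        def _resource_apply_dense(self, grad, var, apply_state=None):
            m = self.get_slot(var, "m")
            m.assign(0.9 * m + grad)  # plain momentum accumulator
            lr = self._get_hyper("learning_rate", var.dtype)
            return var.assign_sub(lr * m)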
|
D | adam.py |
    123    def _create_slots(self, var_list):  argument
    126      for var in var_list:
    128      for var in var_list:
    131      for var in var_list:
    375    def _create_slots(self, var_list):  argument
    378      for var in var_list:
    380      for var in var_list:
    383      for var in var_list:
|
D | adadelta.py |
    93     def _create_slots(self, var_list):  argument
    95       for v in var_list:
    97       for v in var_list:
|
D | adamax.py |
    106    def _create_slots(self, var_list):  argument
    108      for var in var_list:
    110      for var in var_list:
|
D | rmsprop.py |
    153    def _create_slots(self, var_list):  argument
    154      for var in var_list:
    157      for var in var_list:
    160      for var in var_list:
|
/external/tensorflow/tensorflow/python/framework/ |
D | meta_graph_test.py |
    90       meta_graph_def, var_list = meta_graph.export_scoped_meta_graph(
    99       self.assertEqual({}, var_list)
    330      var_list = graph2.get_collection(ops.GraphKeys.METRIC_VARIABLES)
    331      self.assertEqual(len(var_list), 1)
    332      v2 = var_list[0]
    403      orig_meta_graph1, var_list = meta_graph.export_scoped_meta_graph(
    407      self.assertEqual(["biases:0", "weights:0"], sorted(var_list.keys()))
    408      var_names = [v.name for _, v in var_list.items()]
    447      var_list = meta_graph.import_scoped_meta_graph(
    453      self.assertEqual(["biases:0", "weights:0"], sorted(var_list.keys()))
    [all …]
|
D | meta_graph.py |
    899    var_list = {}
    903      var_list[ops.strip_name_scope(v.name, scope_to_prepend_to_names)] = v
    905    return var_list, imported_return_elements
    1026   var_list = {}
    1031       var_list[ops.strip_name_scope(v.name, export_scope)] = v
    1070   return scoped_meta_graph_def, var_list
    1099   orig_meta_graph, var_list = export_scoped_meta_graph(
    1101   var_list = import_scoped_meta_graph(orig_meta_graph,
    1104   return var_list
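Here var_list is a dict mapping scope-stripped variable names to variable objects rather than a plain list, which is why the test above can assert on sorted(var_list.keys()). A hedged sketch against the internal module; tensorflow.python.framework.meta_graph is not a public API, so the exact signature may differ across versions:

    import tensorflow as tf
    from tensorflow.python.framework import meta_graph

    graph = tf.Graph()
    with graph.as_default(), tf.compat.v1.variable_scope("hidden1"):
        tf.compat.v1.get_variable("weights", shape=[2, 2])
        tf.compat.v1.get_variable("biases", shape=[2])

    # Returns (MetaGraphDef, {scope-stripped name -> variable}), per line 1070.
    meta_graph_def, var_list = meta_graph.export_scoped_meta_graph(
        graph=graph, export_scope="hidden1")
    print(sorted(var_list.keys()))  # ['biases:0', 'weights:0']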
|
/external/llvm-project/lldb/include/lldb/Symbol/ |
D | VariableList.h |
    50     size_t AppendVariablesIfUnique(VariableList &var_list);
    58                                    VariableList &var_list, size_t &total_matches);
    60     size_t AppendVariablesWithScope(lldb::ValueType type, VariableList &var_list,
|
/external/tensorflow/tensorflow/python/ops/ |
D | variables.py |
    3251   def variables_initializer(var_list, name="init"):  argument
    3271     if var_list and not context.executing_eagerly():
    3272       return control_flow_ops.group(*[v.initializer for v in var_list], name=name)
    3279   def initialize_variables(var_list, name="init"):  argument
    3281     return variables_initializer(var_list, name=name)
    3345   def assert_variables_initialized(var_list=None):  argument
    3365     if var_list is None:
    3366       var_list = global_variables() + local_variables()
    3368     if not var_list:
    3369       var_list = []
    [all …]
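variables_initializer(var_list) groups the initializer ops of exactly the listed variables and is a no-op under eager execution, where variables initialize on creation; initialize_variables (line 3279) is just a deprecated alias for it. A graph-mode sketch:

    import tensorflow as tf
    tf.compat.v1.disable_eager_execution()

    a = tf.Variable(1.0)
    b = tf.Variable(2.0)
    init_ab = tf.compat.v1.variables_initializer([a, b], name="init_ab")

    with tf.compat.v1.Session() as sess:
        sess.run(init_ab)       # initializes only `a` and `b`
        print(sess.run(a + b))  # 3.0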
|
/external/tensorflow/tensorflow/python/tpu/ |
D | tpu_optimizer.py |
    111    def compute_gradients(self, loss, var_list=None, **kwargs):  argument
    161      return self._opt.compute_gradients(loss, var_list=var_list, **kwargs)
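CrossShardOptimizer is a thin wrapper: compute_gradients delegates to the wrapped optimizer with var_list passed through untouched (line 161), adding only the TPU cross-shard handling around the base optimizer. A construction-only sketch, assuming the tf.compat.v1 TPU API; the compute_gradients call is commented out because it needs a real loss tensor and a TPU context:

    import tensorflow as tf

    base_opt = tf.compat.v1.train.GradientDescentOptimizer(0.1)
    opt = tf.compat.v1.tpu.CrossShardOptimizer(base_opt)
    # grads_and_vars = opt.compute_gradients(loss, var_list=my_vars)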
|
/external/mesa3d/src/gallium/drivers/r300/compiler/ |
D | radeon_variable.h |
    83                      struct rc_list * var_list,
    88                      struct rc_list * var_list,
|
/external/python/uritemplates/uritemplate/ |
D | variable.py |
    87         var_list = self.original
    90         var_list = self.original[1:]
    95         var_list = var_list.split(',')
    97         for var in var_list:
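In uritemplate, var_list is the comma-separated variable list inside a single template expression: for '{?q,page}' the operator character is stripped (line 90) and the remainder splits on ',' (line 95). A usage sketch with the uritemplate package (URL and variable names are illustrative):

    from uritemplate import URITemplate

    t = URITemplate('https://example.com/search{?q,page}')
    print(sorted(t.variable_names))    # ['page', 'q']
    print(t.expand(q='lldb', page=2))  # https://example.com/search?q=lldb&page=2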
|
/external/tensorflow/tensorflow/python/training/experimental/ |
D | loss_scale_optimizer_test.py |
    128      return lambda: opt.minimize(loss, var_list=[var])
    215      run_fn = lambda: opt.minimize(loss, var_list=[var])
    226      run_fn = lambda: opt.minimize(loss, var_list=[var])
    249      run_fn = lambda: opt.minimize(loss, var_list=[var])
    287      run_fn = lambda: opt.minimize(lambda: var + 1., var_list=[var])
|
D | loss_scale_optimizer.py |
    86                  var_list=None,  argument
    121                var_list=var_list,
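MixedPrecisionLossScaleOptimizer keeps the TF1 Optimizer interface, so the tests above pass var_list=[var] straight through minimize(); internally it scales the loss, unscales the gradients, and skips updates whose gradients are not finite. A hedged eager-mode sketch, assuming the tf.compat.v1.train.experimental API that this file backs (mirroring test line 287):

    import tensorflow as tf

    var = tf.Variable(1.0)
    base_opt = tf.compat.v1.train.GradientDescentOptimizer(1.0)
    opt = tf.compat.v1.train.experimental.MixedPrecisionLossScaleOptimizer(
        base_opt, loss_scale='dynamic')
    opt.minimize(lambda: var + 1., var_list=[var])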
|
/external/mesa3d/src/gallium/drivers/lima/standalone/ |
D | lima_compiler_cmdline.c |
    49     insert_sorted(struct exec_list *var_list, nir_variable *new_var)  in insert_sorted() argument
    51        nir_foreach_variable_in_list(var, var_list) {  in insert_sorted()
    58        exec_list_push_tail(var_list, &new_var->node);  in insert_sorted()
|
/external/tensorflow/tensorflow/tools/api/golden/v1/ |
D | tensorflow.train.-optimizer.pbtxt |
    28     argspec: "args=[\'self\', \'loss\', \'var_list\', \'gate_gradients\', \'aggregation_method\', \'co…
    44     argspec: "args=[\'self\', \'loss\', \'global_step\', \'var_list\', \'gate_gradients\', \'aggregati…
|
D | tensorflow.train.-gradient-descent-optimizer.pbtxt |
    29     argspec: "args=[\'self\', \'loss\', \'var_list\', \'gate_gradients\', \'aggregation_method\', \'co…
    45     argspec: "args=[\'self\', \'loss\', \'global_step\', \'var_list\', \'gate_gradients\', \'aggregati…
|
D | tensorflow.train.-proximal-gradient-descent-optimizer.pbtxt |
    29     argspec: "args=[\'self\', \'loss\', \'var_list\', \'gate_gradients\', \'aggregation_method\', \'co…
    45     argspec: "args=[\'self\', \'loss\', \'global_step\', \'var_list\', \'gate_gradients\', \'aggregati…
|
D | tensorflow.train.-adadelta-optimizer.pbtxt |
    29     argspec: "args=[\'self\', \'loss\', \'var_list\', \'gate_gradients\', \'aggregation_method\', \'co…
    45     argspec: "args=[\'self\', \'loss\', \'global_step\', \'var_list\', \'gate_gradients\', \'aggregati…
|
/external/tensorflow/tensorflow/python/keras/ |
D | optimizer_v1.py |
    766    def minimize(self, loss, var_list, grad_loss=None, tape=None):  argument
    774        if not callable(var_list):
    775          tape.watch(var_list)
    777      if callable(var_list):
    778        var_list = var_list()
    780      var_list = nest.flatten(var_list)
    781      if var_list:
    782        grads = tape.gradient(loss, var_list, grad_loss)
    783        grads_and_vars = list(zip(grads, var_list))
|