Searched refs:adagrad (Results 1 – 25 of 40) sorted by relevance
/external/tensorflow/tensorflow/python/training/ |
D | adagrad_test.py |
      34   from tensorflow.python.training import adagrad
      57   ada_opt = adagrad.AdagradOptimizer(
     108   sgd_op = adagrad.AdagradOptimizer(1.0).minimize(loss)
     128   ada_opt = adagrad.AdagradOptimizer(
     163   ada_opt = adagrad.AdagradOptimizer(3.0, initial_accumulator_value=0.1)
     197   repeated_update = adagrad.AdagradOptimizer(3.0).apply_gradients(
     199   aggregated_update = adagrad.AdagradOptimizer(3.0).apply_gradients(
     222   update_op_repeated = adagrad.AdagradOptimizer(
     224   update_op_aggregated = adagrad.AdagradOptimizer(
     256   ada_opt = adagrad.AdagradOptimizer(1.0, initial_accumulator_value=0.1)
     [all …]
|
D | proximal_adagrad_test.py |
      31   from tensorflow.python.training import adagrad
     226   adagrad.AdagradOptimizer(
     245   adagrad.AdagradOptimizer(
|
D | ftrl_test.py |
      31   from tensorflow.python.training import adagrad
     433   adagrad.AdagradOptimizer(3.0, initial_accumulator_value=0.1),
     457   adagrad.AdagradOptimizer(3.0, initial_accumulator_value=0.1),
|
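The tensorflow/python/training matches above all exercise the TF1 AdagradOptimizer class. For orientation only (not part of the search results), a minimal graph-mode sketch using the same constructor arguments seen in the matched calls; the variable and loss here are illustrative:

    import tensorflow.compat.v1 as tf
    tf.disable_eager_execution()

    var = tf.Variable([1.0, 2.0])
    loss = tf.reduce_sum(var * var)  # illustrative loss

    # Same signature as the matched calls: learning_rate, initial_accumulator_value.
    opt = tf.train.AdagradOptimizer(3.0, initial_accumulator_value=0.1)
    train_op = opt.minimize(loss)

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        sess.run(train_op)  # accumulates grad^2 and applies the Adagrad update
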
/external/tensorflow/tensorflow/python/keras/optimizer_v2/ |
D | adagrad_test.py |
      32   from tensorflow.python.keras.optimizer_v2 import adagrad
      89   ada_opt = adagrad.Adagrad(learning_rate)
     139   ada_opt = adagrad.Adagrad(learning_rate, decay=decay)
     180   ada_opt = adagrad.Adagrad(learning_rate, epsilon=1.0)
     223   ada_opt = adagrad.Adagrad(lr_schedule)
     263   sgd_op = adagrad.Adagrad(1.0).minimize(loss, var_list=[var0])
     289   ada_opt = adagrad.Adagrad(learning_rate)
     328   ada_opt = adagrad.Adagrad(learning_rate)
     366   ada_opt = adagrad.Adagrad(learning_rate, epsilon=1.)
     404   repeated_update = adagrad.Adagrad(3.0).apply_gradients([
     [all …]
|
D | ftrl_test.py |
      31   from tensorflow.python.training import adagrad
     420   adagrad.AdagradOptimizer(3.0, initial_accumulator_value=0.1), dtype)
     442   adagrad.AdagradOptimizer(3.0, initial_accumulator_value=0.1),
|
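The keras/optimizer_v2 matches use the V2 Adagrad class instead, configured with keyword arguments (learning_rate, epsilon, a learning-rate schedule) rather than the V1 positional style. A hedged sketch of the public equivalent, tf.keras.optimizers.Adagrad; the model and values are illustrative:

    import tensorflow as tf

    model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=(4,))])

    # Keyword arguments mirror those in the matched test lines above.
    opt = tf.keras.optimizers.Adagrad(learning_rate=0.1,
                                      initial_accumulator_value=0.1,
                                      epsilon=1e-7)
    model.compile(optimizer=opt, loss="mse")
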
/external/tensorflow/tensorflow/compiler/tests/ |
D | adagrad_test.py |
      28   from tensorflow.python.training import adagrad
      40   ada_opt = adagrad.AdagradOptimizer(3.0, initial_accumulator_value=0.1)
      67   ada_opt = adagrad.AdagradOptimizer(
      95   ada_opt = adagrad.AdagradOptimizer(3.0)
|
D | proximal_adagrad_test.py |
      28   from tensorflow.python.training import adagrad
     166   adagrad.AdagradOptimizer(
|
/external/tensorflow/tensorflow/python/keras/distribute/ |
D | optimizer_combinations.py |
      23   from tensorflow.python.keras.optimizer_v2 import adagrad as adagrad_keras_v2
      30   from tensorflow.python.training import adagrad
      41   "AdagradV1", lambda: adagrad.AdagradOptimizer(0.001))
|
D | keras_premade_models_test.py |
      29   from tensorflow.python.keras.optimizer_v2 import adagrad
      89   dnn_opt = adagrad.Adagrad(learning_rate=0.1)
|
/external/tensorflow/tensorflow/python/keras/layers/ |
D | embeddings_test.py |
      35   from tensorflow.python.training import adagrad
     107   opt = adagrad.AdagradOptimizer(0.1)
|
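The embeddings test matched above is where Adagrad meets sparse gradients: an Embedding lookup produces IndexedSlices gradients, which route to the SparseApplyAdagrad kernels listed in the next group rather than the dense ops. A hedged Keras sketch; layer sizes are illustrative:

    import tensorflow as tf

    model = tf.keras.Sequential([
        tf.keras.layers.Embedding(input_dim=100, output_dim=8),  # gradients are IndexedSlices
        tf.keras.layers.GlobalAveragePooling1D(),
        tf.keras.layers.Dense(1),
    ])
    # Only the embedding rows actually looked up in a batch get their
    # accumulator and variable entries updated.
    model.compile(optimizer=tf.keras.optimizers.Adagrad(0.1), loss="mse")
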
/external/tensorflow/tensorflow/core/api_def/base_api/ |
D | api_def_ResourceApplyAdagrad.pbtxt | 35 summary: "Update \'*var\' according to the adagrad scheme."
|
D | api_def_ResourceApplyAdagradV2.pbtxt | 42 summary: "Update \'*var\' according to the adagrad scheme."
|
D | api_def_ApplyAdagrad.pbtxt | 41 summary: "Update \'*var\' according to the adagrad scheme."
|
D | api_def_ApplyAdagradV2.pbtxt | 48 summary: "Update \'*var\' according to the adagrad scheme."
|
D | api_def_ResourceSparseApplyAdagrad.pbtxt | 41 summary: "Update relevant entries in \'*var\' and \'*accum\' according to the adagrad scheme."
|
D | api_def_ResourceApplyAdagradDA.pbtxt | 58 summary: "Update \'*var\' according to the proximal adagrad scheme."
|
D | api_def_ApplyAdagradDA.pbtxt | 64 summary: "Update \'*var\' according to the proximal adagrad scheme."
|
D | api_def_SparseApplyAdagrad.pbtxt | 47 summary: "Update relevant entries in \'*var\' and \'*accum\' according to the adagrad scheme."
|
D | api_def_ResourceSparseApplyAdagradV2.pbtxt | 48 summary: "Update relevant entries in \'*var\' and \'*accum\' according to the adagrad scheme."
|
D | api_def_ResourceSparseApplyAdagradDA.pbtxt | 64 summary: "Update entries in \'*var\' and \'*accum\' according to the proximal adagrad scheme."
|
D | api_def_SparseApplyAdagradV2.pbtxt | 54 summary: "Update relevant entries in \'*var\' and \'*accum\' according to the adagrad scheme."
|
D | api_def_SparseApplyAdagradDA.pbtxt | 70 summary: "Update entries in \'*var\' and \'*accum\' according to the proximal adagrad scheme."
|
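Every api_def summary above describes a variant of one update rule. In the dense case (ApplyAdagrad / ApplyAdagradV2, where V2 adds the epsilon term) the scheme is: accum += grad^2, then var -= lr * grad / (sqrt(accum) + epsilon). A plain NumPy sketch with illustrative values; the Sparse* variants apply the same arithmetic only to the indexed rows of var and accum:

    import numpy as np

    def apply_adagrad_v2(var, accum, grad, lr=0.1, epsilon=1e-7):
        # accum += grad^2
        accum += grad * grad
        # var -= lr * grad / (sqrt(accum) + epsilon)
        var -= lr * grad / (np.sqrt(accum) + epsilon)
        return var, accum

    var = np.array([1.0, 2.0])
    accum = np.full_like(var, 0.1)   # initial_accumulator_value
    grad = np.array([0.5, -0.5])
    var, accum = apply_adagrad_v2(var, accum, grad)
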
/external/tensorflow/tensorflow/python/keras/ |
D | optimizers.py | 30 from tensorflow.python.keras.optimizer_v2 import adagrad as adagrad_v2
|
/external/tensorflow/tensorflow/tools/api/golden/v1/ |
D | tensorflow.train.-adagrad-optimizer.pbtxt | 3 is_instance: "<class \'tensorflow.python.training.adagrad.AdagradOptimizer\'>"
|
/external/tensorflow/tensorflow/tools/api/golden/v2/ |
D | tensorflow.keras.optimizers.-adagrad.pbtxt | 3 is_instance: "<class \'tensorflow.python.keras.optimizer_v2.adagrad.Adagrad\'>"
|