Searched refs:adagrad (Results 1 – 25 of 40) sorted by relevance

/external/tensorflow/tensorflow/python/training/
adagrad_test.py
34 from tensorflow.python.training import adagrad
57 ada_opt = adagrad.AdagradOptimizer(
108 sgd_op = adagrad.AdagradOptimizer(1.0).minimize(loss)
128 ada_opt = adagrad.AdagradOptimizer(
163 ada_opt = adagrad.AdagradOptimizer(3.0, initial_accumulator_value=0.1)
197 repeated_update = adagrad.AdagradOptimizer(3.0).apply_gradients(
199 aggregated_update = adagrad.AdagradOptimizer(3.0).apply_gradients(
222 update_op_repeated = adagrad.AdagradOptimizer(
224 update_op_aggregated = adagrad.AdagradOptimizer(
256 ada_opt = adagrad.AdagradOptimizer(1.0, initial_accumulator_value=0.1)
[all …]
proximal_adagrad_test.py
31 from tensorflow.python.training import adagrad
226 adagrad.AdagradOptimizer(
245 adagrad.AdagradOptimizer(
ftrl_test.py
31 from tensorflow.python.training import adagrad
433 adagrad.AdagradOptimizer(3.0, initial_accumulator_value=0.1),
457 adagrad.AdagradOptimizer(3.0, initial_accumulator_value=0.1),
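These tests all exercise the V1 optimizer class referenced above. A minimal usage sketch, assuming the tf.compat.v1 graph-mode API (the variable and loss are illustrative, not taken from the tests):

    import tensorflow.compat.v1 as tf
    tf.disable_v2_behavior()

    var = tf.Variable([1.0, 2.0])
    loss = tf.reduce_sum(var * var)  # toy quadratic loss
    # Same constructor arguments the test matches above use.
    opt = tf.train.AdagradOptimizer(3.0, initial_accumulator_value=0.1)
    train_op = opt.minimize(loss)

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        sess.run(train_op)  # one Adagrad update step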
/external/tensorflow/tensorflow/python/keras/optimizer_v2/
adagrad_test.py
32 from tensorflow.python.keras.optimizer_v2 import adagrad
89 ada_opt = adagrad.Adagrad(learning_rate)
139 ada_opt = adagrad.Adagrad(learning_rate, decay=decay)
180 ada_opt = adagrad.Adagrad(learning_rate, epsilon=1.0)
223 ada_opt = adagrad.Adagrad(lr_schedule)
263 sgd_op = adagrad.Adagrad(1.0).minimize(loss, var_list=[var0])
289 ada_opt = adagrad.Adagrad(learning_rate)
328 ada_opt = adagrad.Adagrad(learning_rate)
366 ada_opt = adagrad.Adagrad(learning_rate, epsilon=1.)
404 repeated_update = adagrad.Adagrad(3.0).apply_gradients([
[all …]
ftrl_test.py
31 from tensorflow.python.training import adagrad
420 adagrad.AdagradOptimizer(3.0, initial_accumulator_value=0.1), dtype)
442 adagrad.AdagradOptimizer(3.0, initial_accumulator_value=0.1),
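The optimizer_v2 class in this directory is what tf.keras.optimizers.Adagrad resolves to in this tree. Unlike the V1 class it accepts an epsilon and a learning-rate schedule, both visible in the adagrad_test.py matches above. A minimal eager-mode sketch of the pattern those tests exercise (names are illustrative):

    import tensorflow as tf

    var0 = tf.Variable([1.0, 2.0])
    ada_opt = tf.keras.optimizers.Adagrad(
        learning_rate=3.0, initial_accumulator_value=0.1, epsilon=1e-7)

    with tf.GradientTape() as tape:
        loss = tf.reduce_sum(var0 * var0)
    grads = tape.gradient(loss, [var0])
    ada_opt.apply_gradients(zip(grads, [var0]))  # one Adagrad update step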
/external/tensorflow/tensorflow/compiler/tests/
adagrad_test.py
28 from tensorflow.python.training import adagrad
40 ada_opt = adagrad.AdagradOptimizer(3.0, initial_accumulator_value=0.1)
67 ada_opt = adagrad.AdagradOptimizer(
95 ada_opt = adagrad.AdagradOptimizer(3.0)
proximal_adagrad_test.py
28 from tensorflow.python.training import adagrad
166 adagrad.AdagradOptimizer(
/external/tensorflow/tensorflow/python/keras/distribute/
optimizer_combinations.py
23 from tensorflow.python.keras.optimizer_v2 import adagrad as adagrad_keras_v2
30 from tensorflow.python.training import adagrad
41 "AdagradV1", lambda: adagrad.AdagradOptimizer(0.001))
keras_premade_models_test.py
29 from tensorflow.python.keras.optimizer_v2 import adagrad
89 dnn_opt = adagrad.Adagrad(learning_rate=0.1)
/external/tensorflow/tensorflow/python/keras/layers/
embeddings_test.py
35 from tensorflow.python.training import adagrad
107 opt = adagrad.AdagradOptimizer(0.1)
/external/tensorflow/tensorflow/core/api_def/base_api/
api_def_ResourceApplyAdagrad.pbtxt
35 summary: "Update '*var' according to the adagrad scheme."
api_def_ResourceApplyAdagradV2.pbtxt
42 summary: "Update '*var' according to the adagrad scheme."
api_def_ApplyAdagrad.pbtxt
41 summary: "Update '*var' according to the adagrad scheme."
api_def_ApplyAdagradV2.pbtxt
48 summary: "Update '*var' according to the adagrad scheme."
api_def_ResourceSparseApplyAdagrad.pbtxt
41 summary: "Update relevant entries in '*var' and '*accum' according to the adagrad scheme."
api_def_ResourceApplyAdagradDA.pbtxt
58 summary: "Update '*var' according to the proximal adagrad scheme."
api_def_ApplyAdagradDA.pbtxt
64 summary: "Update '*var' according to the proximal adagrad scheme."
api_def_SparseApplyAdagrad.pbtxt
47 summary: "Update relevant entries in '*var' and '*accum' according to the adagrad scheme."
api_def_ResourceSparseApplyAdagradV2.pbtxt
48 summary: "Update relevant entries in '*var' and '*accum' according to the adagrad scheme."
api_def_ResourceSparseApplyAdagradDA.pbtxt
64 summary: "Update entries in '*var' and '*accum' according to the proximal adagrad scheme."
api_def_SparseApplyAdagradV2.pbtxt
54 summary: "Update relevant entries in '*var' and '*accum' according to the adagrad scheme."
api_def_SparseApplyAdagradDA.pbtxt
70 summary: "Update entries in '*var' and '*accum' according to the proximal adagrad scheme."
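These kernels all apply the accumulator rule the summaries call the adagrad scheme. A NumPy sketch of that rule, assuming the standard update (the *AdagradV2 ops add an epsilon inside the division; the Sparse* ops touch only the indexed rows). Both functions mutate var and accum in place, mirroring the ops' update of '*var' and '*accum':

    import numpy as np

    def apply_adagrad(var, accum, grad, lr, epsilon=0.0):
        # accum += grad^2; var -= lr * grad / sqrt(accum).
        # epsilon > 0 matches the *AdagradV2 ops, which divide by
        # (sqrt(accum) + epsilon) instead of sqrt(accum).
        accum += grad * grad
        var -= lr * grad / (np.sqrt(accum) + epsilon)

    def sparse_apply_adagrad(var, accum, grad, indices, lr, epsilon=0.0):
        # Update only the rows named by `indices`. Duplicate indices
        # should be summed into one gradient first (e.g. np.add.at),
        # which is the aggregation behavior the repeated-update tests check.
        accum[indices] += grad * grad
        var[indices] -= lr * grad / (np.sqrt(accum[indices]) + epsilon)

The AdagradDA ops implement the dual-averaging ("proximal adagrad") variant with L1/L2 regularization from Duchi et al.'s adaptive subgradient paper and are not sketched here.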
/external/tensorflow/tensorflow/python/keras/
optimizers.py
30 from tensorflow.python.keras.optimizer_v2 import adagrad as adagrad_v2
/external/tensorflow/tensorflow/tools/api/golden/v1/
tensorflow.train.-adagrad-optimizer.pbtxt
3 is_instance: "<class 'tensorflow.python.training.adagrad.AdagradOptimizer'>"
/external/tensorflow/tensorflow/tools/api/golden/v2/
tensorflow.keras.optimizers.-adagrad.pbtxt
3 is_instance: "<class 'tensorflow.python.keras.optimizer_v2.adagrad.Adagrad'>"
