path: "tensorflow.train.FtrlOptimizer" tf_class { is_instance: "" is_instance: "" is_instance: "" is_instance: "" is_instance: "" member { name: "GATE_GRAPH" mtype: "" } member { name: "GATE_NONE" mtype: "" } member { name: "GATE_OP" mtype: "" } member_method { name: "__init__" argspec: "args=[\'self\', \'learning_rate\', \'learning_rate_power\', \'initial_accumulator_value\', \'l1_regularization_strength\', \'l2_regularization_strength\', \'use_locking\', \'name\', \'accum_name\', \'linear_name\', \'l2_shrinkage_regularization_strength\'], varargs=None, keywords=None, defaults=[\'-0.5\', \'0.1\', \'0.0\', \'0.0\', \'False\', \'Ftrl\', \'None\', \'None\', \'0.0\'], " } member_method { name: "apply_gradients" argspec: "args=[\'self\', \'grads_and_vars\', \'global_step\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], " } member_method { name: "compute_gradients" argspec: "args=[\'self\', \'loss\', \'var_list\', \'gate_gradients\', \'aggregation_method\', \'colocate_gradients_with_ops\', \'grad_loss\'], varargs=None, keywords=None, defaults=[\'None\', \'1\', \'None\', \'False\', \'None\'], " } member_method { name: "get_name" argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" } member_method { name: "get_slot" argspec: "args=[\'self\', \'var\', \'name\'], varargs=None, keywords=None, defaults=None" } member_method { name: "get_slot_names" argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" } member_method { name: "minimize" argspec: "args=[\'self\', \'loss\', \'global_step\', \'var_list\', \'gate_gradients\', \'aggregation_method\', \'colocate_gradients_with_ops\', \'name\', \'grad_loss\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'1\', \'None\', \'False\', \'None\', \'None\'], " } member_method { name: "variables" argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" } }