
Searched full:optimizer (Results 1 – 25 of 2117) sorted by relevance


/external/tensorflow/tensorflow/core/grappler/optimizers/
arithmetic_optimizer_test_utils.h
31 // Optimize a graph using optimizer and prune all the nodes that no
33 void OptimizeAndPrune(GraphOptimizer* optimizer, GrapplerItem* item, in OptimizeAndPrune() argument
35 TF_EXPECT_OK(optimizer->Optimize(nullptr, *item, output)); in OptimizeAndPrune()
41 // Run optimizer twice to make sure the rewrite is idempotent.
42 void DedupAndOptimizeTwiceAndPrune(GraphOptimizer* optimizer, in DedupAndOptimizeTwiceAndPrune() argument
47 TF_EXPECT_OK(optimizer->Optimize(nullptr, *item, output)); in DedupAndOptimizeTwiceAndPrune()
50 TF_EXPECT_OK(optimizer->Optimize(nullptr, *item, output)); in DedupAndOptimizeTwiceAndPrune()
56 // Run optimizer twice to make sure the rewrite is idempotent.
57 void OptimizeTwice(GraphOptimizer* optimizer, GrapplerItem* item, in OptimizeTwice() argument
59 TF_EXPECT_OK(optimizer->Optimize(nullptr, *item, output)); in OptimizeTwice()
[all …]
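
The helpers in this file all enforce the same invariant called out in the comments: running the rewrite a second time must be a no-op. A minimal, library-agnostic sketch of that check (the optimize callable and toy node list are illustrative stand-ins, not Grappler's API):

    def assert_idempotent(optimize, graph):
        """Assert a rewrite is idempotent: a second pass changes nothing."""
        once = optimize(graph)
        twice = optimize(once)
        assert once == twice, "rewrite is not idempotent"

    def dedup(nodes):
        """Toy rewrite: collapse adjacent duplicate nodes (idempotent)."""
        out = []
        for n in nodes:
            if not out or out[-1] != n:
                out.append(n)
        return out

    assert_idempotent(dedup, ["Const", "Const", "Add", "Add", "Mul"])
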
/external/angle/third_party/spirv-tools/src/source/opt/
optimizer.cpp
15 #include "spirv-tools/optimizer.hpp"
46 struct Optimizer::PassToken::Impl {
52 Optimizer::PassToken::PassToken( in PassToken()
53 std::unique_ptr<Optimizer::PassToken::Impl> impl) in PassToken()
56 Optimizer::PassToken::PassToken(std::unique_ptr<opt::Pass>&& pass) in PassToken()
57 : impl_(MakeUnique<Optimizer::PassToken::Impl>(std::move(pass))) {} in PassToken()
59 Optimizer::PassToken::PassToken(PassToken&& that) in PassToken()
62 Optimizer::PassToken& Optimizer::PassToken::operator=(PassToken&& that) { in operator =()
67 Optimizer::PassToken::~PassToken() {} in ~PassToken()
69 struct Optimizer::Impl {
[all …]
/external/deqp-deps/SPIRV-Tools/source/opt/
optimizer.cpp
15 #include "spirv-tools/optimizer.hpp"
45 struct Optimizer::PassToken::Impl {
51 Optimizer::PassToken::PassToken( in PassToken()
52 std::unique_ptr<Optimizer::PassToken::Impl> impl) in PassToken()
55 Optimizer::PassToken::PassToken(std::unique_ptr<opt::Pass>&& pass) in PassToken()
56 : impl_(MakeUnique<Optimizer::PassToken::Impl>(std::move(pass))) {} in PassToken()
58 Optimizer::PassToken::PassToken(PassToken&& that) in PassToken()
61 Optimizer::PassToken& Optimizer::PassToken::operator=(PassToken&& that) { in operator =()
66 Optimizer::PassToken::~PassToken() {} in ~PassToken()
68 struct Optimizer::Impl {
[all …]
/external/swiftshader/third_party/SPIRV-Tools/source/opt/
optimizer.cpp
15 #include "spirv-tools/optimizer.hpp"
46 struct Optimizer::PassToken::Impl {
52 Optimizer::PassToken::PassToken( in PassToken()
53 std::unique_ptr<Optimizer::PassToken::Impl> impl) in PassToken()
56 Optimizer::PassToken::PassToken(std::unique_ptr<opt::Pass>&& pass) in PassToken()
57 : impl_(MakeUnique<Optimizer::PassToken::Impl>(std::move(pass))) {} in PassToken()
59 Optimizer::PassToken::PassToken(PassToken&& that) in PassToken()
62 Optimizer::PassToken& Optimizer::PassToken::operator=(PassToken&& that) { in operator =()
67 Optimizer::PassToken::~PassToken() {} in ~PassToken()
69 struct Optimizer::Impl {
[all …]
/external/deqp-deps/glslang/SPIRV/
SpvTools.cpp
46 #include "spirv-tools/optimizer.hpp"
92 // Callback passed to spvtools::Optimizer::SetMessageConsumer
182 // Apply the SPIRV-Tools optimizer to generated SPIR-V. HLSL SPIR-V is legalized in the process.
188 spvtools::Optimizer optimizer(target_env); in SpirvToolsTransform() local
189 optimizer.SetMessageConsumer(OptimizerMesssageConsumer); in SpirvToolsTransform()
196 optimizer.RegisterPass(spvtools::CreateStripDebugInfoPass()); in SpirvToolsTransform()
198 optimizer.RegisterPass(spvtools::CreateWrapOpKillPass()); in SpirvToolsTransform()
199 optimizer.RegisterPass(spvtools::CreateDeadBranchElimPass()); in SpirvToolsTransform()
200 optimizer.RegisterPass(spvtools::CreateMergeReturnPass()); in SpirvToolsTransform()
201 optimizer.RegisterPass(spvtools::CreateInlineExhaustivePass()); in SpirvToolsTransform()
[all …]
/external/angle/third_party/glslang/src/SPIRV/
SpvTools.cpp
46 #include "spirv-tools/optimizer.hpp"
92 // Callback passed to spvtools::Optimizer::SetMessageConsumer
183 // Apply the SPIRV-Tools optimizer to generated SPIR-V. HLSL SPIR-V is legalized in the process.
189 spvtools::Optimizer optimizer(target_env); in SpirvToolsTransform() local
190 optimizer.SetMessageConsumer(OptimizerMesssageConsumer); in SpirvToolsTransform()
197 optimizer.RegisterPass(spvtools::CreateStripDebugInfoPass()); in SpirvToolsTransform()
199 optimizer.RegisterPass(spvtools::CreateWrapOpKillPass()); in SpirvToolsTransform()
200 optimizer.RegisterPass(spvtools::CreateDeadBranchElimPass()); in SpirvToolsTransform()
201 optimizer.RegisterPass(spvtools::CreateMergeReturnPass()); in SpirvToolsTransform()
202 optimizer.RegisterPass(spvtools::CreateInlineExhaustivePass()); in SpirvToolsTransform()
[all …]
/external/pytorch/torch/optim/
lr_scheduler.py
27 from .optimizer import Optimizer
71 def _format_param(name: str, optimizer: Optimizer, param): argument
78 if len(param) != len(optimizer.param_groups):
80 f"{name} must have the same length as optimizer.param_groups. "
81 f"{name} has {len(param)} values, param_groups has {len(optimizer.param_groups)}."
84 param = [param] * len(optimizer.param_groups)
95 self, optimizer: Optimizer, last_epoch=-1, verbose="deprecated" argument
97 # Attach optimizer
98 if not isinstance(optimizer, Optimizer):
99 raise TypeError(f"{type(optimizer).__name__} is not an Optimizer")
[all …]
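
The snippet shows the two contracts every scheduler inherits: it must be attached to a real Optimizer, and any per-group setting must match optimizer.param_groups in length. A typical attach-and-step pattern (model and loop are illustrative):

    import torch

    model = torch.nn.Linear(4, 2)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
    # StepLR halves the learning rate every 10 epochs.
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=10, gamma=0.5)

    for epoch in range(30):
        optimizer.step()   # normally preceded by a forward/backward pass
        scheduler.step()   # one scheduler step per epoch, after optimizer.step()

    print(optimizer.param_groups[0]["lr"])  # 0.0125 = 0.1 * 0.5**3
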
/external/tensorflow/tensorflow/python/keras/
optimizers.py
16 """Built-in optimizer classes.
18 For more examples see the base class `tf.keras.optimizers.Optimizer`.
22 from tensorflow.python.keras.optimizer_v1 import Optimizer
35 from tensorflow.python.training import optimizer as tf_optimizer_module
40 def serialize(optimizer): argument
41 """Serialize the optimizer configuration to JSON compatible python dict.
43 The configuration can be used for persistence and reconstruct the `Optimizer`
52 optimizer: An `Optimizer` instance to serialize.
55 Python dict which contains the configuration of the input optimizer.
57 return serialize_keras_object(optimizer)
[all …]
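
serialize reduces an optimizer to a JSON-compatible dict, which is what Keras persists and later rebuilds via deserialize. A sketch of the round trip with the public API:

    import tensorflow as tf

    opt = tf.keras.optimizers.Adam(learning_rate=1e-3)
    config = tf.keras.optimizers.serialize(opt)      # plain Python dict
    print(config["class_name"])                      # "Adam"
    restored = tf.keras.optimizers.deserialize(config)
    assert isinstance(restored, type(opt))
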
/external/tensorflow/tensorflow/python/tpu/tests/
tpu_embedding_v1_correctness_test.py
33 # Slot names in Keras optimizer v2 are different compared to the slot names
57 def _create_mid_level(self, optimizer=None): argument
59 if optimizer is None:
60 optimizer = tpu_embedding_v2_utils.SGD(learning_rate=0.1)
63 feature_config=self.feature_config, optimizer=optimizer)
65 def _get_slot_variable_creation_fn(self, optimizer): argument
67 # passed optimizer rather than the built-in methods. This allows a user to
72 slots[slot] = optimizer.add_slot(
73 table, _SLOT_NAME_MAPPING[type(optimizer)][slot], initializer)
81 # Keras optimizers has to be translated to embedding optimizer with slot
[all …]
tpu_embedding_base_test.py
160 def _create_mid_level(self, optimizer=None): argument
162 if optimizer is None:
163 optimizer = tpu_embedding_v2_utils.SGD(learning_rate=0.1)
166 feature_config=self.feature_config, optimizer=optimizer)
173 optimizer = tpu_embedding_v2_utils.SGD(learning_rate=0.1)
175 optimizer = tpu_embedding_v2_utils.Adagrad(learning_rate=0.1)
177 optimizer = tpu_embedding_v2_utils.Adam(learning_rate=0.1)
179 optimizer = tpu_embedding_v2_utils.FTRL(learning_rate=0.1)
181 optimizer = tpu_embedding_v2_utils.AdagradMomentum(
189 raise ValueError('optimizer is not recognized: ', optimizer_name)
[all …]
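
Both embedding tests follow the same pattern: pick a slot-bearing optimizer from tpu_embedding_v2_utils and hand it to the mid-level API. With the public aliases the construction looks roughly like this (creating the TPUEmbedding object itself additionally needs a TPUStrategy, omitted here):

    import tensorflow as tf

    optimizer = tf.tpu.experimental.embedding.Adagrad(learning_rate=0.1)
    table = tf.tpu.experimental.embedding.TableConfig(
        vocabulary_size=1000, dim=16, optimizer=optimizer)
    feature_config = {
        "watched": tf.tpu.experimental.embedding.FeatureConfig(table=table),
    }
    # Under a TPUStrategy scope one would then build:
    # tf.tpu.experimental.embedding.TPUEmbedding(
    #     feature_config=feature_config, optimizer=optimizer)
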
/external/pytorch/torch/amp/
grad_scaler.py
56 * ``scaler.step(optimizer)`` safely unscales gradients and calls ``optimizer.step()``.
66 optimizer.zero_grad()
73 # scaler.step() first unscales gradients of the optimizer's params.
74 # If gradients don't contain infs/NaNs, optimizer.step() is then called,
75 # otherwise, optimizer.step() is skipped.
76 scaler.step(optimizer)
90 …``scaler.step(optimizer)`` (or optional separate ``scaler.unscale_(optimizer)``, see :meth:`unscal…
92 …* If infs/NaNs are found, ``scaler.step(optimizer)`` skips the underlying ``optimizer.step()`` (so…
95 …* If no infs/NaNs are found, ``scaler.step(optimizer)`` runs the underlying ``optimizer.step()`` a…
100 value calibrates. ``scaler.step`` will skip the underlying ``optimizer.step()`` for these
[all …]
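
The docstring above describes the full pattern; assembled into a loop it reads as below (assumes a CUDA device; older PyTorch releases spell the scaler torch.cuda.amp.GradScaler()):

    import torch

    model = torch.nn.Linear(8, 1).cuda()
    optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
    scaler = torch.amp.GradScaler("cuda")

    for _ in range(3):
        inp = torch.randn(16, 8, device="cuda")
        target = torch.randn(16, 1, device="cuda")
        optimizer.zero_grad()
        with torch.autocast("cuda"):
            loss = torch.nn.functional.mse_loss(model(inp), target)
        scaler.scale(loss).backward()   # backprop on the scaled loss
        scaler.step(optimizer)          # unscales; skips step on inf/NaN grads
        scaler.update()                 # re-calibrates the scale factor
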
/external/swiftshader/third_party/SPIRV-Tools/test/opt/
c_interface_test.cpp
40 auto optimizer = spvOptimizerCreate(SPV_ENV_UNIVERSAL_1_1); in TEST() local
41 ASSERT_NE(optimizer, nullptr); in TEST()
51 spvOptimizerRun(optimizer, spirv, sizeof(spirv) / sizeof(uint32_t), in TEST()
57 spvOptimizerDestroy(optimizer); in TEST()
76 auto optimizer = spvOptimizerCreate(SPV_ENV_UNIVERSAL_1_1); in TEST() local
77 ASSERT_NE(optimizer, nullptr); in TEST()
80 optimizer, in TEST()
96 spvOptimizerRun(optimizer, spirv, sizeof(spirv) / sizeof(uint32_t), in TEST()
105 spvOptimizerDestroy(optimizer); in TEST()
124 auto optimizer = spvOptimizerCreate(SPV_ENV_UNIVERSAL_1_1); in TEST() local
[all …]
/external/deqp-deps/SPIRV-Tools/test/opt/
c_interface_test.cpp
40 auto optimizer = spvOptimizerCreate(SPV_ENV_UNIVERSAL_1_1); in TEST() local
41 ASSERT_NE(optimizer, nullptr); in TEST()
51 spvOptimizerRun(optimizer, spirv, sizeof(spirv) / sizeof(uint32_t), in TEST()
57 spvOptimizerDestroy(optimizer); in TEST()
76 auto optimizer = spvOptimizerCreate(SPV_ENV_UNIVERSAL_1_1); in TEST() local
77 ASSERT_NE(optimizer, nullptr); in TEST()
80 optimizer, in TEST()
96 spvOptimizerRun(optimizer, spirv, sizeof(spirv) / sizeof(uint32_t), in TEST()
105 spvOptimizerDestroy(optimizer); in TEST()
124 auto optimizer = spvOptimizerCreate(SPV_ENV_UNIVERSAL_1_1); in TEST() local
[all …]
/external/angle/third_party/spirv-tools/src/test/opt/
c_interface_test.cpp
40 auto optimizer = spvOptimizerCreate(SPV_ENV_UNIVERSAL_1_1); in TEST() local
41 ASSERT_NE(optimizer, nullptr); in TEST()
51 spvOptimizerRun(optimizer, spirv, sizeof(spirv) / sizeof(uint32_t), in TEST()
57 spvOptimizerDestroy(optimizer); in TEST()
76 auto optimizer = spvOptimizerCreate(SPV_ENV_UNIVERSAL_1_1); in TEST() local
77 ASSERT_NE(optimizer, nullptr); in TEST()
80 optimizer, in TEST()
96 spvOptimizerRun(optimizer, spirv, sizeof(spirv) / sizeof(uint32_t), in TEST()
105 spvOptimizerDestroy(optimizer); in TEST()
124 auto optimizer = spvOptimizerCreate(SPV_ENV_UNIVERSAL_1_1); in TEST() local
[all …]
/external/tensorflow/tensorflow/python/checkpoint/
checkpoint_with_v1_optimizers_test.py
42 optimizer = adam.AdamOptimizer(0.1)
44 optimizer.minimize(root.var.read_value)
46 train_op = optimizer.minimize(root.var)
47 # Note that `optimizer` has not been added as a dependency of
51 trackable_utils.Checkpoint(root=root, optimizer=optimizer)))
55 root.optimizer = optimizer
57 self.evaluate(state_ops.assign(optimizer.get_slot(name="m", var=root.var),
72 new_root.optimizer = adam.AdamOptimizer(0.1)
81 new_root.optimizer.get_slot(name="m", var=new_root.var)))
83 self.assertIs(new_root.optimizer.get_slot(name="m", var=new_root.var),
[all …]
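
The point this test makes is subtle: Adam's slot variables ("m", "v") are saved only once the optimizer itself hangs off the checkpoint root, and restores into a fresh optimizer are deferred until matching slots exist. A condensed eager-mode sketch along the same lines:

    import tensorflow as tf

    var = tf.Variable(1.0)
    optimizer = tf.compat.v1.train.AdamOptimizer(0.1)
    optimizer.minimize(var.read_value, var_list=[var])  # creates the m/v slots

    ckpt = tf.train.Checkpoint(var=var, optimizer=optimizer)
    path = ckpt.save("/tmp/ckpt_demo")

    new_var = tf.Variable(0.0)
    new_opt = tf.compat.v1.train.AdamOptimizer(0.1)
    # Slot values are restored lazily, once new_opt creates matching slots.
    tf.train.Checkpoint(var=new_var, optimizer=new_opt).restore(path)
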
/external/pytorch/torch/csrc/api/include/torch/optim/
optimizer.h
110 class TORCH_API Optimizer {
113 // `state_dict` / `load_state_dict` API to copy an optimizer instead.
114 Optimizer(const Optimizer& optimizer) = delete;
115 Optimizer(Optimizer&& optimizer) = default;
117 explicit Optimizer( in Optimizer() function
126 /// Constructs the `Optimizer` from a vector of parameters.
127 explicit Optimizer( in Optimizer() function
130 : Optimizer( in Optimizer()
134 /// Adds the given param_group to the optimizer's param_group list.
137 virtual ~Optimizer() = default;
[all …]
/external/deqp-deps/SPIRV-Tools/include/spirv-tools/
optimizer.hpp
40 class SPIRV_TOOLS_EXPORT Optimizer { class
74 explicit Optimizer(spv_target_env env);
77 Optimizer(const Optimizer&) = delete;
78 Optimizer(Optimizer&&) = delete;
79 Optimizer& operator=(const Optimizer&) = delete;
80 Optimizer& operator=(Optimizer&&) = delete;
83 ~Optimizer();
92 // Registers the given |pass| to this optimizer. Passes will be run in the
95 Optimizer& RegisterPass(PassToken&& pass);
103 Optimizer& RegisterPerformancePasses();
[all …]
/external/swiftshader/third_party/SPIRV-Tools/include/spirv-tools/
optimizer.hpp
40 class SPIRV_TOOLS_EXPORT Optimizer { class
74 explicit Optimizer(spv_target_env env);
77 Optimizer(const Optimizer&) = delete;
78 Optimizer(Optimizer&&) = delete;
79 Optimizer& operator=(const Optimizer&) = delete;
80 Optimizer& operator=(Optimizer&&) = delete;
83 ~Optimizer();
92 // Registers the given |pass| to this optimizer. Passes will be run in the
95 Optimizer& RegisterPass(PassToken&& pass);
103 Optimizer& RegisterPerformancePasses();
[all …]
/external/pytorch/torch/csrc/api/src/optim/
optimizer.cpp
1 #include <torch/optim/optimizer.h>
98 void Optimizer::add_param_group(const OptimizerParamGroup& param_group) { in add_param_group()
117 void Optimizer::add_parameters(const std::vector<Tensor>& parameters) { in add_parameters()
118 TORCH_WARN("Optimizer::add_parameters() will be removed in PyTorch 1.6"); in add_parameters()
123 void Optimizer::zero_grad(bool set_to_none) { in zero_grad()
137 const std::vector<Tensor>& Optimizer::parameters() const noexcept { in parameters()
138 TORCH_WARN("Optimizer::parameters() will be removed in PyTorch 1.6"); in parameters()
142 std::vector<Tensor>& Optimizer::parameters() noexcept { in parameters()
143 TORCH_WARN("Optimizer::parameters() will be removed in PyTorch 1.6"); in parameters()
147 size_t Optimizer::size() const noexcept { in size()
[all …]
/external/tensorflow/tensorflow/python/training/experimental/
mixed_precision.py
19 from tensorflow.python.training import optimizer
27 # is a loss scale optimizer class, and wrapper_fn is a function that takes in
28 # an optimizer and LossScale and returns a wrapper_cls instance.
30 optimizer.Optimizer:
37 """Registers a loss scale optimizer wrapper.
40 automatically wraps an optimizer with an optimizer wrapper that performs loss
48 optimizer_cls: A base optimizer class, e.g. `tf.keras.optimizers.Optimizer`.
49 wrapper_fn: A function that takes in arguments "optimizer" and
50 "loss_scale", and returns a loss scale optimizer of type "wrapper_cls"
51 that wraps "optimizer".
[all …]
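
This registry is internal plumbing that pairs a base optimizer class with a wrapper that applies loss scaling. The equivalent step a user takes by hand, with the public Keras API rather than this module, looks like:

    import tensorflow as tf

    opt = tf.keras.optimizers.SGD(learning_rate=0.01)
    # The wrapper scales the loss up before backprop and the gradients back
    # down afterwards, so float16 gradients do not underflow to zero.
    opt = tf.keras.mixed_precision.LossScaleOptimizer(opt)
    scaled = opt.get_scaled_loss(tf.constant(4.0))
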
/external/angle/third_party/spirv-tools/src/include/spirv-tools/
optimizer.hpp
40 class SPIRV_TOOLS_EXPORT Optimizer { class
74 explicit Optimizer(spv_target_env env);
77 Optimizer(const Optimizer&) = delete;
78 Optimizer(Optimizer&&) = delete;
79 Optimizer& operator=(const Optimizer&) = delete;
80 Optimizer& operator=(Optimizer&&) = delete;
83 ~Optimizer();
92 // Registers the given |pass| to this optimizer. Passes will be run in the
95 Optimizer& RegisterPass(PassToken&& pass);
103 Optimizer& RegisterPerformancePasses();
[all …]
/external/pytorch/docs/source/
optim.rst
6 How to use an optimizer
9 To use :mod:`torch.optim` you have to construct an optimizer object that will hold
15 To construct an :class:`Optimizer` you have to give it an iterable containing the
17 you can specify optimizer-specific options such as the learning rate, weight decay, etc.
21 optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.9)
22 optimizer = optim.Adam([var1, var2], lr=0.0001)
27 :class:`Optimizer` s also support specifying per-parameter options. To do this, instead
74 All optimizers implement a :func:`~Optimizer.step` method, that updates the
77 ``optimizer.step()``
87 optimizer.zero_grad()
[all …]
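
The constructors quoted above slot into the loop the page goes on to describe: clear gradients, backpropagate, then apply the update. A self-contained version:

    import torch

    model = torch.nn.Linear(10, 1)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.9)
    data = [(torch.randn(8, 10), torch.randn(8, 1)) for _ in range(5)]

    for input, target in data:
        optimizer.zero_grad()   # clear gradients accumulated last iteration
        loss = torch.nn.functional.mse_loss(model(input), target)
        loss.backward()         # populate .grad on each parameter
        optimizer.step()        # apply the SGD-with-momentum update
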
/external/pytorch/torch/utils/benchmark/utils/
compile.py
44 optimizer: Optional[torch.optim.Optimizer] = None, argument
48 if optimizer and loss_fn:
54 optimizer.step()
55 optimizer.zero_grad()
64 …globals={"model": model, "sample_input": sample_input, "optimizer": optimizer, "loss_fn": loss_fn},
80 optimizer: Optional[torch.optim.Optimizer] = None, argument
93 compilation_time = bench_loop(opt_model, sample_input, 1, optimizer, loss_fn)
95 running_time = bench_loop(opt_model, sample_input, num_iters, optimizer, loss_fn)
110 running_time = bench_loop(opt_model, sample_input, num_iters, optimizer, loss_fn)
123 optimizer: Optional[torch.optim.Optimizer] = None, argument
[all …]
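
bench_loop times a forward pass plus an optional optimizer step; the surrounding code separates the first call, which pays the compilation cost, from steady-state iterations. A rough standalone version of that split (names here are illustrative, not this module's API):

    import time
    import torch

    model = torch.nn.Linear(64, 64)
    opt_model = torch.compile(model)
    x = torch.randn(32, 64)

    start = time.perf_counter()
    opt_model(x)                       # first call triggers compilation
    compilation_time = time.perf_counter() - start

    start = time.perf_counter()
    for _ in range(100):
        opt_model(x)                   # already compiled: steady-state cost
    running_time = (time.perf_counter() - start) / 100
    print(f"compile ~{compilation_time:.2f}s, per-iter ~{running_time * 1e6:.0f}us")
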
/external/pytorch/test/cpp/api/
optim.cpp
33 OptimizerClass optimizer(model->parameters(), options); in test_optimizer_xor() local
47 auto step = [&](OptimizerClass& optimizer, in test_optimizer_xor()
52 optimizer.zero_grad(); in test_optimizer_xor()
58 return optimizer.step(closure); in test_optimizer_xor()
61 torch::Tensor loss = step(optimizer, model, inputs, labels); in test_optimizer_xor()
122 auto optimizer = OptimizerClass(parameters.values(), options); in check_exact_values() local
128 optimizer.zero_grad(); in check_exact_values()
134 optimizer.step(closure); in check_exact_values()
164 auto optimizer = Adagrad(params, options); in TEST() local
166 auto& options_ = static_cast<AdagradOptions&>(optimizer.defaults()); in TEST()
[all …]
/external/tensorflow/tensorflow/python/tpu/
tpu_optimizer.py
16 """Optimizer that implements cross-shard gradient reduction for TPU."""
24 from tensorflow.python.training import optimizer
29 class CrossShardOptimizer(optimizer.Optimizer):
30 """An optimizer that averages gradients across TPU shards."""
37 """Construct a new cross-shard optimizer.
40 opt: An existing `Optimizer` to encapsulate.
46 optimizer to subgroups.
56 if not isinstance(opt, optimizer.Optimizer):
58 "CrossShardOptimizer only works with tf.training.Optimizer and not "
59 f"Keras Optimizer. Received: {opt}. "
[all …]
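
As the type check shows, CrossShardOptimizer insists on a tf.compat.v1.train.Optimizer and rejects Keras optimizers; it averages gradients across TPU shards before the wrapped optimizer applies them. Typical v1-style usage, sketched:

    import tensorflow.compat.v1 as tf

    base = tf.train.GradientDescentOptimizer(learning_rate=0.01)
    optimizer = tf.tpu.CrossShardOptimizer(base)
    # Inside a TPU training graph one would then call, e.g.:
    # train_op = optimizer.minimize(loss)
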
