Home
last modified time | relevance | path

Searched refs:optimization (Results 1 – 25 of 1093) sorted by relevance

Pages: 1 2 3 4 5 6 7 8 9 10 … 44  (next >>)

/external/mesa3d/src/compiler/glsl/
Dtest_optpass.cpp56 do_optimization(struct exec_list *ir, const char *optimization, in do_optimization() argument
65 if (sscanf(optimization, "do_common_optimization ( %d ) ", &int_0) == 1) { in do_optimization()
67 } else if (strcmp(optimization, "do_algebraic") == 0) { in do_optimization()
69 } else if (strcmp(optimization, "do_constant_folding") == 0) { in do_optimization()
71 } else if (strcmp(optimization, "do_constant_variable") == 0) { in do_optimization()
73 } else if (strcmp(optimization, "do_constant_variable_unlinked") == 0) { in do_optimization()
75 } else if (strcmp(optimization, "do_copy_propagation") == 0) { in do_optimization()
77 } else if (strcmp(optimization, "do_copy_propagation_elements") == 0) { in do_optimization()
79 } else if (strcmp(optimization, "do_constant_propagation") == 0) { in do_optimization()
81 } else if (strcmp(optimization, "do_dead_code") == 0) { in do_optimization()
[all …]
/external/apache-commons-math/src/main/java/org/apache/commons/math/optimization/general/
DAbstractScalarDifferentiableOptimizer.java18 package org.apache.commons.math.optimization.general;
25 import org.apache.commons.math.optimization.DifferentiableMultivariateRealOptimizer;
26 import org.apache.commons.math.optimization.GoalType;
27 import org.apache.commons.math.optimization.OptimizationException;
28 import org.apache.commons.math.optimization.RealConvergenceChecker;
29 import org.apache.commons.math.optimization.RealPointValuePair;
30 import org.apache.commons.math.optimization.SimpleScalarValueChecker;
/external/tensorflow/tensorflow/core/common_runtime/
Doptimization_registry.h134 #define REGISTER_OPTIMIZATION(grouping, phase, optimization) \ argument
135 REGISTER_OPTIMIZATION_UNIQ_HELPER(__COUNTER__, grouping, phase, optimization)
137 #define REGISTER_OPTIMIZATION_UNIQ_HELPER(ctr, grouping, phase, optimization) \ argument
138 REGISTER_OPTIMIZATION_UNIQ(ctr, grouping, phase, optimization)
140 #define REGISTER_OPTIMIZATION_UNIQ(ctr, grouping, phase, optimization) \ argument
145 new optimization()), \
146 #optimization)
/external/tensorflow/tensorflow/python/data/experimental/kernel_tests/optimization/
Dchoose_fastest_branch_dataset_test.py23 from tensorflow.python.data.experimental.ops import optimization
45 choose_fastest = optimization._ChooseFastestBranchDataset(
65 choose_fastest = optimization._ChooseFastestBranchDataset(
80 choose_fastest = optimization._ChooseFastestBranchDataset(
96 choose_fastest = optimization._ChooseFastestBranchDataset(
113 choose_fastest = optimization._ChooseFastestBranchDataset(
125 choose_fastest = optimization._ChooseFastestBranchDataset(
140 return optimization._ChooseFastestBranchDataset(
163 choose_fastest = optimization._ChooseFastestBranchDataset(
DBUILD22 "//tensorflow/python/data/experimental/ops:optimization",
44 "//tensorflow/python/data/experimental/ops:optimization",
71 "//tensorflow/python/data/experimental/ops:optimization",
93 "//tensorflow/python/data/experimental/ops:optimization",
113 "//tensorflow/python/data/experimental/ops:optimization",
131 "//tensorflow/python/data/experimental/ops:optimization",
153 "//tensorflow/python/data/experimental/ops:optimization",
173 "//tensorflow/python/data/experimental/ops:optimization",
200 "//tensorflow/python/data/experimental/ops:optimization",
237 "//tensorflow/python/data/experimental/ops:optimization",
[all …]
Dchoose_fastest_dataset_test.py22 from tensorflow.python.data.experimental.ops import optimization
37 merge = optimization._ChooseFastestDataset([dataset, dataset])
45 merge = optimization._ChooseFastestDataset([dataset for _ in range(5)])
56 merge = optimization._ChooseFastestDataset([dataset_a, dataset_b])
77 merge = optimization._ChooseFastestDataset([dataset_a, dataset_b])
79 merge = optimization._ChooseFastestDataset([dataset_a, dataset_b])
/external/tensorflow/tensorflow/python/data/experimental/ops/
Doptimization_options.py135 for optimization in all_optimizations:
136 if getattr(self, optimization):
137 result.add(optimization)
147 for optimization in optimizations_to_disable:
148 if getattr(self, optimization) is not False:
149 result.add(optimization)
/external/apache-commons-math/src/main/java/org/apache/commons/math/optimization/fitting/
DGaussianFitter.java18 package org.apache.commons.math.optimization.fitting;
21 import org.apache.commons.math.optimization.DifferentiableMultivariateVectorialOptimizer;
22 import org.apache.commons.math.optimization.OptimizationException;
23 import org.apache.commons.math.optimization.fitting.CurveFitter;
24 import org.apache.commons.math.optimization.fitting.WeightedObservedPoint;
/external/tensorflow/tensorflow/core/api_def/base_api/
Dapi_def_RetrieveTPUEmbeddingMDLAdagradLightParameters.pbtxt7 Parameter parameters updated by the MDL Adagrad Light optimization algorithm.
13 Parameter accumulators updated by the MDL Adagrad Light optimization algorithm.
19 Parameter weights updated by the MDL Adagrad Light optimization algorithm.
25 Parameter benefits updated by the MDL Adagrad Light optimization algorithm.
30 An op that retrieves optimization parameters from embedding to host
Dapi_def_RetrieveTPUEmbeddingAdadeltaParametersGradAccumDebug.pbtxt7 Parameter parameters updated by the Adadelta optimization algorithm.
13 Parameter accumulators updated by the Adadelta optimization algorithm.
19 Parameter updates updated by the Adadelta optimization algorithm.
25 Parameter gradient_accumulators updated by the Adadelta optimization algorithm.
30 An op that retrieves optimization parameters from embedding to host
Dapi_def_RetrieveTPUEmbeddingFTRLParametersGradAccumDebug.pbtxt7 Parameter parameters updated by the FTRL optimization algorithm.
13 Parameter accumulators updated by the FTRL optimization algorithm.
19 Parameter linears updated by the FTRL optimization algorithm.
25 Parameter gradient_accumulators updated by the FTRL optimization algorithm.
30 An op that retrieves optimization parameters from embedding to host
Dapi_def_RetrieveTPUEmbeddingADAMParametersGradAccumDebug.pbtxt7 Parameter parameters updated by the ADAM optimization algorithm.
13 Parameter momenta updated by the ADAM optimization algorithm.
19 Parameter velocities updated by the ADAM optimization algorithm.
25 Parameter gradient_accumulators updated by the ADAM optimization algorithm.
30 An op that retrieves optimization parameters from embedding to host
Dapi_def_RetrieveTPUEmbeddingCenteredRMSPropParameters.pbtxt7 Parameter parameters updated by the centered RMSProp optimization algorithm.
13 Parameter ms updated by the centered RMSProp optimization algorithm.
19 Parameter mom updated by the centered RMSProp optimization algorithm.
25 Parameter mg updated by the centered RMSProp optimization algorithm.
30 An op that retrieves optimization parameters from embedding to host
Dapi_def_RetrieveTPUEmbeddingRMSPropParametersGradAccumDebug.pbtxt7 Parameter parameters updated by the RMSProp optimization algorithm.
13 Parameter ms updated by the RMSProp optimization algorithm.
19 Parameter mom updated by the RMSProp optimization algorithm.
25 Parameter gradient_accumulators updated by the RMSProp optimization algorithm.
30 An op that retrieves optimization parameters from embedding to host
Dapi_def_LoadTPUEmbeddingRMSPropParametersGradAccumDebug.pbtxt7 Value of parameters used in the RMSProp optimization algorithm.
13 Value of ms used in the RMSProp optimization algorithm.
19 Value of mom used in the RMSProp optimization algorithm.
25 Value of gradient_accumulators used in the RMSProp optimization algorithm.
30 An op that loads optimization parameters into HBM for embedding. Must be
Dapi_def_LoadTPUEmbeddingFTRLParametersGradAccumDebug.pbtxt7 Value of parameters used in the FTRL optimization algorithm.
13 Value of accumulators used in the FTRL optimization algorithm.
19 Value of linears used in the FTRL optimization algorithm.
25 Value of gradient_accumulators used in the FTRL optimization algorithm.
30 An op that loads optimization parameters into HBM for embedding. Must be
Dapi_def_LoadTPUEmbeddingCenteredRMSPropParameters.pbtxt7 Value of parameters used in the centered RMSProp optimization algorithm.
13 Value of ms used in the centered RMSProp optimization algorithm.
19 Value of mom used in the centered RMSProp optimization algorithm.
25 Value of mg used in the centered RMSProp optimization algorithm.
30 An op that loads optimization parameters into HBM for embedding. Must be
Dapi_def_LoadTPUEmbeddingADAMParametersGradAccumDebug.pbtxt7 Value of parameters used in the ADAM optimization algorithm.
13 Value of momenta used in the ADAM optimization algorithm.
19 Value of velocities used in the ADAM optimization algorithm.
25 Value of gradient_accumulators used in the ADAM optimization algorithm.
30 An op that loads optimization parameters into HBM for embedding. Must be
Dapi_def_LoadTPUEmbeddingAdadeltaParametersGradAccumDebug.pbtxt7 Value of parameters used in the Adadelta optimization algorithm.
13 Value of accumulators used in the Adadelta optimization algorithm.
19 Value of updates used in the Adadelta optimization algorithm.
25 Value of gradient_accumulators used in the Adadelta optimization algorithm.
30 An op that loads optimization parameters into HBM for embedding. Must be
Dapi_def_LoadTPUEmbeddingMDLAdagradLightParameters.pbtxt7 Value of parameters used in the MDL Adagrad Light optimization algorithm.
13 Value of accumulators used in the MDL Adagrad Light optimization algorithm.
19 Value of weights used in the MDL Adagrad Light optimization algorithm.
25 Value of benefits used in the MDL Adagrad Light optimization algorithm.
30 An op that loads optimization parameters into HBM for embedding. Must be
Dapi_def_RetrieveTPUEmbeddingADAMParameters.pbtxt7 Parameter parameters updated by the ADAM optimization algorithm.
13 Parameter momenta updated by the ADAM optimization algorithm.
19 Parameter velocities updated by the ADAM optimization algorithm.
24 An op that retrieves optimization parameters from embedding to host
/external/apache-commons-math/src/main/java/org/apache/commons/math/optimization/linear/
DLinearOptimizer.java18 package org.apache.commons.math.optimization.linear;
22 import org.apache.commons.math.optimization.GoalType;
23 import org.apache.commons.math.optimization.OptimizationException;
24 import org.apache.commons.math.optimization.RealPointValuePair;
DAbstractLinearOptimizer.java18 package org.apache.commons.math.optimization.linear;
23 import org.apache.commons.math.optimization.GoalType;
24 import org.apache.commons.math.optimization.OptimizationException;
25 import org.apache.commons.math.optimization.RealPointValuePair;
/external/apache-commons-math/src/main/java/org/apache/commons/math/optimization/direct/
DPowellOptimizer.java18 package org.apache.commons.math.optimization.direct;
23 import org.apache.commons.math.optimization.GoalType;
24 import org.apache.commons.math.optimization.OptimizationException;
25 import org.apache.commons.math.optimization.RealPointValuePair;
26 import org.apache.commons.math.optimization.general.AbstractScalarDifferentiableOptimizer;
27 import org.apache.commons.math.optimization.univariate.AbstractUnivariateRealOptimizer;
28 import org.apache.commons.math.optimization.univariate.BracketFinder;
29 import org.apache.commons.math.optimization.univariate.BrentOptimizer;
/external/tensorflow/tensorflow/python/data/experimental/benchmarks/
Dautotune_benchmark.py26 from tensorflow.python.data.experimental.ops import optimization
46 math_ops.matmul, num_parallel_calls=optimization.AUTOTUNE)
84 num_parallel_calls=optimization.AUTOTUNE,
123 num_parallel_calls=optimization.AUTOTUNE)
169 dataset = dataset.map(f1, num_parallel_calls=optimization.AUTOTUNE)
172 num_parallel_calls=optimization.AUTOTUNE,
176 dataset = dataset.map(f2, num_parallel_calls=optimization.AUTOTUNE)
179 num_parallel_calls=optimization.AUTOTUNE,
183 dataset = dataset.map(f2, num_parallel_calls=optimization.AUTOTUNE)

Pages: 1 2 3 4 5 6 7 8 9 10 … 44  (next >>)