
Searched refs:config_proto (Results 1 – 25 of 195) sorted by relevance

/external/tensorflow/tensorflow/core/grappler/optimizers/
meta_optimizer_test.cc
140 ConfigProto config_proto; in TEST_F() local
142 *config_proto.mutable_graph_options()->mutable_rewrite_options(); in TEST_F()
146 MetaOptimizer optimizer(nullptr, config_proto); in TEST_F()
159 ConfigProto config_proto; in TEST_F() local
161 *config_proto.mutable_graph_options()->mutable_rewrite_options(); in TEST_F()
167 MetaOptimizer optimizer(nullptr, config_proto); in TEST_F()
181 ConfigProto config_proto; in TEST_F() local
183 *config_proto.mutable_graph_options()->mutable_rewrite_options(); in TEST_F()
189 MetaOptimizer optimizer(nullptr, config_proto); in TEST_F()
203 ConfigProto config_proto; in TEST_F() local
[all …]
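
The test above drives MetaOptimizer with a ConfigProto whose graph_options.rewrite_options (a RewriterConfig) control Grappler. A rough Python-side sketch of the same knobs, with illustrative values not taken from the test:

    from tensorflow.core.protobuf import config_pb2, rewriter_config_pb2

    config_proto = config_pb2.ConfigProto()
    rewrite_options = config_proto.graph_options.rewrite_options
    rewrite_options.min_graph_nodes = -1  # do not skip small graphs
    rewrite_options.constant_folding = rewriter_config_pb2.RewriterConfig.OFF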
/external/tensorflow/tensorflow/python/kernel_tests/signal/
test_util.py
25 def grappler_optimize(graph, fetches=None, config_proto=None): argument
39 if config_proto is None:
40 config_proto = config_pb2.ConfigProto()
41 config_proto.graph_options.rewrite_options.min_graph_nodes = -1
46 return tf_optimizer.OptimizeGraph(config_proto, metagraph)
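
A sketch of what grappler_optimize() boils down to, assuming the usual flow in which fetches are registered via the train_op collection so Grappler knows which nodes to keep:

    import tensorflow as tf
    from tensorflow.core.protobuf import config_pb2
    from tensorflow.python.grappler import tf_optimizer

    with tf.Graph().as_default() as graph:
        x = tf.constant([1.0, 2.0])
        y = tf.math.square(x)
        graph.add_to_collection("train_op", y)  # mark the fetch for Grappler

    metagraph = tf.compat.v1.train.export_meta_graph(graph=graph)
    config_proto = config_pb2.ConfigProto()
    config_proto.graph_options.rewrite_options.min_graph_nodes = -1
    optimized_graph_def = tf_optimizer.OptimizeGraph(config_proto, metagraph)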
/external/tensorflow/tensorflow/python/grappler/
tf_optimizer.py
27 def OptimizeGraph(config_proto, argument
50 if not isinstance(config_proto, config_pb2.ConfigProto):
55 config_proto.SerializeToString(),
66 config_proto.SerializeToString(),
tf_optimizer_wrapper.cc
70 tensorflow::ConfigProto config_proto; in PYBIND11_MODULE() local
71 if (!config_proto.ParseFromString(serialized_config_proto)) { in PYBIND11_MODULE()
99 config_proto); in PYBIND11_MODULE()
/external/tensorflow/tensorflow/compiler/mlir/tensorflow/tests/
inlining.mlir
19 …%result = "tf.StatefulPartitionedCall"() {config = "", config_proto = "", executor_type = "", f = …
29 …%result = "tf.PartitionedCall"() {config = "", config_proto = "", executor_type = "", f = @simple_…
42 …%result = "tf.TPUPartitionedCall"(%0) {config = "", config_proto = "", executor_type = "", f = @si…
66 …%0 = "tf.PartitionedCall"() {config = "", config_proto = "", executor_type = "", f = @custom_calle…
67 …%1 = "tf.PartitionedCall"() {config = "", config_proto = "", executor_type = "", f = @custom_calle…
77 …%result = "tf.PartitionedCall"(%arg) {config = "", config_proto = "", executor_type = "", f = @inl…
89 …%result = "tf.StatefulPartitionedCall"() {config = "", config_proto = "", executor_type = "", f = …
112 …%result = "tf.StatefulPartitionedCall"() {config = "", config_proto = "", executor_type = "", f = …
116 …%result_2 = "tf.StatefulPartitionedCall"() {config = "", config_proto = "", executor_type = "", f …
142 …%result = "tf.StatefulPartitionedCall"() {config = "", config_proto = "", executor_type = "", f = …
[all …]
tf_saved_model_optimize_global_tensors_interprocedural.mlir
19 …%val = "tf.PartitionedCall"(%arg0) {config = "", config_proto = "", executor_type = "", f = @f_cal…
24 …%val = "tf.PartitionedCall"(%arg0) {config = "", config_proto = "", executor_type = "", f = @f_cal…
52 …%val = "tf.PartitionedCall"(%arg0) {config = "", config_proto = "", executor_type = "", f = @f_com…
58 …%val = "tf.PartitionedCall"(%arg0) {config = "", config_proto = "", executor_type = "", f = @f_com…
83 …%val = "tf.PartitionedCall"(%arg0) {config = "", config_proto = "", executor_type = "", f = @f_cal…
84 …%val_2 = "tf.PartitionedCall"(%arg0) {config = "", config_proto = "", executor_type = "", f = @f_c…
110 …%val = "tf.PartitionedCall"(%arg0) {config = "", config_proto = "", executor_type = "", f = @f_cal…
116 …%val = "tf.PartitionedCall"(%arg0) {config = "", config_proto = "", executor_type = "", f = @f_cal…
144 …%val = "tf.StatefulPartitionedCall"(%arg0) {config = "", config_proto = "", executor_type = "", f …
150 …%val = "tf.PartitionedCall"(%arg0) {config = "", config_proto = "", executor_type = "", f = @f_cal…
[all …]
/external/tensorflow/tensorflow/python/data/experimental/service/
server_lib.py
159 config_proto = config
161 config_proto = service_config_pb2.DispatcherConfig(
170 config_proto.SerializeToString())
339 config_proto = config
341 config_proto = service_config_pb2.WorkerConfig(
350 config_proto.SerializeToString())
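
The hits above show server_lib converting the config argument into a serialized DispatcherConfig/WorkerConfig proto. A minimal public-API sketch (TF 2.x, in-process servers):

    import tensorflow as tf

    dispatcher = tf.data.experimental.service.DispatchServer(
        tf.data.experimental.service.DispatcherConfig(port=0))
    worker = tf.data.experimental.service.WorkerServer(
        tf.data.experimental.service.WorkerConfig(
            dispatcher_address=dispatcher.target.split("://")[1]))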
/external/tensorflow/tensorflow/python/distribute/cluster_resolver/
cluster_resolver.py
38 def get_accelerator_devices(master, config_proto): argument
50 with session.Session(master, config=config_proto) as s:
133 config_proto=None): argument
158 devices = get_accelerator_devices(master, config_proto)
391 config_proto=None): argument
404 del task_type, task_id, config_proto
614 config_proto=None): argument
616 task_type, task_id, config_proto)
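
In cluster_resolver.py, config_proto is an optional session config that num_accelerators() forwards to a temporary tf.compat.v1.Session used only to list devices on the master. A hedged sketch (the resolver choice and config fields are illustrative):

    import tensorflow as tf
    from tensorflow.core.protobuf import config_pb2

    resolver = tf.distribute.cluster_resolver.TFConfigClusterResolver()
    config_proto = config_pb2.ConfigProto(allow_soft_placement=True)
    accelerators = resolver.num_accelerators(config_proto=config_proto)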
/external/tensorflow/tensorflow/python/tpu/
tpu_embedding.py
1500 def config_proto(self): member in TPUEmbedding
1528 config_proto = elc.TPUEmbeddingConfiguration()
1530 table_descriptor = config_proto.table_descriptor.add()
1590 feature_descriptor = config_proto.feature_descriptor.add()
1602 config_proto.mode = self._mode
1603 config_proto.num_hosts = self._num_hosts
1604 config_proto.num_tensor_cores = self._num_cores
1605 config_proto.sharding_strategy = (
1608 config_proto.pipeline_execution_with_tensor_core = (
1611 config_proto.profile_data_directory = self._profile_data_directory
[all …]
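
TPUEmbedding.config_proto assembles a TPUEmbeddingConfiguration proto from the table and feature descriptions. A hand-built sketch of such a proto; all values are illustrative, and the pb2 module is the one tpu_embedding.py imports as elc:

    from tensorflow.core.protobuf.tpu import tpu_embedding_configuration_pb2 as elc

    config_proto = elc.TPUEmbeddingConfiguration()
    table = config_proto.table_descriptor.add()
    table.name = "example_table"  # illustrative
    table.vocabulary_size = 1000
    table.dimension = 16
    config_proto.mode = elc.TPUEmbeddingConfiguration.INFERENCE
    config_proto.num_hosts = 1
    config_proto.num_tensor_cores = 8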
/external/tensorflow/tensorflow/compiler/mlir/tf2xla/
mlir_bridge_rollout_policy.cc
25 std::optional<ConfigProto> config_proto, in GetMlirBridgeRolloutPolicy() argument
28 switch (GetMlirBridgeRolloutState(config_proto)) { in GetMlirBridgeRolloutPolicy()
42 std::optional<ConfigProto> config_proto, in LogGraphFeatures() argument
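
GetMlirBridgeRolloutPolicy decides from an optional ConfigProto whether the MLIR-based TF/XLA bridge runs. On the Python side the relevant knob lives under ConfigProto.experimental (a hedged sketch):

    from tensorflow.core.protobuf import config_pb2

    config_proto = config_pb2.ConfigProto()
    config_proto.experimental.enable_mlir_bridge = True  # opt the session into the MLIR bridge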
/external/tensorflow/tensorflow/compiler/mlir/tensorflow/transforms/
graph_optimization_pass.h
32 const ::tensorflow::ConfigProto& config_proto, in GetPassState()
36 return config_proto.experimental().enable_mlir_graph_optimization() in GetPassState()
42 const ::tensorflow::ConfigProto& config_proto, ModuleOp module,
constant_fold.cc
149 tensorflow::ConfigProto config_proto; in ConstantFoldFallbackHook() local
152 (*config_proto.mutable_device_count())["CPU"] = 1; in ConstantFoldFallbackHook()
153 (*config_proto.mutable_device_count())["GPU"] = 0; in ConstantFoldFallbackHook()
160 const size_t proto_size = config_proto.ByteSizeLong(); in ConstantFoldFallbackHook()
167 if (!config_proto.SerializeWithCachedSizesToArray( in ConstantFoldFallbackHook()
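
GetPassState() keys off ConfigProto.experimental.enable_mlir_graph_optimization, and the constant-folding fallback hook builds a CPU-only ConfigProto and serializes it. A Python rendering approximating the C++ above:

    from tensorflow.core.protobuf import config_pb2

    config_proto = config_pb2.ConfigProto()
    config_proto.experimental.enable_mlir_graph_optimization = True
    config_proto.device_count["CPU"] = 1  # fold constants on CPU only
    config_proto.device_count["GPU"] = 0
    serialized = config_proto.SerializeToString()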
/external/tensorflow/tensorflow/core/common_runtime/
function_optimization_registry_test.cc
32 Status Run(const DeviceSet& device_set, const ConfigProto& config_proto, in Run() argument
49 ConfigProto config_proto; in TEST() local
51 device_set, config_proto, /*graph=*/nullptr, /*flib_def=*/nullptr, in TEST()
function_optimization_registration_test.cc
30 Status Run(const DeviceSet& device_set, const ConfigProto& config_proto, in Run() argument
48 ConfigProto config_proto; in TEST() local
50 device_set, config_proto, /*graph=*/nullptr, /*flib_def=*/nullptr, in TEST()
function_optimization_registry_pass_failure_test.cc
31 Status Run(const DeviceSet& device_set, const ConfigProto& config_proto, in Run() argument
48 ConfigProto config_proto; in TEST() local
50 device_set, config_proto, /*graph=*/nullptr, /*flib_def=*/nullptr, in TEST()
/external/tensorflow/tensorflow/compiler/tf2xla/
mlir_bridge_pass.h
34 const DeviceSet* device_set, const ConfigProto& config_proto,
40 Status Run(const ConfigProto& config_proto, mlir::ModuleOp module,
53 const DeviceSet* device_set, const ConfigProto& config_proto,
mlir_bridge_pass.cc
167 const DeviceSet* device_set, const ConfigProto& config_proto, in GetPassState() argument
185 graph, &function_library, config_proto, in GetPassState()
216 Status MlirBridgePass::Run(const ConfigProto& config_proto, in Run() argument
242 auto pass_state = GetPassState(/*device_set=*/nullptr, config_proto, graph, in Run()
266 const DeviceSet* device_set, const ConfigProto& config_proto, in GetPassState() argument
282 graph, /*function_library=*/&function_library, config_proto, in GetPassState()
/external/tensorflow/tensorflow/compiler/mlir/tfr/integration/
graph_decompose_pass.cc
34 const DeviceSet* device_set, const ConfigProto& config_proto, in GetPassState() argument
43 const ConfigProto& config_proto, mlir::ModuleOp module, const Graph& graph, in Run() argument
45 if (GetPassState(/*device_set=*/nullptr, config_proto, graph, in Run()
/external/tensorflow/tensorflow/core/tfrt/eager/
transform_graph_function.cc
146 ConfigProto config_proto; in TransformGraphFunction() local
147 config_proto.mutable_experimental()->set_use_tfrt(true); in TransformGraphFunction()
148 config_proto.mutable_graph_options() in TransformGraphFunction()
152 config_proto.mutable_graph_options() in TransformGraphFunction()
160 func_lib_def, device_set, cpu_device, config_proto, in TransformGraphFunction()
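
TransformGraphFunction builds a ConfigProto with TFRT enabled before running the function-optimization passes. The equivalent Python field access (a hedged sketch; use_tfrt is the experimental flag set in the C++ above):

    from tensorflow.core.protobuf import config_pb2

    config_proto = config_pb2.ConfigProto()
    config_proto.experimental.use_tfrt = True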
/external/tensorflow/tensorflow/compiler/mlir/
mlir_graph_optimization_pass.h
63 const DeviceSet* device_set, const ConfigProto& config_proto,
67 virtual Status Run(const ConfigProto& config_proto, mlir::ModuleOp module,
119 Status Run(const DeviceSet& device_set, const ConfigProto& config_proto,
146 const DeviceSet* device_set, const ConfigProto& config_proto,
mlir_graph_optimization_pass_test.cc
38 const ConfigProto& config_proto, const Graph& graph,
40 MOCK_METHOD4(Run, Status(const ConfigProto& config_proto,
53 const ConfigProto& config_proto, const Graph& graph,
68 const ConfigProto& config_proto, const Graph& graph,
73 Status Run(const ConfigProto& config_proto, mlir::ModuleOp module, in Run() argument
/external/tensorflow/tensorflow/compiler/xrt/kernels/
tpu_execute_op.cc
174 const xrt::XRTExecutionConfig& config_proto, in AllocateOutputTensors() argument
185 config_proto.return_exploded_tuple()); in AllocateOutputTensors()
301 xrt::XRTExecutionConfig config_proto; in DoWork() local
303 config_proto.ParseFromString(execution_config.scalar<tstring>()())); in DoWork()
305 int core_index_in_replica = config_proto.core_index_in_replica(); in DoWork()
306 bool release_inputs = config_proto.release_input_handles(); in DoWork()
307 bool release_compilation = config_proto.release_compilation_handle(); in DoWork()
360 get_buffers_fn, config_proto.execution_instance_key(), in DoWork()
361 config_proto.rng_seed(), tpu_program_group, backend, stream, in DoWork()
367 context, memory_manager.get(), node_context.get(), stream, config_proto, in DoWork()
/external/tensorflow/tensorflow/dtensor/python/
multi_client_util.py
87 config_proto = context.get_config()
88 config_proto.experimental.collective_group_leader = collective_leader
98 default_session_config=config_proto,
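
multi_client_util copies the current eager config and records a collective group leader on it before wiring up the multi-client cluster. A hedged sketch of that field (the leader job string is illustrative):

    from tensorflow.core.protobuf import config_pb2

    config_proto = config_pb2.ConfigProto()
    config_proto.experimental.collective_group_leader = "/job:worker/replica:0/task:0"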
/external/tensorflow/tensorflow/core/grappler/optimizers/inference/
batch_op_rewriter_test.cc
170 ConfigProto config_proto; in TEST_P() local
171 config_proto.mutable_experimental()->mutable_session_metadata()->set_version( in TEST_P()
173 config_proto.mutable_experimental()->mutable_session_metadata()->set_name( in TEST_P()
176 TF_ASSERT_OK(optimizer.InitWithConfig(config_proto, &rewriter_config)); in TEST_P()
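
The batch-op rewriter test above parameterizes the optimizer through the session metadata carried on ConfigProto.experimental. A sketch with illustrative name and version values:

    from tensorflow.core.protobuf import config_pb2

    config_proto = config_pb2.ConfigProto()
    config_proto.experimental.session_metadata.name = "example_model"  # illustrative
    config_proto.experimental.session_metadata.version = 42            # illustrative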
/external/tensorflow/tensorflow/compiler/tf2tensorrt/
trt_convert_api.cc
67 const ConfigProto& config_proto, grappler::Cluster* cluster, in RunGrappler() argument
88 std::move(*item), config_proto, cpu_device, cluster, out_graph_def)); in RunGrappler()
160 ConfigProto config_proto; in RunTfTrt() local
161 config_proto.mutable_graph_options()->mutable_rewrite_options()->CopyFrom( in RunTfTrt()
164 VLOG(4) << "Setting up Grappler parameters\n" << config_proto.DebugString(); in RunTfTrt()
172 config_proto, cluster.get(), in RunTfTrt()
431 ConfigProto config_proto; in InlineFunctions() local
433 config_proto.mutable_graph_options()->mutable_rewrite_options(); in InlineFunctions()
439 TF_RETURN_IF_ERROR(RunGrappler(meta_graph_def, {}, {}, config_proto, nullptr, in InlineFunctions()
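
RunTfTrt and InlineFunctions both copy a prepared RewriterConfig into the ConfigProto handed to Grappler. The same copy in Python (a sketch; the rewriter settings are illustrative):

    from tensorflow.core.protobuf import config_pb2, rewriter_config_pb2

    rewriter_config = rewriter_config_pb2.RewriterConfig(min_graph_nodes=-1)
    config_proto = config_pb2.ConfigProto()
    config_proto.graph_options.rewrite_options.CopyFrom(rewriter_config)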
