
Searched refs: config_proto (Results 1 – 25 of 144) sorted by relevance


/external/tensorflow/tensorflow/core/grappler/optimizers/
meta_optimizer_test.cc
146 ConfigProto config_proto; in TEST_F() local
148 *config_proto.mutable_graph_options()->mutable_rewrite_options(); in TEST_F()
152 MetaOptimizer optimizer(nullptr, config_proto); in TEST_F()
165 ConfigProto config_proto; in TEST_F() local
167 *config_proto.mutable_graph_options()->mutable_rewrite_options(); in TEST_F()
173 MetaOptimizer optimizer(nullptr, config_proto); in TEST_F()
187 ConfigProto config_proto; in TEST_F() local
189 *config_proto.mutable_graph_options()->mutable_rewrite_options(); in TEST_F()
195 MetaOptimizer optimizer(nullptr, config_proto); in TEST_F()
208 ConfigProto config_proto; in TEST_F() local
[all …]
custom_graph_optimizer.h
35 const ConfigProto& config_proto,
37 config_proto_ = config_proto;
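The meta_optimizer_test.cc and custom_graph_optimizer.h hits above share one pattern: fill in the RewriterConfig nested under ConfigProto.graph_options.rewrite_options, then hand the whole ConfigProto to the MetaOptimizer constructor. A minimal sketch of that pattern, assuming the usual TensorFlow headers; the function name is illustrative, and nullptr stands in for the CPU device exactly as in the tests:

    #include "tensorflow/core/grappler/optimizers/meta_optimizer.h"
    #include "tensorflow/core/protobuf/config.pb.h"
    #include "tensorflow/core/protobuf/rewriter_config.pb.h"

    // Sketch: configure Grappler through ConfigProto and construct the
    // MetaOptimizer from it, mirroring the TEST_F snippets above.
    void BuildMetaOptimizerFromConfig() {
      tensorflow::ConfigProto config_proto;
      tensorflow::RewriterConfig& rewriter_config =
          *config_proto.mutable_graph_options()->mutable_rewrite_options();
      // Run the rewrites even on tiny graphs (the same knob test_util.py
      // below sets as min_graph_nodes = -1).
      rewriter_config.set_min_graph_nodes(-1);
      // nullptr stands in for the CPU device, as in the tests above.
      tensorflow::grappler::MetaOptimizer optimizer(/*cpu_device=*/nullptr,
                                                    config_proto);
      (void)optimizer;  // The tests go on to call optimizer.Optimize(...).
    }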
/external/tensorflow/tensorflow/compiler/mlir/
mlir_bridge_rollout_policy.cc
23 absl::optional<ConfigProto> config_proto) { in GetUserRequest() argument
37 if (!config_proto.has_value()) { in GetUserRequest()
47 if (config_proto.value().experimental().enable_mlir_bridge()) { in GetUserRequest()
50 return config_proto.value().experimental().mlir_bridge_rollout(); in GetUserRequest()
54 const tensorflow::Graph& graph, absl::optional<ConfigProto> config_proto, in GetMlirBridgeRolloutPolicy() argument
56 switch (GetUserRequest(config_proto)) { in GetMlirBridgeRolloutPolicy()
mlir_graph_optimization_pass.h
67 const DeviceSet* device_set, const ConfigProto& config_proto,
70 virtual Status Run(const ConfigProto& config_proto, mlir::ModuleOp module,
134 Status Run(const DeviceSet& device_set, const ConfigProto& config_proto,
161 const ConfigProto& config_proto,
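mlir_bridge_rollout_policy.cc reads the user's request out of the ConfigProto's experimental block: enable_mlir_bridge short-circuits GetUserRequest(), otherwise the mlir_bridge_rollout enum decides. A sketch of the producing side, assuming only config.pb.h; the helper name is made up for illustration:

    #include "tensorflow/core/protobuf/config.pb.h"

    // Sketch: set the experimental field that GetUserRequest() above checks
    // first; if it is left false, the finer-grained mlir_bridge_rollout enum
    // (also under experimental()) is consulted instead.
    tensorflow::ConfigProto MakeMlirBridgeConfig() {
      tensorflow::ConfigProto config_proto;
      config_proto.mutable_experimental()->set_enable_mlir_bridge(true);
      return config_proto;
    }

The graph_optimization_pass.h hit further down gates its pass the same way, on experimental().enable_mlir_graph_optimization().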
/external/tensorflow/tensorflow/python/kernel_tests/signal/
test_util.py
29 def grappler_optimize(graph, fetches=None, config_proto=None): argument
43 if config_proto is None:
44 config_proto = config_pb2.ConfigProto()
45 config_proto.graph_options.rewrite_options.min_graph_nodes = -1
50 return tf_optimizer.OptimizeGraph(config_proto, metagraph)
/external/tensorflow/tensorflow/python/grappler/
tf_optimizer.py
27 def OptimizeGraph(config_proto, argument
50 if not isinstance(config_proto, config_pb2.ConfigProto):
52 type(config_proto))
56 config_proto.SerializeToString(),
tf_optimizer_wrapper.cc
68 tensorflow::ConfigProto config_proto; in PYBIND11_MODULE() local
69 if (!config_proto.ParseFromString( in PYBIND11_MODULE()
96 tensorflow::grappler::MetaOptimizer optimizer(cpu_device, config_proto); in PYBIND11_MODULE()
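tf_optimizer.py validates that it was given a config_pb2.ConfigProto, serializes it with SerializeToString(), and the pybind wrapper re-parses those bytes before constructing the MetaOptimizer. A sketch of the C++ half of that round-trip, with an illustrative function name and error message:

    #include <string>

    #include "tensorflow/core/framework/device_base.h"
    #include "tensorflow/core/grappler/optimizers/meta_optimizer.h"
    #include "tensorflow/core/platform/errors.h"
    #include "tensorflow/core/protobuf/config.pb.h"

    // Sketch: parse the bytes handed over from Python back into a ConfigProto
    // and build the optimizer from it, as tf_optimizer_wrapper.cc does.
    tensorflow::Status BuildOptimizerFromSerializedConfig(
        const std::string& serialized_config,
        tensorflow::DeviceBase* cpu_device) {
      tensorflow::ConfigProto config_proto;
      if (!config_proto.ParseFromString(serialized_config)) {
        return tensorflow::errors::InvalidArgument(
            "Could not parse the serialized ConfigProto.");
      }
      tensorflow::grappler::MetaOptimizer optimizer(cpu_device, config_proto);
      (void)optimizer;  // OptimizeGraph() then runs it over the MetaGraphDef.
      return tensorflow::Status::OK();
    }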
/external/tensorflow/tensorflow/compiler/tf2xla/
mlir_bridge_pass.cc
87 const DeviceSet* device_set, const ConfigProto& config_proto, in GetPassState() argument
97 graph, config_proto, /*uses_uninitialized_resource_args=*/false); in GetPassState()
115 Status MlirBridgePass::Run(const ConfigProto& config_proto, in Run() argument
119 if (GetPassState(/*device_set=*/nullptr, config_proto, graph) == in Run()
142 const ConfigProto& config_proto, in IsEnabled() argument
152 graph, config_proto, /*uses_uninitialized_resource_args=*/false); in IsEnabled()
mlir_bridge_pass.h
34 const ConfigProto& config_proto,
39 Status Run(const ConfigProto& config_proto, mlir::ModuleOp module,
50 bool IsEnabled(const DeviceSet* device_set, const ConfigProto& config_proto,
/external/tensorflow/tensorflow/compiler/mlir/tensorflow/translate/
upgrade_graph.cc
136 ConfigProto config_proto; in RunGrappler() local
138 config_proto.mutable_experimental()->set_use_tfrt(true); in RunGrappler()
139 config_proto.mutable_graph_options() in RunGrappler()
143 config_proto.mutable_graph_options() in RunGrappler()
148 config_proto.mutable_graph_options() in RunGrappler()
175 std::move(*item), config_proto, cpu_device, &cluster, &output_graph_def)); in RunGrappler()
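RunGrappler() in upgrade_graph.cc builds its ConfigProto for the TFRT path: it flips experimental().use_tfrt and then tweaks several graph_options rewrite settings that are elided in the snippets above. A sketch of only the visible part, with an illustrative helper name:

    #include "tensorflow/core/protobuf/config.pb.h"

    // Sketch: the fields visible in the RunGrappler() hits above; the exact
    // rewrite_options the real code sets are truncated in the listing.
    tensorflow::ConfigProto MakeTfrtGrapplerConfig() {
      tensorflow::ConfigProto config_proto;
      config_proto.mutable_experimental()->set_use_tfrt(true);
      // The real code continues with config_proto.mutable_graph_options()->...
      // calls whose arguments are elided above.
      config_proto.mutable_graph_options()->mutable_rewrite_options();
      return config_proto;
    }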
/external/tensorflow/tensorflow/python/distribute/cluster_resolver/
cluster_resolver.py
42 def get_accelerator_devices(master, config_proto): argument
54 with session.Session(master, config=config_proto) as s:
137 config_proto=None): argument
162 devices = get_accelerator_devices(master, config_proto)
395 config_proto=None): argument
408 del task_type, task_id, config_proto
618 config_proto=None): argument
620 task_type, task_id, config_proto)
/external/tensorflow/tensorflow/compiler/mlir/tensorflow/tests/
tf_saved_model_optimize_global_tensors_interprocedural.mlir
19 …%val = "tf.PartitionedCall"(%arg0) {config = "", config_proto = "", executor_type = "", f = @f_cal…
24 …%val = "tf.PartitionedCall"(%arg0) {config = "", config_proto = "", executor_type = "", f = @f_cal…
52 …%val = "tf.PartitionedCall"(%arg0) {config = "", config_proto = "", executor_type = "", f = @f_com…
58 …%val = "tf.PartitionedCall"(%arg0) {config = "", config_proto = "", executor_type = "", f = @f_com…
83 …%val = "tf.PartitionedCall"(%arg0) {config = "", config_proto = "", executor_type = "", f = @f_cal…
84 …%val_2 = "tf.PartitionedCall"(%arg0) {config = "", config_proto = "", executor_type = "", f = @f_c…
110 …%val = "tf.PartitionedCall"(%arg0) {config = "", config_proto = "", executor_type = "", f = @f_cal…
116 …%val = "tf.PartitionedCall"(%arg0) {config = "", config_proto = "", executor_type = "", f = @f_cal…
144 …%val = "tf.StatefulPartitionedCall"(%arg0) {config = "", config_proto = "", executor_type = "", f …
150 …%val = "tf.PartitionedCall"(%arg0) {config = "", config_proto = "", executor_type = "", f = @f_cal…
[all …]
inlining.mlir
14 …%result = "tf.StatefulPartitionedCall"() {config = "", config_proto = "", executor_type = "", f = …
31 …%result = "tf.TPUPartitionedCall"(%0) {config = "", config_proto = "", executor_type = "", f = @si…
48 …%result = "tf.PartitionedCall"(%arg) {config = "", config_proto = "", executor_type = "", f = @inl…
73 …%result = "tf.StatefulPartitionedCall"() {config = "", config_proto = "", executor_type = "", f = …
77 …%result_2 = "tf.StatefulPartitionedCall"() {config = "", config_proto = "", executor_type = "", f …
/external/tensorflow/tensorflow/python/tpu/
tpu_embedding.py
1419 def config_proto(self): member in TPUEmbedding
1447 config_proto = elc.TPUEmbeddingConfiguration()
1449 table_descriptor = config_proto.table_descriptor.add()
1503 config_proto.mode = self._mode
1504 config_proto.batch_size_per_tensor_core = self._batch_size_per_core
1505 config_proto.num_hosts = self._num_hosts
1506 config_proto.num_tensor_cores = self._num_cores
1507 config_proto.sharding_strategy = (
1511 config_proto.pipeline_execution_with_tensor_core = (
1514 config_proto.profile_data_directory = self._profile_data_directory
[all …]
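The config_proto property of TPUEmbedding assembles a TPUEmbeddingConfiguration proto field by field (table descriptors, mode, batch size, host and core counts, sharding strategy, and so on). A rough C++ equivalent of just the scalar fields visible above; the header path, the TRAINING enum value, and all numeric values are assumptions or placeholders:

    #include "tensorflow/core/protobuf/tpu/tpu_embedding_configuration.pb.h"

    // Sketch: mirrors the scalar assignments from the Python property above;
    // table_descriptor entries and optimizer settings are omitted.
    tensorflow::tpu::TPUEmbeddingConfiguration MakeEmbeddingConfig() {
      tensorflow::tpu::TPUEmbeddingConfiguration config_proto;
      config_proto.set_mode(tensorflow::tpu::TPUEmbeddingConfiguration::TRAINING);
      config_proto.set_batch_size_per_tensor_core(128);  // placeholder
      config_proto.set_num_hosts(1);                      // placeholder
      config_proto.set_num_tensor_cores(8);               // placeholder
      config_proto.set_pipeline_execution_with_tensor_core(true);
      return config_proto;
    }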
/external/tensorflow/tensorflow/compiler/mlir/tensorflow/transforms/
graph_optimization_pass.h
32 const ::tensorflow::ConfigProto& config_proto, in GetPassState()
34 return config_proto.experimental().enable_mlir_graph_optimization() in GetPassState()
39 ::tensorflow::Status Run(const ::tensorflow::ConfigProto& config_proto,
/external/tensorflow/tensorflow/core/common_runtime/
function_optimization_registry_pass_failure_test.cc
31 Status Run(const DeviceSet& device_set, const ConfigProto& config_proto, in Run() argument
48 ConfigProto config_proto; in TEST() local
50 device_set, config_proto, /*graph=*/nullptr, /*flib_def=*/nullptr, in TEST()
function_optimization_registration_test.cc
30 Status Run(const DeviceSet& device_set, const ConfigProto& config_proto, in Run() argument
48 ConfigProto config_proto; in TEST() local
50 device_set, config_proto, /*graph=*/nullptr, /*flib_def=*/nullptr, in TEST()
function_optimization_registry_test.cc
32 Status Run(const DeviceSet& device_set, const ConfigProto& config_proto, in Run() argument
49 ConfigProto config_proto; in TEST() local
51 device_set, config_proto, /*graph=*/nullptr, /*flib_def=*/nullptr, in TEST()
function_optimization_registry_no_pass_test.cc
30 ConfigProto config_proto; in TEST() local
32 device_set, config_proto, /*graph=*/nullptr, /*flib_def=*/nullptr, in TEST()
/external/tensorflow/tensorflow/compiler/mlir/tfr/integration/
graph_decompose_pass.cc
34 const DeviceSet* device_set, const ConfigProto& config_proto, in GetPassState() argument
41 Status GraphDecomposePass::Run(const ConfigProto& config_proto, in Run() argument
43 if (GetPassState(/*device_set=*/nullptr, config_proto, graph) == in Run()
graph_decompose_pass.h
37 const DeviceSet* device_set, const ConfigProto& config_proto,
42 Status Run(const ConfigProto& config_proto, mlir::ModuleOp module,
/external/tensorflow/tensorflow/compiler/jit/
xla_kernel_creator.cc
102 absl::optional<ConfigProto> config_proto; in CreateXlaKernel() local
103 if (flr->config_proto()) { in CreateXlaKernel()
104 config_proto = *flr->config_proto(); in CreateXlaKernel()
114 *fbody->graph, config_proto, /*uses_uninitialized_resource_args=*/true); in CreateXlaKernel()
/external/tensorflow/tensorflow/python/data/experimental/service/
server_lib.py
136 config_proto = service_config_pb2.DispatcherConfig(
144 config_proto.SerializeToString())
304 config_proto = service_config_pb2.WorkerConfig(
313 config_proto.SerializeToString())
/external/tensorflow/tensorflow/compiler/xrt/kernels/
tpu_execute_op.cc
173 const xrt::XRTExecutionConfig& config_proto, in AllocateOutputTensors() argument
184 config_proto.return_exploded_tuple()); in AllocateOutputTensors()
299 xrt::XRTExecutionConfig config_proto; in DoWork() local
301 config_proto.ParseFromString(execution_config.scalar<tstring>()())); in DoWork()
303 int core_index_in_replica = config_proto.core_index_in_replica(); in DoWork()
304 bool release_inputs = config_proto.release_input_handles(); in DoWork()
305 bool release_compilation = config_proto.release_compilation_handle(); in DoWork()
358 get_buffers_fn, config_proto.execution_instance_key(), in DoWork()
359 config_proto.rng_seed(), tpu_program_group, backend, stream, in DoWork()
365 context, memory_manager.get(), node_context.get(), stream, config_proto, in DoWork()
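DoWork() in tpu_execute_op.cc parses its execution_config scalar string input into an xrt::XRTExecutionConfig and then reads the handle-release, core-index, RNG-seed and exploded-tuple fields shown above. A sketch of producing such a serialized config on the client side; the header path and all field values are assumptions or placeholders:

    #include <string>

    #include "tensorflow/compiler/xrt/xrt.pb.h"

    // Sketch: populate the XRTExecutionConfig fields that DoWork() above reads
    // and serialize them into the string fed to the execution_config input.
    std::string MakeSerializedExecutionConfig() {
      xrt::XRTExecutionConfig config_proto;
      config_proto.set_core_index_in_replica(0);         // placeholder
      config_proto.set_rng_seed(42);                     // placeholder
      config_proto.set_release_input_handles(true);      // placeholder
      config_proto.set_release_compilation_handle(false);
      config_proto.set_return_exploded_tuple(false);
      return config_proto.SerializeAsString();
    }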
/external/perfetto/src/tracing/consumer_api_deprecated/
consumer_api_deprecated.cc
321 TraceConfig config_proto; in Create() local
322 bool parsed = config_proto.ParseFromArray(config_proto_buf, config_len); in Create()
328 if (!config_proto.duration_ms()) { in Create()
336 callback_arg, config_proto); in Create()
400 PERFETTO_EXPORTED_API Handle Create(const void* config_proto, in Create() argument
404 return TracingController::GetInstance()->Create(config_proto, config_len, in Create()
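The deprecated Perfetto consumer API's exported Create() takes a serialized TraceConfig as a raw pointer plus length; the controller re-parses it with ParseFromArray() and rejects configs whose duration_ms is zero. A sketch of building a buffer to pass in, assuming the perfetto::TraceConfig class from perfetto/tracing/core/trace_config.h and placeholder sizes:

    #include <string>

    #include "perfetto/tracing/core/trace_config.h"

    // Sketch: the Create() path above requires a non-zero duration_ms, so set
    // one (plus a trace buffer) before serializing the config to raw bytes.
    std::string MakeSerializedTraceConfig() {
      perfetto::TraceConfig config_proto;
      config_proto.set_duration_ms(10 * 1000);        // placeholder: 10 s trace
      config_proto.add_buffers()->set_size_kb(1024);  // placeholder buffer
      return config_proto.SerializeAsString();
    }

The resulting string's data() and size() then map onto the config_proto and config_len arguments of the exported Create() above.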
