/external/tensorflow/tensorflow/core/kernels/batching_util/

batch_scheduler_test.cc
    49  batch.AddTask(std::unique_ptr<FakeTask>(task0));             in TEST()
    58  batch.AddTask(std::unique_ptr<FakeTask>(task1));             in TEST()
    85  batch.AddTask(std::unique_ptr<FakeTask>(new FakeTask(3)));   in TEST()
    99  batch->AddTask(std::unique_ptr<FakeTask>(new FakeTask(3)));  in TEST()
   120  batch.AddTask(std::unique_ptr<FakeTask>(task0));             in TEST()
   123  batch.AddTask(std::unique_ptr<FakeTask>(task1));             in TEST()

batch_scheduler.h
    89  void AddTask(std::unique_ptr<TaskType> task);
   220  void Batch<TaskType>::AddTask(std::unique_ptr<TaskType> task) {  in AddTask() function
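
Batch<TaskType>::AddTask (declared at batch_scheduler.h:89, defined at line 220) takes ownership of the task through a std::unique_ptr, which is what the test lines above exercise. A minimal sketch of that usage follows; FakeTask is a local stand-in modeled on the test's task type, and it assumes Batch also exposes num_tasks(), size(), and Close() as declared in batch_scheduler.h.

    #include <memory>

    #include "tensorflow/core/kernels/batching_util/batch_scheduler.h"
    #include "tensorflow/core/platform/logging.h"

    // Stand-in for the test-local FakeTask in batch_scheduler_test.cc:
    // a BatchTask whose size() just echoes the constructor argument.
    class FakeTask : public tensorflow::serving::BatchTask {
     public:
      explicit FakeTask(size_t size) : size_(size) {}
      size_t size() const override { return size_; }

     private:
      const size_t size_;
    };

    void SketchBatchUsage() {
      tensorflow::serving::Batch<FakeTask> batch;
      // AddTask() takes ownership of the task through the unique_ptr.
      batch.AddTask(std::unique_ptr<FakeTask>(new FakeTask(3)));
      batch.AddTask(std::unique_ptr<FakeTask>(new FakeTask(5)));
      CHECK_EQ(batch.num_tasks(), 2);  // two tasks in the batch
      CHECK_EQ(batch.size(), 8);       // size() sums the task sizes: 3 + 5
      batch.Close();                   // no AddTask() calls are allowed after Close()
    }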

shared_batch_scheduler.h
   780  task_handle_batches_.back()->AddTask(std::move(task_handles[i]));        in ScheduleWithLazySplit()
   859  batches_.back()->AddTask(std::move(output_tasks[i]));                    in ScheduleWithoutOrEagerSplit()
  1006  batch_to_schedule->AddTask(std::move(task_handles[i]->GetSplitTask()));  in ScheduleBatch()

serial_device_batch_scheduler.h
   510  current_batch_->AddTask(std::move(*task));  in Schedule()

adaptive_shared_batch_scheduler.h
   772  current_batch_->AddTask(std::move(task));  in Schedule()
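
In shared_batch_scheduler.h, serial_device_batch_scheduler.h, and adaptive_shared_batch_scheduler.h, the scheduling paths all end the same way: the incoming task is moved into the batch currently being filled (current_batch_ or batches_.back()). The sketch below is a deliberately simplified, hypothetical version of that pattern; TinyScheduler is not a real class, and the actual schedulers also handle queue capacity, batch closing, task splitting, and worker wake-ups.

    #include <deque>
    #include <memory>

    #include "tensorflow/core/kernels/batching_util/batch_scheduler.h"

    // Hypothetical minimal scheduler: appends each task to the newest open
    // batch, starting a fresh batch once the current one reaches max_batch_size.
    template <typename TaskType>
    class TinyScheduler {
     public:
      explicit TinyScheduler(size_t max_batch_size)
          : max_batch_size_(max_batch_size) {}

      void Schedule(std::unique_ptr<TaskType>* task) {
        if (batches_.empty() || batches_.back()->size() >= max_batch_size_) {
          batches_.push_back(
              std::make_unique<tensorflow::serving::Batch<TaskType>>());
        }
        // Same move-into-the-current-batch step as in the schedulers above.
        batches_.back()->AddTask(std::move(*task));
      }

     private:
      const size_t max_batch_size_;
      std::deque<std::unique_ptr<tensorflow::serving::Batch<TaskType>>> batches_;
    };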

/external/tensorflow/tensorflow/core/tfrt/runtime/

work_queue_interface.cc
    33  void AddTask(tfrt::TaskFunction work) override {          in AddTask() function in tensorflow::tfrt_stub::__anon0e3943c60111::DefaultWorkQueueWrapper
    41  work_queue_->AddTask(std::move(wrapped_work));            in AddTask()
    44  void AddTask(const tfrt::ExecutionContext& exec_ctx,      in AddTask() function in tensorflow::tfrt_stub::__anon0e3943c60111::DefaultWorkQueueWrapper
    53  work_queue_->AddTask(exec_ctx, std::move(wrapped_work));  in AddTask()

tf_threadpool_concurrent_work_queue.cc
    44  void TfThreadPoolWorkQueue::AddTask(tfrt::TaskFunction work) {  in AddTask() function in tensorflow::tfrt_stub::TfThreadPoolWorkQueue
    54  AddTask(std::move(work));                                       in AddBlockingTask()

tf_threadpool_concurrent_work_queue_test.cc
    90  tf_threadpool_cwq_.AddTask(tfrt::TaskFunction([&n, &m, &latch] {  in TEST_F()
   107  tf_threadpool_cwq_.AddTask(tfrt::TaskFunction([&n, &m, &latch] {  in TEST_F()

tf_threadpool_concurrent_work_queue.h
    53  void AddTask(::tfrt::TaskFunction work) override;
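
The tfrt work-queue classes above all accept work wrapped in a tfrt::TaskFunction, constructed from a void() callable as in the test's lambdas. The sketch below assumes an already-constructed queue (for example a TfThreadPoolWorkQueue; its setup is elided), uses absl::Notification in place of the tfrt::latch the test uses, and the tfrt include paths are assumptions.

    #include "absl/synchronization/notification.h"
    #include "tfrt/host_context/concurrent_work_queue.h"  // assumed path
    #include "tfrt/host_context/task_function.h"          // assumed path

    // Enqueue a single non-blocking task and wait for it to finish.
    // `work_queue` can be any tfrt::ConcurrentWorkQueue implementation,
    // such as the TfThreadPoolWorkQueue declared above.
    void EnqueueAndWait(tfrt::ConcurrentWorkQueue* work_queue) {
      absl::Notification done;
      work_queue->AddTask(tfrt::TaskFunction([&done] {
        // ... real work would run here on a pool thread ...
        done.Notify();
      }));
      done.WaitForNotification();  // block until the queue has run the task
    }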

/external/tensorflow/tensorflow/core/tfrt/run_handler_thread_pool/

run_handler_concurrent_work_queue.cc
    85  void RunHandlerThreadWorkQueue::AddTask(TaskFunction work) {              in AddTask() function in tfrt::tf::RunHandlerThreadWorkQueue
    86  non_blocking_work_queue_.AddTask(std::move(work));                        in AddTask()
    89  void RunHandlerThreadWorkQueue::AddTask(const ExecutionContext& exec_ctx,  in AddTask() function in tfrt::tf::RunHandlerThreadWorkQueue

run_handler_concurrent_work_queue.h
    98  void AddTask(TaskFunction work) override;
   100  void AddTask(const ExecutionContext& exec_ctx, TaskFunction work) override;

run_handler_concurrent_work_queue_test.cc
   121  queue_->AddTask(*exec_ctx_, TaskFunction([&n, &m] {  in TEST_F()
   134  queue_->AddTask(TaskFunction([&n, &m] {              in TEST_F()
   147  queue_->AddTask(*exec_ctx_, TaskFunction([&n, &m] {  in TEST_F()
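
RunHandlerThreadWorkQueue exposes the two AddTask overloads declared in the header: a plain one that forwards to the non-blocking work queue (line 86 above) and one that also takes a tfrt::ExecutionContext, exercised in the test as queue_->AddTask(*exec_ctx_, ...). The sketch below only mirrors those call shapes; constructing the queue and the ExecutionContext is elided, since the test fixture handles both.

    #include "tensorflow/core/tfrt/run_handler_thread_pool/run_handler_concurrent_work_queue.h"

    // Exercise both overloads. `queue` and `exec_ctx` are assumed to be
    // already set up the way the test fixture builds them.
    void EnqueueBoth(tfrt::tf::RunHandlerThreadWorkQueue& queue,
                     const tfrt::ExecutionContext& exec_ctx) {
      // Plain overload: forwarded to the non-blocking work queue.
      queue.AddTask(tfrt::TaskFunction([] { /* work */ }));

      // ExecutionContext overload, as exercised at test lines 121 and 147.
      queue.AddTask(exec_ctx, tfrt::TaskFunction([] { /* work */ }));
    }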

/external/tensorflow/tensorflow/lite/delegates/gpu/metal/

compute_task.h
    55  absl::Status AddTask(ComputeTask* task);

compute_task.cc
    75  absl::Status ComputeTask::AddTask(ComputeTask* task) {  in AddTask() function in tflite::gpu::metal::ComputeTask

inference_context.cc
   122  return dst->task.AddTask(&src->task);  in MergeNodes()
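
Unlike the scheduler and work-queue variants above, ComputeTask::AddTask takes another ComputeTask and returns an absl::Status; inference_context.cc calls it from MergeNodes() to fold a source node's task into the destination node's task. A minimal sketch of that call shape, where FuseNodes is a hypothetical helper rather than part of the delegate:

    #include "absl/status/status.h"
    #include "tensorflow/lite/delegates/gpu/metal/compute_task.h"

    // Hypothetical helper mirroring the MergeNodes() call site above: the
    // destination task absorbs the source task, and the status is propagated.
    absl::Status FuseNodes(tflite::gpu::metal::ComputeTask* dst,
                           tflite::gpu::metal::ComputeTask* src) {
      return dst->AddTask(src);
    }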

/external/libchrome/base/

observer_list_unittest.cc
   130  base::BindOnce(&AddRemoveThread::AddTask, weak_factory_.GetWeakPtr()));  in ThreadMain()
   145  void AddTask() {                                                         in AddTask() function in base::__anon3b97dba10111::AddRemoveThread
   164  base::BindOnce(&AddRemoveThread::AddTask, weak_factory_.GetWeakPtr()));  in AddTask()
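
In this libchrome test, AddTask() is a helper on AddRemoveThread that performs one round of observer churn and then re-posts itself through a WeakPtr (lines 130 and 164), so the chain of posted tasks stops cleanly once the object is destroyed. A simplified sketch of that self-reposting pattern; RepostingWorker and its members are hypothetical and much smaller than the real test helper.

    #include <utility>

    #include "base/bind.h"
    #include "base/location.h"
    #include "base/memory/weak_ptr.h"
    #include "base/single_thread_task_runner.h"

    class RepostingWorker {
     public:
      explicit RepostingWorker(scoped_refptr<base::SingleThreadTaskRunner> runner)
          : task_runner_(std::move(runner)), weak_factory_(this) {}

      void AddTask() {
        // ... one unit of work goes here (the test adds/removes observers) ...

        // Re-post through a WeakPtr: if the worker is destroyed before the
        // posted callback runs, the callback is dropped instead of touching
        // freed memory.
        task_runner_->PostTask(
            FROM_HERE,
            base::BindOnce(&RepostingWorker::AddTask, weak_factory_.GetWeakPtr()));
      }

     private:
      scoped_refptr<base::SingleThreadTaskRunner> task_runner_;
      base::WeakPtrFactory<RepostingWorker> weak_factory_;
    };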

/external/tensorflow/tensorflow/core/kernels/data/experimental/

data_service_dataset_op.cc
   550  Status AddTask(const TaskInfo& task_info) TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {  in AddTask() function in tensorflow::data::DataServiceDatasetOp::Dataset::Iterator
   650  Status s = AddTask(it->second);                                               in UpdateTasks()
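
Here AddTask() carries the TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) annotation, so callers such as UpdateTasks() must already hold the iterator's mutex when invoking it, and clang's thread-safety analysis checks that at compile time. The class below is purely illustrative of that annotation pattern; TaskTracker is hypothetical and far simpler than the real iterator.

    #include "tensorflow/core/platform/mutex.h"
    #include "tensorflow/core/platform/status.h"
    #include "tensorflow/core/platform/thread_annotations.h"

    class TaskTracker {
     public:
      // Requires mu_ to be held by the caller, like Iterator::AddTask() above.
      tensorflow::Status AddTask(int task_id) TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
        last_task_id_ = task_id;     // safe: mu_ is guaranteed to be held here
        ++num_tasks_;
        return tensorflow::Status();  // default-constructed Status is OK
      }

      // Takes the lock first, then calls AddTask(), mirroring UpdateTasks().
      tensorflow::Status UpdateTasks(int task_id) {
        tensorflow::mutex_lock l(mu_);
        return AddTask(task_id);
      }

     private:
      tensorflow::mutex mu_;
      int last_task_id_ TF_GUARDED_BY(mu_) = -1;
      int num_tasks_ TF_GUARDED_BY(mu_) = 0;
    };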