Searched refs:queues_ (Results 1 – 15 of 15) sorted by relevance

/external/tensorflow/tensorflow/core/kernels/
random_shuffle_queue_op.cc
65 return queues_[0].size(); in size()
113 queues_[i].reserve(min_after_dequeue_); in Initialize()
119 DCHECK_GT(queues_[0].size(), size_t{0}); in DequeueLocked()
120 int64_t index = generator_() % queues_[0].size(); in DequeueLocked()
123 (*tuple).push_back(queues_[i][index]); in DequeueLocked()
124 queues_[i][index] = queues_[i].back(); in DequeueLocked()
125 queues_[i].pop_back(); in DequeueLocked()
147 if (queues_[0].size() < static_cast<size_t>(capacity_)) { in TryEnqueue()
149 queues_[i].push_back(tuple[i]); in TryEnqueue()
207 while (queues_[0].size() < static_cast<size_t>(capacity_)) { in TryEnqueueMany()
[all …]
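
The random-shuffle dequeue above is the classic swap-with-back removal: pick a random index, move that element out, overwrite the hole with the vector's last element, and pop the back, giving O(1) removal at the cost of element order, which a shuffle queue does not preserve anyway. A minimal standalone sketch with hypothetical names (the real kernel applies this per tuple component to Tensor vectors under a mutex, and draws the index as generator_() % size rather than through a distribution):

    #include <random>
    #include <vector>

    // Remove and return a uniformly random element in O(1): move the
    // chosen element out, fill the hole with the back element, pop the
    // back. The queue must be non-empty; element order is destroyed.
    template <typename T>
    T DequeueRandom(std::vector<T>& queue, std::mt19937_64& generator) {
      std::uniform_int_distribution<size_t> pick(0, queue.size() - 1);
      const size_t index = pick(generator);
      T out = std::move(queue[index]);
      if (index != queue.size() - 1)
        queue[index] = std::move(queue.back());
      queue.pop_back();
      return out;
    }
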
fifo_queue.cc
43 DCHECK_GT(queues_[0].size(), size_t{0}); in DequeueLocked()
46 (*tuple).push_back(queues_[i][0]); in DequeueLocked()
47 queues_[i].pop_front(); in DequeueLocked()
69 if (queues_[0].size() < static_cast<size_t>(capacity_)) { in TryEnqueue()
71 queues_[i].push_back(tuple[i]); in TryEnqueue()
127 while (queues_[0].size() < static_cast<size_t>(capacity_)) { in TryEnqueueMany()
136 queues_[i].push_back(element); in TryEnqueueMany()
168 const int64_t queue_size = queues_[0].size(); in TryDequeue()
260 int64_t queue_size = queues_[0].size(); in TryDequeueMany()
282 queues_[j].push_front(element); in TryDequeueMany()
[all …]
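
fifo_queue.cc dequeues at the front and enqueues at the back; the push_front at line 282 is the rollback path, restoring elements that a batched dequeue had already popped when the attempt is cancelled partway through. A sketch of both moves, assuming std::deque and hypothetical names:

    #include <deque>
    #include <utility>
    #include <vector>

    // FIFO dequeue: elements leave from the front in arrival order.
    // The queue must be non-empty.
    template <typename T>
    T DequeueFront(std::deque<T>& queue) {
      T out = std::move(queue.front());
      queue.pop_front();
      return out;
    }

    // Rollback for a cancelled batched dequeue: push the already taken
    // elements back onto the front, last taken first, so the queue is
    // restored to its original order.
    template <typename T>
    void RequeueFront(std::deque<T>& queue, std::vector<T>* taken) {
      for (auto it = taken->rbegin(); it != taken->rend(); ++it)
        queue.push_front(std::move(*it));
      taken->clear();
    }
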
typed_queue.h
44 std::vector<SubQueue> queues_ TF_GUARDED_BY(mu_);
67 queues_.reserve(num_components()); in Initialize()
69 queues_.push_back(SubQueue()); in Initialize()
110 for (const auto& sq : queues_) { in MemoryUsed()
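
typed_queue.h shows the layout all of these TensorFlow queues share: queues_ holds one SubQueue per tuple component, guarded by a mutex (TF_GUARDED_BY(mu_)), and every component stays the same length, which is why the files above can use queues_[0].size() as the queue size. A sketch of that per-component layout, with hypothetical names:

    #include <cstddef>
    #include <mutex>
    #include <vector>

    // One SubQueue per tuple component: the i-th logical element is the
    // tuple (queues_[0][i], ..., queues_[k-1][i]), so all sub-queues
    // have equal length and queues_[0].size() is the queue size.
    template <typename SubQueue>
    class TypedQueueSketch {
     public:
      explicit TypedQueueSketch(int num_components) {
        queues_.reserve(num_components);
        for (int i = 0; i < num_components; ++i) queues_.push_back(SubQueue());
      }

      size_t size() {
        std::lock_guard<std::mutex> lock(mu_);
        return queues_.empty() ? 0 : queues_[0].size();
      }

     private:
      std::mutex mu_;
      std::vector<SubQueue> queues_;  // guarded by mu_
    };
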
priority_queue.cc
64 DCHECK_GT(queues_[0].size(), 0); in DequeueLocked()
67 Tensor tensor = gtl::ConsumeTop(&queues_[i]).second; in DequeueLocked()
90 if (queues_[0].size() < static_cast<size_t>(capacity_)) { in TryEnqueue()
100 queues_[i].emplace(priority, tuple[i]); in TryEnqueue()
155 while (queues_[0].size() < static_cast<size_t>(capacity_)) { in TryEnqueueMany()
177 queues_[i].emplace(priority, element); in TryEnqueueMany()
210 const int32_t s = queues_[0].size(); in TryDequeue()
302 int32_t s = queues_[0].size(); in TryDequeueMany()
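
priority_queue.cc keeps (priority, Tensor) pairs in each sub-queue and dequeues with gtl::ConsumeTop, which moves the top of a heap out instead of copying it. A sketch of how such a helper can look over std::priority_queue (a guess at the shape of the gtl utility, not its actual implementation):

    #include <queue>
    #include <utility>

    // Move the top element out of a priority queue, then pop it. top()
    // returns a const reference, so the usual trick is a const_cast
    // followed immediately by pop(), which discards the moved-from slot.
    template <typename T, typename Container, typename Compare>
    T ConsumeTop(std::priority_queue<T, Container, Compare>* q) {
      T top = std::move(const_cast<T&>(q->top()));
      q->pop();
      return top;
    }

With pair elements like (priority, payload), the comparator should compare only the priority, since the payload type need not be ordered.
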
padding_fifo_queue.cc
102 int32_t queue_size = queues_[0].size(); in TryDequeueMany()
120 queues_[j].push_front(element); in TryDequeueMany()
124 if (allow_small_batch && !queues_[0].empty()) { in TryDequeueMany()
126 queue_size = queues_[0].size(); in TryDequeueMany()
fifo_queue.h
54 return queues_[0].size(); in size()
priority_queue.h
73 return queues_[0].size(); in size()
/external/libchrome/base/task/sequence_manager/
graceful_queue_shutdown_helper.cc
23 queues_.push_back(std::move(task_queue)); in GracefullyShutdownTaskQueue()
29 queues_.clear(); in OnSequenceManagerDeleted()
36 result.swap(queues_); in TakeQueues()
sequence_manager_perftest.cc
70 queues_.clear(); in TearDown()
87 queues_.push_back(manager_->CreateTaskQueue<TestTaskQueue>( in Initialize()
125 queues_[queue]->PostDelayedTask(FROM_HERE, delayed_task_closure_, in TestDelayedTask()
156 queues_[queue]->PostTask(FROM_HERE, immediate_task_closure_); in TestImmediateTask()
202 std::vector<scoped_refptr<SingleThreadTaskRunner>> queues_; member in base::sequence_manager::SequenceManagerPerfTest
graceful_queue_shutdown_helper.h
41 std::vector<std::unique_ptr<internal::TaskQueueImpl>> queues_; variable
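
The shutdown helper collects queues with push_back and hands the whole batch off in TakeQueues() via result.swap(queues_), which returns everything collected and empties the member in one step. A sketch of that take-by-swap pattern (QueueImpl stands in for internal::TaskQueueImpl):

    #include <memory>
    #include <mutex>
    #include <utility>
    #include <vector>

    struct QueueImpl {};  // stand-in for internal::TaskQueueImpl

    class ShutdownHelperSketch {
     public:
      void GracefullyShutdownTaskQueue(std::unique_ptr<QueueImpl> queue) {
        std::lock_guard<std::mutex> lock(lock_);
        queues_.push_back(std::move(queue));
      }

      // Hand off everything collected so far and leave queues_ empty;
      // swap() makes this O(1), with no per-element moves.
      std::vector<std::unique_ptr<QueueImpl>> TakeQueues() {
        std::lock_guard<std::mutex> lock(lock_);
        std::vector<std::unique_ptr<QueueImpl>> result;
        result.swap(queues_);
        return result;
      }

     private:
      std::mutex lock_;
      std::vector<std::unique_ptr<QueueImpl>> queues_;
    };
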
/external/perfetto/src/trace_processor/sorter/
trace_sorter.h
171 for (const auto& queue : queues_) { in ExtractEventsForced()
174 queues_.clear(); in ExtractEventsForced()
301 if (PERFETTO_UNLIKELY(index >= queues_.size())) in GetQueue()
302 queues_.resize(index + 1); in GetQueue()
303 return &queues_[index]; in GetQueue()
351 std::vector<Queue> queues_; variable
trace_sorter.cc
47 for (auto& queue : queues_) { in ~TraceSorter()
110 for (size_t i = 0; i < queues_.size(); i++) { in SortAndExtractEventsUntilAllocId()
111 auto& queue = queues_[i]; in SortAndExtractEventsUntilAllocId()
128 Queue& queue = queues_[min_queue_idx]; in SortAndExtractEventsUntilAllocId()
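
GetQueue() in trace_sorter.h grows queues_ on demand: any index is valid because a miss triggers resize(index + 1), default-constructing the gap (PERFETTO_UNLIKELY is only a branch-prediction hint). A sketch of that grow-on-demand indexing:

    #include <cstddef>
    #include <vector>

    struct Queue {};  // per-source staging buffer, details elided

    // Grow-on-demand indexing: callers may address any queue index
    // without registering it first. Note the returned pointer is only
    // stable until the next resize.
    Queue* GetQueue(std::vector<Queue>& queues, size_t index) {
      if (index >= queues.size())
        queues.resize(index + 1);
      return &queues[index];
    }
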
/external/tensorflow/tensorflow/core/kernels/batching_util/
shared_batch_scheduler.h
261 QueueList queues_ TF_GUARDED_BY(mu_);
537 if (queues_.empty()) { in ~SharedBatchScheduler()
629 queues_.push_back(std::move(internal_queue)); in AddQueueAfterRewritingOptions()
630 if (next_queue_to_schedule_ == queues_.end()) { in AddQueueAfterRewritingOptions()
631 next_queue_to_schedule_ = queues_.begin(); in AddQueueAfterRewritingOptions()
640 : options_(options), next_queue_to_schedule_(queues_.end()) { in SharedBatchScheduler()
668 const int num_queues = queues_.size(); in GetNextWorkItem_Locked()
672 DCHECK(next_queue_to_schedule_ != queues_.end()); in GetNextWorkItem_Locked()
692 next_queue_to_schedule_ = queues_.erase(next_queue_to_schedule_); in GetNextWorkItem_Locked()
696 if (next_queue_to_schedule_ == queues_.end() && !queues_.empty()) { in GetNextWorkItem_Locked()
[all …]
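
The batch scheduler walks queues_ round-robin with a persistent list iterator, next_queue_to_schedule_. Two details in the snippets carry the scheme: std::list::erase() returns the iterator to the element after the one removed, so deleting a drained queue also advances the rotation, and a wrap check resets end() to begin(). A sketch of that advance-or-erase step, with hypothetical names:

    #include <list>

    // One round-robin step. Erasing the current queue advances the
    // iterator for free (std::list::erase returns the next position);
    // hitting end() wraps back around to begin().
    template <typename Q>
    void AdvanceRoundRobin(std::list<Q>& queues,
                           typename std::list<Q>::iterator& next,
                           bool erase_current) {
      if (erase_current)
        next = queues.erase(next);
      else
        ++next;
      if (next == queues.end() && !queues.empty())
        next = queues.begin();
    }

A linked list (rather than std::vector) suits this pattern because erasing one queue never invalidates iterators to the others.
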
/external/vulkan-validation-layers/tests/
vktestbinding.cpp
329 queues_[GRAPHICS].push_back(queue_storage.back().get()); in init_queues()
333 queues_[COMPUTE].push_back(queue_storage.back().get()); in init_queues()
337 queues_[DMA].push_back(queue_storage.back().get()); in init_queues()
344 EXPECT(!queues_[GRAPHICS].empty() || !queues_[COMPUTE].empty()); in init_queues()
vktestbinding.h
214 const std::vector<Queue *> &graphics_queues() const { return queues_[GRAPHICS]; } in graphics_queues()
215 const std::vector<Queue *> &compute_queues() { return queues_[COMPUTE]; } in compute_queues()
216 const std::vector<Queue *> &dma_queues() { return queues_[DMA]; } in dma_queues()
281 std::vector<Queue *> queues_[QUEUE_COUNT]; variable
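
In the Vulkan test binding, queues_ is a fixed array of vectors indexed by a capability enum (GRAPHICS, COMPUTE, DMA); init_queues() keeps ownership in a separate storage vector and files raw pointers into the matching bucket. A sketch of that owning-storage-plus-views layout, with hypothetical names:

    #include <memory>
    #include <vector>

    enum QueueType { GRAPHICS = 0, COMPUTE, DMA, QUEUE_COUNT };
    struct Queue {};  // stand-in for the test framework's Queue wrapper

    class DeviceSketch {
     public:
      // queue_storage_ owns every queue; queues_[type] holds non-owning
      // views, so one queue may be filed under several capabilities.
      void AddQueue(std::unique_ptr<Queue> queue, QueueType type) {
        queue_storage_.push_back(std::move(queue));
        queues_[type].push_back(queue_storage_.back().get());
      }

      const std::vector<Queue*>& graphics_queues() const { return queues_[GRAPHICS]; }
      const std::vector<Queue*>& compute_queues() const { return queues_[COMPUTE]; }

     private:
      std::vector<std::unique_ptr<Queue>> queue_storage_;
      std::vector<Queue*> queues_[QUEUE_COUNT];
    };
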