/external/webrtc/test/pc/e2e/analyzer/video/ |
D | multi_head_queue.h |
     34  queues_.push_back(std::deque<T>());  in MultiHeadQueue()
     40  for (auto& queue : queues_) {  in PushBack()
     47  RTC_CHECK_LT(index, queues_.size());  in PopFront()
     48  if (queues_[index].empty()) {  in PopFront()
     51  T out = queues_[index].front();  in PopFront()
     52  queues_[index].pop_front();  in PopFront()
     58  RTC_CHECK_LT(index, queues_.size());  in Front()
     59  if (queues_[index].empty()) {  in Front()
     62  return queues_[index].front();  in Front()
     68  RTC_CHECK_LT(index, queues_.size());  in IsEmpty()
     [all …]
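The matches above outline a broadcast queue: PushBack fans each value out to every per-reader deque, while PopFront/Front/IsEmpty operate on one reader's deque selected by index. A minimal sketch of that shape (the class body below is a reconstruction from the matched lines, not the verbatim WebRTC source):

    #include <deque>
    #include <optional>
    #include <vector>

    // One deque per reader: writes fan out to all readers,
    // reads consume independently from a single reader's deque.
    template <typename T>
    class MultiHeadQueue {
     public:
      explicit MultiHeadQueue(size_t readers) : queues_(readers) {}

      void PushBack(const T& value) {
        for (auto& queue : queues_) queue.push_back(value);  // fan out
      }

      std::optional<T> PopFront(size_t index) {
        if (queues_[index].empty()) return std::nullopt;
        T out = std::move(queues_[index].front());
        queues_[index].pop_front();
        return out;
      }

      bool IsEmpty(size_t index) const { return queues_[index].empty(); }

     private:
      std::vector<std::deque<T>> queues_;
    };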
|
/external/eigen/unsupported/Eigen/CXX11/src/ThreadPool/ |
D | NonBlockingThreadPool.h |
     25  queues_(num_threads),  in env_()
     55  queues_.push_back(new Queue());  in env_()
     71  for (size_t i = 0; i < threads_.size(); i++) delete queues_[i];  in ~NonBlockingThreadPoolTempl()  local
     79  Queue* q = queues_[pt->thread_id];  in Schedule()
     84  Queue* q = queues_[Rand(&pt->rand) % queues_.size()];  in Schedule()
    126  MaxSizeVector<Queue*> queues_;  variable
    140  Queue* q = queues_[thread_id];  in WorkerLoop()
    175  const size_t size = queues_.size();  in Steal()
    180  Task t = queues_[victim]->PopBack();  in Steal()
    204  *t = queues_[victim]->PopBack();  in WaitForWork()
    [all …]
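This is the work-stealing layout: one queue per worker thread, Schedule pushes to the calling worker's own queue (or a random queue from outside the pool), and idle workers steal from the back of a random victim. A minimal sketch assuming mutex-guarded deques instead of Eigen's lock-free RunQueue (all names below are illustrative):

    #include <deque>
    #include <functional>
    #include <mutex>
    #include <optional>
    #include <random>
    #include <vector>

    using Task = std::function<void()>;

    // Owners push/pop at the front; thieves take from the back,
    // which is the access pattern the matched lines show.
    struct WorkQueue {
      std::mutex mu;
      std::deque<Task> tasks;

      void PushFront(Task t) {
        std::lock_guard<std::mutex> lock(mu);
        tasks.push_front(std::move(t));
      }
      std::optional<Task> PopBack() {  // used by thieves
        std::lock_guard<std::mutex> lock(mu);
        if (tasks.empty()) return std::nullopt;
        Task t = std::move(tasks.back());
        tasks.pop_back();
        return t;
      }
    };

    // One steal attempt against a random victim; real pools retry
    // several victims before blocking in WaitForWork().
    std::optional<Task> Steal(std::vector<WorkQueue>& queues,
                              std::mt19937& rng) {
      std::uniform_int_distribution<size_t> pick(0, queues.size() - 1);
      return queues[pick(rng)].PopBack();
    }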
|
/external/tensorflow/tensorflow/core/kernels/ |
D | random_shuffle_queue_op.cc |
     64  return queues_[0].size();  in size()
    112  queues_[i].reserve(min_after_dequeue_);  in Initialize()
    118  DCHECK_GT(queues_[0].size(), size_t{0});  in DequeueLocked()
    119  int64 index = generator_() % queues_[0].size();  in DequeueLocked()
    122  (*tuple).push_back(*queues_[i][index].AccessTensor(ctx));  in DequeueLocked()
    123  queues_[i][index] = queues_[i].back();  in DequeueLocked()
    124  queues_[i].pop_back();  in DequeueLocked()
    146  if (queues_[0].size() < static_cast<size_t>(capacity_)) {  in TryEnqueue()
    148  queues_[i].push_back(PersistentTensor(tuple[i]));  in TryEnqueue()
    205  while (queues_[0].size() < static_cast<size_t>(capacity_)) {  in TryEnqueueMany()
    [all …]
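Lines 119-124 show the classic O(1) random-removal trick: pick a uniform index, overwrite that slot with the last element, then pop_back. Order is destroyed, which is exactly what a shuffle queue wants. A standalone sketch of the same move (generic T in place of TensorFlow's PersistentTensor):

    #include <random>
    #include <vector>

    // O(1) random removal via swap-with-back. The caller must
    // ensure the queue is non-empty, mirroring the DCHECK above.
    template <typename T>
    T RandomDequeue(std::vector<T>& queue, std::mt19937_64& generator) {
      size_t index = generator() % queue.size();
      T out = std::move(queue[index]);
      if (index + 1 != queue.size())            // avoid self-move
        queue[index] = std::move(queue.back());
      queue.pop_back();
      return out;
    }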
|
D | fifo_queue.cc |
     42  DCHECK_GT(queues_[0].size(), size_t{0});  in DequeueLocked()
     45  (*tuple).push_back(*queues_[i][0].AccessTensor(ctx));  in DequeueLocked()
     46  queues_[i].pop_front();  in DequeueLocked()
     68  if (queues_[0].size() < static_cast<size_t>(capacity_)) {  in TryEnqueue()
     70  queues_[i].push_back(PersistentTensor(tuple[i]));  in TryEnqueue()
    127  while (queues_[0].size() < static_cast<size_t>(capacity_)) {  in TryEnqueueMany()
    136  queues_[i].push_back(element);  in TryEnqueueMany()
    168  const int64 queue_size = queues_[0].size();  in TryDequeue()
    260  int64 queue_size = queues_[0].size();  in TryDequeueMany()
    282  queues_[j].push_front(element);  in TryDequeueMany()
    [all …]
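The TensorFlow queues store each tuple element column-wise: queues_[i] holds component i of every queued element, so queues_[0].size() is the queue depth and enqueue/dequeue walk all components in step. A sketch of that layout (the class is a reconstruction; "Tensor" stands in for tensorflow::Tensor):

    #include <deque>
    #include <vector>

    using Tensor = int;                 // placeholder for tensorflow::Tensor
    using Tuple = std::vector<Tensor>;  // one Tensor per component

    class FifoQueueSketch {
     public:
      explicit FifoQueueSketch(int num_components)
          : queues_(num_components) {}

      // Queue depth is the depth of any one component column.
      size_t size() const { return queues_[0].size(); }

      void Enqueue(const Tuple& tuple) {
        for (size_t i = 0; i < queues_.size(); ++i)
          queues_[i].push_back(tuple[i]);
      }

      Tuple DequeueLocked() {  // caller ensures size() > 0
        Tuple tuple;
        for (auto& q : queues_) {
          tuple.push_back(q.front());
          q.pop_front();
        }
        return tuple;
      }

     private:
      std::vector<std::deque<Tensor>> queues_;  // column per component
    };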
|
D | typed_queue.h |
     44  std::vector<SubQueue> queues_ TF_GUARDED_BY(mu_);
     67  queues_.reserve(num_components());  in Initialize()
     69  queues_.push_back(SubQueue());  in Initialize()
    114  for (const auto& sq : queues_) {  in MemoryUsed()
|
D | priority_queue.cc |
     63  DCHECK_GT(queues_[0].size(), 0);  in DequeueLocked()
     66  PersistentTensor persistent_tensor = gtl::ConsumeTop(&queues_[i]).second;  in DequeueLocked()
     89  if (queues_[0].size() < static_cast<size_t>(capacity_)) {  in TryEnqueue()
     99  queues_[i].emplace(priority, PersistentTensor(tuple[i]));  in TryEnqueue()
    156  while (queues_[0].size() < static_cast<size_t>(capacity_)) {  in TryEnqueueMany()
    179  queues_[i].emplace(priority, element);  in TryEnqueueMany()
    212  const int32 s = queues_[0].size();  in TryDequeue()
    304  int32 s = queues_[0].size();  in TryDequeueMany()
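The priority variant keeps the same column-wise layout but each per-component SubQueue is a heap of (priority, tensor) pairs: emplace pushes, gtl::ConsumeTop pops the top and dequeue takes .second. A sketch approximating ConsumeTop with std::priority_queue (the function name and Value type are placeholders):

    #include <cstdint>
    #include <queue>
    #include <utility>

    using Value = int;                          // placeholder payload
    using Entry = std::pair<int64_t, Value>;    // ordered by priority first

    // Approximation of gtl::ConsumeTop: remove and return the payload
    // of the highest-priority entry. Caller ensures the heap is non-empty.
    Value ConsumeTop(std::priority_queue<Entry>& heap) {
      Value v = heap.top().second;
      heap.pop();
      return v;
    }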
|
D | padding_fifo_queue.cc |
    102  int32 queue_size = queues_[0].size();  in TryDequeueMany()
    120  queues_[j].push_front(element);  in TryDequeueMany()
    124  if (allow_small_batch && !queues_[0].empty()) {  in TryDequeueMany()
    126  queue_size = queues_[0].size();  in TryDequeueMany()
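Line 120 hints at a rollback: when a many-element dequeue cannot complete (for example, the queue closes mid-batch), components already popped are pushed back at the front, in reverse, so FIFO order is restored. A sketch of just that restore step (Element and the function name are placeholders):

    #include <deque>
    #include <vector>

    using Element = int;  // stand-in for the stored tensor type

    // Put already-taken batch components back at the front of each
    // component column; iterating in reverse restores original order.
    void RestoreBatch(std::vector<std::deque<Element>>& queues,
                      const std::vector<std::vector<Element>>& taken) {
      for (size_t j = 0; j < queues.size(); ++j)
        for (auto it = taken[j].rbegin(); it != taken[j].rend(); ++it)
          queues[j].push_front(*it);
    }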
|
D | fifo_queue.h |
     54  return queues_[0].size();  in size()
|
D | priority_queue.h |
     73  return queues_[0].size();  in size()
|
/external/libchrome/base/task/sequence_manager/ |
D | graceful_queue_shutdown_helper.cc |
     23  queues_.push_back(std::move(task_queue));  in GracefullyShutdownTaskQueue()
     29  queues_.clear();  in OnSequenceManagerDeleted()
     36  result.swap(queues_);  in TakeQueues()
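The swap on line 36 is the take-under-lock idiom: TakeQueues moves the whole vector out while holding the lock and leaves queues_ empty, so destruction of the taken queues happens outside the critical section. A sketch of the shape (TaskQueue is a stand-in for internal::TaskQueueImpl):

    #include <memory>
    #include <mutex>
    #include <utility>
    #include <vector>

    struct TaskQueue {};  // placeholder for internal::TaskQueueImpl

    class ShutdownHelper {
     public:
      void GracefullyShutdownTaskQueue(std::unique_ptr<TaskQueue> queue) {
        std::lock_guard<std::mutex> lock(mu_);
        queues_.push_back(std::move(queue));
      }

      // Hand the accumulated queues to the caller and reset to empty;
      // the swap keeps the locked region minimal.
      std::vector<std::unique_ptr<TaskQueue>> TakeQueues() {
        std::vector<std::unique_ptr<TaskQueue>> result;
        std::lock_guard<std::mutex> lock(mu_);
        result.swap(queues_);
        return result;
      }

     private:
      std::mutex mu_;
      std::vector<std::unique_ptr<TaskQueue>> queues_;
    };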
|
D | sequence_manager_perftest.cc |
     70  queues_.clear();  in TearDown()
     87  queues_.push_back(manager_->CreateTaskQueue<TestTaskQueue>(  in Initialize()
    125  queues_[queue]->PostDelayedTask(FROM_HERE, delayed_task_closure_,  in TestDelayedTask()
    156  queues_[queue]->PostTask(FROM_HERE, immediate_task_closure_);  in TestImmediateTask()
    202  std::vector<scoped_refptr<SingleThreadTaskRunner>> queues_;  member in base::sequence_manager::SequenceManagerPerfTest
|
D | graceful_queue_shutdown_helper.h |
     41  std::vector<std::unique_ptr<internal::TaskQueueImpl>> queues_;  variable
|
/external/perfetto/src/trace_processor/ |
D | trace_sorter.cc |
     96  for (size_t i = 0; i < queues_.size(); i++) {  in SortAndExtractEventsBeyondWindow()
     97  auto& queue = queues_[i];  in SortAndExtractEventsBeyondWindow()
    117  Queue& queue = queues_[min_queue_idx];  in SortAndExtractEventsBeyondWindow()
    168  for (auto& q : queues_)  in SortAndExtractEventsBeyondWindow()
    186  for (auto& q : queues_) {  in SortAndExtractEventsBeyondWindow()
|
D | trace_sorter.h |
    160  queues_.resize(0);  in ExtractEventsForced()
    227  if (PERFETTO_UNLIKELY(index >= queues_.size()))  in GetQueue()
    228  queues_.resize(index + 1);  in GetQueue()
    229  return &queues_[index];  in GetQueue()
    248  std::vector<Queue> queues_;  variable
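Lines 227-229 are a lazy-growth accessor: indexing past the end resizes the vector, so per-source queues come into existence on first use. The same three lines, as a standalone sketch (Queue here is just a placeholder):

    #include <vector>

    struct Queue {};  // sorted staging buffer for one event source

    // Grow-on-demand lookup: missing queues are default-constructed
    // the first time their index is touched.
    Queue* GetQueue(std::vector<Queue>& queues, size_t index) {
      if (index >= queues.size())
        queues.resize(index + 1);
      return &queues[index];
    }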
|
/external/tensorflow/tensorflow/core/kernels/batching_util/ |
D | shared_batch_scheduler.h |
    225  QueueList queues_ TF_GUARDED_BY(mu_);
    444  if (queues_.empty()) {  in ~SharedBatchScheduler()
    509  queues_.push_back(std::move(internal_queue));  in AddQueue()
    510  if (next_queue_to_schedule_ == queues_.end()) {  in AddQueue()
    511  next_queue_to_schedule_ = queues_.begin();  in AddQueue()
    520  : options_(options), next_queue_to_schedule_(queues_.end()) {  in SharedBatchScheduler()
    542  const int num_queues = queues_.size();  in ThreadLogic()
    546  DCHECK(next_queue_to_schedule_ != queues_.end());  in ThreadLogic()
    565  next_queue_to_schedule_ = queues_.erase(next_queue_to_schedule_);  in ThreadLogic()
    569  if (next_queue_to_schedule_ == queues_.end() && !queues_.empty()) {  in ThreadLogic()
    [all …]
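ThreadLogic round-robins a persistent iterator over the queue list: erase() of a dead queue yields the iterator to the next one, and reaching end() wraps back to begin(). A sketch of one scheduling step under those assumptions (Queue and the closed_and_empty flag are placeholders for the scheduler's internal queue state):

    #include <list>
    #include <memory>

    struct Queue {
      bool closed_and_empty = false;  // placeholder removal condition
    };

    // One round-robin step: service or erase the current queue, then
    // advance, wrapping to begin() when the iterator falls off the end.
    void ScheduleOnce(std::list<std::unique_ptr<Queue>>& queues,
                      std::list<std::unique_ptr<Queue>>::iterator& next) {
      if (queues.empty()) return;
      if (next == queues.end()) next = queues.begin();
      if ((*next)->closed_and_empty) {
        next = queues.erase(next);  // erase returns the following iterator
      } else {
        // ...process one batch from *next...
        ++next;
      }
      if (next == queues.end() && !queues.empty())
        next = queues.begin();  // wrap around
    }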
|
/external/vulkan-validation-layers/tests/ |
D | vktestbinding.cpp |
    329  queues_[GRAPHICS].push_back(queue_storage.back().get());  in init_queues()
    333  queues_[COMPUTE].push_back(queue_storage.back().get());  in init_queues()
    337  queues_[DMA].push_back(queue_storage.back().get());  in init_queues()
    344  EXPECT(!queues_[GRAPHICS].empty() || !queues_[COMPUTE].empty());  in init_queues()
|
D | vktestbinding.h |
    214  const std::vector<Queue *> &graphics_queues() const { return queues_[GRAPHICS]; }  in graphics_queues()
    215  const std::vector<Queue *> &compute_queues() { return queues_[COMPUTE]; }  in compute_queues()
    216  const std::vector<Queue *> &dma_queues() { return queues_[DMA]; }  in dma_queues()
    281  std::vector<Queue *> queues_[QUEUE_COUNT];  variable
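Here queues_ is not a single container but an array of vectors indexed by capability, mirroring Vulkan queue families. A sketch of the layout (Queue and the enum values are placeholders modeled on the matched lines):

    #include <vector>

    struct Queue {};  // placeholder for the test framework's queue wrapper

    enum QueueIndex { GRAPHICS = 0, COMPUTE, DMA, QUEUE_COUNT };

    struct DeviceQueues {
      // One pointer list per queue capability, indexed by the enum.
      std::vector<Queue*> queues_[QUEUE_COUNT];

      const std::vector<Queue*>& graphics_queues() const {
        return queues_[GRAPHICS];
      }
    };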
|