/external/clang/test/SemaCXX/ |
D | warn-thread-safety-analysis.cpp |
    972   void func1(int y) LOCKS_EXCLUDED(mu_);
    973   template <typename T> void func2(T x) LOCKS_EXCLUDED(mu_);
    975   Mutex mu_;  member in thread_annot_lock_38::Foo
    991   Mutex *mu_;  member in thread_annot_lock_43::Foo
    997   int GetA() EXCLUSIVE_LOCKS_REQUIRED(foo_->mu_) { return a_; }  in GetA()
    998   int a_ GUARDED_BY(foo_->mu_);
    1006  fb->foo_->mu_->Lock();  in main()
    1008  fb->foo_->mu_->Unlock();  in main()
    1124  Mutex mu_;  member in thread_annot_lock_68_modified::Bar
    1131  mu_.Lock();  in func()
    [all …]
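The matches above exercise Clang's thread-safety annotations (GUARDED_BY, EXCLUSIVE_LOCKS_REQUIRED, LOCKS_EXCLUDED). A minimal sketch of that pattern, using the raw Clang attributes rather than the test file's macros; the Mutex wrapper and Counter class are illustrative, not taken from the test. Compile with -Wthread-safety to get the diagnostics.

    #include <mutex>

    // A capability type the analysis can track; wraps std::mutex for self-containment.
    class __attribute__((capability("mutex"))) Mutex {
     public:
      void Lock() __attribute__((acquire_capability())) { m_.lock(); }
      void Unlock() __attribute__((release_capability())) { m_.unlock(); }
     private:
      std::mutex m_;
    };

    class Counter {
     public:
      // Caller must not already hold mu_ (LOCKS_EXCLUDED in the listing above).
      void Increment() __attribute__((locks_excluded(mu_))) {
        mu_.Lock();
        ++value_;        // OK: mu_ is held here.
        mu_.Unlock();
      }
      int UnsafeRead() {
        return value_;   // -Wthread-safety: reading value_ requires holding mu_
      }
     private:
      Mutex mu_;
      int value_ __attribute__((guarded_by(mu_))) = 0;
    };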
|
/external/tensorflow/tensorflow/core/framework/ |
D | model.h |
    164   mutex_lock l(mu_);  in ~Node()
    174   mutex_lock l(node->mu_);  in ~Node()
    186   void add_input(std::shared_ptr<Node> node) TF_LOCKS_EXCLUDED(mu_) {  in add_input()
    187   mutex_lock l(mu_);  in add_input()
    192   void add_processing_time(int64 delta) TF_LOCKS_EXCLUDED(mu_) {  in add_processing_time()
    197   bool autotune() const TF_LOCKS_EXCLUDED(mu_) {  in autotune()
    202   int64 buffered_bytes() const TF_LOCKS_EXCLUDED(mu_) {  in buffered_bytes()
    207   int64 buffered_elements() const TF_LOCKS_EXCLUDED(mu_) {  in buffered_elements()
    212   int64 bytes_consumed() const TF_LOCKS_EXCLUDED(mu_) {  in bytes_consumed()
    217   int64 bytes_produced() const TF_LOCKS_EXCLUDED(mu_) {  in bytes_produced()
    [all …]
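The Node matches above all follow one discipline: every public accessor or mutator takes mu_ for the duration of the call, which is what TF_LOCKS_EXCLUDED(mu_) promises to callers. A sketch of that discipline, using std::mutex and std::lock_guard in place of TensorFlow's mutex types; member names are illustrative.

    #include <cstdint>
    #include <memory>
    #include <mutex>
    #include <vector>

    class Node {
     public:
      // Caller must not already hold mu_; the method takes it itself.
      void add_input(std::shared_ptr<Node> node) {
        std::lock_guard<std::mutex> l(mu_);
        inputs_.push_back(std::move(node));
      }

      void add_processing_time(int64_t delta) {
        std::lock_guard<std::mutex> l(mu_);
        processing_time_ += delta;
      }

      int64_t buffered_bytes() const {
        std::lock_guard<std::mutex> l(mu_);
        return buffered_bytes_;
      }

     private:
      mutable std::mutex mu_;  // mutable so const accessors can lock it
      std::vector<std::shared_ptr<Node>> inputs_;   // guarded by mu_
      int64_t processing_time_ = 0;                 // guarded by mu_
      int64_t buffered_bytes_ = 0;                  // guarded by mu_
    };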
|
/external/llvm-project/clang/test/SemaCXX/ |
D | warn-thread-safety-analysis.cpp |
    1003  void func1(int y) LOCKS_EXCLUDED(mu_);
    1004  template <typename T> void func2(T x) LOCKS_EXCLUDED(mu_);
    1006  Mutex mu_;  member in thread_annot_lock_38::Foo
    1022  Mutex *mu_;  member in thread_annot_lock_43::Foo
    1028  int GetA() EXCLUSIVE_LOCKS_REQUIRED(foo_->mu_) { return a_; }  in GetA()
    1029  int a_ GUARDED_BY(foo_->mu_);
    1037  fb->foo_->mu_->Lock();  in main()
    1039  fb->foo_->mu_->Unlock();  in main()
    1155  Mutex mu_;  member in thread_annot_lock_68_modified::Bar
    1162  mu_.Lock();  in func()
    [all …]
|
/external/rust/crates/grpcio-sys/grpc/src/core/lib/gprpp/ |
D | sync.h |
    42   Mutex() { gpr_mu_init(&mu_); }  in Mutex()
    43   ~Mutex() { gpr_mu_destroy(&mu_); }  in ~Mutex()
    48   gpr_mu* get() { return &mu_; }  in get()
    49   const gpr_mu* get() const { return &mu_; }  in get()
    52   gpr_mu mu_;
    58   explicit MutexLock(Mutex* mu) : mu_(mu->get()) { gpr_mu_lock(mu_); }  in MutexLock()
    59   explicit MutexLock(gpr_mu* mu) : mu_(mu) { gpr_mu_lock(mu_); }  in MutexLock()
    60   ~MutexLock() { gpr_mu_unlock(mu_); }  in ~MutexLock()
    66   gpr_mu* const mu_;
    71   explicit ReleasableMutexLock(Mutex* mu) : mu_(mu->get()) { gpr_mu_lock(mu_); }  in ReleasableMutexLock()
    [all …]
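The sync.h matches above show the common RAII pair: a Mutex class owning a raw gpr_mu handle, and a MutexLock guard that locks in its constructor and unlocks in its destructor. A stripped-down sketch of the same shape, wrapping pthread_mutex_t for self-containment rather than gpr_mu.

    #include <pthread.h>

    class Mutex {
     public:
      Mutex() { pthread_mutex_init(&mu_, nullptr); }
      ~Mutex() { pthread_mutex_destroy(&mu_); }
      Mutex(const Mutex&) = delete;
      Mutex& operator=(const Mutex&) = delete;
      pthread_mutex_t* get() { return &mu_; }

     private:
      pthread_mutex_t mu_;
    };

    class MutexLock {
     public:
      explicit MutexLock(Mutex* mu) : mu_(mu->get()) { pthread_mutex_lock(mu_); }
      ~MutexLock() { pthread_mutex_unlock(mu_); }
      MutexLock(const MutexLock&) = delete;
      MutexLock& operator=(const MutexLock&) = delete;

     private:
      pthread_mutex_t* const mu_;
    };

    // Usage: the lock is released automatically when `l` leaves scope.
    // void Counter::Increment() { MutexLock l(&mu_); ++count_; }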
|
D | fork.cc |
    63    gpr_mu_init(&mu_);  in ExecCtxState()
    74    gpr_mu_lock(&mu_);  in IncExecCtxCount()
    77    gpr_cv_wait(&cv_, &mu_, gpr_inf_future(GPR_CLOCK_REALTIME));  in IncExecCtxCount()
    80    gpr_mu_unlock(&mu_);  in IncExecCtxCount()
    93    gpr_mu_lock(&mu_);  in BlockExecCtx()
    95    gpr_mu_unlock(&mu_);  in BlockExecCtx()
    102   gpr_mu_lock(&mu_);  in AllowExecCtx()
    106   gpr_mu_unlock(&mu_);  in AllowExecCtx()
    110   gpr_mu_destroy(&mu_);  in ~ExecCtxState()
    116   gpr_mu mu_;  member in grpc_core::internal::ExecCtxState
    [all …]
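The fork.cc matches above show a mutex paired with a condition variable: IncExecCtxCount() waits on cv_ under mu_ while execution contexts are blocked (e.g. around fork()). A simplified sketch of that wait/notify pattern using <mutex> and <condition_variable> instead of gpr_mu/gpr_cv; the class is illustrative and omits the real counting logic.

    #include <condition_variable>
    #include <mutex>

    class ExecCtxState {
     public:
      void IncExecCtxCount() {
        std::unique_lock<std::mutex> l(mu_);
        while (blocked_) {
          cv_.wait(l);          // gpr_cv_wait(&cv_, &mu_, ...) in the original
        }
        ++count_;
      }

      void BlockExecCtx() {
        std::lock_guard<std::mutex> l(mu_);
        blocked_ = true;
      }

      void AllowExecCtx() {
        std::lock_guard<std::mutex> l(mu_);
        blocked_ = false;
        cv_.notify_all();       // wake everyone parked in IncExecCtxCount()
      }

     private:
      std::mutex mu_;
      std::condition_variable cv_;
      bool blocked_ = false;
      int count_ = 0;
    };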
|
/external/tensorflow/tensorflow/core/data/service/ |
D | dispatcher_impl.h |
    156   TF_EXCLUSIVE_LOCKS_REQUIRED(mu_);
    161   TF_EXCLUSIVE_LOCKS_REQUIRED(mu_);
    165   int64& dataset_id) TF_EXCLUSIVE_LOCKS_REQUIRED(mu_);
    171   TF_LOCKS_EXCLUDED(mu_);
    178   TF_EXCLUSIVE_LOCKS_REQUIRED(mu_);
    185   int64& job_client_id) TF_EXCLUSIVE_LOCKS_REQUIRED(mu_);
    192   TF_EXCLUSIVE_LOCKS_REQUIRED(mu_);
    199   TF_EXCLUSIVE_LOCKS_REQUIRED(mu_);
    208   TF_LOCKS_EXCLUDED(mu_);
    211   TF_LOCKS_EXCLUDED(mu_);
    [all …]
|
D | worker_impl.h |
    70   Status SendTaskUpdates() TF_LOCKS_EXCLUDED(mu_);
    73   TF_EXCLUSIVE_LOCKS_REQUIRED(mu_);
    76   void TaskCompletionThread() TF_LOCKS_EXCLUDED(mu_);
    78   void HeartbeatThread() TF_LOCKS_EXCLUDED(mu_);
    80   Status Heartbeat() TF_LOCKS_EXCLUDED(mu_);
    88   mutex mu_;  variable
    90   absl::flat_hash_map<int64, std::unique_ptr<Task>> tasks_ TF_GUARDED_BY(mu_);
    92   absl::flat_hash_set<int64> finished_tasks_ TF_GUARDED_BY(mu_);
    94   absl::flat_hash_set<int64> pending_completed_tasks_ TF_GUARDED_BY(mu_);
    95   bool cancelled_ TF_GUARDED_BY(mu_) = false;
    [all …]
|
/external/rust/crates/grpcio-sys/grpc/include/grpcpp/impl/codegen/ |
D | sync.h |
    49   Mutex() { g_core_codegen_interface->gpr_mu_init(&mu_); }  in Mutex()
    50   ~Mutex() { g_core_codegen_interface->gpr_mu_destroy(&mu_); }  in ~Mutex()
    55   gpr_mu* get() { return &mu_; }  in get()
    56   const gpr_mu* get() const { return &mu_; }  in get()
    60   gpr_mu mu_;  member
    71   explicit MutexLock(Mutex* mu) : mu_(mu->get()) {  in MutexLock()
    72   g_core_codegen_interface->gpr_mu_lock(mu_);  in MutexLock()
    74   explicit MutexLock(gpr_mu* mu) : mu_(mu) {  in MutexLock()
    75   g_core_codegen_interface->gpr_mu_lock(mu_);  in MutexLock()
    77   ~MutexLock() { g_core_codegen_interface->gpr_mu_unlock(mu_); }  in ~MutexLock()
    [all …]
|
/external/rust/crates/grpcio-sys/grpc/spm-cpp-include/grpcpp/impl/codegen/ |
D | sync.h |
    49   Mutex() { g_core_codegen_interface->gpr_mu_init(&mu_); }  in Mutex()
    50   ~Mutex() { g_core_codegen_interface->gpr_mu_destroy(&mu_); }  in ~Mutex()
    55   gpr_mu* get() { return &mu_; }  in get()
    56   const gpr_mu* get() const { return &mu_; }  in get()
    60   gpr_mu mu_;  member
    71   explicit MutexLock(Mutex* mu) : mu_(mu->get()) {  in MutexLock()
    72   g_core_codegen_interface->gpr_mu_lock(mu_);  in MutexLock()
    74   explicit MutexLock(gpr_mu* mu) : mu_(mu) {  in MutexLock()
    75   g_core_codegen_interface->gpr_mu_lock(mu_);  in MutexLock()
    77   ~MutexLock() { g_core_codegen_interface->gpr_mu_unlock(mu_); }  in ~MutexLock()
    [all …]
|
/external/grpc-grpc/src/core/lib/gprpp/ |
D | fork.cc |
    53    gpr_mu_init(&mu_);  in ExecCtxState()
    64    gpr_mu_lock(&mu_);  in IncExecCtxCount()
    67    gpr_cv_wait(&cv_, &mu_, gpr_inf_future(GPR_CLOCK_REALTIME));  in IncExecCtxCount()
    70    gpr_mu_unlock(&mu_);  in IncExecCtxCount()
    83    gpr_mu_lock(&mu_);  in BlockExecCtx()
    85    gpr_mu_unlock(&mu_);  in BlockExecCtx()
    92    gpr_mu_lock(&mu_);  in AllowExecCtx()
    96    gpr_mu_unlock(&mu_);  in AllowExecCtx()
    100   gpr_mu_destroy(&mu_);  in ~ExecCtxState()
    106   gpr_mu mu_;  member in grpc_core::internal::ExecCtxState
    [all …]
|
/external/tensorflow/tensorflow/core/platform/ |
D | mutex.h |
    105   internal::MuData mu_;  variable
    150   : mu_(&mu) {  in mutex_lock()
    151   mu_->lock();  in mutex_lock()
    155   : mu_(&mu) {  in mutex_lock()
    157   mu_ = nullptr;  in mutex_lock()
    163   mutex_lock(mutex_lock&& ml) noexcept TF_EXCLUSIVE_LOCK_FUNCTION(ml.mu_)  in mutex_lock()
    164   : mu_(ml.mu_) {  in mutex_lock()
    165   ml.mu_ = nullptr;  in mutex_lock()
    168   if (mu_ != nullptr) {  in TF_UNLOCK_FUNCTION()
    169   mu_->unlock();  in TF_UNLOCK_FUNCTION()
    [all …]
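The mutex.h matches above show a move-aware scoped lock: the move constructor copies mu_ and nulls it out in the source, so the destructor unlocks exactly once. A sketch of that behaviour under an assumed class name, using std::mutex and omitting the thread-safety annotations the real type carries.

    #include <mutex>

    class scoped_lock {
     public:
      explicit scoped_lock(std::mutex& mu) : mu_(&mu) { mu_->lock(); }

      // Transfer ownership: the moved-from guard's destructor becomes a no-op.
      scoped_lock(scoped_lock&& other) noexcept : mu_(other.mu_) {
        other.mu_ = nullptr;
      }

      scoped_lock(const scoped_lock&) = delete;
      scoped_lock& operator=(const scoped_lock&) = delete;

      ~scoped_lock() {
        if (mu_ != nullptr) mu_->unlock();
      }

     private:
      std::mutex* mu_;
    };

    // A function can now hand a held lock back to its caller:
    // scoped_lock AcquireState(std::mutex& mu) { return scoped_lock(mu); }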
|
/external/tensorflow/tensorflow/compiler/jit/ |
D | xla_device.h |
    145   TF_LOCKS_EXCLUDED(mu_);
    153   TF_LOCKS_EXCLUDED(mu_);
    157   Tensor* tensor) override TF_LOCKS_EXCLUDED(mu_);
    164   Tensor* tensor) TF_LOCKS_EXCLUDED(mu_);
    174   Status EnsureDeviceContextOk() TF_LOCKS_EXCLUDED(mu_);
    178   Status UseGpuDeviceInfo() TF_LOCKS_EXCLUDED(mu_);
    183   TF_LOCKS_EXCLUDED(mu_);
    184   bool AllowsSyncOnCompletion() const override TF_LOCKS_EXCLUDED(mu_);
    189   Status RefreshStatus() override TF_LOCKS_EXCLUDED(mu_);
    194   TF_EXCLUSIVE_LOCKS_REQUIRED(mu_);
    [all …]
|
/external/tensorflow/tensorflow/core/platform/cloud/ |
D | ram_file_block_cache.h |
    100   TF_LOCKS_EXCLUDED(mu_);
    103   void RemoveFile(const string& filename) override TF_LOCKS_EXCLUDED(mu_);
    106   void Flush() override TF_LOCKS_EXCLUDED(mu_);
    114   size_t CacheSize() const override TF_LOCKS_EXCLUDED(mu_);
    192   void Prune() TF_LOCKS_EXCLUDED(mu_);
    195   TF_EXCLUSIVE_LOCKS_REQUIRED(mu_);
    198   std::shared_ptr<Block> Lookup(const Key& key) TF_LOCKS_EXCLUDED(mu_);
    201   TF_LOCKS_EXCLUDED(mu_);
    204   void Trim() TF_EXCLUSIVE_LOCKS_REQUIRED(mu_);
    208   TF_LOCKS_EXCLUDED(mu_);
    [all …]
|
D | gcs_throttle.h |
    112   inline int64 available_tokens() TF_LOCKS_EXCLUDED(mu_) {  in available_tokens()
    113   mutex_lock l(mu_);  in available_tokens()
    125   bool is_enabled() TF_LOCKS_EXCLUDED(mu_) {  in is_enabled()
    126   mutex_lock l(mu_);  in is_enabled()
    137   void UpdateState() TF_EXCLUSIVE_LOCKS_REQUIRED(mu_);
    143   mutex mu_;  variable
    150   uint64 last_updated_secs_ TF_GUARDED_BY(mu_) = 0;
    159   int64 available_tokens_ TF_GUARDED_BY(mu_) = 0;
    162   GcsThrottleConfig config_ TF_GUARDED_BY(mu_);
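The GcsThrottle matches above pair a TF_LOCKS_EXCLUDED accessor (it takes the lock itself) with a TF_EXCLUSIVE_LOCKS_REQUIRED helper (it assumes the lock is already held). A sketch of that split under an assumed class name; the token refill arithmetic is illustrative, not the real GcsThrottle formula.

    #include <chrono>
    #include <cstdint>
    #include <mutex>

    class Throttle {
     public:
      int64_t available_tokens() {
        std::lock_guard<std::mutex> l(mu_);  // "locks excluded": the method acquires mu_
        UpdateState();                       // safe: mu_ is held for the call
        return available_tokens_;
      }

     private:
      // Must only be called with mu_ held (exclusive-locks-required in the original).
      void UpdateState() {
        uint64_t now = NowSeconds();
        available_tokens_ +=
            static_cast<int64_t>(now - last_updated_secs_) * tokens_per_sec_;
        last_updated_secs_ = now;
      }

      static uint64_t NowSeconds() {
        return static_cast<uint64_t>(
            std::chrono::duration_cast<std::chrono::seconds>(
                std::chrono::steady_clock::now().time_since_epoch()).count());
      }

      std::mutex mu_;
      uint64_t last_updated_secs_ = 0;  // guarded by mu_
      int64_t available_tokens_ = 0;    // guarded by mu_
      int64_t tokens_per_sec_ = 100;    // guarded by mu_
    };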
|
/external/tensorflow/tensorflow/c/experimental/filesystem/plugins/gcs/ |
D | ram_file_block_cache.h |
    111   ABSL_LOCKS_EXCLUDED(mu_);
    114   void RemoveFile(const std::string& filename) ABSL_LOCKS_EXCLUDED(mu_);
    117   void Flush() ABSL_LOCKS_EXCLUDED(mu_);
    125   size_t CacheSize() const ABSL_LOCKS_EXCLUDED(mu_);
    208   void Prune() ABSL_LOCKS_EXCLUDED(mu_);
    211   ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu_);
    214   std::shared_ptr<Block> Lookup(const Key& key) ABSL_LOCKS_EXCLUDED(mu_);
    217   TF_Status* status) ABSL_LOCKS_EXCLUDED(mu_);
    220   void Trim() ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu_);
    224   TF_Status* status) ABSL_LOCKS_EXCLUDED(mu_);
    [all …]
|
/external/tensorflow/tensorflow/core/common_runtime/gpu/ |
D | gpu_allocator_retry_test.cc |
    40    mutex_lock l(mu_);  in AllocateRaw()
    52    mutex_lock l(mu_);  in DeallocateRaw()
    60    mutex mu_;  member in tensorflow::__anon8bb5cd3e0111::FakeAllocator
    61    size_t memory_capacity_ TF_GUARDED_BY(mu_);
    80    mutex_lock l(mu_);  in WaitTurn()
    94    mutex_lock l(mu_);  in Done()
    103   void IncrementTurn() TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {  in IncrementTurn()
    112   mutex mu_;  member in tensorflow::__anon8bb5cd3e0111::AlternatingBarrier
    115   int next_turn_ TF_GUARDED_BY(mu_);
    116   std::vector<bool> done_ TF_GUARDED_BY(mu_);
    [all …]
|
/external/protobuf/src/google/protobuf/stubs/ |
D | mutex.h |
    99    void Lock() GOOGLE_PROTOBUF_ACQUIRE() { mu_.lock(); }  in Lock()
    100   void Unlock() GOOGLE_PROTOBUF_RELEASE() { mu_.unlock(); }  in Unlock()
    107   std::mutex mu_;
    109   CriticalSectionLock mu_;
    118   explicit MutexLock(Mutex *mu) : mu_(mu) { this->mu_->Lock(); }  in MutexLock()
    119   ~MutexLock() { this->mu_->Unlock(); }  in ~MutexLock()
    121   Mutex *const mu_;
    133   mu_(mu) { if (this->mu_ != nullptr) { this->mu_->Lock(); } }  in MutexLockMaybe()
    134   ~MutexLockMaybe() { if (this->mu_ != nullptr) { this->mu_->Unlock(); } }  in ~MutexLockMaybe()
    136   Mutex *const mu_;
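Besides the plain MutexLock, the protobuf matches above show MutexLockMaybe: identical, except a null mutex pointer is allowed and simply means "don't lock". A sketch of that idiom using std::mutex for self-containment; the class name mirrors the listing but the implementation here is illustrative.

    #include <mutex>

    class MutexLockMaybe {
     public:
      explicit MutexLockMaybe(std::mutex* mu) : mu_(mu) {
        if (mu_ != nullptr) mu_->lock();
      }
      ~MutexLockMaybe() {
        if (mu_ != nullptr) mu_->unlock();
      }
      MutexLockMaybe(const MutexLockMaybe&) = delete;
      MutexLockMaybe& operator=(const MutexLockMaybe&) = delete;

     private:
      std::mutex* const mu_;
    };

    // Usage: `mu` may legitimately be nullptr, e.g. when locking is optional.
    // void MaybeLocked(std::mutex* mu) { MutexLockMaybe l(mu); /* ... */ }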
|
/external/tensorflow/tensorflow/core/common_runtime/ |
D | scoped_allocator.h |
    54    ~ScopedAllocator() TF_LOCKS_EXCLUDED(mu_);
    69    void* AllocateRaw(int32 field_index, size_t num_bytes) TF_LOCKS_EXCLUDED(mu_);
    70    void DeallocateRaw(void* p) TF_LOCKS_EXCLUDED(mu_);
    77    mutex mu_;  variable
    78    int32 expected_call_count_ TF_GUARDED_BY(mu_);
    79    int32 live_alloc_count_ TF_GUARDED_BY(mu_);
    101   void DropFromTable() TF_LOCKS_EXCLUDED(mu_);
    103   TF_LOCKS_EXCLUDED(mu_) override;
    108   void DeallocateRaw(void* p) TF_LOCKS_EXCLUDED(mu_) override;
    117   mutex mu_;
    [all …]
|
/external/angle/third_party/abseil-cpp/absl/synchronization/ |
D | mutex.h |
    456   std::atomic<intptr_t> mu_;  // The Mutex state.
    525   explicit MutexLock(Mutex *mu) ABSL_EXCLUSIVE_LOCK_FUNCTION(mu) : mu_(mu) {  in MutexLock()
    526   this->mu_->Lock();  in MutexLock()
    534   : mu_(mu) {  in MutexLock()
    535   this->mu_->LockWhen(cond);  in MutexLock()
    543   ~MutexLock() ABSL_UNLOCK_FUNCTION() { this->mu_->Unlock(); }  in ABSL_UNLOCK_FUNCTION()
    546   Mutex *const mu_;
    555   explicit ReaderMutexLock(Mutex *mu) ABSL_SHARED_LOCK_FUNCTION(mu) : mu_(mu) {  in ReaderMutexLock()
    561   : mu_(mu) {  in ReaderMutexLock()
    570   ~ReaderMutexLock() ABSL_UNLOCK_FUNCTION() { this->mu_->ReaderUnlock(); }  in ABSL_UNLOCK_FUNCTION()
    [all …]
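The abseil matches above show the two absl::MutexLock constructors: the plain one calls Lock(), the two-argument one calls LockWhen(cond) and returns only once the condition holds. A small example of both against the public absl API; the Queue class itself is illustrative.

    #include <deque>
    #include "absl/base/thread_annotations.h"
    #include "absl/synchronization/mutex.h"

    class Queue {
     public:
      void Push(int v) {
        absl::MutexLock l(&mu_);  // Lock() in the ctor, Unlock() in the dtor
        items_.push_back(v);
      }

      int BlockingPop() {
        // Sleeps until HasItems() is true, then returns with mu_ held.
        absl::MutexLock l(&mu_, absl::Condition(this, &Queue::HasItems));
        int v = items_.front();
        items_.pop_front();
        return v;
      }

     private:
      bool HasItems() const ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
        return !items_.empty();
      }

      absl::Mutex mu_;
      std::deque<int> items_ ABSL_GUARDED_BY(mu_);
    };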
|
/external/openscreen/third_party/abseil/src/absl/synchronization/ |
D | mutex.h |
    456   std::atomic<intptr_t> mu_;  // The Mutex state.
    527   explicit MutexLock(Mutex *mu) ABSL_EXCLUSIVE_LOCK_FUNCTION(mu) : mu_(mu) {  in MutexLock()
    528   this->mu_->Lock();  in MutexLock()
    536   : mu_(mu) {  in MutexLock()
    537   this->mu_->LockWhen(cond);  in MutexLock()
    545   ~MutexLock() ABSL_UNLOCK_FUNCTION() { this->mu_->Unlock(); }  in ABSL_UNLOCK_FUNCTION()
    548   Mutex *const mu_;
    557   explicit ReaderMutexLock(Mutex *mu) ABSL_SHARED_LOCK_FUNCTION(mu) : mu_(mu) {  in ReaderMutexLock()
    563   : mu_(mu) {  in ReaderMutexLock()
    572   ~ReaderMutexLock() ABSL_UNLOCK_FUNCTION() { this->mu_->ReaderUnlock(); }  in ABSL_UNLOCK_FUNCTION()
    [all …]
|
/external/tensorflow/tensorflow/core/tpu/kernels/ |
D | tpu_compilation_cache_interface.h |
    215   CompiledSubgraph* entry) ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu_);
    222   ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu_);
    244   ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu_);
    247   size_t RemoveEntry(const std::string& key) ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu_);
    251   ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu_);
    255   ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu_);
    268   ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu_) = 0;
    274   void UnloadAndDestroy(CompiledSubgraph* entry) ABSL_LOCKS_EXCLUDED(mu_);
    281   absl::Mutex mu_;  variable
    283   int64 cache_size_ ABSL_GUARDED_BY(mu_) = 0;
    [all …]
|
/external/tensorflow/tensorflow/core/kernels/data/ |
D | parallel_interleave_dataset_op.cc |
    312   mu_(std::make_shared<mutex>()),  in ParallelInterleaveIterator()
    315   params.dataset->num_parallel_calls_, mu_,  in ParallelInterleaveIterator()
    326   mutex_lock l(*mu_);  in Initialize()
    361   mutex_lock l(*mu_);  in GetNextInternal()
    410   mutex_lock l(*mu_);  in SaveInternal()
    452   mutex_lock l(*mu_);  in RestoreInternal()
    466   mutex_lock l(*mu_);  in RestoreInternal()
    495   if (mu_->try_lock()) {  in GetTraceMeMetadata()
    497   mu_->unlock();  in GetTraceMeMetadata()
    521   int64 id TF_GUARDED_BY(&ParallelInterleaveIterator::mu_);
    [all …]
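The GetTraceMeMetadata() matches above use try_lock rather than a blocking lock: profiling must not stall the iterator, so the guarded value is reported only if the mutex happens to be free. A sketch of that pattern with std::mutex; the class, method, and field names here are illustrative stand-ins.

    #include <mutex>
    #include <string>

    class Iterator {
     public:
      std::string GetTraceMeMetadata() {
        if (mu_.try_lock()) {               // never blocks the hot path
          std::string md = "parallelism=" + std::to_string(parallelism_);
          mu_.unlock();
          return md;
        }
        return "parallelism=unavailable";   // lock contended: skip the read
      }

     private:
      std::mutex mu_;
      int parallelism_ = 1;  // guarded by mu_
    };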
|
/external/rust/crates/grpcio-sys/grpc/third_party/re2/util/ |
D | mutex.h |
    109   explicit MutexLock(Mutex *mu) : mu_(mu) { mu_->Lock(); }  in MutexLock()
    110   ~MutexLock() { mu_->Unlock(); }  in ~MutexLock()
    112   Mutex * const mu_;
    121   explicit ReaderMutexLock(Mutex *mu) : mu_(mu) { mu_->ReaderLock(); }  in ReaderMutexLock()
    122   ~ReaderMutexLock() { mu_->ReaderUnlock(); }  in ~ReaderMutexLock()
    124   Mutex * const mu_;
    132   explicit WriterMutexLock(Mutex *mu) : mu_(mu) { mu_->WriterLock(); }  in WriterMutexLock()
    133   ~WriterMutexLock() { mu_->WriterUnlock(); }  in ~WriterMutexLock()
    135   Mutex * const mu_;
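The re2 matches above show the reader/writer guard trio: ReaderMutexLock takes a shared (read) lock, WriterMutexLock an exclusive one. A sketch of the same pair over std::shared_mutex (C++17); class names mirror the listing, the implementation is illustrative.

    #include <shared_mutex>

    class ReaderMutexLock {
     public:
      explicit ReaderMutexLock(std::shared_mutex* mu) : mu_(mu) { mu_->lock_shared(); }
      ~ReaderMutexLock() { mu_->unlock_shared(); }
      ReaderMutexLock(const ReaderMutexLock&) = delete;
      ReaderMutexLock& operator=(const ReaderMutexLock&) = delete;

     private:
      std::shared_mutex* const mu_;
    };

    class WriterMutexLock {
     public:
      explicit WriterMutexLock(std::shared_mutex* mu) : mu_(mu) { mu_->lock(); }
      ~WriterMutexLock() { mu_->unlock(); }
      WriterMutexLock(const WriterMutexLock&) = delete;
      WriterMutexLock& operator=(const WriterMutexLock&) = delete;

     private:
      std::shared_mutex* const mu_;
    };

    // Many readers may hold ReaderMutexLock concurrently; WriterMutexLock is exclusive.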
|
/external/tensorflow/tensorflow/core/common_runtime/device/ |
D | device_event_mgr.h |
    76    mutex_lock l(mu_);  in ThenExecute()
    89    mutex mu_;  variable
    90    condition_variable events_pending_ TF_GUARDED_BY(mu_);
    112   TF_EXCLUSIVE_LOCKS_REQUIRED(mu_);
    115   TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {  in QueueFunc()
    125   TF_EXCLUSIVE_LOCKS_REQUIRED(mu_);
    136   std::vector<se::Event*> free_events_ TF_GUARDED_BY(mu_);
    139   std::deque<InUse> used_events_ TF_GUARDED_BY(mu_);
    141   bool stop_polling_ TF_GUARDED_BY(mu_);
    156   mutex mu_;
    [all …]
|
/external/tensorflow/tensorflow/core/kernels/batching_util/ |
D | serial_device_batch_scheduler.h |
    121   mutex_lock l(mu_);  in in_flight_batches_limit()
    126   mutex_lock l(mu_);  in recent_low_traffic_ratio()
    151   std::vector<const internal::SDBSBatch<TaskType>*> batches_ TF_GUARDED_BY(mu_);
    155   queues_and_callbacks_ TF_GUARDED_BY(mu_);
    161   int64 in_flight_batches_limit_ TF_GUARDED_BY(mu_);
    164   int64 processing_threads_ TF_GUARDED_BY(mu_) = 0;
    168   int64 batch_count_ TF_GUARDED_BY(mu_) = 0;
    172   int64 no_batch_count_ TF_GUARDED_BY(mu_) = 0;
    189   mutex mu_;  variable
    232   SDBSBatch<TaskType>* current_batch_ TF_GUARDED_BY(mu_) = nullptr;
    [all …]
|