
Searched refs:mu_ (Results 1 – 25 of 406) sorted by relevance


/external/tensorflow/tensorflow/core/framework/
model.h 132 void add_input(std::shared_ptr<Node> node) LOCKS_EXCLUDED(mu_) { in add_input()
133 mutex_lock l(mu_); in add_input()
138 void add_processing_time(int64 delta) LOCKS_EXCLUDED(mu_) { in add_processing_time()
139 mutex_lock l(mu_); in add_processing_time()
144 bool autotune() const LOCKS_EXCLUDED(mu_) { in autotune()
145 tf_shared_lock l(mu_); in autotune()
150 int64 buffered_bytes() const LOCKS_EXCLUDED(mu_) { in buffered_bytes()
151 tf_shared_lock l(mu_); in buffered_bytes()
156 int64 buffered_elements() const LOCKS_EXCLUDED(mu_) { in buffered_elements()
157 tf_shared_lock l(mu_); in buffered_elements()
[all …]
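
The model.h hits above show the standard TensorFlow annotation pattern: a public method marked LOCKS_EXCLUDED(mu_) acquires mu_ itself through the RAII guards mutex_lock (exclusive) or tf_shared_lock (shared reader), and every field it protects carries GUARDED_BY(mu_). A minimal sketch of that pattern, assuming TensorFlow's platform headers; the Counter class is illustrative, not taken from these results:

    #include "tensorflow/core/platform/mutex.h"
    #include "tensorflow/core/platform/thread_annotations.h"
    #include "tensorflow/core/platform/types.h"

    namespace tensorflow {

    class Counter {
     public:
      // Caller must NOT already hold mu_; the method acquires it itself.
      void Add(int64 delta) LOCKS_EXCLUDED(mu_) {
        mutex_lock l(mu_);  // exclusive RAII lock, released at scope exit
        value_ += delta;
      }

      int64 value() const LOCKS_EXCLUDED(mu_) {
        tf_shared_lock l(mu_);  // shared (reader) lock for const access
        return value_;
      }

     private:
      mutable mutex mu_;
      int64 value_ GUARDED_BY(mu_) = 0;  // analysis flags unlocked access
    };

    }  // namespace tensorflow

Using tf_shared_lock in the const accessor, as the buffered_bytes()/buffered_elements() hits do, lets concurrent readers proceed without serializing on the writer lock.
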
tracking_allocator.h 94 bool UnRef() EXCLUSIVE_LOCKS_REQUIRED(mu_);
97 mutable mutex mu_; variable
101 int ref_ GUARDED_BY(mu_);
105 size_t allocated_ GUARDED_BY(mu_);
109 size_t high_watermark_ GUARDED_BY(mu_);
114 size_t total_bytes_ GUARDED_BY(mu_);
116 gtl::InlinedVector<AllocRecord, 4> allocations_ GUARDED_BY(mu_);
126 std::unordered_map<const void*, Chunk> in_use_ GUARDED_BY(mu_);
127 int64 next_allocation_id_ GUARDED_BY(mu_);
cancellation.cc 43 mutex_lock l(mu_); in StartCancel()
75 mutex_lock l(mu_); in StartCancel()
87 mutex_lock l(mu_); in RegisterCallback()
99 mu_.lock(); in DeregisterCallback()
101 mu_.unlock(); in DeregisterCallback()
106 mu_.unlock(); in DeregisterCallback()
119 mu_.unlock(); in DeregisterCallback()
125 mutex_lock l(mu_); in RegisterChild()
151 mutex_lock l(mu_); in DeregisterChild()
184 mutex_lock lock(mu_); in TryDeregisterCallback()
[all …]
/external/clang/test/SemaCXX/
warn-thread-safety-analysis.cpp 972 void func1(int y) LOCKS_EXCLUDED(mu_);
973 template <typename T> void func2(T x) LOCKS_EXCLUDED(mu_);
975 Mutex mu_; member in thread_annot_lock_38::Foo
991 Mutex *mu_; member in thread_annot_lock_43::Foo
997 int GetA() EXCLUSIVE_LOCKS_REQUIRED(foo_->mu_) { return a_; } in GetA()
998 int a_ GUARDED_BY(foo_->mu_);
1006 fb->foo_->mu_->Lock(); in main()
1008 fb->foo_->mu_->Unlock(); in main()
1124 Mutex mu_; member in thread_annot_lock_68_modified::Bar
1131 mu_.Lock(); in func()
[all …]
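
This SemaCXX file is a test for Clang's -Wthread-safety analysis, which is what gives the annotations in the other results their teeth. A standalone sketch of what the analysis diagnoses, using Clang's raw attributes rather than the macro wrappers in the test file; the Mu and Data names are illustrative:

    // Compile with: clang++ -c -Wthread-safety sketch.cc
    #include <mutex>

    class __attribute__((capability("mutex"))) Mu {
     public:
      void Lock() __attribute__((acquire_capability())) { m_.lock(); }
      void Unlock() __attribute__((release_capability())) { m_.unlock(); }

     private:
      std::mutex m_;
    };

    struct Data {
      Mu mu_;
      int a_ __attribute__((guarded_by(mu_))) = 0;
    };

    int main() {
      Data d;
      d.a_ = 1;       // warning: writing variable 'a_' requires holding mutex 'mu_'
      d.mu_.Lock();
      d.a_ = 2;       // ok: mu_ is held
      d.mu_.Unlock();
    }
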
/external/tensorflow/tensorflow/compiler/jit/
xla_device.h 140 LOCKS_EXCLUDED(mu_);
148 LOCKS_EXCLUDED(mu_);
152 Tensor* tensor) override LOCKS_EXCLUDED(mu_);
159 Tensor* tensor) LOCKS_EXCLUDED(mu_);
169 Status EnsureDeviceContextOk() LOCKS_EXCLUDED(mu_);
173 Status UseGpuDeviceInfo() LOCKS_EXCLUDED(mu_);
177 void SetAllowsSyncOnCompletion(bool sync_on_completion) LOCKS_EXCLUDED(mu_);
178 bool AllowsSyncOnCompletion() const override LOCKS_EXCLUDED(mu_);
183 Status RefreshStatus() override LOCKS_EXCLUDED(mu_);
188 EXCLUSIVE_LOCKS_REQUIRED(mu_);
[all …]
/external/grpc-grpc/src/core/lib/gprpp/
fork.cc 53 gpr_mu_init(&mu_); in ExecCtxState()
64 gpr_mu_lock(&mu_); in IncExecCtxCount()
67 gpr_cv_wait(&cv_, &mu_, gpr_inf_future(GPR_CLOCK_REALTIME)); in IncExecCtxCount()
70 gpr_mu_unlock(&mu_); in IncExecCtxCount()
83 gpr_mu_lock(&mu_); in BlockExecCtx()
85 gpr_mu_unlock(&mu_); in BlockExecCtx()
92 gpr_mu_lock(&mu_); in AllowExecCtx()
96 gpr_mu_unlock(&mu_); in AllowExecCtx()
100 gpr_mu_destroy(&mu_); in ~ExecCtxState()
106 gpr_mu mu_; member in grpc_core::internal::ExecCtxState
[all …]
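
The fork.cc hits use gRPC core's C-style sync API: an explicitly initialized/destroyed gpr_mu plus a gpr_cv whose wait atomically releases the mutex while blocked. A hedged sketch of that idiom, assuming <grpc/support/sync.h>; the CountGate class is illustrative:

    #include <grpc/support/sync.h>

    class CountGate {
     public:
      CountGate() { gpr_mu_init(&mu_); gpr_cv_init(&cv_); }
      ~CountGate() { gpr_cv_destroy(&cv_); gpr_mu_destroy(&mu_); }

      void Inc() {
        gpr_mu_lock(&mu_);
        ++count_;
        gpr_mu_unlock(&mu_);
      }

      void Dec() {
        gpr_mu_lock(&mu_);
        if (--count_ == 0) gpr_cv_broadcast(&cv_);  // wake waiters at zero
        gpr_mu_unlock(&mu_);
      }

      void WaitForZero() {
        gpr_mu_lock(&mu_);
        while (count_ != 0) {
          // Atomically releases mu_ while blocked, reacquires before returning.
          gpr_cv_wait(&cv_, &mu_, gpr_inf_future(GPR_CLOCK_REALTIME));
        }
        gpr_mu_unlock(&mu_);
      }

     private:
      gpr_mu mu_;
      gpr_cv cv_;
      int count_ = 0;
    };
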
/external/tensorflow/tensorflow/core/platform/
mutex.h 105 internal::MuData mu_; variable
149 explicit mutex_lock(mutex_type& mu) EXCLUSIVE_LOCK_FUNCTION(mu) : mu_(&mu) { in mutex_lock()
150 mu_->lock(); in mutex_lock()
154 : mu_(&mu) { in mutex_lock()
156 mu_ = nullptr; in mutex_lock()
162 mutex_lock(mutex_lock&& ml) noexcept EXCLUSIVE_LOCK_FUNCTION(ml.mu_) in mutex_lock()
163 : mu_(ml.mu_) { in mutex_lock()
164 ml.mu_ = nullptr; in mutex_lock()
167 if (mu_ != nullptr) { in UNLOCK_FUNCTION()
168 mu_->unlock(); in UNLOCK_FUNCTION()
[all …]
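
The mutex_lock hits above include a move constructor, annotated EXCLUSIVE_LOCK_FUNCTION(ml.mu_), that steals the mutex pointer and nulls it in the source so exactly one of the two guards unlocks in its destructor. A minimal standalone sketch of that move-ownership idiom over std::mutex; ScopedLock is an illustrative stand-in, not TensorFlow's class:

    #include <mutex>

    class ScopedLock {
     public:
      explicit ScopedLock(std::mutex& mu) : mu_(&mu) { mu_->lock(); }

      // Transfer ownership: the moved-from guard must not unlock.
      ScopedLock(ScopedLock&& other) noexcept : mu_(other.mu_) {
        other.mu_ = nullptr;
      }

      ~ScopedLock() {
        if (mu_ != nullptr) mu_->unlock();
      }

      ScopedLock(const ScopedLock&) = delete;
      ScopedLock& operator=(const ScopedLock&) = delete;

     private:
      std::mutex* mu_;
    };
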
/external/tensorflow/tensorflow/core/platform/cloud/
ram_file_block_cache.h 100 LOCKS_EXCLUDED(mu_);
103 void RemoveFile(const string& filename) override LOCKS_EXCLUDED(mu_);
106 void Flush() override LOCKS_EXCLUDED(mu_);
114 size_t CacheSize() const override LOCKS_EXCLUDED(mu_);
192 void Prune() LOCKS_EXCLUDED(mu_);
195 EXCLUSIVE_LOCKS_REQUIRED(mu_);
198 std::shared_ptr<Block> Lookup(const Key& key) LOCKS_EXCLUDED(mu_);
201 LOCKS_EXCLUDED(mu_);
204 void Trim() EXCLUSIVE_LOCKS_REQUIRED(mu_);
208 LOCKS_EXCLUDED(mu_);
[all …]
gcs_throttle.h 112 inline int64 available_tokens() LOCKS_EXCLUDED(mu_) { in available_tokens()
113 mutex_lock l(mu_); in available_tokens()
125 bool is_enabled() LOCKS_EXCLUDED(mu_) { in is_enabled()
126 mutex_lock l(mu_); in is_enabled()
137 void UpdateState() EXCLUSIVE_LOCKS_REQUIRED(mu_);
143 mutex mu_; variable
150 uint64 last_updated_secs_ GUARDED_BY(mu_) = 0;
159 int64 available_tokens_ GUARDED_BY(mu_) = 0;
162 GcsThrottleConfig config_ GUARDED_BY(mu_);
expiring_lru_cache.h 50 mutex_lock lock(mu_); in Insert()
58 mutex_lock lock(mu_); in Delete()
69 mutex_lock lock(mu_); in Lookup()
88 mutex_lock lock(mu_); in LookupOrCompute()
101 mutex_lock lock(mu_); in Clear()
122 bool LookupLocked(const string& key, T* value) EXCLUSIVE_LOCKS_REQUIRED(mu_) { in LookupLocked()
139 EXCLUSIVE_LOCKS_REQUIRED(mu_) { in InsertLocked()
152 bool DeleteLocked(const string& key) EXCLUSIVE_LOCKS_REQUIRED(mu_) { in DeleteLocked()
174 mutex mu_; variable
177 std::map<string, Entry> cache_ GUARDED_BY(mu_);
[all …]
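
expiring_lru_cache.h follows the common *Locked convention: public methods take mu_ and delegate to private helpers annotated EXCLUSIVE_LOCKS_REQUIRED(mu_), so a helper can be reused from any path that already holds the lock without re-acquiring it. A sketch of that shape using Clang's raw attribute; the Cache class is illustrative, and note the attribute documents the contract even where the standard-library mutex itself is unannotated:

    #include <map>
    #include <mutex>
    #include <string>

    #define REQUIRES_LOCK(m) __attribute__((exclusive_locks_required(m)))

    class Cache {
     public:
      void Insert(const std::string& key, int value) {
        std::lock_guard<std::mutex> lock(mu_);
        InsertLocked(key, value);
      }

      bool Lookup(const std::string& key, int* value) {
        std::lock_guard<std::mutex> lock(mu_);
        return LookupLocked(key, value);
      }

     private:
      // Callers must already hold mu_.
      void InsertLocked(const std::string& key, int value) REQUIRES_LOCK(mu_) {
        entries_[key] = value;
      }
      bool LookupLocked(const std::string& key, int* value) REQUIRES_LOCK(mu_) {
        auto it = entries_.find(key);
        if (it == entries_.end()) return false;
        *value = it->second;
        return true;
      }

      std::mutex mu_;
      std::map<std::string, int> entries_;  // guarded by mu_
    };
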
/external/tensorflow/tensorflow/core/common_runtime/gpu/
gpu_allocator_retry_test.cc 40 mutex_lock l(mu_); in AllocateRaw()
52 mutex_lock l(mu_); in DeallocateRaw()
60 mutex mu_; member in tensorflow::__anon3016a1e20111::FakeAllocator
61 size_t memory_capacity_ GUARDED_BY(mu_);
80 mutex_lock l(mu_); in WaitTurn()
94 mutex_lock l(mu_); in Done()
103 void IncrementTurn() EXCLUSIVE_LOCKS_REQUIRED(mu_) { in IncrementTurn()
112 mutex mu_; member in tensorflow::__anon3016a1e20111::AlternatingBarrier
115 int next_turn_ GUARDED_BY(mu_);
116 std::vector<bool> done_ GUARDED_BY(mu_);
[all …]
gpu_event_mgr.h 87 mutex_lock l(mu_); in ThenDeleteBuffer()
100 mutex_lock l(mu_); in ThenExecute()
114 mutex mu_; variable
115 condition_variable events_pending_ GUARDED_BY(mu_);
117 void FlushAccumulatedTensors() EXCLUSIVE_LOCKS_REQUIRED(mu_);
155 EXCLUSIVE_LOCKS_REQUIRED(mu_);
158 EXCLUSIVE_LOCKS_REQUIRED(mu_) { in QueueTensors()
163 EXCLUSIVE_LOCKS_REQUIRED(mu_) { in QueueBuffer()
168 EXCLUSIVE_LOCKS_REQUIRED(mu_) { in QueueFunc()
178 EXCLUSIVE_LOCKS_REQUIRED(mu_);
[all …]
/external/tensorflow/tensorflow/core/kernels/data/
parallel_interleave_dataset_op.cc 228 mu_(std::make_shared<mutex>()), in ParallelInterleaveIterator()
231 params.dataset->num_parallel_calls_, mu_, in ParallelInterleaveIterator()
245 if (mu_->try_lock()) { in BuildTraceMeName()
247 mu_->unlock(); in BuildTraceMeName()
258 mutex_lock l(*mu_); in Initialize()
292 mutex_lock l(*mu_); in GetNextInternal()
334 mutex_lock l(*mu_); in SaveInternal()
376 mutex_lock l(*mu_); in RestoreInternal()
390 mutex_lock l(*mu_); in RestoreInternal()
429 int64 id GUARDED_BY(&ParallelInterleaveIterator::mu_);
[all …]
cache_dataset_ops.cc 148 mutex_lock l(mu_); in Initialize()
155 mutex_lock l(mu_); in GetNextInternal()
167 mutex_lock l(mu_); in SaveInternal()
173 mutex_lock l(mu_); in RestoreInternal()
253 mutex_lock l(mu_); in GetNextInternal()
305 mutex_lock l(mu_); in SaveInternal()
341 mutex_lock l(mu_); in RestoreInternal()
377 EXCLUSIVE_LOCKS_REQUIRED(mu_) { in EnsureLockFileExists()
438 Status Finish() EXCLUSIVE_LOCKS_REQUIRED(mu_) { in Finish()
467 mutex mu_; member in tensorflow::data::CacheDatasetOp::FileDataset::FileIterator::FileWriterIterator
[all …]
/external/protobuf/src/google/protobuf/stubs/
mutex.h 99 void Lock() GOOGLE_PROTOBUF_ACQUIRE() { mu_.lock(); } in Lock()
100 void Unlock() GOOGLE_PROTOBUF_RELEASE() { mu_.unlock(); } in Unlock()
107 std::mutex mu_;
109 CriticalSectionLock mu_;
118 explicit MutexLock(Mutex *mu) : mu_(mu) { this->mu_->Lock(); } in MutexLock()
119 ~MutexLock() { this->mu_->Unlock(); } in ~MutexLock()
121 Mutex *const mu_;
133 mu_(mu) { if (this->mu_ != nullptr) { this->mu_->Lock(); } } in MutexLockMaybe()
134 ~MutexLockMaybe() { if (this->mu_ != nullptr) { this->mu_->Unlock(); } } in ~MutexLockMaybe()
136 Mutex *const mu_;
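
Protobuf's MutexLockMaybe, shown in the hits above, is a guard that tolerates a null mutex pointer, locking and unlocking only when one was supplied. A compact sketch of that idiom over std::mutex; MaybeLock is an illustrative name:

    #include <mutex>

    class MaybeLock {
     public:
      // mu may be null, in which case the guard is a no-op.
      explicit MaybeLock(std::mutex* mu) : mu_(mu) {
        if (mu_ != nullptr) mu_->lock();
      }
      ~MaybeLock() {
        if (mu_ != nullptr) mu_->unlock();
      }

      MaybeLock(const MaybeLock&) = delete;
      MaybeLock& operator=(const MaybeLock&) = delete;

     private:
      std::mutex* const mu_;
    };
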
/external/tensorflow/tensorflow/core/common_runtime/
scoped_allocator.h 54 ~ScopedAllocator() LOCKS_EXCLUDED(mu_);
69 void* AllocateRaw(int32 field_index, size_t num_bytes) LOCKS_EXCLUDED(mu_);
70 void DeallocateRaw(void* p) LOCKS_EXCLUDED(mu_);
77 mutex mu_; variable
78 int32 expected_call_count_ GUARDED_BY(mu_);
79 int32 live_alloc_count_ GUARDED_BY(mu_);
101 void DropFromTable() LOCKS_EXCLUDED(mu_);
103 LOCKS_EXCLUDED(mu_) override;
108 void DeallocateRaw(void* p) LOCKS_EXCLUDED(mu_) override;
117 mutex mu_;
[all …]
/external/tensorflow/tensorflow/core/kernels/batching_util/
serial_device_batch_scheduler.h 121 mutex_lock l(mu_); in in_flight_batches_limit()
126 mutex_lock l(mu_); in recent_low_traffic_ratio()
151 std::vector<const internal::SDBSBatch<TaskType>*> batches_ GUARDED_BY(mu_);
155 queues_and_callbacks_ GUARDED_BY(mu_);
161 int64 in_flight_batches_limit_ GUARDED_BY(mu_);
164 int64 processing_threads_ GUARDED_BY(mu_) = 0;
168 int64 batch_count_ GUARDED_BY(mu_) = 0;
172 int64 no_batch_count_ GUARDED_BY(mu_) = 0;
189 mutex mu_; variable
232 SDBSBatch<TaskType>* current_batch_ GUARDED_BY(mu_) = nullptr;
[all …]
adaptive_shared_batch_scheduler.h 142 mutex_lock l(mu_); in in_flight_batches_limit()
157 void MaybeScheduleNextBatch() EXCLUSIVE_LOCKS_REQUIRED(mu_);
164 void MaybeScheduleClosedBatch() EXCLUSIVE_LOCKS_REQUIRED(mu_);
179 std::vector<const internal::ASBSBatch<TaskType>*> batches_ GUARDED_BY(mu_);
183 queues_and_callbacks_ GUARDED_BY(mu_);
185 mutex mu_; variable
193 double in_flight_batches_limit_ GUARDED_BY(mu_);
196 int64 in_flight_batches_ GUARDED_BY(mu_) = 0;
198 int64 in_flight_express_batches_ GUARDED_BY(mu_) = 0;
206 int64 batch_count_ GUARDED_BY(mu_) = 0;
[all …]
shared_batch_scheduler.h 180 mutex mu_; variable
190 QueueList queues_ GUARDED_BY(mu_);
194 typename QueueList::iterator next_queue_to_schedule_ GUARDED_BY(mu_);
274 mutex_lock l(mu_); in closed()
280 bool IsEmptyInternal() const EXCLUSIVE_LOCKS_REQUIRED(mu_);
284 void StartNewBatch() EXCLUSIVE_LOCKS_REQUIRED(mu_);
288 bool IsOpenBatchSchedulable() const EXCLUSIVE_LOCKS_REQUIRED(mu_);
303 mutable mutex mu_; variable
308 bool closed_ GUARDED_BY(mu_) = false;
311 std::deque<std::unique_ptr<Batch<TaskType>>> batches_ GUARDED_BY(mu_);
[all …]
/external/tensorflow/tensorflow/stream_executor/
multi_platform_manager.cc 33 LOCKS_EXCLUDED(mu_);
36 LOCKS_EXCLUDED(mu_);
39 LOCKS_EXCLUDED(mu_);
43 LOCKS_EXCLUDED(mu_);
46 LOCKS_EXCLUDED(mu_);
49 const std::function<bool(const Platform*)>& filter) LOCKS_EXCLUDED(mu_);
53 LOCKS_EXCLUDED(mu_);
59 EXCLUSIVE_LOCKS_REQUIRED(mu_);
64 EXCLUSIVE_LOCKS_REQUIRED(mu_);
66 absl::Mutex mu_; member in stream_executor::__anon77988d260111::MultiPlatformManagerImpl
[all …]
/external/tensorflow/tensorflow/compiler/xrt/
xrt_compilation_cache.h 176 EXCLUSIVE_LOCKS_REQUIRED(mu_);
180 void MarkOldestEntryForEviction() EXCLUSIVE_LOCKS_REQUIRED(mu_);
198 EXCLUSIVE_LOCKS_REQUIRED(mu_);
209 initialize_program) EXCLUSIVE_LOCKS_REQUIRED(mu_);
215 mutable absl::Mutex mu_; variable
217 int cache_entries_ GUARDED_BY(mu_) = 0;
219 int marked_for_eviction_entries_ GUARDED_BY(mu_) = 0;
222 int64 use_counter_ GUARDED_BY(mu_) = 0;
226 std::unordered_map<string, CompiledSubgraph*> cache_ GUARDED_BY(mu_);
229 std::unordered_map<int64, CompiledSubgraph*> entries_by_uid_ GUARDED_BY(mu_);
[all …]
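
xrt_compilation_cache.h uses absl::Mutex rather than TensorFlow's mutex, with guarded counters declared via in-class initializers. A minimal sketch of that style, assuming Abseil's synchronization and annotation headers; UseCounter is illustrative:

    #include <cstdint>

    #include "absl/base/thread_annotations.h"
    #include "absl/synchronization/mutex.h"

    class UseCounter {
     public:
      int64_t Next() ABSL_LOCKS_EXCLUDED(mu_) {
        absl::MutexLock l(&mu_);  // RAII exclusive lock
        return ++use_counter_;
      }

     private:
      mutable absl::Mutex mu_;  // mutable, as in the hits above
      int64_t use_counter_ ABSL_GUARDED_BY(mu_) = 0;
    };
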
/external/tensorflow/tensorflow/core/distributed_runtime/rpc/
grpc_state.h 417 mutex_lock l(mu_); in SendNextRequest()
437 mutex_lock l(mu_); in CallStarted()
450 mu_.lock(); in RequestWriteCompleted()
452 mu_.unlock(); in RequestWriteCompleted()
468 mu_.unlock(); in RequestWriteCompleted()
474 mu_.lock(); in ResponseReadCompleted()
476 mu_.unlock(); in ResponseReadCompleted()
481 mu_.unlock(); in ResponseReadCompleted()
492 mu_.unlock(); in ResponseReadCompleted()
497 mutex_lock l(mu_); in ResponseReadCompleted()
[all …]
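
grpc_state.h, like cancellation.cc earlier, calls mu_.lock()/mu_.unlock() manually instead of using a scoped guard because the mutex must be released before invoking a user callback: holding it across the call risks deadlock if the callback re-enters the object. A hedged sketch of that shape; Notifier is an illustrative name:

    #include <functional>
    #include <mutex>

    class Notifier {
     public:
      void SetCallback(std::function<void()> cb) {
        std::lock_guard<std::mutex> l(mu_);
        callback_ = std::move(cb);
      }

      void Fire() {
        mu_.lock();
        std::function<void()> cb = callback_;  // copy state out under the lock
        mu_.unlock();                          // release BEFORE calling out
        if (cb) cb();  // callback may re-enter Notifier without deadlocking
      }

     private:
      std::mutex mu_;
      std::function<void()> callback_;
    };
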
/external/tensorflow/tensorflow/core/platform/default/
mutex.cc 39 mutex::mutex() { nsync::nsync_mu_init(mu_cast(&mu_)); } in mutex()
43 void mutex::lock() { nsync::nsync_mu_lock(mu_cast(&mu_)); } in lock()
45 bool mutex::try_lock() { return nsync::nsync_mu_trylock(mu_cast(&mu_)) != 0; }; in try_lock()
47 void mutex::unlock() { nsync::nsync_mu_unlock(mu_cast(&mu_)); } in unlock()
49 void mutex::lock_shared() { nsync::nsync_mu_rlock(mu_cast(&mu_)); } in lock_shared()
52 return nsync::nsync_mu_rtrylock(mu_cast(&mu_)) != 0; in try_lock_shared()
55 void mutex::unlock_shared() { nsync::nsync_mu_runlock(mu_cast(&mu_)); } in unlock_shared()
63 nsync::nsync_mu_wait(mu_cast(&mu_), &EvaluateCondition, &cond, nullptr); in Await()
70 return nsync::nsync_mu_wait_with_deadline(mu_cast(&mu_), &EvaluateCondition, in AwaitWithDeadline()
93 nsync::nsync_cv_wait(cv_cast(&cv_), mu_cast(&lock.mutex()->mu_)); in wait()
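
These default/mutex.cc hits show that TensorFlow's mutex is a thin wrapper over nsync: lock()/unlock() forward to the exclusive nsync_mu_lock/nsync_mu_unlock, and lock_shared()/unlock_shared() to the reader variants nsync_mu_rlock/nsync_mu_runlock. A sketch using only calls visible above, assuming nsync's public headers are on the include path; RWCounter is illustrative:

    #include "nsync_mu.h"  // assumption: nsync public header layout

    class RWCounter {
     public:
      RWCounter() { nsync::nsync_mu_init(&mu_); }

      void Add(long delta) {
        nsync::nsync_mu_lock(&mu_);   // exclusive (writer) lock
        value_ += delta;
        nsync::nsync_mu_unlock(&mu_);
      }

      long Get() {
        nsync::nsync_mu_rlock(&mu_);  // shared (reader) lock
        long v = value_;
        nsync::nsync_mu_runlock(&mu_);
        return v;
      }

     private:
      nsync::nsync_mu mu_;
      long value_ = 0;
    };
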
/external/tensorflow/tensorflow/core/lib/monitoring/
percentile_sampler.h 74 mutable mutex mu_; variable
76 std::vector<Sample> samples_ GUARDED_BY(mu_);
77 size_t num_samples_ GUARDED_BY(mu_);
78 size_t next_position_ GUARDED_BY(mu_);
79 size_t total_samples_ GUARDED_BY(mu_);
80 long double accumulator_ GUARDED_BY(mu_);
118 PercentileSamplerCell* GetCell(const Labels&... labels) LOCKS_EXCLUDED(mu_);
134 mutex_lock l(mu_); in PercentileSampler()
160 mutable mutex mu_; variable
181 std::map<LabelArray, PercentileSamplerCell> cells_ GUARDED_BY(mu_);
[all …]
/external/clang/test/Sema/
warn-thread-safety-analysis.c 31 struct Mutex *mu_; member
45 int a_ GUARDED_BY(foo_.mu_);
46 int *b_ PT_GUARDED_BY(foo_.mu_) = &a_;
67 void set_value(int *a, int value) EXCLUSIVE_LOCKS_REQUIRED(foo_.mu_) { in set_value()
71 int get_value(int *p) SHARED_LOCKS_REQUIRED(foo_.mu_){ in get_value()
106 mutex_exclusive_lock(foo_.mu_); in main()
108 mutex_unlock(foo_.mu_); in main()
109 mutex_shared_lock(foo_.mu_); in main()
111 mutex_unlock(foo_.mu_); in main()
115 mutex_exclusive_lock(foo_.mu_); in main()
[all …]
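
This Sema test exercises the same analysis in C. A small analogue using the legacy attribute spellings the test's macros expand to, with a stub lockable struct; names are illustrative and only declarations are needed to compile the translation unit:

    /* Compile with: clang -c -Wthread-safety sketch.c */
    struct __attribute__((lockable)) Mutex {
      int placeholder;
    };

    void mutex_exclusive_lock(struct Mutex *mu)
        __attribute__((exclusive_lock_function(mu)));
    void mutex_unlock(struct Mutex *mu)
        __attribute__((unlock_function(mu)));

    struct Mutex mu;
    int counter __attribute__((guarded_by(mu)));

    void bump(void) {
      mutex_exclusive_lock(&mu);
      counter++;                 /* ok: mu is held */
      mutex_unlock(&mu);
      /* counter++ here would warn: requires holding mutex 'mu' */
    }
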
