
Searched refs:lock (Results 1 – 25 of 3098) sorted by relevance


/drivers/staging/lustre/lustre/ldlm/
ldlm_lock.c
161 struct ldlm_lock *ldlm_lock_get(struct ldlm_lock *lock) in ldlm_lock_get() argument
163 atomic_inc(&lock->l_refc); in ldlm_lock_get()
164 return lock; in ldlm_lock_get()
173 void ldlm_lock_put(struct ldlm_lock *lock) in ldlm_lock_put() argument
175 LASSERT(lock->l_resource != LP_POISON); in ldlm_lock_put()
176 LASSERT(atomic_read(&lock->l_refc) > 0); in ldlm_lock_put()
177 if (atomic_dec_and_test(&lock->l_refc)) { in ldlm_lock_put()
180 LDLM_DEBUG(lock, in ldlm_lock_put()
183 res = lock->l_resource; in ldlm_lock_put()
184 LASSERT(ldlm_is_destroyed(lock)); in ldlm_lock_put()
[all …]
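
The ldlm_lock_get()/ldlm_lock_put() pair shown here is a plain atomic reference count: get bumps l_refc and hands the lock back, put drops a reference and tears the lock down once the count reaches zero (the LASSERTs guard against double-put and use-after-free). A minimal userspace sketch of the same pattern, using C11 atomics in place of the kernel's atomic_t; the my_lock names are illustrative, not Lustre's:

    #include <stdatomic.h>
    #include <stdlib.h>

    struct my_lock {
            atomic_int refc;        /* reference count, like l_refc */
            /* ... resource pointer, flags, ... */
    };

    /* Take one more reference; the caller must already hold one. */
    static struct my_lock *my_lock_get(struct my_lock *lock)
    {
            atomic_fetch_add(&lock->refc, 1);
            return lock;
    }

    /* Drop a reference; destroy the lock when the last one goes away.
     * fetch_sub returning 1 is the moment the count hits zero, which
     * is exactly what the kernel's atomic_dec_and_test() reports. */
    static void my_lock_put(struct my_lock *lock)
    {
            if (atomic_fetch_sub(&lock->refc, 1) == 1)
                    free(lock);
    }
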
ldlm_request.c
89 struct ldlm_lock *lock = lwd->lwd_lock; in ldlm_expired_completion_wait() local
93 if (!lock->l_conn_export) { in ldlm_expired_completion_wait()
97 (s64)lock->l_last_activity, in ldlm_expired_completion_wait()
99 lock->l_last_activity)); in ldlm_expired_completion_wait()
100 …LDLM_DEBUG(lock, "lock timed out (enqueued at %lld, %llds ago); not entering recovery in server co… in ldlm_expired_completion_wait()
101 (s64)lock->l_last_activity, in ldlm_expired_completion_wait()
103 lock->l_last_activity)); in ldlm_expired_completion_wait()
108 ldlm_lock_to_ns(lock)); in ldlm_expired_completion_wait()
115 obd = lock->l_conn_export->exp_obd; in ldlm_expired_completion_wait()
118 LDLM_ERROR(lock, "lock timed out (enqueued at %lld, %llds ago), entering recovery for %s@%s", in ldlm_expired_completion_wait()
[all …]
ldlm_flock.c
74 ldlm_same_flock_owner(struct ldlm_lock *lock, struct ldlm_lock *new) in ldlm_same_flock_owner() argument
77 lock->l_policy_data.l_flock.owner) && in ldlm_same_flock_owner()
78 (new->l_export == lock->l_export)); in ldlm_same_flock_owner()
82 ldlm_flocks_overlap(struct ldlm_lock *lock, struct ldlm_lock *new) in ldlm_flocks_overlap() argument
85 lock->l_policy_data.l_flock.end) && in ldlm_flocks_overlap()
87 lock->l_policy_data.l_flock.start)); in ldlm_flocks_overlap()
91 ldlm_flock_destroy(struct ldlm_lock *lock, enum ldlm_mode mode, __u64 flags) in ldlm_flock_destroy() argument
93 LDLM_DEBUG(lock, "ldlm_flock_destroy(mode: %d, flags: 0x%llx)", in ldlm_flock_destroy()
97 LASSERT(hlist_unhashed(&lock->l_exp_flock_hash)); in ldlm_flock_destroy()
99 list_del_init(&lock->l_res_link); in ldlm_flock_destroy()
[all …]
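
ldlm_same_flock_owner() and ldlm_flocks_overlap() are both simple predicates: the first compares the owner/export pair, the second is the standard closed-interval intersection test, where two ranges overlap iff each starts at or before the point where the other ends. A self-contained sketch of the overlap test, with hypothetical names:

    #include <stdbool.h>
    #include <stdint.h>

    struct range { uint64_t start, end; };  /* inclusive ends, like l_flock */

    /* Closed intervals intersect iff each one starts at or before
     * the point where the other ends. */
    static bool ranges_overlap(const struct range *a, const struct range *b)
    {
            return a->start <= b->end && b->start <= a->end;
    }
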
ldlm_lockd.c
116 struct ldlm_lock_desc *ld, struct ldlm_lock *lock) in ldlm_handle_bl_callback() argument
120 LDLM_DEBUG(lock, "client blocking AST callback handler"); in ldlm_handle_bl_callback()
122 lock_res_and_lock(lock); in ldlm_handle_bl_callback()
123 ldlm_set_cbpending(lock); in ldlm_handle_bl_callback()
125 if (ldlm_is_cancel_on_block(lock)) in ldlm_handle_bl_callback()
126 ldlm_set_cancel(lock); in ldlm_handle_bl_callback()
128 do_ast = !lock->l_readers && !lock->l_writers; in ldlm_handle_bl_callback()
129 unlock_res_and_lock(lock); in ldlm_handle_bl_callback()
133 "Lock %p already unused, calling callback (%p)\n", lock, in ldlm_handle_bl_callback()
134 lock->l_blocking_ast); in ldlm_handle_bl_callback()
[all …]
l_lock.c
47 struct ldlm_resource *lock_res_and_lock(struct ldlm_lock *lock) in lock_res_and_lock() argument
48 __acquires(&lock->l_lock) in lock_res_and_lock()
49 __acquires(&lock->l_resource->lr_lock) in lock_res_and_lock()
51 spin_lock(&lock->l_lock); in lock_res_and_lock()
53 lock_res(lock->l_resource); in lock_res_and_lock()
55 ldlm_set_res_locked(lock); in lock_res_and_lock()
56 return lock->l_resource; in lock_res_and_lock()
63 void unlock_res_and_lock(struct ldlm_lock *lock) in unlock_res_and_lock() argument
64 __releases(&lock->l_resource->lr_lock) in unlock_res_and_lock()
65 __releases(&lock->l_lock) in unlock_res_and_lock()
[all …]
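
lock_res_and_lock() nests two spinlocks in a fixed order, the per-lock l_lock first and then the resource's lr_lock, with unlock_res_and_lock() releasing in reverse; the __acquires/__releases annotations only inform sparse and compile away. A userspace pthread sketch of the same fixed-order nesting (field names modeled on the Lustre ones, not taken from them):

    #include <pthread.h>

    struct resource { pthread_mutex_t lr_lock; };
    struct handle   { pthread_mutex_t l_lock; struct resource *l_resource; };

    /* Always take the per-lock mutex before the resource mutex... */
    static struct resource *lock_res_and_lock(struct handle *h)
    {
            pthread_mutex_lock(&h->l_lock);
            pthread_mutex_lock(&h->l_resource->lr_lock);
            return h->l_resource;
    }

    /* ...and drop them in the opposite order, so the nesting never inverts. */
    static void unlock_res_and_lock(struct handle *h)
    {
            pthread_mutex_unlock(&h->l_resource->lr_lock);
            pthread_mutex_unlock(&h->l_lock);
    }
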
ldlm_extent.c
63 __u64 ldlm_extent_shift_kms(struct ldlm_lock *lock, __u64 old_kms) in ldlm_extent_shift_kms() argument
65 struct ldlm_resource *res = lock->l_resource; in ldlm_extent_shift_kms()
74 ldlm_set_kms_ignore(lock); in ldlm_extent_shift_kms()
109 struct ldlm_interval *ldlm_interval_alloc(struct ldlm_lock *lock) in ldlm_interval_alloc() argument
113 LASSERT(lock->l_resource->lr_type == LDLM_EXTENT); in ldlm_interval_alloc()
119 ldlm_interval_attach(node, lock); in ldlm_interval_alloc()
160 struct ldlm_lock *lock) in ldlm_extent_add_lock() argument
167 LASSERT(lock->l_granted_mode == lock->l_req_mode); in ldlm_extent_add_lock()
169 node = lock->l_tree_node; in ldlm_extent_add_lock()
173 idx = lock_mode_to_index(lock->l_granted_mode); in ldlm_extent_add_lock()
[all …]
/drivers/gpu/drm/ttm/
ttm_lock.c
45 void ttm_lock_init(struct ttm_lock *lock) in ttm_lock_init() argument
47 spin_lock_init(&lock->lock); in ttm_lock_init()
48 init_waitqueue_head(&lock->queue); in ttm_lock_init()
49 lock->rw = 0; in ttm_lock_init()
50 lock->flags = 0; in ttm_lock_init()
51 lock->kill_takers = false; in ttm_lock_init()
52 lock->signal = SIGKILL; in ttm_lock_init()
56 void ttm_read_unlock(struct ttm_lock *lock) in ttm_read_unlock() argument
58 spin_lock(&lock->lock); in ttm_read_unlock()
59 if (--lock->rw == 0) in ttm_read_unlock()
[all …]
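
ttm_read_unlock() shows the shape of the TTM lock: a spinlock protects an rw counter and a wait queue, and the last reader out wakes the queue so a pending writer can proceed. A pthread analogue of just that unlock path, with a condition variable standing in for the wait queue (a sketch only; the real lock also encodes writer state in rw and flags):

    #include <pthread.h>

    struct ttm_like_lock {
            pthread_mutex_t lock;   /* protects rw, like the TTM spinlock */
            pthread_cond_t  queue;  /* stands in for lock->queue */
            int             rw;     /* > 0: number of active readers */
    };

    /* Mirror of the ttm_read_unlock() fragment: drop one reader and,
     * when the count reaches zero, wake everyone queued on the lock. */
    static void read_unlock(struct ttm_like_lock *l)
    {
            pthread_mutex_lock(&l->lock);
            if (--l->rw == 0)
                    pthread_cond_broadcast(&l->queue);
            pthread_mutex_unlock(&l->lock);
    }
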
/drivers/md/persistent-data/
dm-block-manager.c
38 spinlock_t lock; member
55 static unsigned __find_holder(struct block_lock *lock, in __find_holder() argument
61 if (lock->holders[i] == task) in __find_holder()
69 static void __add_holder(struct block_lock *lock, struct task_struct *task) in __add_holder() argument
71 unsigned h = __find_holder(lock, NULL); in __add_holder()
77 lock->holders[h] = task; in __add_holder()
80 t = lock->traces + h; in __add_holder()
83 t->entries = lock->entries[h]; in __add_holder()
90 static void __del_holder(struct block_lock *lock, struct task_struct *task) in __del_holder() argument
92 unsigned h = __find_holder(lock, task); in __del_holder()
[all …]
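
__find_holder() doubles as lookup and free-slot search: passing a task finds its slot, passing NULL finds an empty one, which __add_holder() then fills (the real code also snapshots a stack trace per holder for lock debugging). A stripped-down sketch of that dual-purpose scan; MAX_HOLDERS and the behavior on a full table are assumptions, and the tracing is omitted:

    #define MAX_HOLDERS 4   /* assumption: a small fixed table */

    struct block_lock {
            const void *holders[MAX_HOLDERS];  /* NULL marks a free slot */
    };

    /* Pass a task to find its slot, or NULL to find a free one.
     * Returns MAX_HOLDERS when nothing matches. */
    static unsigned find_holder(const struct block_lock *lock, const void *task)
    {
            unsigned i;

            for (i = 0; i < MAX_HOLDERS; i++)
                    if (lock->holders[i] == task)
                            return i;
            return MAX_HOLDERS;
    }

    static void add_holder(struct block_lock *lock, const void *task)
    {
            unsigned h = find_holder(lock, NULL);

            if (h < MAX_HOLDERS)
                    lock->holders[h] = task;
    }
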
/drivers/gpu/drm/
drm_lock.c
57 volatile unsigned int *lock = &lock_data->hw_lock->lock; in drm_lock_take() local
61 old = *lock; in drm_lock_take()
69 prev = cmpxchg(lock, old, new); in drm_lock_take()
106 volatile unsigned int *lock = &lock_data->hw_lock->lock; in drm_lock_transfer() local
110 old = *lock; in drm_lock_transfer()
112 prev = cmpxchg(lock, old, new); in drm_lock_transfer()
121 volatile unsigned int *lock = &lock_data->hw_lock->lock; in drm_legacy_lock_free() local
133 old = *lock; in drm_legacy_lock_free()
135 prev = cmpxchg(lock, old, new); in drm_legacy_lock_free()
162 struct drm_lock *lock = data; in drm_legacy_lock() local
[all …]
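
drm_lock_take(), drm_lock_transfer() and drm_legacy_lock_free() all share one shape: read the hardware lock word, compute the desired new value, and cmpxchg() it in, retrying only if another context raced in between. A userspace sketch of that loop with C11 atomics; LOCK_HELD is a stand-in for _DRM_LOCK_HELD, and the contention flag handling is omitted:

    #include <stdatomic.h>
    #include <stdbool.h>

    #define LOCK_HELD 0x80000000u   /* stand-in for _DRM_LOCK_HELD */

    /* The drm_lock_take() shape: load, compute, compare-and-swap,
     * and retry only when someone raced between the load and the swap. */
    static bool lock_take(atomic_uint *lock, unsigned int context)
    {
            unsigned int old, new;

            do {
                    old = atomic_load(lock);
                    if (old & LOCK_HELD)
                            return false;   /* already held by someone */
                    new = context | LOCK_HELD;
            } while (!atomic_compare_exchange_weak(lock, &old, new));

            return true;
    }
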
drm_modeset_lock.c
307 struct drm_modeset_lock *lock; in drm_modeset_drop_locks() local
309 lock = list_first_entry(&ctx->locked, in drm_modeset_drop_locks()
312 drm_modeset_unlock(lock); in drm_modeset_drop_locks()
317 static inline int modeset_lock(struct drm_modeset_lock *lock, in modeset_lock() argument
328 if (!ww_mutex_trylock(&lock->mutex)) in modeset_lock()
333 ret = ww_mutex_lock_slow_interruptible(&lock->mutex, &ctx->ww_ctx); in modeset_lock()
335 ret = ww_mutex_lock_interruptible(&lock->mutex, &ctx->ww_ctx); in modeset_lock()
337 ww_mutex_lock_slow(&lock->mutex, &ctx->ww_ctx); in modeset_lock()
340 ret = ww_mutex_lock(&lock->mutex, &ctx->ww_ctx); in modeset_lock()
343 WARN_ON(!list_empty(&lock->head)); in modeset_lock()
[all …]
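
modeset_lock() is the ww_mutex slow path under the hood; consumers never call it directly but follow the loop documented in drm_modeset_lock.c: take locks in any order, and on -EDEADLK back off (which drops everything held and waits on the contended lock) before retrying. A sketch of that consumer-side loop, with crtc->mutex standing in for whichever modeset lock is needed:

    #include <drm/drm_crtc.h>
    #include <drm/drm_modeset_lock.h>

    static int touch_crtc_state(struct drm_crtc *crtc)
    {
            struct drm_modeset_acquire_ctx ctx;
            int ret;

            drm_modeset_acquire_init(&ctx, 0);
    retry:
            ret = drm_modeset_lock(&crtc->mutex, &ctx);
            if (ret == -EDEADLK) {
                    /* lost to another acquire context: release everything
                     * we hold, sleep on the contended lock, start over */
                    drm_modeset_backoff(&ctx);
                    goto retry;
            }
            if (!ret) {
                    /* ... modify the state crtc->mutex protects ... */
            }

            drm_modeset_drop_locks(&ctx);   /* walks ctx->locked, as above */
            drm_modeset_acquire_fini(&ctx);
            return ret;
    }
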
/drivers/staging/lustre/lustre/llite/
range_lock.c
64 void range_lock_init(struct range_lock *lock, __u64 start, __u64 end) in range_lock_init() argument
66 memset(&lock->rl_node, 0, sizeof(lock->rl_node)); in range_lock_init()
69 interval_set(&lock->rl_node, start >> PAGE_SHIFT, end); in range_lock_init()
70 INIT_LIST_HEAD(&lock->rl_next_lock); in range_lock_init()
71 lock->rl_task = NULL; in range_lock_init()
72 lock->rl_lock_count = 0; in range_lock_init()
73 lock->rl_blocking_ranges = 0; in range_lock_init()
74 lock->rl_sequence = 0; in range_lock_init()
77 static inline struct range_lock *next_lock(struct range_lock *lock) in next_lock() argument
79 return list_entry(lock->rl_next_lock.next, typeof(*lock), rl_next_lock); in next_lock()
[all …]
/drivers/acpi/acpica/
utlock.c
62 acpi_status acpi_ut_create_rw_lock(struct acpi_rw_lock *lock) in acpi_ut_create_rw_lock() argument
66 lock->num_readers = 0; in acpi_ut_create_rw_lock()
67 status = acpi_os_create_mutex(&lock->reader_mutex); in acpi_ut_create_rw_lock()
72 status = acpi_os_create_mutex(&lock->writer_mutex); in acpi_ut_create_rw_lock()
76 void acpi_ut_delete_rw_lock(struct acpi_rw_lock *lock) in acpi_ut_delete_rw_lock() argument
79 acpi_os_delete_mutex(lock->reader_mutex); in acpi_ut_delete_rw_lock()
80 acpi_os_delete_mutex(lock->writer_mutex); in acpi_ut_delete_rw_lock()
82 lock->num_readers = 0; in acpi_ut_delete_rw_lock()
83 lock->reader_mutex = NULL; in acpi_ut_delete_rw_lock()
84 lock->writer_mutex = NULL; in acpi_ut_delete_rw_lock()
[all …]
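
acpi_ut_create_rw_lock() builds a reader-writer lock out of two plain mutexes: reader_mutex serializes the reader count, and in the acquire half (not shown here) the first reader in takes writer_mutex while the last reader out releases it, so a writer simply locks writer_mutex directly. A pthread sketch of those two halves (caveat: a strict pthread port would use a semaphore for writer_mutex, since the releasing reader may not be the thread that acquired it, which is one reason ACPICA goes through its OS layer):

    #include <pthread.h>

    struct rw_lock {
            pthread_mutex_t reader_mutex;  /* serializes reader bookkeeping */
            pthread_mutex_t writer_mutex;  /* held while readers or a writer run */
            int             num_readers;
    };

    /* First reader in locks out writers; last reader out lets them back in. */
    static void read_lock(struct rw_lock *l)
    {
            pthread_mutex_lock(&l->reader_mutex);
            if (++l->num_readers == 1)
                    pthread_mutex_lock(&l->writer_mutex);
            pthread_mutex_unlock(&l->reader_mutex);
    }

    static void read_unlock(struct rw_lock *l)
    {
            pthread_mutex_lock(&l->reader_mutex);
            if (--l->num_readers == 0)
                    pthread_mutex_unlock(&l->writer_mutex);
            pthread_mutex_unlock(&l->reader_mutex);
    }
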
/drivers/staging/lustre/lustre/obdclass/
cl_lock.c
48 const char *prefix, const struct cl_lock *lock, in cl_lock_trace0() argument
51 struct cl_object_header *h = cl_object_header(lock->cll_descr.cld_obj); in cl_lock_trace0()
54 prefix, lock, env, h->coh_nesting, func, line); in cl_lock_trace0()
56 #define cl_lock_trace(level, env, prefix, lock) \ argument
57 cl_lock_trace0(level, env, prefix, lock, __func__, __LINE__)
68 void cl_lock_slice_add(struct cl_lock *lock, struct cl_lock_slice *slice, in cl_lock_slice_add() argument
72 slice->cls_lock = lock; in cl_lock_slice_add()
73 list_add_tail(&slice->cls_linkage, &lock->cll_layers); in cl_lock_slice_add()
79 void cl_lock_fini(const struct lu_env *env, struct cl_lock *lock) in cl_lock_fini() argument
81 cl_lock_trace(D_DLMTRACE, env, "destroy lock", lock); in cl_lock_fini()
[all …]
/drivers/staging/lustre/lustre/include/
lustre_dlm.h
269 typedef int (*ldlm_cancel_cbt)(struct ldlm_lock *lock);
288 int (*lvbo_size)(struct ldlm_lock *lock);
290 int (*lvbo_fill)(struct ldlm_lock *lock, void *buf, int buflen);
490 typedef int (*ldlm_blocking_callback)(struct ldlm_lock *lock,
494 typedef int (*ldlm_completion_callback)(struct ldlm_lock *lock, __u64 flags,
497 typedef int (*ldlm_glimpse_callback)(struct ldlm_lock *lock, void *data);
890 static inline bool ldlm_has_layout(struct ldlm_lock *lock) in ldlm_has_layout() argument
892 return lock->l_resource->lr_type == LDLM_IBITS && in ldlm_has_layout()
893 lock->l_policy_data.l_inodebits.bits & MDS_INODELOCK_LAYOUT; in ldlm_has_layout()
909 ldlm_lock_to_ns(struct ldlm_lock *lock) in ldlm_lock_to_ns() argument
[all …]
/drivers/clk/mmp/
clk-apbc.c
32 spinlock_t *lock; member
45 if (apbc->lock) in clk_apbc_prepare()
46 spin_lock_irqsave(apbc->lock, flags); in clk_apbc_prepare()
54 if (apbc->lock) in clk_apbc_prepare()
55 spin_unlock_irqrestore(apbc->lock, flags); in clk_apbc_prepare()
59 if (apbc->lock) in clk_apbc_prepare()
60 spin_lock_irqsave(apbc->lock, flags); in clk_apbc_prepare()
66 if (apbc->lock) in clk_apbc_prepare()
67 spin_unlock_irqrestore(apbc->lock, flags); in clk_apbc_prepare()
72 if (apbc->lock) in clk_apbc_prepare()
[all …]
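
clk-apbc.c (and clk-gate.c and berlin2-div.c below) keep a spinlock pointer rather than a lock: it is only non-NULL when the underlying register is shared between clocks, so every read-modify-write is wrapped in an `if (lock)` guard. A kernel-style sketch of that conditional-guard pattern; the struct layout and MY_ENABLE_BIT are hypothetical:

    #include <linux/io.h>
    #include <linux/spinlock.h>

    #define MY_ENABLE_BIT BIT(0)    /* hypothetical enable bit */

    struct my_clk {
            void __iomem *reg;      /* clock enable register */
            spinlock_t *lock;       /* NULL when the register isn't shared */
    };

    static void my_clk_enable(struct my_clk *clk)
    {
            unsigned long flags = 0;
            u32 val;

            if (clk->lock)
                    spin_lock_irqsave(clk->lock, flags);

            /* the read-modify-write the optional lock exists to protect */
            val = readl(clk->reg);
            writel(val | MY_ENABLE_BIT, clk->reg);

            if (clk->lock)
                    spin_unlock_irqrestore(clk->lock, flags);
    }
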
clk-gate.c
34 if (gate->lock) in mmp_clk_gate_enable()
35 spin_lock_irqsave(gate->lock, flags); in mmp_clk_gate_enable()
42 if (gate->lock) in mmp_clk_gate_enable()
43 spin_unlock_irqrestore(gate->lock, flags); in mmp_clk_gate_enable()
60 if (gate->lock) in mmp_clk_gate_disable()
61 spin_lock_irqsave(gate->lock, flags); in mmp_clk_gate_disable()
68 if (gate->lock) in mmp_clk_gate_disable()
69 spin_unlock_irqrestore(gate->lock, flags); in mmp_clk_gate_disable()
78 if (gate->lock) in mmp_clk_gate_is_enabled()
79 spin_lock_irqsave(gate->lock, flags); in mmp_clk_gate_is_enabled()
[all …]
/drivers/clk/berlin/
berlin2-div.c
68 spinlock_t *lock; member
81 if (div->lock) in berlin2_div_is_enabled()
82 spin_lock(div->lock); in berlin2_div_is_enabled()
87 if (div->lock) in berlin2_div_is_enabled()
88 spin_unlock(div->lock); in berlin2_div_is_enabled()
99 if (div->lock) in berlin2_div_enable()
100 spin_lock(div->lock); in berlin2_div_enable()
106 if (div->lock) in berlin2_div_enable()
107 spin_unlock(div->lock); in berlin2_div_enable()
118 if (div->lock) in berlin2_div_disable()
[all …]
/drivers/gpu/drm/via/
via_video.c
40 XVMCLOCKPTR(dev_priv->sarea_priv, i)->lock = 0; in via_init_futex()
51 volatile int *lock; in via_release_futex() local
57 lock = (volatile int *)XVMCLOCKPTR(dev_priv->sarea_priv, i); in via_release_futex()
58 if ((_DRM_LOCKING_CONTEXT(*lock) == context)) { in via_release_futex()
59 if (_DRM_LOCK_IS_HELD(*lock) in via_release_futex()
60 && (*lock & _DRM_LOCK_CONT)) { in via_release_futex()
63 *lock = 0; in via_release_futex()
71 volatile int *lock; in via_decoder_futex() local
78 if (fx->lock >= VIA_NR_XVMC_LOCKS) in via_decoder_futex()
81 lock = (volatile int *)XVMCLOCKPTR(sAPriv, fx->lock); in via_decoder_futex()
[all …]
/drivers/base/power/
runtime.c
202 spin_lock_irq(&dev->power.lock); in pm_runtime_set_memalloc_noio()
205 spin_unlock_irq(&dev->power.lock); in pm_runtime_set_memalloc_noio()
267 __releases(&dev->power.lock) __acquires(&dev->power.lock) in __rpm_callback()
272 spin_unlock(&dev->power.lock); in __rpm_callback()
274 spin_unlock_irq(&dev->power.lock); in __rpm_callback()
279 spin_lock(&dev->power.lock); in __rpm_callback()
281 spin_lock_irq(&dev->power.lock); in __rpm_callback()
416 __releases(&dev->power.lock) __acquires(&dev->power.lock) in rpm_suspend()
475 spin_unlock(&dev->power.lock); in rpm_suspend()
479 spin_lock(&dev->power.lock); in rpm_suspend()
[all …]
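
__rpm_callback() and rpm_suspend() show the drop-and-reacquire idiom: dev->power.lock cannot be held across a potentially sleeping callback, so it is released before the call and retaken after, with the __releases/__acquires annotations documenting that the function is entered and exited with the lock held. A pthread sketch of the same idiom (irq variants dropped; anything decided before the unlock must be rechecked after relocking):

    #include <pthread.h>

    struct device { pthread_mutex_t power_lock; };

    /* Entered and exited with power_lock held, as __rpm_callback() is;
     * the lock is dropped only around the potentially sleeping callback. */
    static int rpm_callback(int (*cb)(struct device *), struct device *dev)
    {
            int ret;

            pthread_mutex_unlock(&dev->power_lock);
            ret = cb(dev);
            pthread_mutex_lock(&dev->power_lock);

            return ret;
    }
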
/drivers/net/wireless/st/cw1200/
queue.c
105 spin_lock_bh(&stats->lock); in __cw1200_queue_gc()
109 spin_unlock_bh(&stats->lock); in __cw1200_queue_gc()
139 spin_lock_bh(&queue->lock); in cw1200_queue_gc()
141 spin_unlock_bh(&queue->lock); in cw1200_queue_gc()
154 spin_lock_init(&stats->lock); in cw1200_queue_stats_init()
181 spin_lock_init(&queue->lock); in cw1200_queue_init()
210 spin_lock_bh(&queue->lock); in cw1200_queue_clear()
222 spin_lock_bh(&stats->lock); in cw1200_queue_clear()
228 spin_unlock_bh(&stats->lock); in cw1200_queue_clear()
233 spin_unlock_bh(&queue->lock); in cw1200_queue_clear()
[all …]
/drivers/acpi/
acpi_dbg.c
54 struct mutex lock; member
139 mutex_lock(&acpi_aml_io.lock); in acpi_aml_running()
141 mutex_unlock(&acpi_aml_io.lock); in acpi_aml_running()
149 mutex_lock(&acpi_aml_io.lock); in acpi_aml_busy()
151 mutex_unlock(&acpi_aml_io.lock); in acpi_aml_busy()
163 mutex_lock(&acpi_aml_io.lock); in acpi_aml_used()
165 mutex_unlock(&acpi_aml_io.lock); in acpi_aml_used()
173 mutex_lock(&acpi_aml_io.lock); in acpi_aml_kern_readable()
176 mutex_unlock(&acpi_aml_io.lock); in acpi_aml_kern_readable()
184 mutex_lock(&acpi_aml_io.lock); in acpi_aml_kern_writable()
[all …]
/drivers/base/
map.c
26 int (*lock)(dev_t, void *); member
29 struct mutex *lock; member
34 int (*lock)(dev_t, void *), void *data) in kobj_map()
51 p->lock = lock; in kobj_map()
56 mutex_lock(domain->lock); in kobj_map()
64 mutex_unlock(domain->lock); in kobj_map()
78 mutex_lock(domain->lock); in kobj_unmap()
91 mutex_unlock(domain->lock); in kobj_unmap()
102 mutex_lock(domain->lock); in kobj_lookup()
119 if (p->lock && p->lock(dev, data) < 0) { in kobj_lookup()
[all …]
/drivers/usb/gadget/legacy/
inode.c
117 spinlock_t lock; member
176 spin_lock_init (&dev->lock); in dev_new()
193 struct mutex lock; member
296 if (!mutex_trylock(&epdata->lock)) in get_ready_ep()
300 mutex_unlock(&epdata->lock); in get_ready_ep()
308 val = mutex_lock_interruptible(&epdata->lock); in get_ready_ep()
326 mutex_unlock(&epdata->lock); in get_ready_ep()
336 spin_lock_irq (&epdata->dev->lock); in ep_io()
347 spin_unlock_irq (&epdata->dev->lock); in ep_io()
352 spin_lock_irq (&epdata->dev->lock); in ep_io()
[all …]
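
get_ready_ep() picks its locking primitive by file mode: nonblocking opens probe with mutex_trylock() and bail out rather than sleep, while blocking opens wait in mutex_lock_interruptible(). A small pthread sketch of that choice (pthread mutexes are not signal-interruptible, so the blocking branch only approximates the kernel's interruptible variant):

    #include <errno.h>
    #include <pthread.h>
    #include <stdbool.h>

    /* Nonblocking callers get -EAGAIN instead of sleeping on the mutex. */
    static int acquire_ep(pthread_mutex_t *m, bool nonblock)
    {
            if (nonblock)
                    return pthread_mutex_trylock(m) == 0 ? 0 : -EAGAIN;

            pthread_mutex_lock(m);
            return 0;
    }
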
/drivers/usb/serial/
cypress_m8.c
94 spinlock_t lock; /* private lock */ member
367 spin_lock_irqsave(&priv->lock, flags); in cypress_serial_control()
370 spin_unlock_irqrestore(&priv->lock, flags); in cypress_serial_control()
405 spin_lock_irqsave(&priv->lock, flags); in cypress_serial_control()
410 spin_unlock_irqrestore(&priv->lock, flags); in cypress_serial_control()
413 spin_lock_irqsave(&priv->lock, flags); in cypress_serial_control()
415 spin_unlock_irqrestore(&priv->lock, flags); in cypress_serial_control()
427 spin_lock_irqsave(&priv->lock, flags); in cypress_set_dead()
429 spin_unlock_irqrestore(&priv->lock, flags); in cypress_set_dead()
433 spin_unlock_irqrestore(&priv->lock, flags); in cypress_set_dead()
[all …]
/drivers/misc/mic/scif/
scif_epd.c
53 spin_lock(&ep->lock); in scif_teardown_ep()
55 spin_unlock(&ep->lock); in scif_teardown_ep()
69 spin_lock(&ep->lock); in scif_add_epd_to_zombie_list()
71 spin_unlock(&ep->lock); in scif_add_epd_to_zombie_list()
143 spin_lock(&ep->lock); in scif_cnctreq()
147 spin_unlock(&ep->lock); in scif_cnctreq()
155 spin_unlock(&ep->lock); in scif_cnctreq()
178 spin_lock(&ep->lock); in scif_cnctgnt()
188 spin_unlock(&ep->lock); in scif_cnctgnt()
204 spin_lock(&ep->lock); in scif_cnctgnt_ack()
[all …]
