1 /*
2 * GPL HEADER START
3 *
4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
19 *
20 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21 * CA 95054 USA or visit www.sun.com if you need additional information or
22 * have any questions.
23 *
24 * GPL HEADER END
25 */
26 /*
27 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
28 * Use is subject to license terms.
29 *
30 * Copyright (c) 2011, 2012, Intel Corporation.
31 */
32 /*
33 * This file is part of Lustre, http://www.lustre.org/
34 * Lustre is a trademark of Sun Microsystems, Inc.
35 *
36 * Client Extent Lock.
37 *
38 * Author: Nikita Danilov <nikita.danilov@sun.com>
39 */
40
41 #define DEBUG_SUBSYSTEM S_CLASS
42
43 #include "../include/obd_class.h"
44 #include "../include/obd_support.h"
45 #include "../include/lustre_fid.h"
46 #include <linux/list.h>
47 #include "../include/cl_object.h"
48 #include "cl_internal.h"
49
50 /** Lock class of cl_lock::cll_guard */
51 static struct lock_class_key cl_lock_guard_class;
52 static struct kmem_cache *cl_lock_kmem;
53
54 static struct lu_kmem_descr cl_lock_caches[] = {
55 {
56 .ckd_cache = &cl_lock_kmem,
57 .ckd_name = "cl_lock_kmem",
58         .ckd_size = sizeof(struct cl_lock)
59 },
60 {
61 .ckd_cache = NULL
62 }
63 };
64
65 #define CS_LOCK_INC(o, item)
66 #define CS_LOCK_DEC(o, item)
67 #define CS_LOCKSTATE_INC(o, state)
68 #define CS_LOCKSTATE_DEC(o, state)
69
70 /**
71 * Basic lock invariant that is maintained at all times. Caller either has a
72 * reference to \a lock, or somehow assures that \a lock cannot be freed.
73 *
74 * \see cl_lock_invariant()
75 */
76 static int cl_lock_invariant_trusted(const struct lu_env *env,
77 const struct cl_lock *lock)
78 {
79 return ergo(lock->cll_state == CLS_FREEING, lock->cll_holds == 0) &&
80 atomic_read(&lock->cll_ref) >= lock->cll_holds &&
81 lock->cll_holds >= lock->cll_users &&
82 lock->cll_holds >= 0 &&
83 lock->cll_users >= 0 &&
84 lock->cll_depth >= 0;
85 }
86
87 /**
88 * Stronger lock invariant, checking that caller has a reference on a lock.
89 *
90 * \see cl_lock_invariant_trusted()
91 */
92 static int cl_lock_invariant(const struct lu_env *env,
93 const struct cl_lock *lock)
94 {
95 int result;
96
97 result = atomic_read(&lock->cll_ref) > 0 &&
98 cl_lock_invariant_trusted(env, lock);
99 if (!result && env != NULL)
100 CL_LOCK_DEBUG(D_ERROR, env, lock, "invariant broken");
101 return result;
102 }
103
104 /**
105 * Returns lock "nesting": 0 for a top-lock and 1 for a sub-lock.
106 */
107 static enum clt_nesting_level cl_lock_nesting(const struct cl_lock *lock)
108 {
109 return cl_object_header(lock->cll_descr.cld_obj)->coh_nesting;
110 }
111
112 /**
113 * Returns a set of counters for this lock, depending on a lock nesting.
114 */
115 static struct cl_thread_counters *cl_lock_counters(const struct lu_env *env,
116 const struct cl_lock *lock)
117 {
118 struct cl_thread_info *info;
119 enum clt_nesting_level nesting;
120
121 info = cl_env_info(env);
122 nesting = cl_lock_nesting(lock);
123 LASSERT(nesting < ARRAY_SIZE(info->clt_counters));
124 return &info->clt_counters[nesting];
125 }
126
127 static void cl_lock_trace0(int level, const struct lu_env *env,
128 const char *prefix, const struct cl_lock *lock,
129 const char *func, const int line)
130 {
131 struct cl_object_header *h = cl_object_header(lock->cll_descr.cld_obj);
132
133 CDEBUG(level, "%s: %p@(%d %p %d %d %d %d %d %lx)(%p/%d/%d) at %s():%d\n",
134 prefix, lock, atomic_read(&lock->cll_ref),
135 lock->cll_guarder, lock->cll_depth,
136 lock->cll_state, lock->cll_error, lock->cll_holds,
137 lock->cll_users, lock->cll_flags,
138 env, h->coh_nesting, cl_lock_nr_mutexed(env),
139 func, line);
140 }
141
142 #define cl_lock_trace(level, env, prefix, lock) \
143 cl_lock_trace0(level, env, prefix, lock, __func__, __LINE__)
144
145 #define RETIP ((unsigned long)__builtin_return_address(0))
146
147 #ifdef CONFIG_LOCKDEP
148 static struct lock_class_key cl_lock_key;
149
150 static void cl_lock_lockdep_init(struct cl_lock *lock)
151 {
152 lockdep_set_class_and_name(lock, &cl_lock_key, "EXT");
153 }
154
155 static void cl_lock_lockdep_acquire(const struct lu_env *env,
156 struct cl_lock *lock, __u32 enqflags)
157 {
158 cl_lock_counters(env, lock)->ctc_nr_locks_acquired++;
159 lock_map_acquire(&lock->dep_map);
160 }
161
162 static void cl_lock_lockdep_release(const struct lu_env *env,
163 struct cl_lock *lock)
164 {
165 cl_lock_counters(env, lock)->ctc_nr_locks_acquired--;
166 lock_release(&lock->dep_map, 0, RETIP);
167 }
168
169 #else /* !CONFIG_LOCKDEP */
170
171 static void cl_lock_lockdep_init(struct cl_lock *lock)
172 {}
173 static void cl_lock_lockdep_acquire(const struct lu_env *env,
174 struct cl_lock *lock, __u32 enqflags)
175 {}
176 static void cl_lock_lockdep_release(const struct lu_env *env,
177 struct cl_lock *lock)
178 {}
179
180 #endif /* !CONFIG_LOCKDEP */
181
182 /**
183 * Adds lock slice to the compound lock.
184 *
185 * This is called by cl_object_operations::coo_lock_init() methods to add a
186 * per-layer state to the lock. New state is added at the end of
187 * cl_lock::cll_layers list, that is, it is at the bottom of the stack.
188 *
189 * \see cl_req_slice_add(), cl_page_slice_add(), cl_io_slice_add()
190 */
191 void cl_lock_slice_add(struct cl_lock *lock, struct cl_lock_slice *slice,
192 struct cl_object *obj,
193 const struct cl_lock_operations *ops)
194 {
195 slice->cls_lock = lock;
196 list_add_tail(&slice->cls_linkage, &lock->cll_layers);
197 slice->cls_obj = obj;
198 slice->cls_ops = ops;
199 }
200 EXPORT_SYMBOL(cl_lock_slice_add);
201
202 /**
203 * Returns true iff a lock with the mode \a has provides at least the same
204 * guarantees as a lock with the mode \a need.
205 */
206 int cl_lock_mode_match(enum cl_lock_mode has, enum cl_lock_mode need)
207 {
208 LINVRNT(need == CLM_READ || need == CLM_WRITE ||
209 need == CLM_PHANTOM || need == CLM_GROUP);
210 LINVRNT(has == CLM_READ || has == CLM_WRITE ||
211 has == CLM_PHANTOM || has == CLM_GROUP);
212 CLASSERT(CLM_PHANTOM < CLM_READ);
213 CLASSERT(CLM_READ < CLM_WRITE);
214 CLASSERT(CLM_WRITE < CLM_GROUP);
215
216 if (has != CLM_GROUP)
217 return need <= has;
218 else
219 return need == has;
220 }
221 EXPORT_SYMBOL(cl_lock_mode_match);
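
/*
 * Illustration (not part of the original code): since lock modes are ordered
 * CLM_PHANTOM < CLM_READ < CLM_WRITE < CLM_GROUP and group locks match only
 * themselves, the following hold:
 *
 *	cl_lock_mode_match(CLM_WRITE, CLM_READ)  == 1	a write lock covers reads
 *	cl_lock_mode_match(CLM_READ,  CLM_WRITE) == 0
 *	cl_lock_mode_match(CLM_GROUP, CLM_READ)  == 0	group matches group only
 */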
222
223 /**
224 * Returns true iff extent portions of lock descriptions match.
225 */
226 int cl_lock_ext_match(const struct cl_lock_descr *has,
227 const struct cl_lock_descr *need)
228 {
229 return
230 has->cld_start <= need->cld_start &&
231 has->cld_end >= need->cld_end &&
232 cl_lock_mode_match(has->cld_mode, need->cld_mode) &&
233 (has->cld_mode != CLM_GROUP || has->cld_gid == need->cld_gid);
234 }
235 EXPORT_SYMBOL(cl_lock_ext_match);
236
237 /**
238 * Returns true iff a lock with the description \a has provides at least the
239 * same guarantees as a lock with the description \a need.
240 */
241 int cl_lock_descr_match(const struct cl_lock_descr *has,
242 const struct cl_lock_descr *need)
243 {
244 return
245 cl_object_same(has->cld_obj, need->cld_obj) &&
246 cl_lock_ext_match(has, need);
247 }
248 EXPORT_SYMBOL(cl_lock_descr_match);
249
250 static void cl_lock_free(const struct lu_env *env, struct cl_lock *lock)
251 {
252 struct cl_object *obj = lock->cll_descr.cld_obj;
253
254 LINVRNT(!cl_lock_is_mutexed(lock));
255
256 cl_lock_trace(D_DLMTRACE, env, "free lock", lock);
257 might_sleep();
258 while (!list_empty(&lock->cll_layers)) {
259 struct cl_lock_slice *slice;
260
261 slice = list_entry(lock->cll_layers.next,
262 struct cl_lock_slice, cls_linkage);
263 list_del_init(lock->cll_layers.next);
264 slice->cls_ops->clo_fini(env, slice);
265 }
266 CS_LOCK_DEC(obj, total);
267 CS_LOCKSTATE_DEC(obj, lock->cll_state);
268 lu_object_ref_del_at(&obj->co_lu, &lock->cll_obj_ref, "cl_lock", lock);
269 cl_object_put(env, obj);
270 lu_ref_fini(&lock->cll_reference);
271 lu_ref_fini(&lock->cll_holders);
272 mutex_destroy(&lock->cll_guard);
273 kmem_cache_free(cl_lock_kmem, lock);
274 }
275
276 /**
277 * Releases a reference on a lock.
278 *
279 * When last reference is released, lock is returned to the cache, unless it
280 * is in cl_lock_state::CLS_FREEING state, in which case it is destroyed
281 * immediately.
282 *
283 * \see cl_object_put(), cl_page_put()
284 */
285 void cl_lock_put(const struct lu_env *env, struct cl_lock *lock)
286 {
287 struct cl_object *obj;
288
289 LINVRNT(cl_lock_invariant(env, lock));
290 obj = lock->cll_descr.cld_obj;
291 LINVRNT(obj != NULL);
292
293 CDEBUG(D_TRACE, "releasing reference: %d %p %lu\n",
294 atomic_read(&lock->cll_ref), lock, RETIP);
295
296 if (atomic_dec_and_test(&lock->cll_ref)) {
297 if (lock->cll_state == CLS_FREEING) {
298 LASSERT(list_empty(&lock->cll_linkage));
299 cl_lock_free(env, lock);
300 }
301 CS_LOCK_DEC(obj, busy);
302 }
303 }
304 EXPORT_SYMBOL(cl_lock_put);
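
/*
 * Usage sketch (illustration only): references taken with cl_lock_get() (or
 * cl_lock_get_trust() for the initial cached reference) must be balanced by
 * cl_lock_put():
 *
 *	cl_lock_get(lock);		caller already owns a reference
 *	... the lock cannot disappear here ...
 *	cl_lock_put(env, lock);
 *
 * The final cl_lock_put() either leaves the lock in the cache or, if it is
 * already in CLS_FREEING, frees it via cl_lock_free().
 */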
305
306 /**
307 * Acquires an additional reference to a lock.
308 *
309  * This can be called only by a caller already possessing a reference to \a
310 * lock.
311 *
312 * \see cl_object_get(), cl_page_get()
313 */
314 void cl_lock_get(struct cl_lock *lock)
315 {
316 LINVRNT(cl_lock_invariant(NULL, lock));
317 CDEBUG(D_TRACE, "acquiring reference: %d %p %lu\n",
318 atomic_read(&lock->cll_ref), lock, RETIP);
319 atomic_inc(&lock->cll_ref);
320 }
321 EXPORT_SYMBOL(cl_lock_get);
322
323 /**
324 * Acquires a reference to a lock.
325 *
326 * This is much like cl_lock_get(), except that this function can be used to
327 * acquire initial reference to the cached lock. Caller has to deal with all
328 * possible races. Use with care!
329 *
330 * \see cl_page_get_trust()
331 */
332 void cl_lock_get_trust(struct cl_lock *lock)
333 {
334 CDEBUG(D_TRACE, "acquiring trusted reference: %d %p %lu\n",
335 atomic_read(&lock->cll_ref), lock, RETIP);
336 if (atomic_inc_return(&lock->cll_ref) == 1)
337 CS_LOCK_INC(lock->cll_descr.cld_obj, busy);
338 }
339 EXPORT_SYMBOL(cl_lock_get_trust);
340
341 /**
342 * Helper function destroying the lock that wasn't completely initialized.
343 *
344 * Other threads can acquire references to the top-lock through its
345 * sub-locks. Hence, it cannot be cl_lock_free()-ed immediately.
346 */
347 static void cl_lock_finish(const struct lu_env *env, struct cl_lock *lock)
348 {
349 cl_lock_mutex_get(env, lock);
350 cl_lock_cancel(env, lock);
351 cl_lock_delete(env, lock);
352 cl_lock_mutex_put(env, lock);
353 cl_lock_put(env, lock);
354 }
355
356 static struct cl_lock *cl_lock_alloc(const struct lu_env *env,
357 struct cl_object *obj,
358 const struct cl_io *io,
359 const struct cl_lock_descr *descr)
360 {
361 struct cl_lock *lock;
362 struct lu_object_header *head;
363
364 lock = kmem_cache_alloc(cl_lock_kmem, GFP_NOFS | __GFP_ZERO);
365 if (lock != NULL) {
366 atomic_set(&lock->cll_ref, 1);
367 lock->cll_descr = *descr;
368 lock->cll_state = CLS_NEW;
369 cl_object_get(obj);
370 lu_object_ref_add_at(&obj->co_lu, &lock->cll_obj_ref, "cl_lock",
371 lock);
372 INIT_LIST_HEAD(&lock->cll_layers);
373 INIT_LIST_HEAD(&lock->cll_linkage);
374 INIT_LIST_HEAD(&lock->cll_inclosure);
375 lu_ref_init(&lock->cll_reference);
376 lu_ref_init(&lock->cll_holders);
377 mutex_init(&lock->cll_guard);
378 lockdep_set_class(&lock->cll_guard, &cl_lock_guard_class);
379 init_waitqueue_head(&lock->cll_wq);
380 head = obj->co_lu.lo_header;
381 CS_LOCKSTATE_INC(obj, CLS_NEW);
382 CS_LOCK_INC(obj, total);
383 CS_LOCK_INC(obj, create);
384 cl_lock_lockdep_init(lock);
385 list_for_each_entry(obj, &head->loh_layers,
386 co_lu.lo_linkage) {
387 int err;
388
389 err = obj->co_ops->coo_lock_init(env, obj, lock, io);
390 if (err != 0) {
391 cl_lock_finish(env, lock);
392 lock = ERR_PTR(err);
393 break;
394 }
395 }
396 } else
397 lock = ERR_PTR(-ENOMEM);
398 return lock;
399 }
400
401 /**
402 * Transfer the lock into INTRANSIT state and return the original state.
403 *
404 * \pre state: CLS_CACHED, CLS_HELD or CLS_ENQUEUED
405 * \post state: CLS_INTRANSIT
406 * \see CLS_INTRANSIT
407 */
408 static enum cl_lock_state cl_lock_intransit(const struct lu_env *env,
409 struct cl_lock *lock)
410 {
411 enum cl_lock_state state = lock->cll_state;
412
413 LASSERT(cl_lock_is_mutexed(lock));
414 LASSERT(state != CLS_INTRANSIT);
415 LASSERTF(state >= CLS_ENQUEUED && state <= CLS_CACHED,
416 "Malformed lock state %d.\n", state);
417
418 cl_lock_state_set(env, lock, CLS_INTRANSIT);
419 lock->cll_intransit_owner = current;
420 cl_lock_hold_add(env, lock, "intransit", current);
421 return state;
422 }
423
424 /**
425  * Exits the INTRANSIT state and restores the lock to its original state.
426 */
427 static void cl_lock_extransit(const struct lu_env *env, struct cl_lock *lock,
428 enum cl_lock_state state)
429 {
430 LASSERT(cl_lock_is_mutexed(lock));
431 LASSERT(lock->cll_state == CLS_INTRANSIT);
432 LASSERT(state != CLS_INTRANSIT);
433 LASSERT(lock->cll_intransit_owner == current);
434
435 lock->cll_intransit_owner = NULL;
436 cl_lock_state_set(env, lock, state);
437 cl_lock_unhold(env, lock, "intransit", current);
438 }
439
440 /**
441  * Checks whether the lock is in the INTRANSIT state.
442 */
443 int cl_lock_is_intransit(struct cl_lock *lock)
444 {
445 LASSERT(cl_lock_is_mutexed(lock));
446 return lock->cll_state == CLS_INTRANSIT &&
447 lock->cll_intransit_owner != current;
448 }
449 EXPORT_SYMBOL(cl_lock_is_intransit);
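
/*
 * Usage sketch (illustration only): cl_lock_intransit()/cl_lock_extransit()
 * bracket sections where layer methods may block, as cl_use_try() and
 * cl_unuse_try() below do:
 *
 *	state = cl_lock_intransit(env, lock);
 *	... call the ->clo_use()/->clo_unuse() methods, possibly blocking ...
 *	cl_lock_extransit(env, lock, state);
 *
 * While the lock is in CLS_INTRANSIT, other threads observe
 * cl_lock_is_intransit() == 1 and wait (CLO_WAIT) instead of changing the
 * lock state themselves.
 */
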
450 /**
451 * Returns true iff lock is "suitable" for given io. E.g., locks acquired by
452 * truncate and O_APPEND cannot be reused for read/non-append-write, as they
453 * cover multiple stripes and can trigger cascading timeouts.
454 */
455 static int cl_lock_fits_into(const struct lu_env *env,
456 const struct cl_lock *lock,
457 const struct cl_lock_descr *need,
458 const struct cl_io *io)
459 {
460 const struct cl_lock_slice *slice;
461
462 LINVRNT(cl_lock_invariant_trusted(env, lock));
463 list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
464 if (slice->cls_ops->clo_fits_into != NULL &&
465 !slice->cls_ops->clo_fits_into(env, slice, need, io))
466 return 0;
467 }
468 return 1;
469 }
470
471 static struct cl_lock *cl_lock_lookup(const struct lu_env *env,
472 struct cl_object *obj,
473 const struct cl_io *io,
474 const struct cl_lock_descr *need)
475 {
476 struct cl_lock *lock;
477 struct cl_object_header *head;
478
479 head = cl_object_header(obj);
480 assert_spin_locked(&head->coh_lock_guard);
481 CS_LOCK_INC(obj, lookup);
482 list_for_each_entry(lock, &head->coh_locks, cll_linkage) {
483 int matched;
484
485 matched = cl_lock_ext_match(&lock->cll_descr, need) &&
486 lock->cll_state < CLS_FREEING &&
487 lock->cll_error == 0 &&
488 !(lock->cll_flags & CLF_CANCELLED) &&
489 cl_lock_fits_into(env, lock, need, io);
490 CDEBUG(D_DLMTRACE, "has: "DDESCR"(%d) need: "DDESCR": %d\n",
491 PDESCR(&lock->cll_descr), lock->cll_state, PDESCR(need),
492 matched);
493 if (matched) {
494 cl_lock_get_trust(lock);
495 CS_LOCK_INC(obj, hit);
496 return lock;
497 }
498 }
499 return NULL;
500 }
501
502 /**
503 * Returns a lock matching description \a need.
504 *
505 * This is the main entry point into the cl_lock caching interface. First, a
506 * cache (implemented as a per-object linked list) is consulted. If lock is
507 * found there, it is returned immediately. Otherwise new lock is allocated
508 * and returned. In any case, additional reference to lock is acquired.
509 *
510 * \see cl_object_find(), cl_page_find()
511 */
512 static struct cl_lock *cl_lock_find(const struct lu_env *env,
513 const struct cl_io *io,
514 const struct cl_lock_descr *need)
515 {
516 struct cl_object_header *head;
517 struct cl_object *obj;
518 struct cl_lock *lock;
519
520 obj = need->cld_obj;
521 head = cl_object_header(obj);
522
523 spin_lock(&head->coh_lock_guard);
524 lock = cl_lock_lookup(env, obj, io, need);
525 spin_unlock(&head->coh_lock_guard);
526
527 if (lock == NULL) {
528 lock = cl_lock_alloc(env, obj, io, need);
529 if (!IS_ERR(lock)) {
530 struct cl_lock *ghost;
531
532 spin_lock(&head->coh_lock_guard);
533 ghost = cl_lock_lookup(env, obj, io, need);
534 if (ghost == NULL) {
535 cl_lock_get_trust(lock);
536 list_add_tail(&lock->cll_linkage,
537 &head->coh_locks);
538 spin_unlock(&head->coh_lock_guard);
539 CS_LOCK_INC(obj, busy);
540 } else {
541 spin_unlock(&head->coh_lock_guard);
542 /*
543 * Other threads can acquire references to the
544 * top-lock through its sub-locks. Hence, it
545 * cannot be cl_lock_free()-ed immediately.
546 */
547 cl_lock_finish(env, lock);
548 lock = ghost;
549 }
550 }
551 }
552 return lock;
553 }
554
555 /**
556 * Returns existing lock matching given description. This is similar to
557 * cl_lock_find() except that no new lock is created, and returned lock is
558 * guaranteed to be in enum cl_lock_state::CLS_HELD state.
559 */
560 struct cl_lock *cl_lock_peek(const struct lu_env *env, const struct cl_io *io,
561 const struct cl_lock_descr *need,
562 const char *scope, const void *source)
563 {
564 struct cl_object_header *head;
565 struct cl_object *obj;
566 struct cl_lock *lock;
567
568 obj = need->cld_obj;
569 head = cl_object_header(obj);
570
571 do {
572 spin_lock(&head->coh_lock_guard);
573 lock = cl_lock_lookup(env, obj, io, need);
574 spin_unlock(&head->coh_lock_guard);
575 if (lock == NULL)
576 return NULL;
577
578 cl_lock_mutex_get(env, lock);
579 if (lock->cll_state == CLS_INTRANSIT)
580                         /* Don't care about the return value. */
581 cl_lock_state_wait(env, lock);
582 if (lock->cll_state == CLS_FREEING) {
583 cl_lock_mutex_put(env, lock);
584 cl_lock_put(env, lock);
585 lock = NULL;
586 }
587 } while (lock == NULL);
588
589 cl_lock_hold_add(env, lock, scope, source);
590 cl_lock_user_add(env, lock);
591 if (lock->cll_state == CLS_CACHED)
592 cl_use_try(env, lock, 1);
593 if (lock->cll_state == CLS_HELD) {
594 cl_lock_mutex_put(env, lock);
595 cl_lock_lockdep_acquire(env, lock, 0);
596 cl_lock_put(env, lock);
597 } else {
598 cl_unuse_try(env, lock);
599 cl_lock_unhold(env, lock, scope, source);
600 cl_lock_mutex_put(env, lock);
601 cl_lock_put(env, lock);
602 lock = NULL;
603 }
604
605 return lock;
606 }
607 EXPORT_SYMBOL(cl_lock_peek);
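
/*
 * Usage sketch (illustration only): cl_lock_peek() only ever returns a lock
 * in CLS_HELD state, with a hold and a user already attached, so the caller
 * has to drop both when done (compare the failure path above, which uses
 * cl_unuse_try() and cl_lock_unhold()). "peek-sketch" is a hypothetical
 * lu_ref scope used only for this example:
 *
 *	lock = cl_lock_peek(env, io, need, "peek-sketch", current);
 *	if (lock == NULL)
 *		return;		nothing cached covers the extent
 *	... rely on the guarantees of the held lock ...
 *	cl_unuse(env, lock);	drops the user
 *	... then drop the hold taken under the "peek-sketch" scope ...
 */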
608
609 /**
610 * Returns a slice within a lock, corresponding to the given layer in the
611 * device stack.
612 *
613 * \see cl_page_at()
614 */
615 const struct cl_lock_slice *cl_lock_at(const struct cl_lock *lock,
616 const struct lu_device_type *dtype)
617 {
618 const struct cl_lock_slice *slice;
619
620 LINVRNT(cl_lock_invariant_trusted(NULL, lock));
621
622 list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
623 if (slice->cls_obj->co_lu.lo_dev->ld_type == dtype)
624 return slice;
625 }
626 return NULL;
627 }
628 EXPORT_SYMBOL(cl_lock_at);
629
630 static void cl_lock_mutex_tail(const struct lu_env *env, struct cl_lock *lock)
631 {
632 struct cl_thread_counters *counters;
633
634 counters = cl_lock_counters(env, lock);
635 lock->cll_depth++;
636 counters->ctc_nr_locks_locked++;
637 lu_ref_add(&counters->ctc_locks_locked, "cll_guard", lock);
638 cl_lock_trace(D_TRACE, env, "got mutex", lock);
639 }
640
641 /**
642 * Locks cl_lock object.
643 *
644 * This is used to manipulate cl_lock fields, and to serialize state
645 * transitions in the lock state machine.
646 *
647 * \post cl_lock_is_mutexed(lock)
648 *
649 * \see cl_lock_mutex_put()
650 */
651 void cl_lock_mutex_get(const struct lu_env *env, struct cl_lock *lock)
652 {
653 LINVRNT(cl_lock_invariant(env, lock));
654
655 if (lock->cll_guarder == current) {
656 LINVRNT(cl_lock_is_mutexed(lock));
657 LINVRNT(lock->cll_depth > 0);
658 } else {
659 struct cl_object_header *hdr;
660 struct cl_thread_info *info;
661 int i;
662
663 LINVRNT(lock->cll_guarder != current);
664 hdr = cl_object_header(lock->cll_descr.cld_obj);
665 /*
666 * Check that mutices are taken in the bottom-to-top order.
667 */
668 info = cl_env_info(env);
669 for (i = 0; i < hdr->coh_nesting; ++i)
670 LASSERT(info->clt_counters[i].ctc_nr_locks_locked == 0);
671 mutex_lock_nested(&lock->cll_guard, hdr->coh_nesting);
672 lock->cll_guarder = current;
673 LINVRNT(lock->cll_depth == 0);
674 }
675 cl_lock_mutex_tail(env, lock);
676 }
677 EXPORT_SYMBOL(cl_lock_mutex_get);
678
679 /**
680 * Try-locks cl_lock object.
681 *
682 * \retval 0 \a lock was successfully locked
683 *
684 * \retval -EBUSY \a lock cannot be locked right now
685 *
686 * \post ergo(result == 0, cl_lock_is_mutexed(lock))
687 *
688 * \see cl_lock_mutex_get()
689 */
690 int cl_lock_mutex_try(const struct lu_env *env, struct cl_lock *lock)
691 {
692 int result;
693
694 LINVRNT(cl_lock_invariant_trusted(env, lock));
695
696 result = 0;
697 if (lock->cll_guarder == current) {
698 LINVRNT(lock->cll_depth > 0);
699 cl_lock_mutex_tail(env, lock);
700 } else if (mutex_trylock(&lock->cll_guard)) {
701 LINVRNT(lock->cll_depth == 0);
702 lock->cll_guarder = current;
703 cl_lock_mutex_tail(env, lock);
704 } else
705 result = -EBUSY;
706 return result;
707 }
708 EXPORT_SYMBOL(cl_lock_mutex_try);
709
710 /**
711  * Unlocks cl_lock object.
712 *
713 * \pre cl_lock_is_mutexed(lock)
714 *
715 * \see cl_lock_mutex_get()
716 */
717 void cl_lock_mutex_put(const struct lu_env *env, struct cl_lock *lock)
718 {
719 struct cl_thread_counters *counters;
720
721 LINVRNT(cl_lock_invariant(env, lock));
722 LINVRNT(cl_lock_is_mutexed(lock));
723 LINVRNT(lock->cll_guarder == current);
724 LINVRNT(lock->cll_depth > 0);
725
726 counters = cl_lock_counters(env, lock);
727 LINVRNT(counters->ctc_nr_locks_locked > 0);
728
729 cl_lock_trace(D_TRACE, env, "put mutex", lock);
730 lu_ref_del(&counters->ctc_locks_locked, "cll_guard", lock);
731 counters->ctc_nr_locks_locked--;
732 if (--lock->cll_depth == 0) {
733 lock->cll_guarder = NULL;
734 mutex_unlock(&lock->cll_guard);
735 }
736 }
737 EXPORT_SYMBOL(cl_lock_mutex_put);
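
/*
 * Usage sketch (illustration only): the lock mutex serializes state machine
 * transitions and is recursive for the owning thread, so nested critical
 * sections in one thread only bump cll_depth:
 *
 *	cl_lock_mutex_get(env, lock);
 *	...
 *	cl_lock_mutex_get(env, lock);	recursive, cll_depth == 2
 *	...
 *	cl_lock_mutex_put(env, lock);
 *	cl_lock_mutex_put(env, lock);	last put releases cll_guard
 */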
738
739 /**
740 * Returns true iff lock's mutex is owned by the current thread.
741 */
742 int cl_lock_is_mutexed(struct cl_lock *lock)
743 {
744 return lock->cll_guarder == current;
745 }
746 EXPORT_SYMBOL(cl_lock_is_mutexed);
747
748 /**
749 * Returns number of cl_lock mutices held by the current thread (environment).
750 */
751 int cl_lock_nr_mutexed(const struct lu_env *env)
752 {
753 struct cl_thread_info *info;
754 int i;
755 int locked;
756
757 /*
758 * NOTE: if summation across all nesting levels (currently 2) proves
759 * too expensive, a summary counter can be added to
760 * struct cl_thread_info.
761 */
762 info = cl_env_info(env);
763 for (i = 0, locked = 0; i < ARRAY_SIZE(info->clt_counters); ++i)
764 locked += info->clt_counters[i].ctc_nr_locks_locked;
765 return locked;
766 }
767 EXPORT_SYMBOL(cl_lock_nr_mutexed);
768
769 static void cl_lock_cancel0(const struct lu_env *env, struct cl_lock *lock)
770 {
771 LINVRNT(cl_lock_is_mutexed(lock));
772 LINVRNT(cl_lock_invariant(env, lock));
773 if (!(lock->cll_flags & CLF_CANCELLED)) {
774 const struct cl_lock_slice *slice;
775
776 lock->cll_flags |= CLF_CANCELLED;
777 list_for_each_entry_reverse(slice, &lock->cll_layers,
778 cls_linkage) {
779 if (slice->cls_ops->clo_cancel != NULL)
780 slice->cls_ops->clo_cancel(env, slice);
781 }
782 }
783 }
784
785 static void cl_lock_delete0(const struct lu_env *env, struct cl_lock *lock)
786 {
787 struct cl_object_header *head;
788 const struct cl_lock_slice *slice;
789
790 LINVRNT(cl_lock_is_mutexed(lock));
791 LINVRNT(cl_lock_invariant(env, lock));
792
793 if (lock->cll_state < CLS_FREEING) {
794 bool in_cache;
795
796 LASSERT(lock->cll_state != CLS_INTRANSIT);
797 cl_lock_state_set(env, lock, CLS_FREEING);
798
799 head = cl_object_header(lock->cll_descr.cld_obj);
800
801 spin_lock(&head->coh_lock_guard);
802 in_cache = !list_empty(&lock->cll_linkage);
803 if (in_cache)
804 list_del_init(&lock->cll_linkage);
805 spin_unlock(&head->coh_lock_guard);
806
807 if (in_cache) /* coh_locks cache holds a refcount. */
808 cl_lock_put(env, lock);
809
810 /*
811 * From now on, no new references to this lock can be acquired
812 * by cl_lock_lookup().
813 */
814 list_for_each_entry_reverse(slice, &lock->cll_layers,
815 cls_linkage) {
816 if (slice->cls_ops->clo_delete != NULL)
817 slice->cls_ops->clo_delete(env, slice);
818 }
819 /*
820 * From now on, no new references to this lock can be acquired
821 * by layer-specific means (like a pointer from struct
822 * ldlm_lock in osc, or a pointer from top-lock to sub-lock in
823 * lov).
824 *
825 * Lock will be finally freed in cl_lock_put() when last of
826 * existing references goes away.
827 */
828 }
829 }
830
831 /**
832 * Mod(ifie)s cl_lock::cll_holds counter for a given lock. Also, for a
833 * top-lock (nesting == 0) accounts for this modification in the per-thread
834 * debugging counters. Sub-lock holds can be released by a thread different
835 * from one that acquired it.
836 */
837 static void cl_lock_hold_mod(const struct lu_env *env, struct cl_lock *lock,
838 int delta)
839 {
840 struct cl_thread_counters *counters;
841 enum clt_nesting_level nesting;
842
843 lock->cll_holds += delta;
844 nesting = cl_lock_nesting(lock);
845 if (nesting == CNL_TOP) {
846 counters = &cl_env_info(env)->clt_counters[CNL_TOP];
847 counters->ctc_nr_held += delta;
848 LASSERT(counters->ctc_nr_held >= 0);
849 }
850 }
851
852 /**
853 * Mod(ifie)s cl_lock::cll_users counter for a given lock. See
854 * cl_lock_hold_mod() for the explanation of the debugging code.
855 */
856 static void cl_lock_used_mod(const struct lu_env *env, struct cl_lock *lock,
857 int delta)
858 {
859 struct cl_thread_counters *counters;
860 enum clt_nesting_level nesting;
861
862 lock->cll_users += delta;
863 nesting = cl_lock_nesting(lock);
864 if (nesting == CNL_TOP) {
865 counters = &cl_env_info(env)->clt_counters[CNL_TOP];
866 counters->ctc_nr_used += delta;
867 LASSERT(counters->ctc_nr_used >= 0);
868 }
869 }
870
871 void cl_lock_hold_release(const struct lu_env *env, struct cl_lock *lock,
872 const char *scope, const void *source)
873 {
874 LINVRNT(cl_lock_is_mutexed(lock));
875 LINVRNT(cl_lock_invariant(env, lock));
876 LASSERT(lock->cll_holds > 0);
877
878 cl_lock_trace(D_DLMTRACE, env, "hold release lock", lock);
879 lu_ref_del(&lock->cll_holders, scope, source);
880 cl_lock_hold_mod(env, lock, -1);
881 if (lock->cll_holds == 0) {
882 CL_LOCK_ASSERT(lock->cll_state != CLS_HELD, env, lock);
883 if (lock->cll_descr.cld_mode == CLM_PHANTOM ||
884 lock->cll_descr.cld_mode == CLM_GROUP ||
885 lock->cll_state != CLS_CACHED)
886 /*
887 * If lock is still phantom or grouplock when user is
888 * done with it---destroy the lock.
889 */
890 lock->cll_flags |= CLF_CANCELPEND|CLF_DOOMED;
891 if (lock->cll_flags & CLF_CANCELPEND) {
892 lock->cll_flags &= ~CLF_CANCELPEND;
893 cl_lock_cancel0(env, lock);
894 }
895 if (lock->cll_flags & CLF_DOOMED) {
896 /* no longer doomed: it's dead... Jim. */
897 lock->cll_flags &= ~CLF_DOOMED;
898 cl_lock_delete0(env, lock);
899 }
900 }
901 }
902 EXPORT_SYMBOL(cl_lock_hold_release);
903
904 /**
905 * Waits until lock state is changed.
906 *
907 * This function is called with cl_lock mutex locked, atomically releases
908 * mutex and goes to sleep, waiting for a lock state change (signaled by
909 * cl_lock_signal()), and re-acquires the mutex before return.
910 *
911 * This function is used to wait until lock state machine makes some progress
912 * and to emulate synchronous operations on top of asynchronous lock
913 * interface.
914 *
915 * \retval -EINTR wait was interrupted
916 *
917 * \retval 0 wait wasn't interrupted
918 *
919 * \pre cl_lock_is_mutexed(lock)
920 *
921 * \see cl_lock_signal()
922 */
923 int cl_lock_state_wait(const struct lu_env *env, struct cl_lock *lock)
924 {
925 wait_queue_t waiter;
926 sigset_t blocked;
927 int result;
928
929 LINVRNT(cl_lock_is_mutexed(lock));
930 LINVRNT(cl_lock_invariant(env, lock));
931 LASSERT(lock->cll_depth == 1);
932 LASSERT(lock->cll_state != CLS_FREEING); /* too late to wait */
933
934 cl_lock_trace(D_DLMTRACE, env, "state wait lock", lock);
935 result = lock->cll_error;
936 if (result == 0) {
937 /* To avoid being interrupted by the 'non-fatal' signals
938 * (SIGCHLD, for instance), we'd block them temporarily.
939 * LU-305 */
940 blocked = cfs_block_sigsinv(LUSTRE_FATAL_SIGS);
941
942 init_waitqueue_entry(&waiter, current);
943 add_wait_queue(&lock->cll_wq, &waiter);
944 set_current_state(TASK_INTERRUPTIBLE);
945 cl_lock_mutex_put(env, lock);
946
947 LASSERT(cl_lock_nr_mutexed(env) == 0);
948
949 /* Returning ERESTARTSYS instead of EINTR so syscalls
950 * can be restarted if signals are pending here */
951 result = -ERESTARTSYS;
952 if (likely(!OBD_FAIL_CHECK(OBD_FAIL_LOCK_STATE_WAIT_INTR))) {
953 schedule();
954 if (!cfs_signal_pending())
955 result = 0;
956 }
957
958 cl_lock_mutex_get(env, lock);
959 set_current_state(TASK_RUNNING);
960 remove_wait_queue(&lock->cll_wq, &waiter);
961
962 /* Restore old blocked signals */
963 cfs_restore_sigs(blocked);
964 }
965 return result;
966 }
967 EXPORT_SYMBOL(cl_lock_state_wait);
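
/*
 * Usage sketch (illustration only): cl_lock_state_wait() is the building
 * block for "wait until the state machine makes progress" loops, e.g. the
 * one in cl_lock_enqueue_wait() below, which waits for a conflicting lock
 * to reach CLS_FREEING:
 *
 *	cl_lock_mutex_get(env, lock);
 *	while (lock->cll_state != CLS_FREEING) {
 *		rc = cl_lock_state_wait(env, lock);
 *		if (rc != 0)
 *			break;		interrupted by a fatal signal
 *	}
 *	cl_lock_mutex_put(env, lock);
 */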
968
969 static void cl_lock_state_signal(const struct lu_env *env, struct cl_lock *lock,
970 enum cl_lock_state state)
971 {
972 const struct cl_lock_slice *slice;
973
974 LINVRNT(cl_lock_is_mutexed(lock));
975 LINVRNT(cl_lock_invariant(env, lock));
976
977 list_for_each_entry(slice, &lock->cll_layers, cls_linkage)
978 if (slice->cls_ops->clo_state != NULL)
979 slice->cls_ops->clo_state(env, slice, state);
980 wake_up_all(&lock->cll_wq);
981 }
982
983 /**
984 * Notifies waiters that lock state changed.
985 *
986 * Wakes up all waiters sleeping in cl_lock_state_wait(), also notifies all
987 * layers about state change by calling cl_lock_operations::clo_state()
988 * top-to-bottom.
989 */
990 void cl_lock_signal(const struct lu_env *env, struct cl_lock *lock)
991 {
992 cl_lock_trace(D_DLMTRACE, env, "state signal lock", lock);
993 cl_lock_state_signal(env, lock, lock->cll_state);
994 }
995 EXPORT_SYMBOL(cl_lock_signal);
996
997 /**
998 * Changes lock state.
999 *
1000  * This function is invoked to notify layers that lock state changed, possibly
1001 * as a result of an asynchronous event such as call-back reception.
1002 *
1003 * \post lock->cll_state == state
1004 *
1005 * \see cl_lock_operations::clo_state()
1006 */
1007 void cl_lock_state_set(const struct lu_env *env, struct cl_lock *lock,
1008 enum cl_lock_state state)
1009 {
1010 LASSERT(lock->cll_state <= state ||
1011 (lock->cll_state == CLS_CACHED &&
1012 (state == CLS_HELD || /* lock found in cache */
1013 state == CLS_NEW || /* sub-lock canceled */
1014 state == CLS_INTRANSIT)) ||
1015 /* lock is in transit state */
1016 lock->cll_state == CLS_INTRANSIT);
1017
1018 if (lock->cll_state != state) {
1019 CS_LOCKSTATE_DEC(lock->cll_descr.cld_obj, lock->cll_state);
1020 CS_LOCKSTATE_INC(lock->cll_descr.cld_obj, state);
1021
1022 cl_lock_state_signal(env, lock, state);
1023 lock->cll_state = state;
1024 }
1025 }
1026 EXPORT_SYMBOL(cl_lock_state_set);
1027
1028 static int cl_unuse_try_internal(const struct lu_env *env, struct cl_lock *lock)
1029 {
1030 const struct cl_lock_slice *slice;
1031 int result;
1032
1033 do {
1034 result = 0;
1035
1036 LINVRNT(cl_lock_is_mutexed(lock));
1037 LINVRNT(cl_lock_invariant(env, lock));
1038 LASSERT(lock->cll_state == CLS_INTRANSIT);
1039
1040 result = -ENOSYS;
1041 list_for_each_entry_reverse(slice, &lock->cll_layers,
1042 cls_linkage) {
1043 if (slice->cls_ops->clo_unuse != NULL) {
1044 result = slice->cls_ops->clo_unuse(env, slice);
1045 if (result != 0)
1046 break;
1047 }
1048 }
1049 LASSERT(result != -ENOSYS);
1050 } while (result == CLO_REPEAT);
1051
1052 return result;
1053 }
1054
1055 /**
1056 * Yanks lock from the cache (cl_lock_state::CLS_CACHED state) by calling
1057 * cl_lock_operations::clo_use() top-to-bottom to notify layers.
1058  * If \a atomic is 1 and the use fails, the lock is unused again so that
1059  * the whole use process stays atomic.
1060 */
1061 int cl_use_try(const struct lu_env *env, struct cl_lock *lock, int atomic)
1062 {
1063 const struct cl_lock_slice *slice;
1064 int result;
1065 enum cl_lock_state state;
1066
1067 cl_lock_trace(D_DLMTRACE, env, "use lock", lock);
1068
1069 LASSERT(lock->cll_state == CLS_CACHED);
1070 if (lock->cll_error)
1071 return lock->cll_error;
1072
1073 result = -ENOSYS;
1074 state = cl_lock_intransit(env, lock);
1075 list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
1076 if (slice->cls_ops->clo_use != NULL) {
1077 result = slice->cls_ops->clo_use(env, slice);
1078 if (result != 0)
1079 break;
1080 }
1081 }
1082 LASSERT(result != -ENOSYS);
1083
1084 LASSERTF(lock->cll_state == CLS_INTRANSIT, "Wrong state %d.\n",
1085 lock->cll_state);
1086
1087 if (result == 0) {
1088 state = CLS_HELD;
1089 } else {
1090 if (result == -ESTALE) {
1091 /*
1092 * ESTALE means sublock being cancelled
1093 * at this time, and set lock state to
1094 * be NEW here and ask the caller to repeat.
1095 */
1096 state = CLS_NEW;
1097 result = CLO_REPEAT;
1098 }
1099
1100 /* @atomic means back-off-on-failure. */
1101 if (atomic) {
1102 int rc;
1103
1104 rc = cl_unuse_try_internal(env, lock);
1105 /* Vet the results. */
1106 if (rc < 0 && result > 0)
1107 result = rc;
1108 }
1109
1110 }
1111 cl_lock_extransit(env, lock, state);
1112 return result;
1113 }
1114 EXPORT_SYMBOL(cl_use_try);
1115
1116 /**
1117 * Helper for cl_enqueue_try() that calls ->clo_enqueue() across all layers
1118 * top-to-bottom.
1119 */
1120 static int cl_enqueue_kick(const struct lu_env *env,
1121 struct cl_lock *lock,
1122 struct cl_io *io, __u32 flags)
1123 {
1124 int result;
1125 const struct cl_lock_slice *slice;
1126
1127 result = -ENOSYS;
1128 list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
1129 if (slice->cls_ops->clo_enqueue != NULL) {
1130 result = slice->cls_ops->clo_enqueue(env,
1131 slice, io, flags);
1132 if (result != 0)
1133 break;
1134 }
1135 }
1136 LASSERT(result != -ENOSYS);
1137 return result;
1138 }
1139
1140 /**
1141 * Tries to enqueue a lock.
1142 *
1143 * This function is called repeatedly by cl_enqueue() until either lock is
1144 * enqueued, or error occurs. This function does not block waiting for
1145 * networking communication to complete.
1146 *
1147 * \post ergo(result == 0, lock->cll_state == CLS_ENQUEUED ||
1148 * lock->cll_state == CLS_HELD)
1149 *
1150 * \see cl_enqueue() cl_lock_operations::clo_enqueue()
1151 * \see cl_lock_state::CLS_ENQUEUED
1152 */
1153 int cl_enqueue_try(const struct lu_env *env, struct cl_lock *lock,
1154 struct cl_io *io, __u32 flags)
1155 {
1156 int result;
1157
1158 cl_lock_trace(D_DLMTRACE, env, "enqueue lock", lock);
1159 do {
1160 LINVRNT(cl_lock_is_mutexed(lock));
1161
1162 result = lock->cll_error;
1163 if (result != 0)
1164 break;
1165
1166 switch (lock->cll_state) {
1167 case CLS_NEW:
1168 cl_lock_state_set(env, lock, CLS_QUEUING);
1169 /* fall-through */
1170 case CLS_QUEUING:
1171 /* kick layers. */
1172 result = cl_enqueue_kick(env, lock, io, flags);
1173                         /* In the AGL case, cl_lock::cll_state may
1174                          * already have become CLS_HELD. */
1175 if (result == 0 && lock->cll_state == CLS_QUEUING)
1176 cl_lock_state_set(env, lock, CLS_ENQUEUED);
1177 break;
1178 case CLS_INTRANSIT:
1179 LASSERT(cl_lock_is_intransit(lock));
1180 result = CLO_WAIT;
1181 break;
1182 case CLS_CACHED:
1183 /* yank lock from the cache. */
1184 result = cl_use_try(env, lock, 0);
1185 break;
1186 case CLS_ENQUEUED:
1187 case CLS_HELD:
1188 result = 0;
1189 break;
1190 default:
1191 case CLS_FREEING:
1192 /*
1193 * impossible, only held locks with increased
1194 * ->cll_holds can be enqueued, and they cannot be
1195 * freed.
1196 */
1197 LBUG();
1198 }
1199 } while (result == CLO_REPEAT);
1200 return result;
1201 }
1202 EXPORT_SYMBOL(cl_enqueue_try);
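
/*
 * Usage sketch (illustration only): cl_enqueue_try() never blocks, so its
 * callers drive it in a retry loop and sleep in cl_lock_state_wait() when
 * CLO_WAIT is returned. This is the core of cl_enqueue_locked() below
 * (conflict-lock handling omitted here):
 *
 *	do {
 *		result = cl_enqueue_try(env, lock, io, enqflags);
 *		if (result == CLO_WAIT) {
 *			result = cl_lock_state_wait(env, lock);
 *			if (result == 0)
 *				continue;
 *		}
 *		break;
 *	} while (1);
 */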
1203
1204 /**
1205 * Cancel the conflicting lock found during previous enqueue.
1206 *
1207 * \retval 0 conflicting lock has been canceled.
1208 * \retval -ve error code.
1209 */
1210 int cl_lock_enqueue_wait(const struct lu_env *env,
1211 struct cl_lock *lock,
1212 int keep_mutex)
1213 {
1214 struct cl_lock *conflict;
1215 int rc = 0;
1216
1217 LASSERT(cl_lock_is_mutexed(lock));
1218 LASSERT(lock->cll_state == CLS_QUEUING);
1219 LASSERT(lock->cll_conflict != NULL);
1220
1221 conflict = lock->cll_conflict;
1222 lock->cll_conflict = NULL;
1223
1224 cl_lock_mutex_put(env, lock);
1225 LASSERT(cl_lock_nr_mutexed(env) == 0);
1226
1227 cl_lock_mutex_get(env, conflict);
1228 cl_lock_trace(D_DLMTRACE, env, "enqueue wait", conflict);
1229 cl_lock_cancel(env, conflict);
1230 cl_lock_delete(env, conflict);
1231
1232 while (conflict->cll_state != CLS_FREEING) {
1233 rc = cl_lock_state_wait(env, conflict);
1234 if (rc != 0)
1235 break;
1236 }
1237 cl_lock_mutex_put(env, conflict);
1238 lu_ref_del(&conflict->cll_reference, "cancel-wait", lock);
1239 cl_lock_put(env, conflict);
1240
1241 if (keep_mutex)
1242 cl_lock_mutex_get(env, lock);
1243
1244 LASSERT(rc <= 0);
1245 return rc;
1246 }
1247 EXPORT_SYMBOL(cl_lock_enqueue_wait);
1248
1249 static int cl_enqueue_locked(const struct lu_env *env, struct cl_lock *lock,
1250 struct cl_io *io, __u32 enqflags)
1251 {
1252 int result;
1253
1254 LINVRNT(cl_lock_is_mutexed(lock));
1255 LINVRNT(cl_lock_invariant(env, lock));
1256 LASSERT(lock->cll_holds > 0);
1257
1258 cl_lock_user_add(env, lock);
1259 do {
1260 result = cl_enqueue_try(env, lock, io, enqflags);
1261 if (result == CLO_WAIT) {
1262 if (lock->cll_conflict != NULL)
1263 result = cl_lock_enqueue_wait(env, lock, 1);
1264 else
1265 result = cl_lock_state_wait(env, lock);
1266 if (result == 0)
1267 continue;
1268 }
1269 break;
1270 } while (1);
1271 if (result != 0)
1272 cl_unuse_try(env, lock);
1273 LASSERT(ergo(result == 0 && !(enqflags & CEF_AGL),
1274 lock->cll_state == CLS_ENQUEUED ||
1275 lock->cll_state == CLS_HELD));
1276 return result;
1277 }
1278
1279 /**
1280 * Tries to unlock a lock.
1281 *
1282 * This function is called to release underlying resource:
1283 * 1. for top lock, the resource is sublocks it held;
1284 * 2. for sublock, the resource is the reference to dlmlock.
1285 *
1286 * cl_unuse_try is a one-shot operation, so it must NOT return CLO_WAIT.
1287 *
1288 * \see cl_unuse() cl_lock_operations::clo_unuse()
1289 * \see cl_lock_state::CLS_CACHED
1290 */
1291 int cl_unuse_try(const struct lu_env *env, struct cl_lock *lock)
1292 {
1293 int result;
1294 enum cl_lock_state state = CLS_NEW;
1295
1296 cl_lock_trace(D_DLMTRACE, env, "unuse lock", lock);
1297
1298 if (lock->cll_users > 1) {
1299 cl_lock_user_del(env, lock);
1300 return 0;
1301 }
1302
1303         /* Only a lock in CLS_HELD or CLS_ENQUEUED state can hold
1304          * underlying resources. */
1305 if (!(lock->cll_state == CLS_HELD || lock->cll_state == CLS_ENQUEUED)) {
1306 cl_lock_user_del(env, lock);
1307 return 0;
1308 }
1309
1310 /*
1311 * New lock users (->cll_users) are not protecting unlocking
1312 * from proceeding. From this point, lock eventually reaches
1313 * CLS_CACHED, is reinitialized to CLS_NEW or fails into
1314 * CLS_FREEING.
1315 */
1316 state = cl_lock_intransit(env, lock);
1317
1318 result = cl_unuse_try_internal(env, lock);
1319 LASSERT(lock->cll_state == CLS_INTRANSIT);
1320 LASSERT(result != CLO_WAIT);
1321 cl_lock_user_del(env, lock);
1322 if (result == 0 || result == -ESTALE) {
1323 /*
1324 * Return lock back to the cache. This is the only
1325 * place where lock is moved into CLS_CACHED state.
1326 *
1327 * If one of ->clo_unuse() methods returned -ESTALE, lock
1328 * cannot be placed into cache and has to be
1329 * re-initialized. This happens e.g., when a sub-lock was
1330 * canceled while unlocking was in progress.
1331 */
1332 if (state == CLS_HELD && result == 0)
1333 state = CLS_CACHED;
1334 else
1335 state = CLS_NEW;
1336 cl_lock_extransit(env, lock, state);
1337
1338 /*
1339 * Hide -ESTALE error.
1340                  * Consider a glimpse lock with multiple stripes where one
1341                  * of its sub-locks returned -ENAVAIL while the other
1342                  * sub-locks matched write locks. In this case we cannot set
1343                  * this lock to error, because otherwise some of its
1344                  * sub-locks may not be cancelled and some dirty pages would
1345                  * never be written to the OSTs. -jay
1346 */
1347 result = 0;
1348 } else {
1349 CERROR("result = %d, this is unlikely!\n", result);
1350 state = CLS_NEW;
1351 cl_lock_extransit(env, lock, state);
1352 }
1353 return result ?: lock->cll_error;
1354 }
1355 EXPORT_SYMBOL(cl_unuse_try);
1356
1357 static void cl_unuse_locked(const struct lu_env *env, struct cl_lock *lock)
1358 {
1359 int result;
1360
1361 result = cl_unuse_try(env, lock);
1362 if (result)
1363 CL_LOCK_DEBUG(D_ERROR, env, lock, "unuse return %d\n", result);
1364 }
1365
1366 /**
1367 * Unlocks a lock.
1368 */
1369 void cl_unuse(const struct lu_env *env, struct cl_lock *lock)
1370 {
1371 cl_lock_mutex_get(env, lock);
1372 cl_unuse_locked(env, lock);
1373 cl_lock_mutex_put(env, lock);
1374 cl_lock_lockdep_release(env, lock);
1375 }
1376 EXPORT_SYMBOL(cl_unuse);
1377
1378 /**
1379 * Tries to wait for a lock.
1380 *
1381 * This function is called repeatedly by cl_wait() until either lock is
1382 * granted, or error occurs. This function does not block waiting for network
1383 * communication to complete.
1384 *
1385 * \see cl_wait() cl_lock_operations::clo_wait()
1386 * \see cl_lock_state::CLS_HELD
1387 */
1388 int cl_wait_try(const struct lu_env *env, struct cl_lock *lock)
1389 {
1390 const struct cl_lock_slice *slice;
1391 int result;
1392
1393 cl_lock_trace(D_DLMTRACE, env, "wait lock try", lock);
1394 do {
1395 LINVRNT(cl_lock_is_mutexed(lock));
1396 LINVRNT(cl_lock_invariant(env, lock));
1397 LASSERTF(lock->cll_state == CLS_QUEUING ||
1398 lock->cll_state == CLS_ENQUEUED ||
1399 lock->cll_state == CLS_HELD ||
1400 lock->cll_state == CLS_INTRANSIT,
1401 "lock state: %d\n", lock->cll_state);
1402 LASSERT(lock->cll_users > 0);
1403 LASSERT(lock->cll_holds > 0);
1404
1405 result = lock->cll_error;
1406 if (result != 0)
1407 break;
1408
1409 if (cl_lock_is_intransit(lock)) {
1410 result = CLO_WAIT;
1411 break;
1412 }
1413
1414 if (lock->cll_state == CLS_HELD)
1415 /* nothing to do */
1416 break;
1417
1418 result = -ENOSYS;
1419 list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
1420 if (slice->cls_ops->clo_wait != NULL) {
1421 result = slice->cls_ops->clo_wait(env, slice);
1422 if (result != 0)
1423 break;
1424 }
1425 }
1426 LASSERT(result != -ENOSYS);
1427 if (result == 0) {
1428 LASSERT(lock->cll_state != CLS_INTRANSIT);
1429 cl_lock_state_set(env, lock, CLS_HELD);
1430 }
1431 } while (result == CLO_REPEAT);
1432 return result;
1433 }
1434 EXPORT_SYMBOL(cl_wait_try);
1435
1436 /**
1437 * Waits until enqueued lock is granted.
1438 *
1439 * \pre current thread or io owns a hold on the lock
1440 * \pre ergo(result == 0, lock->cll_state == CLS_ENQUEUED ||
1441 * lock->cll_state == CLS_HELD)
1442 *
1443 * \post ergo(result == 0, lock->cll_state == CLS_HELD)
1444 */
1445 int cl_wait(const struct lu_env *env, struct cl_lock *lock)
1446 {
1447 int result;
1448
1449 cl_lock_mutex_get(env, lock);
1450
1451 LINVRNT(cl_lock_invariant(env, lock));
1452 LASSERTF(lock->cll_state == CLS_ENQUEUED || lock->cll_state == CLS_HELD,
1453 "Wrong state %d \n", lock->cll_state);
1454 LASSERT(lock->cll_holds > 0);
1455
1456 do {
1457 result = cl_wait_try(env, lock);
1458 if (result == CLO_WAIT) {
1459 result = cl_lock_state_wait(env, lock);
1460 if (result == 0)
1461 continue;
1462 }
1463 break;
1464 } while (1);
1465 if (result < 0) {
1466 cl_unuse_try(env, lock);
1467 cl_lock_lockdep_release(env, lock);
1468 }
1469 cl_lock_trace(D_DLMTRACE, env, "wait lock", lock);
1470 cl_lock_mutex_put(env, lock);
1471 LASSERT(ergo(result == 0, lock->cll_state == CLS_HELD));
1472 return result;
1473 }
1474 EXPORT_SYMBOL(cl_wait);
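
/*
 * Usage sketch (illustration only, error handling elided): together with the
 * enqueue path above this gives the synchronous pattern for a caller that
 * already owns a hold on the lock:
 *
 *	... enqueue the lock, e.g. with the cl_enqueue_try() loop shown
 *	    earlier, leaving it in CLS_ENQUEUED or CLS_HELD ...
 *	rc = cl_wait(env, lock);	sleeps until the lock is granted
 *	if (rc == 0) {
 *		... lock is CLS_HELD, perform the protected I/O ...
 *		cl_unuse(env, lock);	return the lock to the cache
 *	}
 */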
1475
1476 /**
1477 * Executes cl_lock_operations::clo_weigh(), and sums results to estimate lock
1478 * value.
1479 */
1480 unsigned long cl_lock_weigh(const struct lu_env *env, struct cl_lock *lock)
1481 {
1482 const struct cl_lock_slice *slice;
1483 unsigned long pound;
1484 unsigned long ounce;
1485
1486 LINVRNT(cl_lock_is_mutexed(lock));
1487 LINVRNT(cl_lock_invariant(env, lock));
1488
1489 pound = 0;
1490 list_for_each_entry_reverse(slice, &lock->cll_layers, cls_linkage) {
1491 if (slice->cls_ops->clo_weigh != NULL) {
1492 ounce = slice->cls_ops->clo_weigh(env, slice);
1493 pound += ounce;
1494 if (pound < ounce) /* over-weight^Wflow */
1495 pound = ~0UL;
1496 }
1497 }
1498 return pound;
1499 }
1500 EXPORT_SYMBOL(cl_lock_weigh);
1501
1502 /**
1503 * Notifies layers that lock description changed.
1504 *
1505 * The server can grant client a lock different from one that was requested
1506 * (e.g., larger in extent). This method is called when actually granted lock
1507 * description becomes known to let layers to accommodate for changed lock
1508 * description.
1509 *
1510 * \see cl_lock_operations::clo_modify()
1511 */
1512 int cl_lock_modify(const struct lu_env *env, struct cl_lock *lock,
1513 const struct cl_lock_descr *desc)
1514 {
1515 const struct cl_lock_slice *slice;
1516 struct cl_object *obj = lock->cll_descr.cld_obj;
1517 struct cl_object_header *hdr = cl_object_header(obj);
1518 int result;
1519
1520 cl_lock_trace(D_DLMTRACE, env, "modify lock", lock);
1521 /* don't allow object to change */
1522 LASSERT(obj == desc->cld_obj);
1523 LINVRNT(cl_lock_is_mutexed(lock));
1524 LINVRNT(cl_lock_invariant(env, lock));
1525
1526 list_for_each_entry_reverse(slice, &lock->cll_layers, cls_linkage) {
1527 if (slice->cls_ops->clo_modify != NULL) {
1528 result = slice->cls_ops->clo_modify(env, slice, desc);
1529 if (result != 0)
1530 return result;
1531 }
1532 }
1533 CL_LOCK_DEBUG(D_DLMTRACE, env, lock, " -> "DDESCR"@"DFID"\n",
1534 PDESCR(desc), PFID(lu_object_fid(&desc->cld_obj->co_lu)));
1535 /*
1536 * Just replace description in place. Nothing more is needed for
1537 * now. If locks were indexed according to their extent and/or mode,
1538 * that index would have to be updated here.
1539 */
1540 spin_lock(&hdr->coh_lock_guard);
1541 lock->cll_descr = *desc;
1542 spin_unlock(&hdr->coh_lock_guard);
1543 return 0;
1544 }
1545 EXPORT_SYMBOL(cl_lock_modify);
1546
1547 /**
1548 * Initializes lock closure with a given origin.
1549 *
1550 * \see cl_lock_closure
1551 */
1552 void cl_lock_closure_init(const struct lu_env *env,
1553 struct cl_lock_closure *closure,
1554 struct cl_lock *origin, int wait)
1555 {
1556 LINVRNT(cl_lock_is_mutexed(origin));
1557 LINVRNT(cl_lock_invariant(env, origin));
1558
1559 INIT_LIST_HEAD(&closure->clc_list);
1560 closure->clc_origin = origin;
1561 closure->clc_wait = wait;
1562 closure->clc_nr = 0;
1563 }
1564 EXPORT_SYMBOL(cl_lock_closure_init);
1565
1566 /**
1567 * Builds a closure of \a lock.
1568 *
1569 * Building of a closure consists of adding initial lock (\a lock) into it,
1570 * and calling cl_lock_operations::clo_closure() methods of \a lock. These
1571 * methods might call cl_lock_closure_build() recursively again, adding more
1572 * locks to the closure, etc.
1573 *
1574 * \see cl_lock_closure
1575 */
1576 int cl_lock_closure_build(const struct lu_env *env, struct cl_lock *lock,
1577 struct cl_lock_closure *closure)
1578 {
1579 const struct cl_lock_slice *slice;
1580 int result;
1581
1582 LINVRNT(cl_lock_is_mutexed(closure->clc_origin));
1583 LINVRNT(cl_lock_invariant(env, closure->clc_origin));
1584
1585 result = cl_lock_enclosure(env, lock, closure);
1586 if (result == 0) {
1587 list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
1588 if (slice->cls_ops->clo_closure != NULL) {
1589 result = slice->cls_ops->clo_closure(env, slice,
1590 closure);
1591 if (result != 0)
1592 break;
1593 }
1594 }
1595 }
1596 if (result != 0)
1597 cl_lock_disclosure(env, closure);
1598 return result;
1599 }
1600 EXPORT_SYMBOL(cl_lock_closure_build);
1601
1602 /**
1603 * Adds new lock to a closure.
1604 *
1605 * Try-locks \a lock and if succeeded, adds it to the closure (never more than
1606 * once). If try-lock failed, returns CLO_REPEAT, after optionally waiting
1607 * until next try-lock is likely to succeed.
1608 */
1609 int cl_lock_enclosure(const struct lu_env *env, struct cl_lock *lock,
1610 struct cl_lock_closure *closure)
1611 {
1612 int result = 0;
1613
1614 cl_lock_trace(D_DLMTRACE, env, "enclosure lock", lock);
1615 if (!cl_lock_mutex_try(env, lock)) {
1616 /*
1617 * If lock->cll_inclosure is not empty, lock is already in
1618 * this closure.
1619 */
1620 if (list_empty(&lock->cll_inclosure)) {
1621 cl_lock_get_trust(lock);
1622 lu_ref_add(&lock->cll_reference, "closure", closure);
1623 list_add(&lock->cll_inclosure, &closure->clc_list);
1624 closure->clc_nr++;
1625 } else
1626 cl_lock_mutex_put(env, lock);
1627 result = 0;
1628 } else {
1629 cl_lock_disclosure(env, closure);
1630 if (closure->clc_wait) {
1631 cl_lock_get_trust(lock);
1632 lu_ref_add(&lock->cll_reference, "closure-w", closure);
1633 cl_lock_mutex_put(env, closure->clc_origin);
1634
1635 LASSERT(cl_lock_nr_mutexed(env) == 0);
1636 cl_lock_mutex_get(env, lock);
1637 cl_lock_mutex_put(env, lock);
1638
1639 cl_lock_mutex_get(env, closure->clc_origin);
1640 lu_ref_del(&lock->cll_reference, "closure-w", closure);
1641 cl_lock_put(env, lock);
1642 }
1643 result = CLO_REPEAT;
1644 }
1645 return result;
1646 }
1647 EXPORT_SYMBOL(cl_lock_enclosure);
1648
1649 /** Releases mutices of enclosed locks. */
1650 void cl_lock_disclosure(const struct lu_env *env,
1651 struct cl_lock_closure *closure)
1652 {
1653 struct cl_lock *scan;
1654 struct cl_lock *temp;
1655
1656 cl_lock_trace(D_DLMTRACE, env, "disclosure lock", closure->clc_origin);
1657 list_for_each_entry_safe(scan, temp, &closure->clc_list,
1658                                  cll_inclosure) {
1659 list_del_init(&scan->cll_inclosure);
1660 cl_lock_mutex_put(env, scan);
1661 lu_ref_del(&scan->cll_reference, "closure", closure);
1662 cl_lock_put(env, scan);
1663 closure->clc_nr--;
1664 }
1665 LASSERT(closure->clc_nr == 0);
1666 }
1667 EXPORT_SYMBOL(cl_lock_disclosure);
1668
1669 /** Finalizes a closure. */
1670 void cl_lock_closure_fini(struct cl_lock_closure *closure)
1671 {
1672 LASSERT(closure->clc_nr == 0);
1673 LASSERT(list_empty(&closure->clc_list));
1674 }
1675 EXPORT_SYMBOL(cl_lock_closure_fini);
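
/*
 * Usage sketch (illustration only): the closure helpers are used together,
 * retrying the build step whenever cl_lock_enclosure() had to drop mutexes
 * and returned CLO_REPEAT:
 *
 *	cl_lock_closure_init(env, closure, origin, wait);
 *	do {
 *		rc = cl_lock_closure_build(env, lock, closure);
 *		if (rc == 0) {
 *			... every enclosed lock is mutexed here ...
 *			cl_lock_disclosure(env, closure);
 *		}
 *	} while (rc == CLO_REPEAT);
 *	cl_lock_closure_fini(closure);
 */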
1676
1677 /**
1678  * Destroys this lock: notifies layers (bottom-to-top) that the lock is being
1679  * destroyed, then destroys it. If there are holds on the lock, destruction is
1680  * postponed until all holds are released. This is called when a decision is
1681  * made to destroy the lock in the future, e.g. when a blocking AST is
1682  * received on it, or a fatal communication error happens.
1683 *
1684  * Caller must have a reference on this lock to prevent a situation when
1685  * a deleted lock lingers in memory for an indefinite time because nobody
1686  * calls cl_lock_put() to finish it.
1687 *
1688 * \pre atomic_read(&lock->cll_ref) > 0
1689 * \pre ergo(cl_lock_nesting(lock) == CNL_TOP,
1690 * cl_lock_nr_mutexed(env) == 1)
1691 * [i.e., if a top-lock is deleted, mutices of no other locks can be
1692 * held, as deletion of sub-locks might require releasing a top-lock
1693 * mutex]
1694 *
1695 * \see cl_lock_operations::clo_delete()
1696 * \see cl_lock::cll_holds
1697 */
1698 void cl_lock_delete(const struct lu_env *env, struct cl_lock *lock)
1699 {
1700 LINVRNT(cl_lock_is_mutexed(lock));
1701 LINVRNT(cl_lock_invariant(env, lock));
1702 LASSERT(ergo(cl_lock_nesting(lock) == CNL_TOP,
1703 cl_lock_nr_mutexed(env) == 1));
1704
1705 cl_lock_trace(D_DLMTRACE, env, "delete lock", lock);
1706 if (lock->cll_holds == 0)
1707 cl_lock_delete0(env, lock);
1708 else
1709 lock->cll_flags |= CLF_DOOMED;
1710 }
1711 EXPORT_SYMBOL(cl_lock_delete);
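
/*
 * A minimal sketch (not from the original source) of the calling convention
 * cl_lock_delete() expects, mirroring the pattern used by cl_locks_prune()
 * below: the caller owns both a reference and the lock mutex.
 *
 *	cl_lock_get_trust(lock);
 *	cl_lock_mutex_get(env, lock);
 *	cl_lock_cancel(env, lock);
 *	cl_lock_delete(env, lock);
 *	cl_lock_mutex_put(env, lock);
 *	cl_lock_put(env, lock);
 */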
1712
1713 /**
1714 * Marks the lock as irrecoverably failed and marks it for destruction. This
1715 * happens when, e.g., the server fails to grant a lock to us, or a networking
1716 * time-out happens.
1717 *
1718 * \pre atomic_read(&lock->cll_ref) > 0
1719 *
1720 *	\see cl_lock_delete()
1721 * \see cl_lock::cll_holds
1722 */
1723 void cl_lock_error(const struct lu_env *env, struct cl_lock *lock, int error)
1724 {
1725 LINVRNT(cl_lock_is_mutexed(lock));
1726 LINVRNT(cl_lock_invariant(env, lock));
1727
1728 if (lock->cll_error == 0 && error != 0) {
1729 cl_lock_trace(D_DLMTRACE, env, "set lock error", lock);
1730 lock->cll_error = error;
1731 cl_lock_signal(env, lock);
1732 cl_lock_cancel(env, lock);
1733 cl_lock_delete(env, lock);
1734 }
1735 }
1736 EXPORT_SYMBOL(cl_lock_error);
1737
1738 /**
1739 * Cancels this lock. Notifies layers
1740 * (bottom-to-top) that the lock is being cancelled. If
1741 * there are holds on the lock, cancellation is postponed until
1742 * all holds are released.
1743 *
1744 * Cancellation notification is delivered to layers at most once.
1745 *
1746 * \see cl_lock_operations::clo_cancel()
1747 * \see cl_lock::cll_holds
1748 */
1749 void cl_lock_cancel(const struct lu_env *env, struct cl_lock *lock)
1750 {
1751 LINVRNT(cl_lock_is_mutexed(lock));
1752 LINVRNT(cl_lock_invariant(env, lock));
1753
1754 cl_lock_trace(D_DLMTRACE, env, "cancel lock", lock);
1755 if (lock->cll_holds == 0)
1756 cl_lock_cancel0(env, lock);
1757 else
1758 lock->cll_flags |= CLF_CANCELPEND;
1759 }
1760 EXPORT_SYMBOL(cl_lock_cancel);
1761
1762 /**
1763 * Finds an existing lock covering the given page index, optionally excluding
1764 * the given \a except lock.
1765 */
1766 struct cl_lock *cl_lock_at_pgoff(const struct lu_env *env,
1767 struct cl_object *obj, pgoff_t index,
1768 struct cl_lock *except,
1769 int pending, int canceld)
1770 {
1771 struct cl_object_header *head;
1772 struct cl_lock *scan;
1773 struct cl_lock *lock;
1774 struct cl_lock_descr *need;
1775
1776 head = cl_object_header(obj);
1777 need = &cl_env_info(env)->clt_descr;
1778 lock = NULL;
1779
1780 need->cld_mode = CLM_READ; /* CLM_READ matches both READ & WRITE, but
1781 * not PHANTOM */
1782 need->cld_start = need->cld_end = index;
1783 need->cld_enq_flags = 0;
1784
1785 spin_lock(&head->coh_lock_guard);
1786 /* It is fine to match any group lock since there could be only one
1787 	 * with a unique gid and it conflicts with all other lock modes too */
1788 list_for_each_entry(scan, &head->coh_locks, cll_linkage) {
1789 if (scan != except &&
1790 (scan->cll_descr.cld_mode == CLM_GROUP ||
1791 cl_lock_ext_match(&scan->cll_descr, need)) &&
1792 scan->cll_state >= CLS_HELD &&
1793 scan->cll_state < CLS_FREEING &&
1794 /*
1795 * This check is racy as the lock can be canceled right
1796 		     * after it is done, but this is fine, because the page
1797 		     * already exists.
1798 */
1799 (canceld || !(scan->cll_flags & CLF_CANCELLED)) &&
1800 (pending || !(scan->cll_flags & CLF_CANCELPEND))) {
1801 /* Don't increase cs_hit here since this
1802 * is just a helper function. */
1803 cl_lock_get_trust(scan);
1804 lock = scan;
1805 break;
1806 }
1807 }
1808 spin_unlock(&head->coh_lock_guard);
1809 return lock;
1810 }
1811 EXPORT_SYMBOL(cl_lock_at_pgoff);
1812
1813 /**
1814 * Calculates the page offset at the layer of @lock.
1815 * At the time of this writing, @page is a top page and @lock is a sub-lock.
1816 */
1817 static pgoff_t pgoff_at_lock(struct cl_page *page, struct cl_lock *lock)
1818 {
1819 struct lu_device_type *dtype;
1820 const struct cl_page_slice *slice;
1821
1822 dtype = lock->cll_descr.cld_obj->co_lu.lo_dev->ld_type;
1823 slice = cl_page_at(page, dtype);
1824 LASSERT(slice != NULL);
1825 return slice->cpl_page->cp_index;
1826 }
1827
1828 /**
1829 * Check if @page is covered by another lock; if it is not, discard it.
1830 */
1831 static int check_and_discard_cb(const struct lu_env *env, struct cl_io *io,
1832 struct cl_page *page, void *cbdata)
1833 {
1834 struct cl_thread_info *info = cl_env_info(env);
1835 struct cl_lock *lock = cbdata;
1836 pgoff_t index = pgoff_at_lock(page, lock);
1837
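	/*
	 * Pages with an index below clt_fn_index were already found to be
	 * covered by another lock on a previous invocation of this callback,
	 * so the lookup (and possible discard) is skipped for them.
	 */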
1838 if (index >= info->clt_fn_index) {
1839 struct cl_lock *tmp;
1840
1841 /* refresh non-overlapped index */
1842 tmp = cl_lock_at_pgoff(env, lock->cll_descr.cld_obj, index,
1843 lock, 1, 0);
1844 if (tmp != NULL) {
1845 /* Cache the first-non-overlapped index so as to skip
1846 * all pages within [index, clt_fn_index). This
1847 * is safe because if tmp lock is canceled, it will
1848 * discard these pages. */
1849 info->clt_fn_index = tmp->cll_descr.cld_end + 1;
1850 if (tmp->cll_descr.cld_end == CL_PAGE_EOF)
1851 info->clt_fn_index = CL_PAGE_EOF;
1852 cl_lock_put(env, tmp);
1853 } else if (cl_page_own(env, io, page) == 0) {
1854 /* discard the page */
1855 cl_page_unmap(env, io, page);
1856 cl_page_discard(env, io, page);
1857 cl_page_disown(env, io, page);
1858 } else {
1859 LASSERT(page->cp_state == CPS_FREEING);
1860 }
1861 }
1862
1863 info->clt_next_index = index + 1;
1864 return CLP_GANG_OKAY;
1865 }
1866
1867 static int discard_cb(const struct lu_env *env, struct cl_io *io,
1868 struct cl_page *page, void *cbdata)
1869 {
1870 struct cl_thread_info *info = cl_env_info(env);
1871 struct cl_lock *lock = cbdata;
1872
1873 LASSERT(lock->cll_descr.cld_mode >= CLM_WRITE);
1874 KLASSERT(ergo(page->cp_type == CPT_CACHEABLE,
1875 !PageWriteback(cl_page_vmpage(env, page))));
1876 KLASSERT(ergo(page->cp_type == CPT_CACHEABLE,
1877 !PageDirty(cl_page_vmpage(env, page))));
1878
1879 info->clt_next_index = pgoff_at_lock(page, lock) + 1;
1880 if (cl_page_own(env, io, page) == 0) {
1881 /* discard the page */
1882 cl_page_unmap(env, io, page);
1883 cl_page_discard(env, io, page);
1884 cl_page_disown(env, io, page);
1885 } else {
1886 LASSERT(page->cp_state == CPS_FREEING);
1887 }
1888
1889 return CLP_GANG_OKAY;
1890 }
1891
1892 /**
1893 * Discards pages protected by the given lock. This function traverses the
1894 * radix tree to find all covered pages and discards them. If a page is
1895 * covered by another lock, it remains in the cache.
1896 *
1897 * If an error happens at any step, the process continues anyway (the
1898 * reasoning being that lock cancellation cannot be delayed indefinitely).
1899 */
1900 int cl_lock_discard_pages(const struct lu_env *env, struct cl_lock *lock)
1901 {
1902 struct cl_thread_info *info = cl_env_info(env);
1903 struct cl_io *io = &info->clt_io;
1904 struct cl_lock_descr *descr = &lock->cll_descr;
1905 cl_page_gang_cb_t cb;
1906 int res;
1907 int result;
1908
1909 LINVRNT(cl_lock_invariant(env, lock));
1910
1911 io->ci_obj = cl_object_top(descr->cld_obj);
1912 io->ci_ignore_layout = 1;
1913 result = cl_io_init(env, io, CIT_MISC, io->ci_obj);
1914 if (result != 0)
1915 goto out;
1916
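	/*
	 * For a read lock a page may still be protected by another lock, so
	 * check_and_discard_cb() verifies coverage before discarding; for
	 * write and group locks discard_cb() discards every page in the
	 * extent unconditionally.
	 */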
1917 cb = descr->cld_mode == CLM_READ ? check_and_discard_cb : discard_cb;
1918 info->clt_fn_index = info->clt_next_index = descr->cld_start;
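	/*
	 * Gang-lookup pages in [clt_next_index, cld_end] and feed them to
	 * @cb; CLP_GANG_RESCHED means the scan stopped to yield the CPU, so
	 * reschedule and continue from the last processed index.
	 */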
1919 do {
1920 res = cl_page_gang_lookup(env, descr->cld_obj, io,
1921 info->clt_next_index, descr->cld_end,
1922 cb, (void *)lock);
1923 if (info->clt_next_index > descr->cld_end)
1924 break;
1925
1926 if (res == CLP_GANG_RESCHED)
1927 cond_resched();
1928 } while (res != CLP_GANG_OKAY);
1929 out:
1930 cl_io_fini(env, io);
1931 return result;
1932 }
1933 EXPORT_SYMBOL(cl_lock_discard_pages);
1934
1935 /**
1936 * Eliminates all locks for a given object.
1937 *
1938 * The caller has to guarantee that no lock is in active use.
1939 *
1940 * \param cancel when this is set, cl_locks_prune() cancels locks before
1941 *	destroying them.
1942 */
1943 void cl_locks_prune(const struct lu_env *env, struct cl_object *obj, int cancel)
1944 {
1945 struct cl_object_header *head;
1946 struct cl_lock *lock;
1947
1948 head = cl_object_header(obj);
1949 /*
1950 * If locks are destroyed without cancellation, all pages must be
1951 * already destroyed (as otherwise they will be left unprotected).
1952 */
1953 LASSERT(ergo(!cancel,
1954 head->coh_tree.rnode == NULL && head->coh_pages == 0));
1955
1956 spin_lock(&head->coh_lock_guard);
1957 while (!list_empty(&head->coh_locks)) {
1958 lock = container_of(head->coh_locks.next,
1959 struct cl_lock, cll_linkage);
1960 cl_lock_get_trust(lock);
1961 spin_unlock(&head->coh_lock_guard);
1962 lu_ref_add(&lock->cll_reference, "prune", current);
1963
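		/*
		 * If the lock still has a user it cannot be cancelled or
		 * deleted yet: drop the mutex, wait on cll_wq until cll_users
		 * drops to zero (cl_lock_user_del() wakes us up), and retry.
		 */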
1964 again:
1965 cl_lock_mutex_get(env, lock);
1966 if (lock->cll_state < CLS_FREEING) {
1967 LASSERT(lock->cll_users <= 1);
1968 if (unlikely(lock->cll_users == 1)) {
1969 struct l_wait_info lwi = { 0 };
1970
1971 cl_lock_mutex_put(env, lock);
1972 l_wait_event(lock->cll_wq,
1973 lock->cll_users == 0,
1974 &lwi);
1975 goto again;
1976 }
1977
1978 if (cancel)
1979 cl_lock_cancel(env, lock);
1980 cl_lock_delete(env, lock);
1981 }
1982 cl_lock_mutex_put(env, lock);
1983 lu_ref_del(&lock->cll_reference, "prune", current);
1984 cl_lock_put(env, lock);
1985 spin_lock(&head->coh_lock_guard);
1986 }
1987 spin_unlock(&head->coh_lock_guard);
1988 }
1989 EXPORT_SYMBOL(cl_locks_prune);
1990
1991 static struct cl_lock *cl_lock_hold_mutex(const struct lu_env *env,
1992 const struct cl_io *io,
1993 const struct cl_lock_descr *need,
1994 const char *scope, const void *source)
1995 {
1996 struct cl_lock *lock;
1997
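	/*
	 * Loop until cl_lock_find() returns a lock that is neither being
	 * freed nor already cancelled; otherwise the lock is dropped and the
	 * lookup is retried.
	 */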
1998 while (1) {
1999 lock = cl_lock_find(env, io, need);
2000 if (IS_ERR(lock))
2001 break;
2002 cl_lock_mutex_get(env, lock);
2003 if (lock->cll_state < CLS_FREEING &&
2004 !(lock->cll_flags & CLF_CANCELLED)) {
2005 cl_lock_hold_mod(env, lock, 1);
2006 lu_ref_add(&lock->cll_holders, scope, source);
2007 lu_ref_add(&lock->cll_reference, scope, source);
2008 break;
2009 }
2010 cl_lock_mutex_put(env, lock);
2011 cl_lock_put(env, lock);
2012 }
2013 return lock;
2014 }
2015
2016 /**
2017 * Returns a lock matching the \a need description, with a reference and a
2018 * hold on it.
2019 *
2020 * This is much like cl_lock_find(), except that cl_lock_hold() additionally
2021 * guarantees that the lock is not in the CLS_FREEING state on return.
2022 */
2023 struct cl_lock *cl_lock_hold(const struct lu_env *env, const struct cl_io *io,
2024 const struct cl_lock_descr *need,
2025 const char *scope, const void *source)
2026 {
2027 struct cl_lock *lock;
2028
2029 lock = cl_lock_hold_mutex(env, io, need, scope, source);
2030 if (!IS_ERR(lock))
2031 cl_lock_mutex_put(env, lock);
2032 return lock;
2033 }
2034 EXPORT_SYMBOL(cl_lock_hold);
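
/*
 * A minimal usage sketch (not from the original source): a hold taken with
 * cl_lock_hold() is paired with cl_lock_release() once the caller is done;
 * @io, @need and the "sketch" scope are placeholders.
 *
 *	struct cl_lock *lock;
 *
 *	lock = cl_lock_hold(env, io, need, "sketch", current);
 *	if (!IS_ERR(lock)) {
 *		... use the lock ...
 *		cl_lock_release(env, lock, "sketch", current);
 *	}
 */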
2035
2036 /**
2037 * Main high-level entry point of the cl_lock interface: finds an existing
2038 * lock or enqueues a new lock matching the given description.
2039 */
2040 struct cl_lock *cl_lock_request(const struct lu_env *env, struct cl_io *io,
2041 const struct cl_lock_descr *need,
2042 const char *scope, const void *source)
2043 {
2044 struct cl_lock *lock;
2045 int rc;
2046 __u32 enqflags = need->cld_enq_flags;
2047
2048 do {
2049 lock = cl_lock_hold_mutex(env, io, need, scope, source);
2050 if (IS_ERR(lock))
2051 break;
2052
2053 rc = cl_enqueue_locked(env, lock, io, enqflags);
2054 if (rc == 0) {
2055 if (cl_lock_fits_into(env, lock, need, io)) {
2056 if (!(enqflags & CEF_AGL)) {
2057 cl_lock_mutex_put(env, lock);
2058 cl_lock_lockdep_acquire(env, lock,
2059 enqflags);
2060 break;
2061 }
2062 rc = 1;
2063 }
2064 cl_unuse_locked(env, lock);
2065 }
2066 cl_lock_trace(D_DLMTRACE, env,
2067 rc <= 0 ? "enqueue failed" : "agl succeed", lock);
2068 cl_lock_hold_release(env, lock, scope, source);
2069 cl_lock_mutex_put(env, lock);
2070 lu_ref_del(&lock->cll_reference, scope, source);
2071 cl_lock_put(env, lock);
2072 if (rc > 0) {
2073 LASSERT(enqflags & CEF_AGL);
2074 lock = NULL;
2075 } else if (rc != 0) {
2076 lock = ERR_PTR(rc);
2077 }
2078 } while (rc == 0);
2079 return lock;
2080 }
2081 EXPORT_SYMBOL(cl_lock_request);
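
/*
 * An illustrative sketch (not from the original source) of requesting a lock
 * through the entry point above; the descriptor fields are the same ones used
 * by cl_lock_at_pgoff() plus cld_obj, and the index values are placeholders.
 *
 *	struct cl_lock_descr *need = &cl_env_info(env)->clt_descr;
 *	struct cl_lock *lock;
 *
 *	need->cld_obj = obj;
 *	need->cld_mode = CLM_READ;
 *	need->cld_start = start_index;
 *	need->cld_end = end_index;
 *	need->cld_enq_flags = 0;
 *	lock = cl_lock_request(env, io, need, "sketch", current);
 *	if (IS_ERR(lock))
 *		... enqueue failed ...
 *	else if (lock == NULL)
 *		... only possible for CEF_AGL requests (see above) ...
 *	else
 *		... lock is enqueued, held and lockdep-acquired ...
 */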
2082
2083 /**
2084 * Adds a hold to a known lock.
2085 */
2086 void cl_lock_hold_add(const struct lu_env *env, struct cl_lock *lock,
2087 const char *scope, const void *source)
2088 {
2089 LINVRNT(cl_lock_is_mutexed(lock));
2090 LINVRNT(cl_lock_invariant(env, lock));
2091 LASSERT(lock->cll_state != CLS_FREEING);
2092
2093 cl_lock_hold_mod(env, lock, 1);
2094 cl_lock_get(lock);
2095 lu_ref_add(&lock->cll_holders, scope, source);
2096 lu_ref_add(&lock->cll_reference, scope, source);
2097 }
2098 EXPORT_SYMBOL(cl_lock_hold_add);
2099
2100 /**
2101 * Releases a hold and a reference on a lock on which the caller has
2102 * acquired a mutex.
2103 */
2104 void cl_lock_unhold(const struct lu_env *env, struct cl_lock *lock,
2105 const char *scope, const void *source)
2106 {
2107 LINVRNT(cl_lock_invariant(env, lock));
2108 cl_lock_hold_release(env, lock, scope, source);
2109 lu_ref_del(&lock->cll_reference, scope, source);
2110 cl_lock_put(env, lock);
2111 }
2112 EXPORT_SYMBOL(cl_lock_unhold);
2113
2114 /**
2115 * Releases a hold and a reference on a lock, obtained by cl_lock_hold().
2116 */
2117 void cl_lock_release(const struct lu_env *env, struct cl_lock *lock,
2118 const char *scope, const void *source)
2119 {
2120 LINVRNT(cl_lock_invariant(env, lock));
2121 cl_lock_trace(D_DLMTRACE, env, "release lock", lock);
2122 cl_lock_mutex_get(env, lock);
2123 cl_lock_hold_release(env, lock, scope, source);
2124 cl_lock_mutex_put(env, lock);
2125 lu_ref_del(&lock->cll_reference, scope, source);
2126 cl_lock_put(env, lock);
2127 }
2128 EXPORT_SYMBOL(cl_lock_release);
2129
2130 void cl_lock_user_add(const struct lu_env *env, struct cl_lock *lock)
2131 {
2132 LINVRNT(cl_lock_is_mutexed(lock));
2133 LINVRNT(cl_lock_invariant(env, lock));
2134
2135 cl_lock_used_mod(env, lock, 1);
2136 }
2137 EXPORT_SYMBOL(cl_lock_user_add);
2138
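/**
 * Drops a user count on the lock and, when the last user goes away, wakes up
 * waiters on cl_lock::cll_wq (e.g. cl_locks_prune() above waits for
 * cll_users to drop to zero before cancelling and deleting the lock).
 */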
2139 void cl_lock_user_del(const struct lu_env *env, struct cl_lock *lock)
2140 {
2141 LINVRNT(cl_lock_is_mutexed(lock));
2142 LINVRNT(cl_lock_invariant(env, lock));
2143 LASSERT(lock->cll_users > 0);
2144
2145 cl_lock_used_mod(env, lock, -1);
2146 if (lock->cll_users == 0)
2147 wake_up_all(&lock->cll_wq);
2148 }
2149 EXPORT_SYMBOL(cl_lock_user_del);
2150
2151 const char *cl_lock_mode_name(const enum cl_lock_mode mode)
2152 {
2153 static const char *names[] = {
2154 [CLM_PHANTOM] = "P",
2155 [CLM_READ] = "R",
2156 [CLM_WRITE] = "W",
2157 [CLM_GROUP] = "G"
2158 };
2159 if (0 <= mode && mode < ARRAY_SIZE(names))
2160 return names[mode];
2161 else
2162 return "U";
2163 }
2164 EXPORT_SYMBOL(cl_lock_mode_name);
2165
2166 /**
2167 * Prints a human-readable representation of a lock description.
2168 */
2169 void cl_lock_descr_print(const struct lu_env *env, void *cookie,
2170 lu_printer_t printer,
2171 const struct cl_lock_descr *descr)
2172 {
2173 const struct lu_fid *fid;
2174
2175 fid = lu_object_fid(&descr->cld_obj->co_lu);
2176 (*printer)(env, cookie, DDESCR"@"DFID, PDESCR(descr), PFID(fid));
2177 }
2178 EXPORT_SYMBOL(cl_lock_descr_print);
2179
2180 /**
2181 * Prints a human-readable representation of \a lock via \a printer.
2182 */
2183 void cl_lock_print(const struct lu_env *env, void *cookie,
2184 lu_printer_t printer, const struct cl_lock *lock)
2185 {
2186 const struct cl_lock_slice *slice;
2187 (*printer)(env, cookie, "lock@%p[%d %d %d %d %d %08lx] ",
2188 lock, atomic_read(&lock->cll_ref),
2189 lock->cll_state, lock->cll_error, lock->cll_holds,
2190 lock->cll_users, lock->cll_flags);
2191 cl_lock_descr_print(env, cookie, printer, &lock->cll_descr);
2192 (*printer)(env, cookie, " {\n");
2193
2194 list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
2195 (*printer)(env, cookie, " %s@%p: ",
2196 slice->cls_obj->co_lu.lo_dev->ld_type->ldt_name,
2197 slice);
2198 if (slice->cls_ops->clo_print != NULL)
2199 slice->cls_ops->clo_print(env, cookie, printer, slice);
2200 (*printer)(env, cookie, "\n");
2201 }
2202 (*printer)(env, cookie, "} lock@%p\n", lock);
2203 }
2204 EXPORT_SYMBOL(cl_lock_print);
2205
2206 int cl_lock_init(void)
2207 {
2208 return lu_kmem_init(cl_lock_caches);
2209 }
2210
2211 void cl_lock_fini(void)
2212 {
2213 lu_kmem_fini(cl_lock_caches);
2214 }
2215