// SPDX-License-Identifier: GPL-2.0-or-later
/* AFS file locking support
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include "internal.h"

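/*
 * Values for fl_u.afs.state on a queued file_lock: PENDING whilst the waiter
 * is queued, GRANTED once the lock has been handed to it and YOUR_TRY when
 * the waiter has been picked to go and ask the server next.  A negative
 * value is an error to be returned to the waiter.
 */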
#define AFS_LOCK_GRANTED	0
#define AFS_LOCK_PENDING	1
#define AFS_LOCK_YOUR_TRY	2

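/* Workqueue on which lock extension and deferred unlock work is run. */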
struct workqueue_struct *afs_lock_manager;

static void afs_next_locker(struct afs_vnode *vnode, int error);
static void afs_fl_copy_lock(struct file_lock *new, struct file_lock *fl);
static void afs_fl_release_private(struct file_lock *fl);

static const struct file_lock_operations afs_lock_ops = {
	.fl_copy_lock		= afs_fl_copy_lock,
	.fl_release_private	= afs_fl_release_private,
};

static inline void afs_set_lock_state(struct afs_vnode *vnode, enum afs_lock_state state)
{
	_debug("STATE %u -> %u", vnode->lock_state, state);
	vnode->lock_state = state;
}

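/* Source of debug IDs attached to file_lock records for tracing. */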
static atomic_t afs_file_lock_debug_id;

/*
 * if the callback is broken on this vnode, then the lock may now be available
 */
void afs_lock_may_be_available(struct afs_vnode *vnode)
{
	_enter("{%llx:%llu}", vnode->fid.vid, vnode->fid.vnode);

	spin_lock(&vnode->lock);
	if (vnode->lock_state == AFS_VNODE_LOCK_WAITING_FOR_CB)
		afs_next_locker(vnode, 0);
	trace_afs_flock_ev(vnode, NULL, afs_flock_callback_break, 0);
	spin_unlock(&vnode->lock);
}

/*
 * the lock will time out in 5 minutes unless we extend it, so schedule
 * extension in a bit less than that time
 */
static void afs_schedule_lock_extension(struct afs_vnode *vnode)
{
	ktime_t expires_at, now, duration;
	u64 duration_j;

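	/* Aim for roughly the midpoint of the server's lock lifetime
	 * (AFS_LOCKWAIT seconds), measured from the timestamp recorded for
	 * the lock.
	 */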
	expires_at = ktime_add_ms(vnode->locked_at, AFS_LOCKWAIT * 1000 / 2);
	now = ktime_get_real();
	duration = ktime_sub(expires_at, now);
	if (duration <= 0)
		duration_j = 0;
	else
		duration_j = nsecs_to_jiffies(ktime_to_ns(duration));

	queue_delayed_work(afs_lock_manager, &vnode->lock_work, duration_j);
}

/*
 * In the case of successful completion of a lock operation, record the time
 * the reply appeared and start the lock extension timer.
 */
void afs_lock_op_done(struct afs_call *call)
{
	struct afs_vnode *vnode = call->lvnode;

	if (call->error == 0) {
		spin_lock(&vnode->lock);
		trace_afs_flock_ev(vnode, NULL, afs_flock_timestamp, 0);
		vnode->locked_at = call->issue_time;
		afs_schedule_lock_extension(vnode);
		spin_unlock(&vnode->lock);
	}
}

/*
 * grant one or more locks (readlocks are allowed to jump the queue if the
 * first lock in the queue is itself a readlock)
 * - the caller must hold the vnode lock
 */
static void afs_grant_locks(struct afs_vnode *vnode)
{
	struct file_lock *p, *_p;
	bool exclusive = (vnode->lock_type == AFS_LOCK_WRITE);

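	/* If the server lock we hold is a read lock, only grant queued read
	 * locks and leave waiting write locks pending; a write lock lets us
	 * grant everything that's queued.
	 */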
	list_for_each_entry_safe(p, _p, &vnode->pending_locks, fl_u.afs.link) {
		if (!exclusive && p->fl_type == F_WRLCK)
			continue;

		list_move_tail(&p->fl_u.afs.link, &vnode->granted_locks);
		p->fl_u.afs.state = AFS_LOCK_GRANTED;
		trace_afs_flock_op(vnode, p, afs_flock_op_grant);
		wake_up(&p->fl_wait);
	}
}

/*
 * If an error is specified, reject every pending lock that matches the
 * authentication and type of the lock we failed to get.  If there are any
 * remaining lockers, try to wake up one of them to have a go.
 */
static void afs_next_locker(struct afs_vnode *vnode, int error)
{
	struct file_lock *p, *_p, *next = NULL;
	struct key *key = vnode->lock_key;
	unsigned int fl_type = F_RDLCK;

	_enter("");

	if (vnode->lock_type == AFS_LOCK_WRITE)
		fl_type = F_WRLCK;

	list_for_each_entry_safe(p, _p, &vnode->pending_locks, fl_u.afs.link) {
		if (error &&
		    p->fl_type == fl_type &&
		    afs_file_key(p->fl_file) == key) {
			list_del_init(&p->fl_u.afs.link);
			p->fl_u.afs.state = error;
			wake_up(&p->fl_wait);
		}

		/* Select the next locker to hand off to. */
		if (next &&
		    (next->fl_type == F_WRLCK || p->fl_type == F_RDLCK))
			continue;
		next = p;
	}

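	/* Drop the key reference held for the old lock; the next locker
	 * installs its own key when it goes to the server.
	 */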
	vnode->lock_key = NULL;
	key_put(key);

	if (next) {
		afs_set_lock_state(vnode, AFS_VNODE_LOCK_SETTING);
		next->fl_u.afs.state = AFS_LOCK_YOUR_TRY;
		trace_afs_flock_op(vnode, next, afs_flock_op_wake);
		wake_up(&next->fl_wait);
	} else {
		afs_set_lock_state(vnode, AFS_VNODE_LOCK_NONE);
		trace_afs_flock_ev(vnode, NULL, afs_flock_no_lockers, 0);
	}

	_leave("");
}

/*
 * Kill off all waiters in the pending lock queue due to the vnode being
 * deleted.
 */
static void afs_kill_lockers_enoent(struct afs_vnode *vnode)
{
	struct file_lock *p;

	afs_set_lock_state(vnode, AFS_VNODE_LOCK_DELETED);

	while (!list_empty(&vnode->pending_locks)) {
		p = list_entry(vnode->pending_locks.next,
			       struct file_lock, fl_u.afs.link);
		list_del_init(&p->fl_u.afs.link);
		p->fl_u.afs.state = -ENOENT;
		wake_up(&p->fl_wait);
	}

	key_put(vnode->lock_key);
	vnode->lock_key = NULL;
}

/*
 * Get a lock on a file
 */
static int afs_set_lock(struct afs_vnode *vnode, struct key *key,
			afs_lock_type_t type)
{
	struct afs_status_cb *scb;
	struct afs_fs_cursor fc;
	int ret;

	_enter("%s{%llx:%llu.%u},%x,%u",
	       vnode->volume->name,
	       vnode->fid.vid,
	       vnode->fid.vnode,
	       vnode->fid.unique,
	       key_serial(key), type);

	scb = kzalloc(sizeof(struct afs_status_cb), GFP_KERNEL);
	if (!scb)
		return -ENOMEM;

	ret = -ERESTARTSYS;
	if (afs_begin_vnode_operation(&fc, vnode, key, true)) {
		while (afs_select_fileserver(&fc)) {
			fc.cb_break = afs_calc_vnode_cb_break(vnode);
			afs_fs_set_lock(&fc, type, scb);
		}

		afs_check_for_remote_deletion(&fc, vnode);
		afs_vnode_commit_status(&fc, vnode, fc.cb_break, NULL, scb);
		ret = afs_end_vnode_operation(&fc);
	}

	kfree(scb);
	_leave(" = %d", ret);
	return ret;
}

/*
 * Extend a lock on a file
 */
static int afs_extend_lock(struct afs_vnode *vnode, struct key *key)
{
	struct afs_status_cb *scb;
	struct afs_fs_cursor fc;
	int ret;

	_enter("%s{%llx:%llu.%u},%x",
	       vnode->volume->name,
	       vnode->fid.vid,
	       vnode->fid.vnode,
	       vnode->fid.unique,
	       key_serial(key));

	scb = kzalloc(sizeof(struct afs_status_cb), GFP_KERNEL);
	if (!scb)
		return -ENOMEM;

	ret = -ERESTARTSYS;
	if (afs_begin_vnode_operation(&fc, vnode, key, false)) {
		while (afs_select_current_fileserver(&fc)) {
			fc.cb_break = afs_calc_vnode_cb_break(vnode);
			afs_fs_extend_lock(&fc, scb);
		}

		afs_check_for_remote_deletion(&fc, vnode);
		afs_vnode_commit_status(&fc, vnode, fc.cb_break, NULL, scb);
		ret = afs_end_vnode_operation(&fc);
	}

	kfree(scb);
	_leave(" = %d", ret);
	return ret;
}

/*
 * Release a lock on a file
 */
static int afs_release_lock(struct afs_vnode *vnode, struct key *key)
{
	struct afs_status_cb *scb;
	struct afs_fs_cursor fc;
	int ret;

	_enter("%s{%llx:%llu.%u},%x",
	       vnode->volume->name,
	       vnode->fid.vid,
	       vnode->fid.vnode,
	       vnode->fid.unique,
	       key_serial(key));

	scb = kzalloc(sizeof(struct afs_status_cb), GFP_KERNEL);
	if (!scb)
		return -ENOMEM;

	ret = -ERESTARTSYS;
	if (afs_begin_vnode_operation(&fc, vnode, key, false)) {
		while (afs_select_current_fileserver(&fc)) {
			fc.cb_break = afs_calc_vnode_cb_break(vnode);
			afs_fs_release_lock(&fc, scb);
		}

		afs_check_for_remote_deletion(&fc, vnode);
		afs_vnode_commit_status(&fc, vnode, fc.cb_break, NULL, scb);
		ret = afs_end_vnode_operation(&fc);
	}

	kfree(scb);
	_leave(" = %d", ret);
	return ret;
}

/*
 * do work for a lock, including:
 * - probing for a lock we're waiting on but didn't get immediately
 * - extending a lock that's close to timing out
 */
void afs_lock_work(struct work_struct *work)
{
	struct afs_vnode *vnode =
		container_of(work, struct afs_vnode, lock_work.work);
	struct key *key;
	int ret;

	_enter("{%llx:%llu}", vnode->fid.vid, vnode->fid.vnode);

	spin_lock(&vnode->lock);

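	/* The vnode lock is held here and across each state change below; it
	 * is dropped around the RPCs and retaken afterwards.
	 */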
again:
	_debug("wstate %u for %p", vnode->lock_state, vnode);
	switch (vnode->lock_state) {
	case AFS_VNODE_LOCK_NEED_UNLOCK:
		afs_set_lock_state(vnode, AFS_VNODE_LOCK_UNLOCKING);
		trace_afs_flock_ev(vnode, NULL, afs_flock_work_unlocking, 0);
		spin_unlock(&vnode->lock);

		/* attempt to release the server lock; if it fails, we just
		 * wait 5 minutes and it'll expire anyway */
		ret = afs_release_lock(vnode, vnode->lock_key);
		if (ret < 0 && vnode->lock_state != AFS_VNODE_LOCK_DELETED) {
			trace_afs_flock_ev(vnode, NULL, afs_flock_release_fail,
					   ret);
			printk(KERN_WARNING "AFS:"
			       " Failed to release lock on {%llx:%llx} error %d\n",
			       vnode->fid.vid, vnode->fid.vnode, ret);
		}

		spin_lock(&vnode->lock);
		if (ret == -ENOENT)
			afs_kill_lockers_enoent(vnode);
		else
			afs_next_locker(vnode, 0);
		spin_unlock(&vnode->lock);
		return;

	/* If we've already got a lock, then it must be time to extend that
	 * lock as AFS locks time out after 5 minutes.
	 */
	case AFS_VNODE_LOCK_GRANTED:
		_debug("extend");

		ASSERT(!list_empty(&vnode->granted_locks));

		key = key_get(vnode->lock_key);
		afs_set_lock_state(vnode, AFS_VNODE_LOCK_EXTENDING);
		trace_afs_flock_ev(vnode, NULL, afs_flock_work_extending, 0);
		spin_unlock(&vnode->lock);

		ret = afs_extend_lock(vnode, key); /* RPC */
		key_put(key);

		if (ret < 0) {
			trace_afs_flock_ev(vnode, NULL, afs_flock_extend_fail,
					   ret);
			pr_warning("AFS: Failed to extend lock on {%llx:%llx} error %d\n",
				   vnode->fid.vid, vnode->fid.vnode, ret);
		}

		spin_lock(&vnode->lock);

		if (ret == -ENOENT) {
			afs_kill_lockers_enoent(vnode);
			spin_unlock(&vnode->lock);
			return;
		}

		if (vnode->lock_state != AFS_VNODE_LOCK_EXTENDING)
			goto again;
		afs_set_lock_state(vnode, AFS_VNODE_LOCK_GRANTED);

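		/* If the extension failed, poke the work item again in ten
		 * seconds to retry.
		 */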
		if (ret != 0)
			queue_delayed_work(afs_lock_manager, &vnode->lock_work,
					   HZ * 10);
		spin_unlock(&vnode->lock);
		_leave(" [ext]");
		return;

	/* If we're waiting for a callback to indicate lock release, we can't
	 * actually rely on this, so need to recheck at regular intervals.  The
	 * problem is that the server might not notify us if the lock just
	 * expires (say because a client died) rather than being explicitly
	 * released.
	 */
	case AFS_VNODE_LOCK_WAITING_FOR_CB:
		_debug("retry");
		afs_next_locker(vnode, 0);
		spin_unlock(&vnode->lock);
		return;

	case AFS_VNODE_LOCK_DELETED:
		afs_kill_lockers_enoent(vnode);
		spin_unlock(&vnode->lock);
		return;

	default:
		/* Looks like a lock request was withdrawn. */
		spin_unlock(&vnode->lock);
		_leave(" [no]");
		return;
	}
}

/*
 * pass responsibility for the unlocking of a vnode on the server to the
 * manager thread, lest a pending signal in the calling thread interrupt
 * AF_RXRPC
 * - the caller must hold the vnode lock
 */
static void afs_defer_unlock(struct afs_vnode *vnode)
{
	_enter("%u", vnode->lock_state);

	if (list_empty(&vnode->granted_locks) &&
	    (vnode->lock_state == AFS_VNODE_LOCK_GRANTED ||
	     vnode->lock_state == AFS_VNODE_LOCK_EXTENDING)) {
		cancel_delayed_work(&vnode->lock_work);

		afs_set_lock_state(vnode, AFS_VNODE_LOCK_NEED_UNLOCK);
		trace_afs_flock_ev(vnode, NULL, afs_flock_defer_unlock, 0);
		queue_delayed_work(afs_lock_manager, &vnode->lock_work, 0);
	}
}

/*
 * Check that our view of the file metadata is up to date and check to see
 * whether we think that we have a locking permit.
 */
static int afs_do_setlk_check(struct afs_vnode *vnode, struct key *key,
			      enum afs_flock_mode mode, afs_lock_type_t type)
{
	afs_access_t access;
	int ret;

	/* Make sure we've got a callback on this file and that our view of the
	 * data version is up to date.
	 */
	ret = afs_validate(vnode, key);
	if (ret < 0)
		return ret;

	/* Check the permission set to see if we're actually going to be
	 * allowed to get a lock on this file.
	 */
	ret = afs_check_permit(vnode, key, &access);
	if (ret < 0)
		return ret;

	/* As a rough estimate, you need LOCK, WRITE or INSERT perm to
	 * read-lock a file and WRITE or INSERT perm to write-lock a file.
	 *
	 * We can't rely on the server to do this for us since if we want to
	 * share a read lock that we already have, we won't go to the server.
	 */
	if (type == AFS_LOCK_READ) {
		if (!(access & (AFS_ACE_INSERT | AFS_ACE_WRITE | AFS_ACE_LOCK)))
			return -EACCES;
	} else {
		if (!(access & (AFS_ACE_INSERT | AFS_ACE_WRITE)))
			return -EACCES;
	}

	return 0;
}

/*
 * request a lock on a file on the server
 */
static int afs_do_setlk(struct file *file, struct file_lock *fl)
{
	struct inode *inode = locks_inode(file);
	struct afs_vnode *vnode = AFS_FS_I(inode);
	enum afs_flock_mode mode = AFS_FS_S(inode->i_sb)->flock_mode;
	afs_lock_type_t type;
	struct key *key = afs_file_key(file);
	bool partial, no_server_lock = false;
	int ret;

	if (mode == afs_flock_mode_unset)
		mode = afs_flock_mode_openafs;

	_enter("{%llx:%llu},%llu-%llu,%u,%u",
	       vnode->fid.vid, vnode->fid.vnode,
	       fl->fl_start, fl->fl_end, fl->fl_type, mode);

	fl->fl_ops = &afs_lock_ops;
	INIT_LIST_HEAD(&fl->fl_u.afs.link);
	fl->fl_u.afs.state = AFS_LOCK_PENDING;

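	/* Work out whether this is a partial-file lock and what type of
	 * server lock it would need.  In "write" emulation mode, a partial
	 * lock is backed by a whole-file write lock on the server.
	 */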
	partial = (fl->fl_start != 0 || fl->fl_end != OFFSET_MAX);
	type = (fl->fl_type == F_RDLCK) ? AFS_LOCK_READ : AFS_LOCK_WRITE;
	if (mode == afs_flock_mode_write && partial)
		type = AFS_LOCK_WRITE;

	ret = afs_do_setlk_check(vnode, key, mode, type);
	if (ret < 0)
		return ret;

	trace_afs_flock_op(vnode, fl, afs_flock_op_set_lock);

	/* AFS3 protocol only supports full-file locks and doesn't provide any
	 * method of upgrade/downgrade, so we need to emulate for partial-file
	 * locks.
	 *
	 * The OpenAFS client only gets a server lock for a full-file lock and
	 * keeps partial-file locks local.  Allow this behaviour to be emulated
	 * (as the default).
	 */
	if (mode == afs_flock_mode_local ||
	    (partial && mode == afs_flock_mode_openafs)) {
		no_server_lock = true;
		goto skip_server_lock;
	}

	spin_lock(&vnode->lock);
	list_add_tail(&fl->fl_u.afs.link, &vnode->pending_locks);

	ret = -ENOENT;
	if (vnode->lock_state == AFS_VNODE_LOCK_DELETED)
		goto error_unlock;

	/* If we've already got a lock on the server then try to move to having
	 * the VFS grant the requested lock.  Note that this means that other
	 * clients may get starved out.
	 */
	_debug("try %u", vnode->lock_state);
	if (vnode->lock_state == AFS_VNODE_LOCK_GRANTED) {
		if (type == AFS_LOCK_READ) {
			_debug("instant readlock");
			list_move_tail(&fl->fl_u.afs.link, &vnode->granted_locks);
			fl->fl_u.afs.state = AFS_LOCK_GRANTED;
			goto vnode_is_locked_u;
		}

		if (vnode->lock_type == AFS_LOCK_WRITE) {
			_debug("instant writelock");
			list_move_tail(&fl->fl_u.afs.link, &vnode->granted_locks);
			fl->fl_u.afs.state = AFS_LOCK_GRANTED;
			goto vnode_is_locked_u;
		}
	}

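	/* For a non-blocking request, use the last-known server lock count to
	 * fail fast: -1 means the file is write-locked, which blocks a read
	 * request; any non-zero count blocks a write request.
	 */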
	if (vnode->lock_state == AFS_VNODE_LOCK_NONE &&
	    !(fl->fl_flags & FL_SLEEP)) {
		ret = -EAGAIN;
		if (type == AFS_LOCK_READ) {
			if (vnode->status.lock_count == -1)
				goto lock_is_contended; /* Write locked */
		} else {
			if (vnode->status.lock_count != 0)
				goto lock_is_contended; /* Locked */
		}
	}

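	/* Someone else on this client already holds or is in the process of
	 * getting the server lock, so queue up behind them.
	 */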
	if (vnode->lock_state != AFS_VNODE_LOCK_NONE)
		goto need_to_wait;

try_to_lock:
	/* We don't have a lock on this vnode and we aren't currently waiting
	 * for one either, so ask the server for a lock.
	 *
	 * Note that we need to be careful if we get interrupted by a signal
	 * after dispatching the request as we may still get the lock, even
	 * though we don't wait for the reply (it's not too bad a problem - the
	 * lock will expire in 5 mins anyway).
	 */
	trace_afs_flock_ev(vnode, fl, afs_flock_try_to_lock, 0);
	vnode->lock_key = key_get(key);
	vnode->lock_type = type;
	afs_set_lock_state(vnode, AFS_VNODE_LOCK_SETTING);
	spin_unlock(&vnode->lock);

	ret = afs_set_lock(vnode, key, type); /* RPC */

	spin_lock(&vnode->lock);
	switch (ret) {
	case -EKEYREJECTED:
	case -EKEYEXPIRED:
	case -EKEYREVOKED:
	case -EPERM:
	case -EACCES:
		fl->fl_u.afs.state = ret;
		trace_afs_flock_ev(vnode, fl, afs_flock_fail_perm, ret);
		list_del_init(&fl->fl_u.afs.link);
		afs_next_locker(vnode, ret);
		goto error_unlock;

	case -ENOENT:
		fl->fl_u.afs.state = ret;
		trace_afs_flock_ev(vnode, fl, afs_flock_fail_other, ret);
		list_del_init(&fl->fl_u.afs.link);
		afs_kill_lockers_enoent(vnode);
		goto error_unlock;

	default:
		fl->fl_u.afs.state = ret;
		trace_afs_flock_ev(vnode, fl, afs_flock_fail_other, ret);
		list_del_init(&fl->fl_u.afs.link);
		afs_next_locker(vnode, 0);
		goto error_unlock;

	case -EWOULDBLOCK:
		/* The server doesn't have a lock-waiting queue, so the client
		 * will have to retry.  The server will break the outstanding
		 * callbacks on a file when a lock is released.
		 */
		ASSERT(list_empty(&vnode->granted_locks));
		ASSERTCMP(vnode->pending_locks.next, ==, &fl->fl_u.afs.link);
		goto lock_is_contended;

	case 0:
		afs_set_lock_state(vnode, AFS_VNODE_LOCK_GRANTED);
		trace_afs_flock_ev(vnode, fl, afs_flock_acquired, type);
		afs_grant_locks(vnode);
		goto vnode_is_locked_u;
	}

vnode_is_locked_u:
	spin_unlock(&vnode->lock);
vnode_is_locked:
	/* the lock has been granted by the server... */
	ASSERTCMP(fl->fl_u.afs.state, ==, AFS_LOCK_GRANTED);

skip_server_lock:
	/* ... but the VFS still needs to distribute access on this client. */
	trace_afs_flock_ev(vnode, fl, afs_flock_vfs_locking, 0);
	ret = locks_lock_file_wait(file, fl);
	trace_afs_flock_ev(vnode, fl, afs_flock_vfs_lock, ret);
	if (ret < 0)
		goto vfs_rejected_lock;

	/* Again, make sure we've got a callback on this file and, again, make
	 * sure that our view of the data version is up to date (we ignore
	 * errors incurred here and deal with the consequences elsewhere).
	 */
	afs_validate(vnode, key);
	_leave(" = 0");
	return 0;

lock_is_contended:
	if (!(fl->fl_flags & FL_SLEEP)) {
		list_del_init(&fl->fl_u.afs.link);
		afs_next_locker(vnode, 0);
		ret = -EAGAIN;
		goto error_unlock;
	}

	afs_set_lock_state(vnode, AFS_VNODE_LOCK_WAITING_FOR_CB);
	trace_afs_flock_ev(vnode, fl, afs_flock_would_block, ret);
	queue_delayed_work(afs_lock_manager, &vnode->lock_work, HZ * 5);

need_to_wait:
	/* We're going to have to wait.  Either this client doesn't have a lock
	 * on the server yet and we need to wait for a callback to occur, or
	 * the client does have a lock on the server, but it's shared and we
	 * need an exclusive lock.
	 */
	spin_unlock(&vnode->lock);

	trace_afs_flock_ev(vnode, fl, afs_flock_waiting, 0);
	ret = wait_event_interruptible(fl->fl_wait,
				       fl->fl_u.afs.state != AFS_LOCK_PENDING);
	trace_afs_flock_ev(vnode, fl, afs_flock_waited, ret);

	if (fl->fl_u.afs.state >= 0 && fl->fl_u.afs.state != AFS_LOCK_GRANTED) {
		spin_lock(&vnode->lock);

		switch (fl->fl_u.afs.state) {
		case AFS_LOCK_YOUR_TRY:
			fl->fl_u.afs.state = AFS_LOCK_PENDING;
			goto try_to_lock;
		case AFS_LOCK_PENDING:
			if (ret > 0) {
				/* We need to retry the lock.  We may not be
				 * notified by the server if it just expired
				 * rather than being released.
				 */
				ASSERTCMP(vnode->lock_state, ==, AFS_VNODE_LOCK_WAITING_FOR_CB);
				afs_set_lock_state(vnode, AFS_VNODE_LOCK_SETTING);
				fl->fl_u.afs.state = AFS_LOCK_PENDING;
				goto try_to_lock;
			}
			goto error_unlock;
		case AFS_LOCK_GRANTED:
		default:
			break;
		}

		spin_unlock(&vnode->lock);
	}

	if (fl->fl_u.afs.state == AFS_LOCK_GRANTED)
		goto vnode_is_locked;
	ret = fl->fl_u.afs.state;
	goto error;

vfs_rejected_lock:
	/* The VFS rejected the lock we just obtained, so we have to discard
	 * what we just got.  We defer this to the lock manager work item to
	 * deal with.
	 */
	_debug("vfs refused %d", ret);
	if (no_server_lock)
		goto error;
	spin_lock(&vnode->lock);
	list_del_init(&fl->fl_u.afs.link);
	afs_defer_unlock(vnode);

error_unlock:
	spin_unlock(&vnode->lock);
error:
	_leave(" = %d", ret);
	return ret;
}

/*
 * unlock on a file on the server
 */
static int afs_do_unlk(struct file *file, struct file_lock *fl)
{
	struct afs_vnode *vnode = AFS_FS_I(locks_inode(file));
	int ret;

	_enter("{%llx:%llu},%u", vnode->fid.vid, vnode->fid.vnode, fl->fl_type);

	trace_afs_flock_op(vnode, fl, afs_flock_op_unlock);

	/* Flush all pending writes before doing anything with locks. */
	vfs_fsync(file, 0);

	ret = locks_lock_file_wait(file, fl);
	_leave(" = %d [%u]", ret, vnode->lock_state);
	return ret;
}

/*
 * return information about a lock we currently hold, if indeed we hold one
 */
static int afs_do_getlk(struct file *file, struct file_lock *fl)
{
	struct afs_vnode *vnode = AFS_FS_I(locks_inode(file));
	struct key *key = afs_file_key(file);
	int ret, lock_count;

	_enter("");

	if (vnode->lock_state == AFS_VNODE_LOCK_DELETED)
		return -ENOENT;

	fl->fl_type = F_UNLCK;

	/* check local lock records first */
	posix_test_lock(file, fl);
	if (fl->fl_type == F_UNLCK) {
		/* no local locks; consult the server */
		ret = afs_fetch_status(vnode, key, false, NULL);
		if (ret < 0)
			goto error;

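		/* The server's lock count is positive for the number of read
		 * locks held and -1 if the file is write-locked.
		 */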
		lock_count = READ_ONCE(vnode->status.lock_count);
		if (lock_count != 0) {
			if (lock_count > 0)
				fl->fl_type = F_RDLCK;
			else
				fl->fl_type = F_WRLCK;
			fl->fl_start = 0;
			fl->fl_end = OFFSET_MAX;
			fl->fl_pid = 0;
		}
	}

	ret = 0;
error:
	_leave(" = %d [%hd]", ret, fl->fl_type);
	return ret;
}

/*
 * manage POSIX locks on a file
 */
int afs_lock(struct file *file, int cmd, struct file_lock *fl)
{
	struct afs_vnode *vnode = AFS_FS_I(locks_inode(file));
	enum afs_flock_operation op;
	int ret;

	_enter("{%llx:%llu},%d,{t=%x,fl=%x,r=%Ld:%Ld}",
	       vnode->fid.vid, vnode->fid.vnode, cmd,
	       fl->fl_type, fl->fl_flags,
	       (long long) fl->fl_start, (long long) fl->fl_end);

	/* AFS doesn't support mandatory locks */
	if (__mandatory_lock(&vnode->vfs_inode) && fl->fl_type != F_UNLCK)
		return -ENOLCK;

	if (IS_GETLK(cmd))
		return afs_do_getlk(file, fl);

	fl->fl_u.afs.debug_id = atomic_inc_return(&afs_file_lock_debug_id);
	trace_afs_flock_op(vnode, fl, afs_flock_op_lock);

	if (fl->fl_type == F_UNLCK)
		ret = afs_do_unlk(file, fl);
	else
		ret = afs_do_setlk(file, fl);

	switch (ret) {
	case 0:		op = afs_flock_op_return_ok; break;
	case -EAGAIN:	op = afs_flock_op_return_eagain; break;
	case -EDEADLK:	op = afs_flock_op_return_edeadlk; break;
	default:	op = afs_flock_op_return_error; break;
	}
	trace_afs_flock_op(vnode, fl, op);
	return ret;
}

/*
 * manage FLOCK locks on a file
 */
int afs_flock(struct file *file, int cmd, struct file_lock *fl)
{
	struct afs_vnode *vnode = AFS_FS_I(locks_inode(file));
	enum afs_flock_operation op;
	int ret;

	_enter("{%llx:%llu},%d,{t=%x,fl=%x}",
	       vnode->fid.vid, vnode->fid.vnode, cmd,
	       fl->fl_type, fl->fl_flags);

	/*
	 * No BSD flocks over AFS allowed.
	 * Note: we could try to fake a POSIX lock request here by
	 * using ((u32) filp | 0x80000000) or some such as the pid.
	 * Not sure whether that would be unique, though, or whether
	 * that would break in other places.
	 */
	if (!(fl->fl_flags & FL_FLOCK))
		return -ENOLCK;

	fl->fl_u.afs.debug_id = atomic_inc_return(&afs_file_lock_debug_id);
	trace_afs_flock_op(vnode, fl, afs_flock_op_flock);

	/* we're simulating flock() locks using posix locks on the server */
	if (fl->fl_type == F_UNLCK)
		ret = afs_do_unlk(file, fl);
	else
		ret = afs_do_setlk(file, fl);

	switch (ret) {
	case 0:		op = afs_flock_op_return_ok; break;
	case -EAGAIN:	op = afs_flock_op_return_eagain; break;
	case -EDEADLK:	op = afs_flock_op_return_edeadlk; break;
	default:	op = afs_flock_op_return_error; break;
	}
	trace_afs_flock_op(vnode, fl, op);
	return ret;
}

/*
 * the POSIX lock management core VFS code copies the lock record and adds the
 * copy into its own list, so we need to add that copy to the vnode's lock
 * queue in the same place as the original (which will be deleted shortly
 * after)
 */
static void afs_fl_copy_lock(struct file_lock *new, struct file_lock *fl)
{
	struct afs_vnode *vnode = AFS_FS_I(locks_inode(fl->fl_file));

	_enter("");

	new->fl_u.afs.debug_id = atomic_inc_return(&afs_file_lock_debug_id);

	spin_lock(&vnode->lock);
	trace_afs_flock_op(vnode, new, afs_flock_op_copy_lock);
	list_add(&new->fl_u.afs.link, &fl->fl_u.afs.link);
	spin_unlock(&vnode->lock);
}

/*
 * need to remove this lock from the vnode queue when it's removed from the
 * VFS's list
 */
static void afs_fl_release_private(struct file_lock *fl)
{
	struct afs_vnode *vnode = AFS_FS_I(locks_inode(fl->fl_file));

	_enter("");

	spin_lock(&vnode->lock);

	trace_afs_flock_op(vnode, fl, afs_flock_op_release_lock);
	list_del_init(&fl->fl_u.afs.link);
	if (list_empty(&vnode->granted_locks))
		afs_defer_unlock(vnode);

	_debug("state %u for %p", vnode->lock_state, vnode);
	spin_unlock(&vnode->lock);
}