/* AFS volume location management
 *
 * Copyright (C) 2002, 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/sched.h>
#include "internal.h"

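/*
 * Record lifecycle overview:
 * - released records are parked on afs_vlocation_graveyard and destroyed by
 *   the afs_vlocation_reap delayed work once they have been dead for
 *   afs_vlocation_timeout seconds
 * - live records sit on afs_vlocation_updates and are refreshed against the
 *   VL servers by the afs_vlocation_update delayed work roughly every
 *   afs_vlocation_update_timeout seconds
 */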
static unsigned afs_vlocation_timeout = 10;	/* volume location timeout in seconds */
static unsigned afs_vlocation_update_timeout = 10 * 60;

static void afs_vlocation_reaper(struct work_struct *);
static void afs_vlocation_updater(struct work_struct *);

static LIST_HEAD(afs_vlocation_updates);
static LIST_HEAD(afs_vlocation_graveyard);
static DEFINE_SPINLOCK(afs_vlocation_updates_lock);
static DEFINE_SPINLOCK(afs_vlocation_graveyard_lock);
static DECLARE_DELAYED_WORK(afs_vlocation_reap, afs_vlocation_reaper);
static DECLARE_DELAYED_WORK(afs_vlocation_update, afs_vlocation_updater);
static struct workqueue_struct *afs_vlocation_update_worker;

/*
 * iterate through the VL servers in a cell until one of them admits knowing
 * about the volume in question
 */
static int afs_vlocation_access_vl_by_name(struct afs_vlocation *vl,
					   struct key *key,
					   struct afs_cache_vlocation *vldb)
{
	struct afs_cell *cell = vl->cell;
	struct in_addr addr;
	int count, ret;

	_enter("%s,%s", cell->name, vl->vldb.name);

	down_write(&vl->cell->vl_sem);
	ret = -ENOMEDIUM;
	for (count = cell->vl_naddrs; count > 0; count--) {
		addr = cell->vl_addrs[cell->vl_curr_svix];

		_debug("CellServ[%hu]: %08x", cell->vl_curr_svix, addr.s_addr);

		/* attempt to access the VL server */
		ret = afs_vl_get_entry_by_name(&addr, key, vl->vldb.name, vldb,
					       &afs_sync_call);
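		/* classify the result: abort on local resource or network-down
		 * errors and on outright rejections (unknown volume, bad key),
		 * but rotate to the next VL server on per-server failures */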
		switch (ret) {
		case 0:
			goto out;
		case -ENOMEM:
		case -ENONET:
		case -ENETUNREACH:
		case -EHOSTUNREACH:
		case -ECONNREFUSED:
			if (ret == -ENOMEM || ret == -ENONET)
				goto out;
			goto rotate;
		case -ENOMEDIUM:
		case -EKEYREJECTED:
		case -EKEYEXPIRED:
			goto out;
		default:
			ret = -EIO;
			goto rotate;
		}

		/* rotate the server records upon lookup failure */
	rotate:
		cell->vl_curr_svix++;
		cell->vl_curr_svix %= cell->vl_naddrs;
	}

out:
	up_write(&vl->cell->vl_sem);
	_leave(" = %d", ret);
	return ret;
}

/*
 * iterate through the VL servers in a cell until one of them admits knowing
 * about the volume in question, this time looking it up by volume ID
 */
static int afs_vlocation_access_vl_by_id(struct afs_vlocation *vl,
					 struct key *key,
					 afs_volid_t volid,
					 afs_voltype_t voltype,
					 struct afs_cache_vlocation *vldb)
{
	struct afs_cell *cell = vl->cell;
	struct in_addr addr;
	int count, ret;

	_enter("%s,%x,%d,", cell->name, volid, voltype);

	down_write(&vl->cell->vl_sem);
	ret = -ENOMEDIUM;
	for (count = cell->vl_naddrs; count > 0; count--) {
		addr = cell->vl_addrs[cell->vl_curr_svix];

		_debug("CellServ[%hu]: %08x", cell->vl_curr_svix, addr.s_addr);

		/* attempt to access the VL server */
		ret = afs_vl_get_entry_by_id(&addr, key, volid, voltype, vldb,
					     &afs_sync_call);
		switch (ret) {
		case 0:
			goto out;
		case -ENOMEM:
		case -ENONET:
		case -ENETUNREACH:
		case -EHOSTUNREACH:
		case -ECONNREFUSED:
			if (ret == -ENOMEM || ret == -ENONET)
				goto out;
			goto rotate;
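		/* the VL server is busy: retry this same server a few times,
		 * pausing briefly after the first EBUSY, before giving up on
		 * it and rotating to the next server */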
		case -EBUSY:
			vl->upd_busy_cnt++;
			if (vl->upd_busy_cnt <= 3) {
				if (vl->upd_busy_cnt > 1) {
					/* second+ BUSY - sleep a little bit */
					set_current_state(TASK_UNINTERRUPTIBLE);
					schedule_timeout(1);
				}
				continue;
			}
			break;
		case -ENOMEDIUM:
			vl->upd_rej_cnt++;
			goto rotate;
		default:
			ret = -EIO;
			goto rotate;
		}

		/* rotate the server records upon lookup failure */
	rotate:
		cell->vl_curr_svix++;
		cell->vl_curr_svix %= cell->vl_naddrs;
		vl->upd_busy_cnt = 0;
	}

out:
	if (ret < 0 && vl->upd_rej_cnt > 0) {
		printk(KERN_NOTICE "kAFS:"
		       " Active volume no longer valid '%s'\n",
		       vl->vldb.name);
		vl->valid = 0;
		ret = -ENOMEDIUM;
	}

	up_write(&vl->cell->vl_sem);
	_leave(" = %d", ret);
	return ret;
}

/*
 * allocate a volume location record
 */
static struct afs_vlocation *afs_vlocation_alloc(struct afs_cell *cell,
						 const char *name,
						 size_t namesz)
{
	struct afs_vlocation *vl;

	vl = kzalloc(sizeof(struct afs_vlocation), GFP_KERNEL);
	if (vl) {
		vl->cell = cell;
		vl->state = AFS_VL_NEW;
		atomic_set(&vl->usage, 1);
		INIT_LIST_HEAD(&vl->link);
		INIT_LIST_HEAD(&vl->grave);
		INIT_LIST_HEAD(&vl->update);
		init_waitqueue_head(&vl->waitq);
		spin_lock_init(&vl->lock);
		memcpy(vl->vldb.name, name, namesz);
	}

	_leave(" = %p", vl);
	return vl;
}

/*
 * update record if we found it in the cache
 */
static int afs_vlocation_update_record(struct afs_vlocation *vl,
				       struct key *key,
				       struct afs_cache_vlocation *vldb)
{
	afs_voltype_t voltype;
	afs_volid_t vid;
	int ret;

	/* try to look up a cached volume in the cell VL databases by ID */
	_debug("Locally Cached: %s %02x { %08x(%x) %08x(%x) %08x(%x) }",
	       vl->vldb.name,
	       vl->vldb.vidmask,
	       ntohl(vl->vldb.servers[0].s_addr),
	       vl->vldb.srvtmask[0],
	       ntohl(vl->vldb.servers[1].s_addr),
	       vl->vldb.srvtmask[1],
	       ntohl(vl->vldb.servers[2].s_addr),
	       vl->vldb.srvtmask[2]);

	_debug("Vids: %08x %08x %08x",
	       vl->vldb.vid[0],
	       vl->vldb.vid[1],
	       vl->vldb.vid[2]);

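	/* pick a volume ID to revalidate: prefer the RW volume, then the RO
	 * volume, then the backup volume */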
	if (vl->vldb.vidmask & AFS_VOL_VTM_RW) {
		vid = vl->vldb.vid[0];
		voltype = AFSVL_RWVOL;
	} else if (vl->vldb.vidmask & AFS_VOL_VTM_RO) {
		vid = vl->vldb.vid[1];
		voltype = AFSVL_ROVOL;
	} else if (vl->vldb.vidmask & AFS_VOL_VTM_BAK) {
		vid = vl->vldb.vid[2];
		voltype = AFSVL_BACKVOL;
	} else {
		BUG();
		vid = 0;
		voltype = 0;
	}

	/* contact the server to make sure the volume is still available
	 * - TODO: need to handle disconnected operation here
	 */
	ret = afs_vlocation_access_vl_by_id(vl, key, vid, voltype, vldb);
	switch (ret) {
		/* net error */
	default:
		printk(KERN_WARNING "kAFS:"
		       " failed to update volume '%s' (%x) in cell '%s': %d\n",
		       vl->vldb.name, vid, vl->cell->name, ret);
		_leave(" = %d", ret);
		return ret;

		/* pulled from local cache into memory */
	case 0:
		_leave(" = 0");
		return 0;

		/* uh oh... looks like the volume got deleted */
	case -ENOMEDIUM:
		printk(KERN_ERR "kAFS:"
		       " volume '%s' (%x) does not exist in cell '%s'\n",
		       vl->vldb.name, vid, vl->cell->name);

		/* TODO: make existing record unavailable */
		_leave(" = %d", ret);
		return ret;
	}
}

/*
 * apply the update to a VL record
 */
static void afs_vlocation_apply_update(struct afs_vlocation *vl,
				       struct afs_cache_vlocation *vldb)
{
	_debug("Done VL Lookup: %s %02x { %08x(%x) %08x(%x) %08x(%x) }",
	       vldb->name, vldb->vidmask,
	       ntohl(vldb->servers[0].s_addr), vldb->srvtmask[0],
	       ntohl(vldb->servers[1].s_addr), vldb->srvtmask[1],
	       ntohl(vldb->servers[2].s_addr), vldb->srvtmask[2]);

	_debug("Vids: %08x %08x %08x",
	       vldb->vid[0], vldb->vid[1], vldb->vid[2]);

	if (strcmp(vldb->name, vl->vldb.name) != 0)
		printk(KERN_NOTICE "kAFS:"
		       " name of volume '%s' changed to '%s' on server\n",
		       vl->vldb.name, vldb->name);

	vl->vldb = *vldb;

#ifdef CONFIG_AFS_FSCACHE
	fscache_update_cookie(vl->cache);
#endif
}

/*
 * fill in a volume location record, consulting the cache and the VL server
 * both
 */
static int afs_vlocation_fill_in_record(struct afs_vlocation *vl,
					struct key *key)
{
	struct afs_cache_vlocation vldb;
	int ret;

	_enter("");

	ASSERTCMP(vl->valid, ==, 0);

	memset(&vldb, 0, sizeof(vldb));

	/* see if we have an in-cache copy (will set vl->valid if there is) */
#ifdef CONFIG_AFS_FSCACHE
	vl->cache = fscache_acquire_cookie(vl->cell->cache,
					   &afs_vlocation_cache_index_def, vl,
					   true);
#endif

	if (vl->valid) {
		/* try to update a known volume in the cell VL databases by
		 * ID as the name may have changed */
		_debug("found in cache");
		ret = afs_vlocation_update_record(vl, key, &vldb);
	} else {
		/* try to look up an unknown volume in the cell VL databases by
		 * name */
		ret = afs_vlocation_access_vl_by_name(vl, key, &vldb);
		if (ret < 0) {
			printk("kAFS: failed to locate '%s' in cell '%s'\n",
			       vl->vldb.name, vl->cell->name);
			return ret;
		}
	}

	afs_vlocation_apply_update(vl, &vldb);
	_leave(" = 0");
	return 0;
}

/*
 * queue a vlocation record for updates
 */
static void afs_vlocation_queue_for_updates(struct afs_vlocation *vl)
{
	struct afs_vlocation *xvl;

	/* wait at least 10 minutes before updating... */
	vl->update_at = ktime_get_real_seconds() +
			afs_vlocation_update_timeout;

	spin_lock(&afs_vlocation_updates_lock);

	if (!list_empty(&afs_vlocation_updates)) {
		/* ... but wait at least 1 second more than the newest record
		 * already queued so that we don't spam the VL server suddenly
		 * with lots of requests
		 */
		xvl = list_entry(afs_vlocation_updates.prev,
				 struct afs_vlocation, update);
		if (vl->update_at <= xvl->update_at)
			vl->update_at = xvl->update_at + 1;
	} else {
		queue_delayed_work(afs_vlocation_update_worker,
				   &afs_vlocation_update,
				   afs_vlocation_update_timeout * HZ);
	}

	list_add_tail(&vl->update, &afs_vlocation_updates);
	spin_unlock(&afs_vlocation_updates_lock);
}

/*
 * lookup volume location
 * - iterate through the VL servers in a cell until one of them admits knowing
 *   about the volume in question
 * - fall back to the local cache if the volume can't be found on a VL server
 * - insert/update the local cache if we did get a VL response
 */
struct afs_vlocation *afs_vlocation_lookup(struct afs_cell *cell,
					   struct key *key,
					   const char *name,
					   size_t namesz)
{
	struct afs_vlocation *vl;
	int ret;

	_enter("{%s},{%x},%*.*s,%zu",
	       cell->name, key_serial(key),
	       (int) namesz, (int) namesz, name, namesz);

	if (namesz >= sizeof(vl->vldb.name)) {
		_leave(" = -ENAMETOOLONG");
		return ERR_PTR(-ENAMETOOLONG);
	}

	/* see if we have an in-memory copy first */
	down_write(&cell->vl_sem);
	spin_lock(&cell->vl_lock);
	list_for_each_entry(vl, &cell->vl_list, link) {
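		/* a stored name matches only if it is exactly namesz chars
		 * long (NUL right after) and the bytes compare equal */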
		if (vl->vldb.name[namesz] != '\0')
			continue;
		if (memcmp(vl->vldb.name, name, namesz) == 0)
			goto found_in_memory;
	}
	spin_unlock(&cell->vl_lock);

	/* not in the cell's in-memory lists - create a new record */
	vl = afs_vlocation_alloc(cell, name, namesz);
	if (!vl) {
		up_write(&cell->vl_sem);
		return ERR_PTR(-ENOMEM);
	}

	afs_get_cell(cell);

	list_add_tail(&vl->link, &cell->vl_list);
	vl->state = AFS_VL_CREATING;
	up_write(&cell->vl_sem);

fill_in_record:
	ret = afs_vlocation_fill_in_record(vl, key);
	if (ret < 0)
		goto error_abandon;
	spin_lock(&vl->lock);
	vl->state = AFS_VL_VALID;
	spin_unlock(&vl->lock);
	wake_up(&vl->waitq);

	/* update volume entry in local cache */
#ifdef CONFIG_AFS_FSCACHE
	fscache_update_cookie(vl->cache);
#endif

	/* schedule for regular updates */
	afs_vlocation_queue_for_updates(vl);
	goto success;

found_in_memory:
	/* found in memory */
	_debug("found in memory");
	atomic_inc(&vl->usage);
	spin_unlock(&cell->vl_lock);
	if (!list_empty(&vl->grave)) {
		spin_lock(&afs_vlocation_graveyard_lock);
		list_del_init(&vl->grave);
		spin_unlock(&afs_vlocation_graveyard_lock);
	}
	up_write(&cell->vl_sem);

	/* see if it was an abandoned record that we might try filling in */
	spin_lock(&vl->lock);
	while (vl->state != AFS_VL_VALID) {
		afs_vlocation_state_t state = vl->state;

		_debug("invalid [state %d]", state);

		if (state == AFS_VL_NEW || state == AFS_VL_NO_VOLUME) {
			vl->state = AFS_VL_CREATING;
			spin_unlock(&vl->lock);
			goto fill_in_record;
		}

		/* must now wait for creation or update by someone else to
		 * complete */
		_debug("wait");

		spin_unlock(&vl->lock);
		ret = wait_event_interruptible(vl->waitq,
					       vl->state == AFS_VL_NEW ||
					       vl->state == AFS_VL_VALID ||
					       vl->state == AFS_VL_NO_VOLUME);
		if (ret < 0)
			goto error;
		spin_lock(&vl->lock);
	}
	spin_unlock(&vl->lock);

success:
	_leave(" = %p", vl);
	return vl;

error_abandon:
	spin_lock(&vl->lock);
	vl->state = AFS_VL_NEW;
	spin_unlock(&vl->lock);
	wake_up(&vl->waitq);
error:
	ASSERT(vl != NULL);
	afs_put_vlocation(vl);
	_leave(" = %d", ret);
	return ERR_PTR(ret);
}

/*
 * finish using a volume location record
 */
void afs_put_vlocation(struct afs_vlocation *vl)
{
	if (!vl)
		return;

	_enter("%s", vl->vldb.name);

	ASSERTCMP(atomic_read(&vl->usage), >, 0);

	if (likely(!atomic_dec_and_test(&vl->usage))) {
		_leave("");
		return;
	}

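	/* the last reference has gone: park the record in the graveyard for
	 * the reaper to destroy after afs_vlocation_timeout seconds (unless
	 * it gets looked up again first), and stop scheduling updates */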
	spin_lock(&afs_vlocation_graveyard_lock);
	if (atomic_read(&vl->usage) == 0) {
		_debug("buried");
		list_move_tail(&vl->grave, &afs_vlocation_graveyard);
		vl->time_of_death = ktime_get_real_seconds();
		queue_delayed_work(afs_wq, &afs_vlocation_reap,
				   afs_vlocation_timeout * HZ);

		/* suspend updates on this record */
		if (!list_empty(&vl->update)) {
			spin_lock(&afs_vlocation_updates_lock);
			list_del_init(&vl->update);
			spin_unlock(&afs_vlocation_updates_lock);
		}
	}
	spin_unlock(&afs_vlocation_graveyard_lock);
	_leave(" [killed?]");
}

/*
 * destroy a dead volume location record
 */
static void afs_vlocation_destroy(struct afs_vlocation *vl)
{
	_enter("%p", vl);

#ifdef CONFIG_AFS_FSCACHE
	fscache_relinquish_cookie(vl->cache, 0);
#endif
	afs_put_cell(vl->cell);
	kfree(vl);
}

/*
 * reap dead volume location records
 */
static void afs_vlocation_reaper(struct work_struct *work)
{
	LIST_HEAD(corpses);
	struct afs_vlocation *vl;
	unsigned long delay, expiry;
	time64_t now;

	_enter("");

	now = ktime_get_real_seconds();
	spin_lock(&afs_vlocation_graveyard_lock);

	while (!list_empty(&afs_vlocation_graveyard)) {
		vl = list_entry(afs_vlocation_graveyard.next,
				struct afs_vlocation, grave);

		_debug("check %p", vl);

		/* the queue is ordered most dead first */
		expiry = vl->time_of_death + afs_vlocation_timeout;
		if (expiry > now) {
			delay = (expiry - now) * HZ;
			_debug("delay %lu", delay);
			mod_delayed_work(afs_wq, &afs_vlocation_reap, delay);
			break;
		}

		spin_lock(&vl->cell->vl_lock);
		if (atomic_read(&vl->usage) > 0) {
			_debug("no reap");
			list_del_init(&vl->grave);
		} else {
			_debug("reap");
			list_move_tail(&vl->grave, &corpses);
			list_del_init(&vl->link);
		}
		spin_unlock(&vl->cell->vl_lock);
	}

	spin_unlock(&afs_vlocation_graveyard_lock);

	/* now reap the corpses we've extracted */
	while (!list_empty(&corpses)) {
		vl = list_entry(corpses.next, struct afs_vlocation, grave);
		list_del(&vl->grave);
		afs_vlocation_destroy(vl);
	}

	_leave("");
}

/*
 * initialise the VL update process
 */
int __init afs_vlocation_update_init(void)
{
	afs_vlocation_update_worker = alloc_workqueue("kafs_vlupdated",
						      WQ_MEM_RECLAIM, 0);
	return afs_vlocation_update_worker ? 0 : -ENOMEM;
}

/*
 * discard all the volume location records for rmmod
 */
void afs_vlocation_purge(void)
{
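	/* zero the reap timeout so that the final reaper run kicked off below
	 * destroys every record in the graveyard straight away */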
	afs_vlocation_timeout = 0;

	spin_lock(&afs_vlocation_updates_lock);
	list_del_init(&afs_vlocation_updates);
	spin_unlock(&afs_vlocation_updates_lock);
	mod_delayed_work(afs_vlocation_update_worker, &afs_vlocation_update, 0);
	destroy_workqueue(afs_vlocation_update_worker);

	mod_delayed_work(afs_wq, &afs_vlocation_reap, 0);
}

/*
 * update a volume location
 */
static void afs_vlocation_updater(struct work_struct *work)
{
	struct afs_cache_vlocation vldb;
	struct afs_vlocation *vl, *xvl;
	time64_t now;
	long timeout;
	int ret;

	_enter("");

	now = ktime_get_real_seconds();

	/* find a record to update */
	spin_lock(&afs_vlocation_updates_lock);
	for (;;) {
		if (list_empty(&afs_vlocation_updates)) {
			spin_unlock(&afs_vlocation_updates_lock);
			_leave(" [nothing]");
			return;
		}

		vl = list_entry(afs_vlocation_updates.next,
				struct afs_vlocation, update);
		if (atomic_read(&vl->usage) > 0)
			break;
		list_del_init(&vl->update);
	}

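	/* if the oldest live record isn't due for an update yet, just push the
	 * work back until it is */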
	timeout = vl->update_at - now;
	if (timeout > 0) {
		queue_delayed_work(afs_vlocation_update_worker,
				   &afs_vlocation_update, timeout * HZ);
		spin_unlock(&afs_vlocation_updates_lock);
		_leave(" [nothing]");
		return;
	}

	list_del_init(&vl->update);
	atomic_inc(&vl->usage);
	spin_unlock(&afs_vlocation_updates_lock);

	/* we can now perform the update */
	_debug("update %s", vl->vldb.name);
	vl->state = AFS_VL_UPDATING;
	vl->upd_rej_cnt = 0;
	vl->upd_busy_cnt = 0;

	ret = afs_vlocation_update_record(vl, NULL, &vldb);
	spin_lock(&vl->lock);
	switch (ret) {
	case 0:
		afs_vlocation_apply_update(vl, &vldb);
		vl->state = AFS_VL_VALID;
		break;
	case -ENOMEDIUM:
		vl->state = AFS_VL_VOLUME_DELETED;
		break;
	default:
		vl->state = AFS_VL_UNCERTAIN;
		break;
	}
	spin_unlock(&vl->lock);
	wake_up(&vl->waitq);

	/* and then reschedule */
	_debug("reschedule");
	vl->update_at = ktime_get_real_seconds() +
			afs_vlocation_update_timeout;

	spin_lock(&afs_vlocation_updates_lock);

	if (!list_empty(&afs_vlocation_updates)) {
		/* next update in 10 minutes, but wait at least 1 second more
		 * than the newest record already queued so that we don't spam
		 * the VL server suddenly with lots of requests
		 */
		xvl = list_entry(afs_vlocation_updates.prev,
				 struct afs_vlocation, update);
		if (vl->update_at <= xvl->update_at)
			vl->update_at = xvl->update_at + 1;
		xvl = list_entry(afs_vlocation_updates.next,
				 struct afs_vlocation, update);
		timeout = xvl->update_at - now;
		if (timeout < 0)
			timeout = 0;
	} else {
		timeout = afs_vlocation_update_timeout;
	}

	ASSERT(list_empty(&vl->update));

	list_add_tail(&vl->update, &afs_vlocation_updates);

	_debug("timeout %ld", timeout);
	queue_delayed_work(afs_vlocation_update_worker,
			   &afs_vlocation_update, timeout * HZ);
	spin_unlock(&afs_vlocation_updates_lock);
	afs_put_vlocation(vl);
}