/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2012, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * Implementation of cl_page for VVP layer.
 *
 *   Author: Nikita Danilov <nikita.danilov@sun.com>
 *   Author: Jinshan Xiong <jinshan.xiong@whamcloud.com>
 */

#define DEBUG_SUBSYSTEM S_LLITE

#include "../include/obd.h"
#include "../include/lustre_lite.h"

#include "vvp_internal.h"

/*****************************************************************************
 *
 * Page operations.
 *
 */

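/*
 * Common fini helper for both cacheable and transient pages: releases
 * the reference on the VM page taken in vvp_page_init().
 */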
static void vvp_page_fini_common(struct ccc_page *cp)
{
	struct page *vmpage = cp->cpg_page;

	LASSERT(vmpage != NULL);
	page_cache_release(vmpage);
}

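/*
 * cpo_fini() method for cacheable pages; by this point the vmpage no
 * longer points back at the cl_page (see vvp_page_delete() below).
 */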
static void vvp_page_fini(const struct lu_env *env,
			  struct cl_page_slice *slice)
{
	struct ccc_page *cp = cl2ccc_page(slice);
	struct page *vmpage = cp->cpg_page;

	/*
	 * vmpage->private was already cleared when page was moved into
	 * VPG_FREEING state.
	 */
	LASSERT((struct cl_page *)vmpage->private != slice->cpl_page);
	vvp_page_fini_common(cp);
}

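/*
 * cpo_own() method: locks the VM page and waits for writeback to
 * finish; with \a nonblock set, returns -EAGAIN rather than sleeping
 * on either the page lock or the writeback bit.
 */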
static int vvp_page_own(const struct lu_env *env,
			const struct cl_page_slice *slice, struct cl_io *io,
			int nonblock)
{
	struct ccc_page *vpg = cl2ccc_page(slice);
	struct page *vmpage = vpg->cpg_page;

	LASSERT(vmpage != NULL);
	if (nonblock) {
		if (!trylock_page(vmpage))
			return -EAGAIN;

		if (unlikely(PageWriteback(vmpage))) {
			unlock_page(vmpage);
			return -EAGAIN;
		}

		return 0;
	}

	lock_page(vmpage);
	wait_on_page_writeback(vmpage);
	return 0;
}

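/*
 * cpo_assume() method: the VM page is expected to be locked already,
 * so only writeback completion is waited for.
 */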
static void vvp_page_assume(const struct lu_env *env,
			    const struct cl_page_slice *slice,
			    struct cl_io *unused)
{
	struct page *vmpage = cl2vm_page(slice);

	LASSERT(vmpage != NULL);
	LASSERT(PageLocked(vmpage));
	wait_on_page_writeback(vmpage);
}

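/* cpo_unassume() method: the page must still be locked; nothing to undo. */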
static void vvp_page_unassume(const struct lu_env *env,
			      const struct cl_page_slice *slice,
			      struct cl_io *unused)
{
	struct page *vmpage = cl2vm_page(slice);

	LASSERT(vmpage != NULL);
	LASSERT(PageLocked(vmpage));
}

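/* cpo_disown() method: releases the VM page lock taken by vvp_page_own(). */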
static void vvp_page_disown(const struct lu_env *env,
			    const struct cl_page_slice *slice, struct cl_io *io)
{
	struct page *vmpage = cl2vm_page(slice);

	LASSERT(vmpage != NULL);
	LASSERT(PageLocked(vmpage));

	unlock_page(vmpage);
}

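/*
 * cpo_discard() method: counts a discarded read-ahead page in the
 * statistics, then truncates the page from its mapping.
 */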
static void vvp_page_discard(const struct lu_env *env,
			     const struct cl_page_slice *slice,
			     struct cl_io *unused)
{
	struct page *vmpage = cl2vm_page(slice);
	struct address_space *mapping;
	struct ccc_page *cpg = cl2ccc_page(slice);

	LASSERT(vmpage != NULL);
	LASSERT(PageLocked(vmpage));

	mapping = vmpage->mapping;

	if (cpg->cpg_defer_uptodate && !cpg->cpg_ra_used)
		ll_ra_stats_inc(mapping, RA_STAT_DISCARDED);

	/*
	 * truncate_complete_page() calls
	 * a_ops->invalidatepage()->cl_page_delete()->vvp_page_delete().
	 */
	truncate_complete_page(mapping, vmpage);
}

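/*
 * cpo_unmap() method: tears down any user-space mappings of the file
 * range covered by this page.
 */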
static int vvp_page_unmap(const struct lu_env *env,
			  const struct cl_page_slice *slice,
			  struct cl_io *unused)
{
	struct page *vmpage = cl2vm_page(slice);
	__u64 offset;

	LASSERT(vmpage != NULL);
	LASSERT(PageLocked(vmpage));

	/* Cast before shifting so the offset cannot overflow an unsigned
	 * long on 32-bit systems. */
	offset = (__u64)vmpage->index << PAGE_CACHE_SHIFT;

	/*
	 * XXX is it safe to call this with the page lock held?
	 */
	ll_teardown_mmaps(vmpage->mapping, offset, offset + PAGE_CACHE_SIZE);
	return 0;
}

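/*
 * cpo_delete() method: breaks the vmpage -> cl_page link once the page
 * is being removed from the object.
 */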
static void vvp_page_delete(const struct lu_env *env,
			    const struct cl_page_slice *slice)
{
	struct page *vmpage = cl2vm_page(slice);
	struct inode *inode = vmpage->mapping->host;
	struct cl_object *obj = slice->cpl_obj;

	LASSERT(PageLocked(vmpage));
	LASSERT((struct cl_page *)vmpage->private == slice->cpl_page);
	LASSERT(inode == ccc_object_inode(obj));

	vvp_write_complete(cl2ccc(obj), cl2ccc_page(slice));
	ClearPagePrivate(vmpage);
	vmpage->private = 0;
	/*
	 * The reference from the vmpage to the cl_page is removed here, but
	 * the reference back is still held; it is dropped later, in
	 * vvp_page_fini().
	 */
}

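/*
 * cpo_export() method: mirrors the cl_page up-to-date state into the
 * vmpage's PG_uptodate flag.
 */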
static void vvp_page_export(const struct lu_env *env,
			    const struct cl_page_slice *slice,
			    int uptodate)
{
	struct page *vmpage = cl2vm_page(slice);

	LASSERT(vmpage != NULL);
	LASSERT(PageLocked(vmpage));
	if (uptodate)
		SetPageUptodate(vmpage);
	else
		ClearPageUptodate(vmpage);
}

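/*
 * cpo_is_vmlocked() method: returns -EBUSY when the VM page is locked
 * and -ENODATA when it is not.
 */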
static int vvp_page_is_vmlocked(const struct lu_env *env,
				const struct cl_page_slice *slice)
{
	return PageLocked(cl2vm_page(slice)) ? -EBUSY : -ENODATA;
}

static int vvp_page_prep_read(const struct lu_env *env,
			      const struct cl_page_slice *slice,
			      struct cl_io *unused)
{
	/* Skip the page already marked as PG_uptodate. */
	return PageUptodate(cl2vm_page(slice)) ? -EALREADY : 0;
}

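/*
 * Write-side cpo_prep() method: marks the page write-pending and, for
 * asynchronous writes, sets the VM writeback flag.
 */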
static int vvp_page_prep_write(const struct lu_env *env,
			       const struct cl_page_slice *slice,
			       struct cl_io *unused)
{
	struct page *vmpage = cl2vm_page(slice);
	struct cl_page *pg = slice->cpl_page;

	LASSERT(PageLocked(vmpage));
	LASSERT(!PageDirty(vmpage));

	/* The ll_writepage path is not a sync write, so we need to set the
	 * page writeback flag ourselves. */
	if (!pg->cp_sync_io)
		set_page_writeback(vmpage);

	vvp_write_pending(cl2ccc(slice->cpl_obj), cl2ccc_page(slice));

	return 0;
}

/**
 * Handles page transfer errors at the VM level.
 *
 * The inode is taken as a separate argument because the inode on which the
 * error is to be set can differ from the \a vmpage inode in the case of
 * direct-io.
 */
static void vvp_vmpage_error(struct inode *inode, struct page *vmpage, int ioret)
{
	struct ccc_object *obj = cl_inode2ccc(inode);

	if (ioret == 0) {
		ClearPageError(vmpage);
		obj->cob_discard_page_warned = 0;
	} else {
		SetPageError(vmpage);
		if (ioret == -ENOSPC)
			set_bit(AS_ENOSPC, &inode->i_mapping->flags);
		else
			set_bit(AS_EIO, &inode->i_mapping->flags);

		if ((ioret == -ESHUTDOWN || ioret == -EINTR) &&
		     obj->cob_discard_page_warned == 0) {
			obj->cob_discard_page_warned = 1;
			ll_dirty_page_discard_warn(vmpage, ioret);
		}
	}
}

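/*
 * Read-side cpo_completion() method: on success the page is exported
 * (marked up to date) unless read-ahead deferred that; for asynchronous
 * reads the page lock is also dropped here.
 */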
static void vvp_page_completion_read(const struct lu_env *env,
				     const struct cl_page_slice *slice,
				     int ioret)
{
	struct ccc_page *cp = cl2ccc_page(slice);
	struct page *vmpage = cp->cpg_page;
	struct cl_page *page = cl_page_top(slice->cpl_page);
	struct inode *inode = ccc_object_inode(page->cp_obj);

	LASSERT(PageLocked(vmpage));
	CL_PAGE_HEADER(D_PAGE, env, page, "completing READ with %d\n", ioret);

	if (cp->cpg_defer_uptodate)
		ll_ra_count_put(ll_i2sbi(inode), 1);

	if (ioret == 0) {
		if (!cp->cpg_defer_uptodate)
			cl_page_export(env, page, 1);
	} else {
		cp->cpg_defer_uptodate = 0;
	}

	if (page->cp_sync_io == NULL)
		unlock_page(vmpage);
}

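/*
 * Write-side cpo_completion() method: takes the page off the
 * write-pending list; for asynchronous writes it also records any IO
 * error and ends writeback.
 */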
static void vvp_page_completion_write(const struct lu_env *env,
				      const struct cl_page_slice *slice,
				      int ioret)
{
	struct ccc_page *cp = cl2ccc_page(slice);
	struct cl_page *pg = slice->cpl_page;
	struct page *vmpage = cp->cpg_page;

	CL_PAGE_HEADER(D_PAGE, env, pg, "completing WRITE with %d\n", ioret);

	/*
	 * TODO: if only a recoverable error (e.g. -ENOMEM) occurred, it
	 * would make sense to put the page back on the oap pending list,
	 * so that it never has to be taken off the SoM write pending list.
	 * To implement this, return a non-zero value from the
	 * ->cpo_completion() method; the underlying transfer would be
	 * notified and could then re-add the page to the pending transfer
	 * queue.  -jay
	 */

	cp->cpg_write_queued = 0;
	vvp_write_complete(cl2ccc(slice->cpl_obj), cp);

	if (pg->cp_sync_io != NULL) {
		LASSERT(PageLocked(vmpage));
		LASSERT(!PageWriteback(vmpage));
	} else {
		LASSERT(PageWriteback(vmpage));
		/*
		 * Only mark the page in error for asynchronous writes,
		 * because applications won't wait for the IO to finish.
		 */
		vvp_vmpage_error(ccc_object_inode(pg->cp_obj), vmpage, ioret);

		end_page_writeback(vmpage);
	}
}

/**
 * Implements cl_page_operations::cpo_make_ready() method.
 *
 * This is called to yank a page from the transfer cache and send it out as
 * part of a transfer. The function try-locks the page; if the try-lock
 * fails, the page is owned by some concurrent IO and should be skipped
 * (this is bad, but hopefully a rare situation, as it usually results in a
 * transfer being shorter than possible).
 *
 * \retval 0       success, the page can be placed into the transfer
 *
 * \retval -EAGAIN the page is either in use by concurrent IO or has been
 *                 truncated. Skip it.
 */
static int vvp_page_make_ready(const struct lu_env *env,
			       const struct cl_page_slice *slice)
{
	struct page *vmpage = cl2vm_page(slice);
	struct cl_page *pg = slice->cpl_page;
	int result = 0;

	lock_page(vmpage);
	if (clear_page_dirty_for_io(vmpage)) {
		LASSERT(pg->cp_state == CPS_CACHED);
		/* This actually clears the dirty bit in the radix tree. */
		set_page_writeback(vmpage);
		vvp_write_pending(cl2ccc(slice->cpl_obj), cl2ccc_page(slice));
		CL_PAGE_HEADER(D_PAGE, env, pg, "readied\n");
	} else if (pg->cp_state == CPS_PAGEOUT) {
		/* Is it possible that osc_flush_async_page() already
		 * made it ready? */
		result = -EALREADY;
	} else {
		CL_PAGE_DEBUG(D_ERROR, env, pg, "Unexpected page state %d.\n",
			      pg->cp_state);
		LBUG();
	}
	unlock_page(vmpage);
	return result;
}

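/* cpo_print() method: debugging dump of the slice and its vmpage state. */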
static int vvp_page_print(const struct lu_env *env,
			  const struct cl_page_slice *slice,
			  void *cookie, lu_printer_t printer)
{
	struct ccc_page *vp = cl2ccc_page(slice);
	struct page *vmpage = vp->cpg_page;

	(*printer)(env, cookie, LUSTRE_VVP_NAME "-page@%p(%d:%d:%d) vm@%p ",
		   vp, vp->cpg_defer_uptodate, vp->cpg_ra_used,
		   vp->cpg_write_queued, vmpage);
	if (vmpage != NULL) {
		(*printer)(env, cookie, "%lx %d:%d %lx %lu %slru",
			   (long)vmpage->flags, page_count(vmpage),
			   page_mapcount(vmpage), vmpage->private,
			   page_index(vmpage),
			   list_empty(&vmpage->lru) ? "not-" : "");
	}
	(*printer)(env, cookie, "\n");
	return 0;
}

static const struct cl_page_operations vvp_page_ops = {
	.cpo_own           = vvp_page_own,
	.cpo_assume        = vvp_page_assume,
	.cpo_unassume      = vvp_page_unassume,
	.cpo_disown        = vvp_page_disown,
	.cpo_vmpage        = ccc_page_vmpage,
	.cpo_discard       = vvp_page_discard,
	.cpo_delete        = vvp_page_delete,
	.cpo_unmap         = vvp_page_unmap,
	.cpo_export        = vvp_page_export,
	.cpo_is_vmlocked   = vvp_page_is_vmlocked,
	.cpo_fini          = vvp_page_fini,
	.cpo_print         = vvp_page_print,
	.cpo_is_under_lock = ccc_page_is_under_lock,
	.io = {
		[CRT_READ] = {
			.cpo_prep        = vvp_page_prep_read,
			.cpo_completion  = vvp_page_completion_read,
			.cpo_make_ready  = ccc_fail,
		},
		[CRT_WRITE] = {
			.cpo_prep        = vvp_page_prep_write,
			.cpo_completion  = vvp_page_completion_write,
			.cpo_make_ready  = vvp_page_make_ready,
		}
	}
};

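/*
 * Transient pages are manipulated with the inode mutex held; this
 * helper asserts that the mutex is indeed taken.
 */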
static void vvp_transient_page_verify(const struct cl_page *page)
{
	struct inode *inode = ccc_object_inode(page->cp_obj);

	LASSERT(!mutex_trylock(&inode->i_mutex));
}

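/*
 * Ownership of a transient page is implied by the inode mutex, so the
 * own/assume/unassume/disown methods below only verify that invariant.
 */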
static int vvp_transient_page_own(const struct lu_env *env,
				  const struct cl_page_slice *slice,
				  struct cl_io *unused, int nonblock)
{
	vvp_transient_page_verify(slice->cpl_page);
	return 0;
}

static void vvp_transient_page_assume(const struct lu_env *env,
				      const struct cl_page_slice *slice,
				      struct cl_io *unused)
{
	vvp_transient_page_verify(slice->cpl_page);
}

static void vvp_transient_page_unassume(const struct lu_env *env,
					const struct cl_page_slice *slice,
					struct cl_io *unused)
{
	vvp_transient_page_verify(slice->cpl_page);
}

static void vvp_transient_page_disown(const struct lu_env *env,
				      const struct cl_page_slice *slice,
				      struct cl_io *unused)
{
	vvp_transient_page_verify(slice->cpl_page);
}

static void vvp_transient_page_discard(const struct lu_env *env,
				       const struct cl_page_slice *slice,
				       struct cl_io *unused)
{
	struct cl_page *page = slice->cpl_page;

	vvp_transient_page_verify(page);

	/*
	 * For a transient page, simply remove it from the radix tree.
	 */
	cl_page_delete(env, page);
}

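/*
 * cpo_is_vmlocked() method for transient pages: such a page counts as
 * "locked" exactly when the inode mutex is held.
 */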
static int vvp_transient_page_is_vmlocked(const struct lu_env *env,
					  const struct cl_page_slice *slice)
{
	struct inode *inode = ccc_object_inode(slice->cpl_obj);
	int locked;

	locked = !mutex_trylock(&inode->i_mutex);
	if (!locked)
		mutex_unlock(&inode->i_mutex);
	return locked ? -EBUSY : -ENODATA;
}

static void
vvp_transient_page_completion(const struct lu_env *env,
			      const struct cl_page_slice *slice,
			      int ioret)
{
	vvp_transient_page_verify(slice->cpl_page);
}

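/*
 * cpo_fini() method for transient pages: drops the VM page reference
 * and decrements the per-object count of live transient pages.
 */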
static void vvp_transient_page_fini(const struct lu_env *env,
				    struct cl_page_slice *slice)
{
	struct ccc_page *cp = cl2ccc_page(slice);
	struct cl_page *clp = slice->cpl_page;
	struct ccc_object *clobj = cl2ccc(clp->cp_obj);

	vvp_page_fini_common(cp);
	LASSERT(!mutex_trylock(&clobj->cob_inode->i_mutex));
	clobj->cob_transient_pages--;
}

static const struct cl_page_operations vvp_transient_page_ops = {
	.cpo_own           = vvp_transient_page_own,
	.cpo_assume        = vvp_transient_page_assume,
	.cpo_unassume      = vvp_transient_page_unassume,
	.cpo_disown        = vvp_transient_page_disown,
	.cpo_discard       = vvp_transient_page_discard,
	.cpo_vmpage        = ccc_page_vmpage,
	.cpo_fini          = vvp_transient_page_fini,
	.cpo_is_vmlocked   = vvp_transient_page_is_vmlocked,
	.cpo_print         = vvp_page_print,
	.cpo_is_under_lock = ccc_page_is_under_lock,
	.io = {
		[CRT_READ] = {
			.cpo_prep        = ccc_transient_page_prep,
			.cpo_completion  = vvp_transient_page_completion,
		},
		[CRT_WRITE] = {
			.cpo_prep        = ccc_transient_page_prep,
			.cpo_completion  = vvp_transient_page_completion,
		}
	}
};

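/*
 * Initializes the VVP slice of a new cl_page, attaching either the
 * cacheable or the transient method table according to the page type.
 */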
int vvp_page_init(const struct lu_env *env, struct cl_object *obj,
		  struct cl_page *page, struct page *vmpage)
{
	struct ccc_page *cpg = cl_object_page_slice(obj, page);

	CLOBINVRNT(env, obj, ccc_object_invariant(obj));

	cpg->cpg_page = vmpage;
	page_cache_get(vmpage);

	INIT_LIST_HEAD(&cpg->cpg_pending_linkage);
	if (page->cp_type == CPT_CACHEABLE) {
		SetPagePrivate(vmpage);
		vmpage->private = (unsigned long)page;
		cl_page_slice_add(page, &cpg->cpg_cl, obj,
				  &vvp_page_ops);
	} else {
		struct ccc_object *clobj = cl2ccc(obj);

		LASSERT(!mutex_trylock(&clobj->cob_inode->i_mutex));
		cl_page_slice_add(page, &cpg->cpg_cl, obj,
				  &vvp_transient_page_ops);
		clobj->cob_transient_pages++;
	}
	return 0;
}