/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2012, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * cl_device and cl_device_type implementation for VVP layer.
 *
 *   Author: Nikita Danilov <nikita.danilov@sun.com>
 */

#define DEBUG_SUBSYSTEM S_LLITE

#include "../include/obd.h"
#include "../include/lustre_lite.h"
#include "llite_internal.h"
#include "vvp_internal.h"

/*****************************************************************************
 *
 * Vvp device and device type functions.
 *
 */

/*
 * The vvp_ prefix stands for "Vfs Vm Posix". It corresponds to the
 * historical "llite_" (variant "ll_") prefix.
 */

static struct kmem_cache *vvp_thread_kmem;
static struct kmem_cache *vvp_session_kmem;
static struct lu_kmem_descr vvp_caches[] = {
	{
		.ckd_cache = &vvp_thread_kmem,
		.ckd_name  = "vvp_thread_kmem",
		.ckd_size  = sizeof(struct vvp_thread_info),
	},
	{
		.ckd_cache = &vvp_session_kmem,
		.ckd_name  = "vvp_session_kmem",
		.ckd_size  = sizeof(struct vvp_session)
	},
	{
		.ckd_cache = NULL
	}
};
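
/*
 * The all-NULL sentinel entry above terminates vvp_caches[]; lu_kmem_init()
 * and lu_kmem_fini() walk the array until they hit a descriptor whose
 * ckd_cache is NULL.
 */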

static void *vvp_key_init(const struct lu_context *ctx,
			  struct lu_context_key *key)
{
	struct vvp_thread_info *info;

	info = kmem_cache_alloc(vvp_thread_kmem, GFP_NOFS | __GFP_ZERO);
	if (info == NULL)
		info = ERR_PTR(-ENOMEM);
	return info;
}

static void vvp_key_fini(const struct lu_context *ctx,
			 struct lu_context_key *key, void *data)
{
	struct vvp_thread_info *info = data;

	kmem_cache_free(vvp_thread_kmem, info);
}

static void *vvp_session_key_init(const struct lu_context *ctx,
				  struct lu_context_key *key)
{
	struct vvp_session *session;

	session = kmem_cache_alloc(vvp_session_kmem, GFP_NOFS | __GFP_ZERO);
	if (session == NULL)
		session = ERR_PTR(-ENOMEM);
	return session;
}

static void vvp_session_key_fini(const struct lu_context *ctx,
				 struct lu_context_key *key, void *data)
{
	struct vvp_session *session = data;

	kmem_cache_free(vvp_session_kmem, session);
}

struct lu_context_key vvp_key = {
	.lct_tags = LCT_CL_THREAD,
	.lct_init = vvp_key_init,
	.lct_fini = vvp_key_fini
};

struct lu_context_key vvp_session_key = {
	.lct_tags = LCT_SESSION,
	.lct_init = vvp_session_key_init,
	.lct_fini = vvp_session_key_fini
};

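/*
 * Note: vvp_key and vvp_session_key above give each lu_context a
 * per-thread vvp_thread_info and a per-session vvp_session slot,
 * allocated from the caches registered in vvp_caches[].
 */
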
/* type constructor/destructor: vvp_type_{init,fini,start,stop}(). */
LU_TYPE_INIT_FINI(vvp, &ccc_key, &ccc_session_key, &vvp_key, &vvp_session_key);

static const struct lu_device_operations vvp_lu_ops = {
	.ldo_object_alloc      = vvp_object_alloc
};

static const struct cl_device_operations vvp_cl_ops = {
	.cdo_req_init = ccc_req_init
};

static struct lu_device *vvp_device_alloc(const struct lu_env *env,
					  struct lu_device_type *t,
					  struct lustre_cfg *cfg)
{
	return ccc_device_alloc(env, t, cfg, &vvp_lu_ops, &vvp_cl_ops);
}

static const struct lu_device_type_operations vvp_device_type_ops = {
	.ldto_init = vvp_type_init,
	.ldto_fini = vvp_type_fini,

	.ldto_start = vvp_type_start,
	.ldto_stop  = vvp_type_stop,

	.ldto_device_alloc = vvp_device_alloc,
	.ldto_device_free  = ccc_device_free,
	.ldto_device_init  = ccc_device_init,
	.ldto_device_fini  = ccc_device_fini
};

struct lu_device_type vvp_device_type = {
	.ldt_tags     = LU_DEVICE_CL,
	.ldt_name     = LUSTRE_VVP_NAME,
	.ldt_ops      = &vvp_device_type_ops,
	.ldt_ctx_tags = LCT_CL_THREAD
};

/**
 * Register the vvp caches and the vvp device type with the lu/cl
 * framework.
 */
int vvp_global_init(void)
{
	int result;

	result = lu_kmem_init(vvp_caches);
	if (result == 0) {
		result = ccc_global_init(&vvp_device_type);
		if (result != 0)
			lu_kmem_fini(vvp_caches);
	}
	return result;
}

void vvp_global_fini(void)
{
	ccc_global_fini(&vvp_device_type);
	lu_kmem_fini(vvp_caches);
}

/*****************************************************************************
 *
 * mirror obd-devices into cl devices.
 *
 */

int cl_sb_init(struct super_block *sb)
{
	struct ll_sb_info *sbi;
	struct cl_device  *cl;
	struct lu_env     *env;
	int rc = 0;
	int refcheck;

	sbi = ll_s2sbi(sb);
	env = cl_env_get(&refcheck);
	if (!IS_ERR(env)) {
		cl = cl_type_setup(env, NULL, &vvp_device_type,
				   sbi->ll_dt_exp->exp_obd->obd_lu_dev);
		if (!IS_ERR(cl)) {
			cl2ccc_dev(cl)->cdv_sb = sb;
			sbi->ll_cl = cl;
			sbi->ll_site = cl2lu_dev(cl)->ld_site;
		}
		cl_env_put(env, &refcheck);
	} else
		rc = PTR_ERR(env);
	return rc;
}

int cl_sb_fini(struct super_block *sb)
{
	struct ll_sb_info *sbi;
	struct lu_env     *env;
	struct cl_device  *cld;
	int		   refcheck;
	int		   result;

	sbi = ll_s2sbi(sb);
	env = cl_env_get(&refcheck);
	if (!IS_ERR(env)) {
		cld = sbi->ll_cl;

		if (cld != NULL) {
			cl_stack_fini(env, cld);
			sbi->ll_cl = NULL;
			sbi->ll_site = NULL;
		}
		cl_env_put(env, &refcheck);
		result = 0;
	} else {
		CERROR("Cannot cleanup cl-stack due to memory shortage.\n");
		result = PTR_ERR(env);
	}
	/*
	 * If mount failed (sbi->ll_cl == NULL) and there are no other
	 * mounts, stop device types manually (this usually happens
	 * automatically when the last device is destroyed).
	 */
	lu_types_stop();
	return result;
}

/****************************************************************************
 *
 * debugfs/lustre/llite/$MNT/dump_page_cache
 *
 ****************************************************************************/

/*
 * To represent the contents of the page cache as a byte stream, the
 * following information is encoded in the 64-bit offset:
 *
 *       - file hash bucket in lu_site::ls_hash[]       28 bits
 *
 *       - how far the file is from the bucket head      4 bits
 *
 *       - page index                                   32 bits
 *
 * The first two fields identify a file in the cache uniquely.
 */

#define PGC_OBJ_SHIFT (32 + 4)
#define PGC_DEPTH_SHIFT (32)

struct vvp_pgcache_id {
	unsigned		 vpi_bucket;
	unsigned		 vpi_depth;
	uint32_t		 vpi_index;

	unsigned		 vpi_curdep;
	struct lu_object_header *vpi_obj;
};

static void vvp_pgcache_id_unpack(loff_t pos, struct vvp_pgcache_id *id)
{
	CLASSERT(sizeof(pos) == sizeof(__u64));

	id->vpi_index  = pos & 0xffffffff;
	id->vpi_depth  = (pos >> PGC_DEPTH_SHIFT) & 0xf;
	id->vpi_bucket = (unsigned long long)pos >> PGC_OBJ_SHIFT;
}

static loff_t vvp_pgcache_id_pack(struct vvp_pgcache_id *id)
{
	return
		((__u64)id->vpi_index) |
		((__u64)id->vpi_depth  << PGC_DEPTH_SHIFT) |
		((__u64)id->vpi_bucket << PGC_OBJ_SHIFT);
}

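/*
 * Worked example (illustrative values): for the file at depth 2 in hash
 * bucket 3, page index 5, vvp_pgcache_id_pack() yields
 *
 *	(3ULL << PGC_OBJ_SHIFT) | (2ULL << PGC_DEPTH_SHIFT) | 5
 *		== 0x0000003200000005
 *
 * and vvp_pgcache_id_unpack() recovers the same (bucket, depth, index)
 * triple from that offset.
 */
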
static int vvp_pgcache_obj_get(struct cfs_hash *hs, struct cfs_hash_bd *bd,
			       struct hlist_node *hnode, void *data)
{
	struct vvp_pgcache_id   *id  = data;
	struct lu_object_header *hdr = cfs_hash_object(hs, hnode);

	if (id->vpi_curdep-- > 0)
		return 0; /* continue */

	if (lu_object_is_dying(hdr))
		return 1;

	cfs_hash_get(hs, hnode);
	id->vpi_obj = hdr;
	return 1;
}

static struct cl_object *vvp_pgcache_obj(const struct lu_env *env,
					 struct lu_device *dev,
					 struct vvp_pgcache_id *id)
{
	LASSERT(lu_device_is_cl(dev));

	id->vpi_depth &= 0xf;
	id->vpi_obj    = NULL;
	id->vpi_curdep = id->vpi_depth;

	cfs_hash_hlist_for_each(dev->ld_site->ls_obj_hash, id->vpi_bucket,
				vvp_pgcache_obj_get, id);
	if (id->vpi_obj != NULL) {
		struct lu_object *lu_obj;

		lu_obj = lu_object_locate(id->vpi_obj, dev->ld_type);
		if (lu_obj != NULL) {
			lu_object_ref_add(lu_obj, "dump", current);
			return lu2cl(lu_obj);
		}
		lu_object_put(env, lu_object_top(id->vpi_obj));

	} else if (id->vpi_curdep > 0) {
		id->vpi_depth = 0xf;
	}
	return NULL;
}

static loff_t vvp_pgcache_find(const struct lu_env *env,
			       struct lu_device *dev, loff_t pos)
{
	struct cl_object      *clob;
	struct lu_site        *site;
	struct vvp_pgcache_id  id;

	site = dev->ld_site;
	vvp_pgcache_id_unpack(pos, &id);

	while (1) {
		if (id.vpi_bucket >= CFS_HASH_NHLIST(site->ls_obj_hash))
			return ~0ULL;
		clob = vvp_pgcache_obj(env, dev, &id);
		if (clob != NULL) {
			struct cl_object_header *hdr;
			int			 nr;
			struct cl_page		*pg;

			/* got an object. Find next page. */
			hdr = cl_object_header(clob);

			spin_lock(&hdr->coh_page_guard);
			nr = radix_tree_gang_lookup(&hdr->coh_tree,
						    (void **)&pg,
						    id.vpi_index, 1);
			if (nr > 0) {
				id.vpi_index = pg->cp_index;
				/* Can't support files over 16TB. */
				nr = !(pg->cp_index > 0xffffffff);
			}
			spin_unlock(&hdr->coh_page_guard);

			lu_object_ref_del(&clob->co_lu, "dump", current);
			cl_object_put(env, clob);
			if (nr > 0)
				return vvp_pgcache_id_pack(&id);
		}
		/* to the next object. */
		++id.vpi_depth;
		id.vpi_depth &= 0xf;
		if (id.vpi_depth == 0 && ++id.vpi_bucket == 0)
			return ~0ULL;
		id.vpi_index = 0;
	}
}

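/*
 * Note: vvp_pgcache_find() returns ~0ULL once the scan runs off the end
 * of the site's object hash; the seq_file ->start()/->next() callbacks
 * below treat that value as end-of-file.
 */
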
#define seq_page_flag(seq, page, flag, has_flags) do {			\
	if (test_bit(PG_##flag, &(page)->flags)) {			\
		seq_printf(seq, "%s"#flag, has_flags ? "|" : "");	\
		has_flags = 1;						\
	}								\
} while (0)

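/*
 * E.g., for a vmpage that is locked, uptodate and dirty, the
 * seq_page_flag() calls in vvp_pgcache_page_show() below print
 * "locked|uptodate|dirty": the first flag found is emitted without a
 * '|' separator, each later one with it.
 */
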
static void vvp_pgcache_page_show(const struct lu_env *env,
				  struct seq_file *seq, struct cl_page *page)
{
	struct ccc_page *cpg;
	struct page	*vmpage;
	int		 has_flags;

	cpg = cl2ccc_page(cl_page_at(page, &vvp_device_type));
	vmpage = cpg->cpg_page;
	seq_printf(seq, " %5i | %p %p %s %s %s %s | %p %lu/%u(%p) %lu %u [",
		   0 /* gen */,
		   cpg, page,
		   "none",
		   cpg->cpg_write_queued ? "wq" : "- ",
		   cpg->cpg_defer_uptodate ? "du" : "- ",
		   PageWriteback(vmpage) ? "wb" : "-",
		   vmpage, vmpage->mapping->host->i_ino,
		   vmpage->mapping->host->i_generation,
		   vmpage->mapping->host, vmpage->index,
		   page_count(vmpage));
	has_flags = 0;
	seq_page_flag(seq, vmpage, locked, has_flags);
	seq_page_flag(seq, vmpage, error, has_flags);
	seq_page_flag(seq, vmpage, referenced, has_flags);
	seq_page_flag(seq, vmpage, uptodate, has_flags);
	seq_page_flag(seq, vmpage, dirty, has_flags);
	seq_page_flag(seq, vmpage, writeback, has_flags);
	seq_printf(seq, "%s]\n", has_flags ? "" : "-");
}

static int vvp_pgcache_show(struct seq_file *f, void *v)
{
	loff_t			 pos;
	struct ll_sb_info	*sbi;
	struct cl_object	*clob;
	struct lu_env		*env;
	struct cl_page		*page;
	struct cl_object_header *hdr;
	struct vvp_pgcache_id	 id;
	int			 refcheck;
	int			 result;

	env = cl_env_get(&refcheck);
	if (!IS_ERR(env)) {
		pos = *(loff_t *)v;
		vvp_pgcache_id_unpack(pos, &id);
		sbi = f->private;
		clob = vvp_pgcache_obj(env, &sbi->ll_cl->cd_lu_dev, &id);
		if (clob != NULL) {
			hdr = cl_object_header(clob);

			spin_lock(&hdr->coh_page_guard);
			page = cl_page_lookup(hdr, id.vpi_index);
			spin_unlock(&hdr->coh_page_guard);

			seq_printf(f, "%8x@"DFID": ",
				   id.vpi_index, PFID(&hdr->coh_lu.loh_fid));
			if (page != NULL) {
				vvp_pgcache_page_show(env, f, page);
				cl_page_put(env, page);
			} else
				seq_puts(f, "missing\n");
			lu_object_ref_del(&clob->co_lu, "dump", current);
			cl_object_put(env, clob);
		} else
			seq_printf(f, "%llx missing\n", pos);
		cl_env_put(env, &refcheck);
		result = 0;
	} else
		result = PTR_ERR(env);
	return result;
}

static void *vvp_pgcache_start(struct seq_file *f, loff_t *pos)
{
	struct ll_sb_info *sbi;
	struct lu_env	  *env;
	int		   refcheck;

	sbi = f->private;

	env = cl_env_get(&refcheck);
	if (!IS_ERR(env)) {
		if (sbi->ll_site->ls_obj_hash->hs_cur_bits > 64 - PGC_OBJ_SHIFT)
			pos = ERR_PTR(-EFBIG);
		else {
			*pos = vvp_pgcache_find(env, &sbi->ll_cl->cd_lu_dev,
						*pos);
			if (*pos == ~0ULL)
				pos = NULL;
		}
		cl_env_put(env, &refcheck);
	}
	return pos;
}

static void *vvp_pgcache_next(struct seq_file *f, void *v, loff_t *pos)
{
	struct ll_sb_info *sbi;
	struct lu_env	  *env;
	int		   refcheck;

	env = cl_env_get(&refcheck);
	if (!IS_ERR(env)) {
		sbi = f->private;
		*pos = vvp_pgcache_find(env, &sbi->ll_cl->cd_lu_dev, *pos + 1);
		if (*pos == ~0ULL)
			pos = NULL;
		cl_env_put(env, &refcheck);
	}
	return pos;
}

static void vvp_pgcache_stop(struct seq_file *f, void *v)
{
	/* Nothing to do */
}

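/*
 * seq_file iterator glue: ->start() and ->next() hand back the loff_t
 * cookie itself (or NULL at end-of-file, or an ERR_PTR() on failure),
 * and ->show() unpacks that cookie into an object/page pair and prints
 * one line of the dump.
 */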
static const struct seq_operations vvp_pgcache_ops = {
	.start = vvp_pgcache_start,
	.next  = vvp_pgcache_next,
	.stop  = vvp_pgcache_stop,
	.show  = vvp_pgcache_show
};

static int vvp_dump_pgcache_seq_open(struct inode *inode, struct file *filp)
{
	struct seq_file *seq;
	int rc;

	rc = seq_open(filp, &vvp_pgcache_ops);
	if (rc)
		return rc;

	seq = filp->private_data;
	seq->private = inode->i_private;

	return 0;
}

const struct file_operations vvp_dump_pgcache_file_ops = {
	.owner   = THIS_MODULE,
	.open    = vvp_dump_pgcache_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release,
};