/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2012, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/llite/dir.c
 *
 * Directory code for the Lustre client.
 */

#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/buffer_head.h>   /* for wait_on_buffer */
#include <linux/pagevec.h>
#include <linux/prefetch.h>

#define DEBUG_SUBSYSTEM S_LLITE

#include "../include/obd_support.h"
#include "../include/obd_class.h"
#include "../include/lustre_lib.h"
#include "../include/lustre/lustre_idl.h"
#include "../include/lustre_lite.h"
#include "../include/lustre_dlm.h"
#include "../include/lustre_fid.h"
#include "llite_internal.h"

/*
 * (new) readdir implementation overview.
 *
 * The original Lustre readdir implementation cached an exact copy of the raw
 * directory pages on the client. These pages were indexed in the client page
 * cache by the logical offset in the directory file. This design, while very
 * simple and intuitive, had some inherent problems:
 *
 *     . it implies that the byte offset of a directory entry serves as a
 *     telldir(3)/seekdir(3) cookie, but that offset is not stable: in
 *     ext3/htree directory entries may move due to splits, and, more
 *     importantly,
 *
 *     . it is incompatible with the design of split directories for cmd3,
 *     which assumes that names are distributed across nodes based on their
 *     hash, and so readdir should be done in hash order.
 *
 * The new readdir implementation does readdir in hash order, and uses the
 * hash of a file name as a telldir/seekdir cookie. This leads to a number of
 * complications:
 *
 *     . the hash is not unique, so it cannot be used to index cached
 *     directory pages on the client (note that it requires a whole pageful
 *     of hash collided entries to cause two pages to have identical hashes);
 *
 *     . the hash is not unique, so it cannot, strictly speaking, be used as
 *     an entry cookie. ext3/htree has the same problem and the Lustre
 *     implementation mimics its solution: seekdir(hash) positions the
 *     directory at the first entry with the given hash.
 *
 * Client side.
 *
 * 0. caching
 *
 * The client caches directory pages using the hash of the first entry as an
 * index. As noted above, the hash is not unique, so this solution doesn't
 * work as is: special processing is needed for "page hash chains" (i.e.,
 * sequences of pages filled with entries all having the same hash value).
 *
 * First, such chains have to be detected. To this end, the server returns to
 * the client the hash of the first entry on the page next to the one
 * returned. When the client detects that this hash is the same as the hash
 * of the first entry on the returned page, a page hash collision has to be
 * handled. Pages in the hash chain, except the first one, are termed
 * "overflow pages".
 *
 * The solution to the index uniqueness problem is to not cache overflow
 * pages. Instead, when a page hash collision is detected, all overflow pages
 * from the emerging chain are immediately requested from the server and
 * placed in a special data structure (struct ll_dir_chain). This data
 * structure is used by ll_readdir() to process entries from overflow pages.
 * When the readdir invocation finishes, overflow pages are discarded. If the
 * page hash collision chain wasn't completely processed, the next call to
 * readdir will again detect the page hash collision, again read the overflow
 * pages in, process the next portion of entries and again discard the pages.
 * This is not as wasteful as it looks, because, given a reasonable hash,
 * page hash collisions are extremely rare.
 *
 * 1. directory positioning
 *
 * When seekdir(hash) is called, original
 *
 *
 *
 *
 *
 *
 *
 *
 * Server.
 *
 * identification of and access to overflow pages
 *
 * page format
 *
 * Pages in the MDS_READPAGE RPC are packed in LU_PAGE_SIZE units, and each
 * page contains an lu_dirpage header which describes the start/end hash, and
 * whether this page is empty (contains no dir entry) or whether its hash
 * collides with the next page. After the client receives the reply, several
 * pages will be integrated into a dir page of PAGE_CACHE_SIZE (if
 * PAGE_CACHE_SIZE is greater than LU_PAGE_SIZE), and the lu_dirpage for this
 * integrated page will be adjusted. See lmv_adjust_dirpages().
 *
 */

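/*
 * Worked example (illustrative values, not taken from the original text):
 * suppose every entry on a page P0 hashes to 0x30 and the chain spills
 * over onto the next page.  P0 then comes back from the server with
 * ldp_hash_start == ldp_hash_end == 0x30 and LDF_COLLIDE set in
 * ldp_flags; the client drops such a page from the cache on release
 * (see ll_release_page()) and re-fetches the overflow pages on the next
 * readdir call instead of indexing them by hash.
 */
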
/* returns the page unlocked, but with a reference */
static int ll_dir_filler(void *_hash, struct page *page0)
{
	struct inode *inode = page0->mapping->host;
	int hash64 = ll_i2sbi(inode)->ll_flags & LL_SBI_64BIT_HASH;
	struct obd_export *exp = ll_i2sbi(inode)->ll_md_exp;
	struct ptlrpc_request *request;
	struct mdt_body *body;
	struct md_op_data *op_data;
	__u64 hash = *((__u64 *)_hash);
	struct page **page_pool;
	struct page *page;
	struct lu_dirpage *dp;
	int max_pages = ll_i2sbi(inode)->ll_md_brw_size >> PAGE_CACHE_SHIFT;
	int nrdpgs = 0; /* number of pages actually read */
	int npages;
	int i;
	int rc;

	CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p) hash %llu\n",
	       inode->i_ino, inode->i_generation, inode, hash);

	LASSERT(max_pages > 0 && max_pages <= MD_MAX_BRW_PAGES);

	page_pool = kcalloc(max_pages, sizeof(page), GFP_NOFS);
	if (page_pool) {
		page_pool[0] = page0;
	} else {
		page_pool = &page0;
		max_pages = 1;
	}
	for (npages = 1; npages < max_pages; npages++) {
		page = page_cache_alloc_cold(inode->i_mapping);
		if (!page)
			break;
		page_pool[npages] = page;
	}

	op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0,
				     LUSTRE_OPC_ANY, NULL);
	op_data->op_npages = npages;
	op_data->op_offset = hash;
	rc = md_readpage(exp, op_data, page_pool, &request);
	ll_finish_md_op_data(op_data);
	if (rc < 0) {
		/* page0 is special: it was added into the page cache early */
		delete_from_page_cache(page0);
	} else if (rc == 0) {
		body = req_capsule_server_get(&request->rq_pill, &RMF_MDT_BODY);
		/* Checked by mdc_readpage() */
		LASSERT(body != NULL);

		if (body->valid & OBD_MD_FLSIZE)
			cl_isize_write(inode, body->size);

		nrdpgs = (request->rq_bulk->bd_nob_transferred+PAGE_CACHE_SIZE-1)
			 >> PAGE_CACHE_SHIFT;
		SetPageUptodate(page0);
	}
	unlock_page(page0);
	ptlrpc_req_finished(request);

	CDEBUG(D_VFSTRACE, "read %d/%d pages\n", nrdpgs, npages);

	for (i = 1; i < npages; i++) {
		unsigned long offset;
		int ret;

		page = page_pool[i];

		if (rc < 0 || i >= nrdpgs) {
			page_cache_release(page);
			continue;
		}

		SetPageUptodate(page);

		dp = kmap(page);
		hash = le64_to_cpu(dp->ldp_hash_start);
		kunmap(page);

		offset = hash_x_index(hash, hash64);

		prefetchw(&page->flags);
		ret = add_to_page_cache_lru(page, inode->i_mapping, offset,
					    GFP_NOFS);
		if (ret == 0) {
			unlock_page(page);
		} else {
			CDEBUG(D_VFSTRACE, "page %lu add to page cache failed: %d\n",
			       offset, ret);
		}
		page_cache_release(page);
	}

	if (page_pool != &page0)
		kfree(page_pool);
	return rc;
}

static void ll_check_page(struct inode *dir, struct page *page)
{
	/* XXX: check page format later */
	SetPageChecked(page);
}

void ll_release_page(struct page *page, int remove)
{
	kunmap(page);
	if (remove) {
		lock_page(page);
		if (likely(page->mapping != NULL))
			truncate_complete_page(page->mapping, page);
		unlock_page(page);
	}
	page_cache_release(page);
}

/*
 * Find, kmap and return the page that contains the given hash.
 */
static struct page *ll_dir_page_locate(struct inode *dir, __u64 *hash,
				       __u64 *start, __u64 *end)
{
	int hash64 = ll_i2sbi(dir)->ll_flags & LL_SBI_64BIT_HASH;
	struct address_space *mapping = dir->i_mapping;
	/*
	 * The complement of the hash is used as an index so that
	 * radix_tree_gang_lookup() can be used to find a page with a
	 * starting hash _smaller_ than the one we are looking for.
	 */
	unsigned long offset = hash_x_index(*hash, hash64);
	struct page *page;
	int found;

	spin_lock_irq(&mapping->tree_lock);
	found = radix_tree_gang_lookup(&mapping->page_tree,
				       (void **)&page, offset, 1);
	if (found > 0 && !radix_tree_exceptional_entry(page)) {
		struct lu_dirpage *dp;

		page_cache_get(page);
		spin_unlock_irq(&mapping->tree_lock);
		/*
		 * In contrast to find_lock_page() we are sure that the
		 * directory page cannot be truncated (while the DLM lock is
		 * held) and, hence, can avoid a restart.
		 *
		 * In fact, the page cannot be locked here at all, because
		 * ll_dir_filler() does synchronous io.
		 */
		wait_on_page_locked(page);
		if (PageUptodate(page)) {
			dp = kmap(page);
			if (BITS_PER_LONG == 32 && hash64) {
				*start = le64_to_cpu(dp->ldp_hash_start) >> 32;
				*end   = le64_to_cpu(dp->ldp_hash_end) >> 32;
				*hash  = *hash >> 32;
			} else {
				*start = le64_to_cpu(dp->ldp_hash_start);
				*end   = le64_to_cpu(dp->ldp_hash_end);
			}
			LASSERTF(*start <= *hash, "start = %#llx,end = %#llx,hash = %#llx\n",
				 *start, *end, *hash);
			CDEBUG(D_VFSTRACE, "page %lu [%llu %llu], hash %llu\n",
			       offset, *start, *end, *hash);
			if (*hash > *end) {
				ll_release_page(page, 0);
				page = NULL;
			} else if (*end != *start && *hash == *end) {
				/*
				 * upon hash collision, remove this page;
				 * otherwise put the page reference, and
				 * ll_get_dir_page() will issue an RPC to
				 * fetch the page we want.
				 */
				ll_release_page(page,
				    le32_to_cpu(dp->ldp_flags) & LDF_COLLIDE);
				page = NULL;
			}
		} else {
			page_cache_release(page);
			page = ERR_PTR(-EIO);
		}

	} else {
		spin_unlock_irq(&mapping->tree_lock);
		page = NULL;
	}
	return page;
}

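/*
 * Find the directory page covering @hash and return it kmapped, taking a
 * PR UPDATE DLM lock on the directory and fetching the page from the MDS
 * when it is not already cached.
 */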
struct page *ll_get_dir_page(struct inode *dir, __u64 hash,
			     struct ll_dir_chain *chain)
{
	ldlm_policy_data_t policy = {.l_inodebits = {MDS_INODELOCK_UPDATE} };
	struct address_space *mapping = dir->i_mapping;
	struct lustre_handle lockh;
	struct lu_dirpage *dp;
	struct page *page;
	ldlm_mode_t mode;
	int rc;
	__u64 start = 0;
	__u64 end = 0;
	__u64 lhash = hash;
	struct ll_inode_info *lli = ll_i2info(dir);
	int hash64 = ll_i2sbi(dir)->ll_flags & LL_SBI_64BIT_HASH;

	mode = LCK_PR;
	rc = md_lock_match(ll_i2sbi(dir)->ll_md_exp, LDLM_FL_BLOCK_GRANTED,
			   ll_inode2fid(dir), LDLM_IBITS, &policy, mode, &lockh);
	if (!rc) {
		struct ldlm_enqueue_info einfo = {
			.ei_type = LDLM_IBITS,
			.ei_mode = mode,
			.ei_cb_bl = ll_md_blocking_ast,
			.ei_cb_cp = ldlm_completion_ast,
		};
		struct lookup_intent it = { .it_op = IT_READDIR };
		struct ptlrpc_request *request;
		struct md_op_data *op_data;

		op_data = ll_prep_md_op_data(NULL, dir, dir, NULL, 0, 0,
					     LUSTRE_OPC_ANY, NULL);
		if (IS_ERR(op_data))
			return (void *)op_data;

		rc = md_enqueue(ll_i2sbi(dir)->ll_md_exp, &einfo, &it,
				op_data, &lockh, NULL, 0, NULL, 0);

		ll_finish_md_op_data(op_data);

		request = (struct ptlrpc_request *)it.d.lustre.it_data;
		if (request)
			ptlrpc_req_finished(request);
		if (rc < 0) {
			CERROR("lock enqueue: "DFID" at %llu: rc %d\n",
				PFID(ll_inode2fid(dir)), hash, rc);
			return ERR_PTR(rc);
		}

		CDEBUG(D_INODE, "setting lr_lvb_inode to inode %p (%lu/%u)\n",
		       dir, dir->i_ino, dir->i_generation);
		md_set_lock_data(ll_i2sbi(dir)->ll_md_exp,
				 &it.d.lustre.it_lock_handle, dir, NULL);
	} else {
		/* for a cross-ref object, the l_ast_data of the lock may not
		 * be set, so we reset it here */
		md_set_lock_data(ll_i2sbi(dir)->ll_md_exp, &lockh.cookie,
				 dir, NULL);
	}
	ldlm_lock_dump_handle(D_OTHER, &lockh);

	mutex_lock(&lli->lli_readdir_mutex);
	page = ll_dir_page_locate(dir, &lhash, &start, &end);
	if (IS_ERR(page)) {
		CERROR("dir page locate: "DFID" at %llu: rc %ld\n",
		       PFID(ll_inode2fid(dir)), lhash, PTR_ERR(page));
		goto out_unlock;
	} else if (page != NULL) {
		/*
		 * XXX nikita: not entirely correct handling of a corner case:
		 * suppose the hash chain of entries with hash value HASH
		 * crosses the border between pages P0 and P1. First both P0
		 * and P1 are cached, and seekdir() is called for some entry
		 * from the P0 part of the chain. Later P0 goes out of the
		 * cache. telldir(HASH) happens and finds P1, as it starts
		 * with a matching hash value. The remaining entries from the
		 * P0 part of the chain are skipped. (Is that really a bug?)
		 *
		 * Possible solutions: 0. don't cache P1 in such a case,
		 * handle it as an "overflow" page. 1. invalidate all pages
		 * at once. 2. use HASH|1 as an index for P1.
		 */
		goto hash_collision;
	}

	page = read_cache_page(mapping, hash_x_index(hash, hash64),
			       ll_dir_filler, &lhash);
	if (IS_ERR(page)) {
		CERROR("read cache page: "DFID" at %llu: rc %ld\n",
		       PFID(ll_inode2fid(dir)), hash, PTR_ERR(page));
		goto out_unlock;
	}

	wait_on_page_locked(page);
	(void)kmap(page);
	if (!PageUptodate(page)) {
		CERROR("page not updated: "DFID" at %llu: rc %d\n",
		       PFID(ll_inode2fid(dir)), hash, -5);
		goto fail;
	}
	if (!PageChecked(page))
		ll_check_page(dir, page);
	if (PageError(page)) {
		CERROR("page error: "DFID" at %llu: rc %d\n",
		       PFID(ll_inode2fid(dir)), hash, -5);
		goto fail;
	}
hash_collision:
	dp = page_address(page);
	if (BITS_PER_LONG == 32 && hash64) {
		start = le64_to_cpu(dp->ldp_hash_start) >> 32;
		end   = le64_to_cpu(dp->ldp_hash_end) >> 32;
		lhash = hash >> 32;
	} else {
		start = le64_to_cpu(dp->ldp_hash_start);
		end   = le64_to_cpu(dp->ldp_hash_end);
		lhash = hash;
	}
	if (end == start) {
		LASSERT(start == lhash);
		CWARN("Page-wide hash collision: %llu\n", end);
		if (BITS_PER_LONG == 32 && hash64)
			CWARN("Real page-wide hash collision at [%llu %llu] with hash %llu\n",
			      le64_to_cpu(dp->ldp_hash_start),
			      le64_to_cpu(dp->ldp_hash_end), hash);
		/*
		 * Fetch the whole overflow chain...
		 *
		 * XXX not yet.
		 */
		goto fail;
	}
out_unlock:
	mutex_unlock(&lli->lli_readdir_mutex);
	ldlm_lock_decref(&lockh, mode);
	return page;

fail:
	ll_release_page(page, 1);
	page = ERR_PTR(-EIO);
	goto out_unlock;
}

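/*
 * Walk the directory starting at ctx->pos (a name hash) and feed entries
 * to the VFS through dir_emit() until the callback asks us to stop, an
 * error occurs, or the end of the directory is reached.
 */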
int ll_dir_read(struct inode *inode, struct dir_context *ctx)
{
	struct ll_inode_info	*info	= ll_i2info(inode);
	struct ll_sb_info	*sbi	= ll_i2sbi(inode);
	__u64			 pos	= ctx->pos;
	int			 api32	= ll_need_32bit_api(sbi);
	int			 hash64	= sbi->ll_flags & LL_SBI_64BIT_HASH;
	struct page		*page;
	struct ll_dir_chain	 chain;
	int			 done = 0;
	int			 rc = 0;

	ll_dir_chain_init(&chain);

	page = ll_get_dir_page(inode, pos, &chain);

	while (rc == 0 && !done) {
		struct lu_dirpage *dp;
		struct lu_dirent  *ent;

		if (!IS_ERR(page)) {
			/*
			 * If the page is empty (end of directory is reached),
			 * use this value.
			 */
			__u64 hash = MDS_DIR_END_OFF;
			__u64 next;

			dp = page_address(page);
			for (ent = lu_dirent_start(dp); ent != NULL && !done;
			     ent = lu_dirent_next(ent)) {
				__u16	       type;
				int	       namelen;
				struct lu_fid  fid;
				__u64	       lhash;
				__u64	       ino;

				/*
				 * XXX: implement correct swabbing here.
				 */

				hash = le64_to_cpu(ent->lde_hash);
				if (hash < pos)
					/*
					 * Skip until we find the target hash
					 * value.
					 */
					continue;

				namelen = le16_to_cpu(ent->lde_namelen);
				if (namelen == 0)
					/*
					 * Skip dummy record.
					 */
					continue;

				if (api32 && hash64)
					lhash = hash >> 32;
				else
					lhash = hash;
				fid_le_to_cpu(&fid, &ent->lde_fid);
				ino = cl_fid_build_ino(&fid, api32);
				type = ll_dirent_type_get(ent);
				ctx->pos = lhash;
				/* ll_nfs_get_name_filldir() will try to
				 * access the 'ent' through its 'lde_name',
				 * so the parameter 'name' for 'ctx->actor()'
				 * must be part of the 'ent'.
				 */
				done = !dir_emit(ctx, ent->lde_name,
						 namelen, ino, type);
			}
			next = le64_to_cpu(dp->ldp_hash_end);
			if (!done) {
				pos = next;
				if (pos == MDS_DIR_END_OFF) {
					/*
					 * End of directory reached.
					 */
					done = 1;
					ll_release_page(page, 0);
				} else if (1 /* chain is exhausted */) {
					/*
					 * Normal case: continue to the next
					 * page.
					 */
					ll_release_page(page,
					    le32_to_cpu(dp->ldp_flags) &
							LDF_COLLIDE);
					next = pos;
					page = ll_get_dir_page(inode, pos,
							       &chain);
				} else {
					/*
					 * go into the overflow page.
					 */
					LASSERT(le32_to_cpu(dp->ldp_flags) &
						LDF_COLLIDE);
					ll_release_page(page, 1);
				}
			} else {
				pos = hash;
				ll_release_page(page, 0);
			}
		} else {
			rc = PTR_ERR(page);
			CERROR("error reading dir "DFID" at %lu: rc %d\n",
			       PFID(&info->lli_fid), (unsigned long)pos, rc);
		}
	}

	ctx->pos = pos;
	ll_dir_chain_fini(&chain);
	return rc;
}

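/*
 * The ->iterate() handler: resume from the hash remembered in the private
 * file data, delegate to ll_dir_read() and map the end-of-directory
 * sentinel back to the offset visible to 32- or 64-bit userspace.
 */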
static int ll_readdir(struct file *filp, struct dir_context *ctx)
{
	struct inode		*inode	= file_inode(filp);
	struct ll_file_data	*lfd	= LUSTRE_FPRIVATE(filp);
	struct ll_sb_info	*sbi	= ll_i2sbi(inode);
	int			hash64	= sbi->ll_flags & LL_SBI_64BIT_HASH;
	int			api32	= ll_need_32bit_api(sbi);
	int			rc;

	CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p) pos %lu/%llu 32bit_api %d\n",
	       inode->i_ino, inode->i_generation,
	       inode, (unsigned long)lfd->lfd_pos, i_size_read(inode), api32);

	if (lfd->lfd_pos == MDS_DIR_END_OFF) {
		/*
		 * end-of-file.
		 */
		rc = 0;
		goto out;
	}

	ctx->pos = lfd->lfd_pos;
	rc = ll_dir_read(inode, ctx);
	lfd->lfd_pos = ctx->pos;
	if (ctx->pos == MDS_DIR_END_OFF) {
		if (api32)
			ctx->pos = LL_DIR_END_OFF_32BIT;
		else
			ctx->pos = LL_DIR_END_OFF;
	} else {
		if (api32 && hash64)
			ctx->pos >>= 32;
	}
	filp->f_version = inode->i_version;

out:
	if (!rc)
		ll_stats_ops_tally(sbi, LPROC_LL_READDIR, 1);

	return rc;
}

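/*
 * Ship a "key=value" configuration string to the MGS through
 * obd_set_info_async(KEY_SET_INFO).
 */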
static int ll_send_mgc_param(struct obd_export *mgc, char *string)
{
	struct mgs_send_param *msp;
	int rc = 0;

	msp = kzalloc(sizeof(*msp), GFP_NOFS);
	if (!msp)
		return -ENOMEM;

	strlcpy(msp->mgs_param, string, sizeof(msp->mgs_param));
	rc = obd_set_info_async(NULL, mgc, sizeof(KEY_SET_INFO), KEY_SET_INFO,
				sizeof(struct mgs_send_param), msp, NULL);
	if (rc)
		CERROR("Failed to set parameter: %d\n", rc);
	kfree(msp);

	return rc;
}

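/*
 * Create the striped directory @filename under @dir; the requested layout
 * in @lump is carried to the MDT by the create RPC (note the CLI_SET_MEA
 * flag below).
 */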
static int ll_dir_setdirstripe(struct inode *dir, struct lmv_user_md *lump,
			       char *filename)
{
	struct ptlrpc_request *request = NULL;
	struct md_op_data *op_data;
	struct ll_sb_info *sbi = ll_i2sbi(dir);
	int mode;
	int err;

	mode = (~current_umask() & 0755) | S_IFDIR;
	op_data = ll_prep_md_op_data(NULL, dir, NULL, filename,
				     strlen(filename), mode, LUSTRE_OPC_MKDIR,
				     lump);
	if (IS_ERR(op_data)) {
		err = PTR_ERR(op_data);
		goto err_exit;
	}

	op_data->op_cli_flags |= CLI_SET_MEA;
	err = md_create(sbi->ll_md_exp, op_data, lump, sizeof(*lump), mode,
			from_kuid(&init_user_ns, current_fsuid()),
			from_kgid(&init_user_ns, current_fsgid()),
			cfs_curproc_cap_pack(), 0, &request);
	ll_finish_md_op_data(op_data);
	if (err)
		goto err_exit;
err_exit:
	ptlrpc_req_finished(request);
	return err;
}

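/*
 * Set the striping (lov_user_md) on a directory; when @set_default is
 * true (the root inode), the values are also pushed to the MGS so they
 * become the filesystem-wide defaults.
 */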
int ll_dir_setstripe(struct inode *inode, struct lov_user_md *lump,
		     int set_default)
{
	struct ll_sb_info *sbi = ll_i2sbi(inode);
	struct md_op_data *op_data;
	struct ptlrpc_request *req = NULL;
	int rc = 0;
	struct lustre_sb_info *lsi = s2lsi(inode->i_sb);
	struct obd_device *mgc = lsi->lsi_mgc;
	int lum_size;

	if (lump != NULL) {
		/*
		 * This is coming from userspace, so it should be in
		 * local endian.  But the MDS would like it in little
		 * endian, so we swab it before we send it.
		 */
		switch (lump->lmm_magic) {
		case LOV_USER_MAGIC_V1: {
			if (lump->lmm_magic != cpu_to_le32(LOV_USER_MAGIC_V1))
				lustre_swab_lov_user_md_v1(lump);
			lum_size = sizeof(struct lov_user_md_v1);
			break;
		}
		case LOV_USER_MAGIC_V3: {
			if (lump->lmm_magic != cpu_to_le32(LOV_USER_MAGIC_V3))
				lustre_swab_lov_user_md_v3(
					(struct lov_user_md_v3 *)lump);
			lum_size = sizeof(struct lov_user_md_v3);
			break;
		}
		default: {
			CDEBUG(D_IOCTL, "bad userland LOV MAGIC: %#08x != %#08x nor %#08x\n",
			       lump->lmm_magic, LOV_USER_MAGIC_V1,
			       LOV_USER_MAGIC_V3);
			return -EINVAL;
		}
		}
	} else {
		lum_size = sizeof(struct lov_user_md_v1);
	}

	op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0,
				     LUSTRE_OPC_ANY, NULL);
	if (IS_ERR(op_data))
		return PTR_ERR(op_data);

	if (lump != NULL && lump->lmm_magic == cpu_to_le32(LMV_USER_MAGIC))
		op_data->op_cli_flags |= CLI_SET_MEA;

	/* swabbing is done in lov_setstripe() on server side */
	rc = md_setattr(sbi->ll_md_exp, op_data, lump, lum_size,
			NULL, 0, &req, NULL);
	ll_finish_md_op_data(op_data);
	ptlrpc_req_finished(req);
	if (rc) {
		if (rc != -EPERM && rc != -EACCES)
			CERROR("mdc_setattr fails: rc = %d\n", rc);
	}

	/* In the following we use the fact that LOV_USER_MAGIC_V1 and
	 * LOV_USER_MAGIC_V3 have the same initial fields so we do not
	 * need to distinguish between the two versions.
	 */
	if (set_default && mgc->u.cli.cl_mgc_mgsexp) {
		char *param = NULL;
		char *buf;

		param = kzalloc(MGS_PARAM_MAXLEN, GFP_NOFS);
		if (!param)
			return -ENOMEM;

		buf = param;
		/* Get fsname and assume devname to be -MDT0000. */
		ll_get_fsname(inode->i_sb, buf, MTI_NAME_MAXLEN);
		strcat(buf, "-MDT0000.lov");
		buf += strlen(buf);

		/* Set root stripesize */
		sprintf(buf, ".stripesize=%u",
			lump ? le32_to_cpu(lump->lmm_stripe_size) : 0);
		rc = ll_send_mgc_param(mgc->u.cli.cl_mgc_mgsexp, param);
		if (rc)
			goto end;

		/* Set root stripecount */
		sprintf(buf, ".stripecount=%hd",
			lump ? le16_to_cpu(lump->lmm_stripe_count) : 0);
		rc = ll_send_mgc_param(mgc->u.cli.cl_mgc_mgsexp, param);
		if (rc)
			goto end;

		/* Set root stripeoffset */
		sprintf(buf, ".stripeoffset=%hd",
			lump ? le16_to_cpu(lump->lmm_stripe_offset) :
			(typeof(lump->lmm_stripe_offset))(-1));
		rc = ll_send_mgc_param(mgc->u.cli.cl_mgc_mgsexp, param);

end:
		kfree(param);
	}
	return rc;
}

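/*
 * Fetch the striping EA of a directory from the MDT; on success *lmmp
 * points into the reply buffer of *request, converted to host endianness.
 */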
int ll_dir_getstripe(struct inode *inode, struct lov_mds_md **lmmp,
		     int *lmm_size, struct ptlrpc_request **request)
{
	struct ll_sb_info *sbi = ll_i2sbi(inode);
	struct mdt_body   *body;
	struct lov_mds_md *lmm = NULL;
	struct ptlrpc_request *req = NULL;
	int rc, lmmsize;
	struct md_op_data *op_data;

	rc = ll_get_default_mdsize(sbi, &lmmsize);
	if (rc)
		return rc;

	op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL,
				     0, lmmsize, LUSTRE_OPC_ANY,
				     NULL);
	if (IS_ERR(op_data))
		return PTR_ERR(op_data);

	op_data->op_valid = OBD_MD_FLEASIZE | OBD_MD_FLDIREA;
	rc = md_getattr(sbi->ll_md_exp, op_data, &req);
	ll_finish_md_op_data(op_data);
	if (rc < 0) {
		CDEBUG(D_INFO, "md_getattr failed on inode %lu/%u: rc %d\n",
		       inode->i_ino,
		       inode->i_generation, rc);
		goto out;
	}

	body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
	LASSERT(body != NULL);

	lmmsize = body->eadatasize;

	if (!(body->valid & (OBD_MD_FLEASIZE | OBD_MD_FLDIREA)) ||
	    lmmsize == 0) {
		rc = -ENODATA;
		goto out;
	}

	lmm = req_capsule_server_sized_get(&req->rq_pill,
					   &RMF_MDT_MD, lmmsize);
	LASSERT(lmm != NULL);

	/*
	 * This is coming from the MDS, so it is probably in
	 * little endian.  We convert it to host endian before
	 * passing it to userspace.
	 */
	/* We don't swab objects for directories */
	switch (le32_to_cpu(lmm->lmm_magic)) {
	case LOV_MAGIC_V1:
		if (cpu_to_le32(LOV_MAGIC) != LOV_MAGIC)
			lustre_swab_lov_user_md_v1((struct lov_user_md_v1 *)lmm);
		break;
	case LOV_MAGIC_V3:
		if (cpu_to_le32(LOV_MAGIC) != LOV_MAGIC)
			lustre_swab_lov_user_md_v3((struct lov_user_md_v3 *)lmm);
		break;
	default:
		CERROR("unknown magic: %lX\n", (unsigned long)lmm->lmm_magic);
		rc = -EPROTO;
	}
out:
	*lmmp = lmm;
	*lmm_size = lmmsize;
	*request = req;
	return rc;
}

/*
 * Get the MDT index for the inode.
 */
int ll_get_mdt_idx(struct inode *inode)
{
	struct ll_sb_info *sbi = ll_i2sbi(inode);
	struct md_op_data *op_data;
	int rc, mdtidx;

	op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0,
				     0, LUSTRE_OPC_ANY, NULL);
	if (IS_ERR(op_data))
		return PTR_ERR(op_data);

	op_data->op_flags |= MF_GET_MDT_IDX;
	rc = md_getattr(sbi->ll_md_exp, op_data, NULL);
	mdtidx = op_data->op_mds;
	ll_finish_md_op_data(op_data);
	if (rc < 0) {
		CDEBUG(D_INFO, "md_getattr_name: %d\n", rc);
		return rc;
	}
	return mdtidx;
}

/**
 * Generic handler to do any pre-copy work.
 *
 * It sends a first hsm_progress (with extent length == 0) to the coordinator
 * as a first notification that the real work has started.
 *
 * Moreover, for an ARCHIVE request, it will sample the file data version and
 * store it in \a copy.
 *
 * \return 0 on success.
 */
static int ll_ioc_copy_start(struct super_block *sb, struct hsm_copy *copy)
{
	struct ll_sb_info		*sbi = ll_s2sbi(sb);
	struct hsm_progress_kernel	 hpk;
	int				 rc;

	/* Forge a hsm_progress based on data from copy. */
	hpk.hpk_fid = copy->hc_hai.hai_fid;
	hpk.hpk_cookie = copy->hc_hai.hai_cookie;
	hpk.hpk_extent.offset = copy->hc_hai.hai_extent.offset;
	hpk.hpk_extent.length = 0;
	hpk.hpk_flags = 0;
	hpk.hpk_errval = 0;
	hpk.hpk_data_version = 0;

	/* For an archive request, we need to read the current file version. */
	if (copy->hc_hai.hai_action == HSMA_ARCHIVE) {
		struct inode	*inode;
		__u64		 data_version = 0;

		/* Get the inode for this fid */
		inode = search_inode_for_lustre(sb, &copy->hc_hai.hai_fid);
		if (IS_ERR(inode)) {
			hpk.hpk_flags |= HP_FLAG_RETRY;
			/* hpk_errval is >= 0 */
			hpk.hpk_errval = -PTR_ERR(inode);
			rc = PTR_ERR(inode);
			goto progress;
		}

		/* Read the current file data version */
		rc = ll_data_version(inode, &data_version, 1);
		iput(inode);
		if (rc != 0) {
			CDEBUG(D_HSM, "Could not read file data version of "
				      DFID" (rc = %d). Archive request (%#llx) could not be done.\n",
				      PFID(&copy->hc_hai.hai_fid), rc,
				      copy->hc_hai.hai_cookie);
			hpk.hpk_flags |= HP_FLAG_RETRY;
			/* hpk_errval must be >= 0 */
			hpk.hpk_errval = -rc;
			goto progress;
		}

		/* Store it in the hsm_copy for later copytool use.
		 * Always modified even if no lsm.
		 */
		copy->hc_data_version = data_version;
	}

progress:
	rc = obd_iocontrol(LL_IOC_HSM_PROGRESS, sbi->ll_md_exp, sizeof(hpk),
			   &hpk, NULL);

	return rc;
}

/**
 * Generic handler to do any post-copy work.
 *
 * It will send the last hsm_progress update to the coordinator to inform it
 * that the copy is finished and whether it was successful or not.
 *
 * Moreover,
 * - for an ARCHIVE request, it will sample the file data version and compare
 *   it with the version saved in ll_ioc_copy_start(). If they do not match,
 *   the copy will be considered as failed.
 * - for a RESTORE request, it will sample the file data version and send it
 *   to the coordinator, which is useful if the file was imported as
 *   'released'.
 *
 * \return 0 on success.
 */
static int ll_ioc_copy_end(struct super_block *sb, struct hsm_copy *copy)
{
	struct ll_sb_info		*sbi = ll_s2sbi(sb);
	struct hsm_progress_kernel	 hpk;
	int				 rc;

	/* If you modify the logic here, also check llapi_hsm_copy_end(). */
	/* Take care: copy->hc_hai.hai_action, len, gid and data are not
	 * initialized if copy_end was called with copy == NULL.
	 */

	/* Forge a hsm_progress based on data from copy. */
	hpk.hpk_fid = copy->hc_hai.hai_fid;
	hpk.hpk_cookie = copy->hc_hai.hai_cookie;
	hpk.hpk_extent = copy->hc_hai.hai_extent;
	hpk.hpk_flags = copy->hc_flags | HP_FLAG_COMPLETED;
	hpk.hpk_errval = copy->hc_errval;
	hpk.hpk_data_version = 0;

	/* For an archive request, we need to check that the file data was
	 * not changed.
	 *
	 * For a restore request, we need to send the file data version; this
	 * is useful when the file was created using hsm_import.
	 */
	if (((copy->hc_hai.hai_action == HSMA_ARCHIVE) ||
	     (copy->hc_hai.hai_action == HSMA_RESTORE)) &&
	    (copy->hc_errval == 0)) {
		struct inode	*inode;
		__u64		 data_version = 0;

		/* Get the lsm for this fid */
		inode = search_inode_for_lustre(sb, &copy->hc_hai.hai_fid);
		if (IS_ERR(inode)) {
			hpk.hpk_flags |= HP_FLAG_RETRY;
			/* hpk_errval must be >= 0 */
			hpk.hpk_errval = -PTR_ERR(inode);
			rc = PTR_ERR(inode);
			goto progress;
		}

		rc = ll_data_version(inode, &data_version,
				     copy->hc_hai.hai_action == HSMA_ARCHIVE);
		iput(inode);
		if (rc) {
			CDEBUG(D_HSM, "Could not read file data version. Request could not be confirmed.\n");
			if (hpk.hpk_errval == 0)
				hpk.hpk_errval = -rc;
			goto progress;
		}

		/* Store it in the hsm_copy for later copytool use.
		 * Always modified even if no lsm.
		 */
		hpk.hpk_data_version = data_version;

		/* The file could have been stripped during archiving, so we
		 * need to check anyway.
		 */
		if ((copy->hc_hai.hai_action == HSMA_ARCHIVE) &&
		    (copy->hc_data_version != data_version)) {
			CDEBUG(D_HSM, "File data version mismatched. File content was changed during archiving. "
			       DFID", start:%#llx current:%#llx\n",
			       PFID(&copy->hc_hai.hai_fid),
			       copy->hc_data_version, data_version);
			/* The file was changed, send an error to the cdt. Do
			 * not ask for a retry, because if a file is modified
			 * frequently, the cdt will loop on retried archive
			 * requests. The policy engine will ask for a new
			 * archive later, when the file will not have been
			 * modified for some tunable time.
			 */
			/* we do not notify the caller */
			hpk.hpk_flags &= ~HP_FLAG_RETRY;
			/* hpk_errval must be >= 0 */
			hpk.hpk_errval = EBUSY;
		}

	}

progress:
	rc = obd_iocontrol(LL_IOC_HSM_PROGRESS, sbi->ll_md_exp, sizeof(hpk),
			   &hpk, NULL);

	return rc;
}

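/*
 * Copy @size bytes of ioctl argument from userspace into a kernel buffer
 * and hand it to obd_iocontrol().
 */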
static int copy_and_ioctl(int cmd, struct obd_export *exp,
			  const void __user *data, size_t size)
{
	void *copy;
	int rc;

	copy = kzalloc(size, GFP_NOFS);
	if (!copy)
		return -ENOMEM;

	if (copy_from_user(copy, data, size)) {
		rc = -EFAULT;
		goto out;
	}

	rc = obd_iocontrol(cmd, exp, size, copy, NULL);
out:
	kfree(copy);

	return rc;
}

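/*
 * Permission-check and route an if_quotactl request to the right export
 * (MDT and/or OST), aggregating per-target usage for Q_GETQUOTA when the
 * servers do not report it directly.
 */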
static int quotactl_ioctl(struct ll_sb_info *sbi, struct if_quotactl *qctl)
{
	int cmd = qctl->qc_cmd;
	int type = qctl->qc_type;
	int id = qctl->qc_id;
	int valid = qctl->qc_valid;
	int rc = 0;

	switch (cmd) {
	case LUSTRE_Q_INVALIDATE:
	case LUSTRE_Q_FINVALIDATE:
	case Q_QUOTAON:
	case Q_QUOTAOFF:
	case Q_SETQUOTA:
	case Q_SETINFO:
		if (!capable(CFS_CAP_SYS_ADMIN) ||
		    sbi->ll_flags & LL_SBI_RMT_CLIENT)
			return -EPERM;
		break;
	case Q_GETQUOTA:
		if (((type == USRQUOTA &&
		      !uid_eq(current_euid(), make_kuid(&init_user_ns, id))) ||
		     (type == GRPQUOTA &&
		      !in_egroup_p(make_kgid(&init_user_ns, id)))) &&
		    (!capable(CFS_CAP_SYS_ADMIN) ||
		     sbi->ll_flags & LL_SBI_RMT_CLIENT))
			return -EPERM;
		break;
	case Q_GETINFO:
		break;
	default:
		CERROR("unsupported quotactl op: %#x\n", cmd);
		return -ENOTTY;
	}

	if (valid != QC_GENERAL) {
		if (sbi->ll_flags & LL_SBI_RMT_CLIENT)
			return -EOPNOTSUPP;

		if (cmd == Q_GETINFO)
			qctl->qc_cmd = Q_GETOINFO;
		else if (cmd == Q_GETQUOTA)
			qctl->qc_cmd = Q_GETOQUOTA;
		else
			return -EINVAL;

		switch (valid) {
		case QC_MDTIDX:
			rc = obd_iocontrol(OBD_IOC_QUOTACTL, sbi->ll_md_exp,
					   sizeof(*qctl), qctl, NULL);
			break;
		case QC_OSTIDX:
			rc = obd_iocontrol(OBD_IOC_QUOTACTL, sbi->ll_dt_exp,
					   sizeof(*qctl), qctl, NULL);
			break;
		case QC_UUID:
			rc = obd_iocontrol(OBD_IOC_QUOTACTL, sbi->ll_md_exp,
					   sizeof(*qctl), qctl, NULL);
			if (rc == -EAGAIN)
				rc = obd_iocontrol(OBD_IOC_QUOTACTL,
						   sbi->ll_dt_exp,
						   sizeof(*qctl), qctl, NULL);
			break;
		default:
			rc = -EINVAL;
			break;
		}

		if (rc)
			return rc;

		qctl->qc_cmd = cmd;
	} else {
		struct obd_quotactl *oqctl;

		oqctl = kzalloc(sizeof(*oqctl), GFP_NOFS);
		if (!oqctl)
			return -ENOMEM;

		QCTL_COPY(oqctl, qctl);
		rc = obd_quotactl(sbi->ll_md_exp, oqctl);
		if (rc) {
			if (rc != -EALREADY && cmd == Q_QUOTAON) {
				oqctl->qc_cmd = Q_QUOTAOFF;
				obd_quotactl(sbi->ll_md_exp, oqctl);
			}
			kfree(oqctl);
			return rc;
		}
		/* If QIF_SPACE is not set, the client should collect the
		 * space usage from the OSSs by itself.
		 */
		if (cmd == Q_GETQUOTA &&
		    !(oqctl->qc_dqblk.dqb_valid & QIF_SPACE) &&
		    !oqctl->qc_dqblk.dqb_curspace) {
			struct obd_quotactl *oqctl_tmp;

			oqctl_tmp = kzalloc(sizeof(*oqctl_tmp), GFP_NOFS);
			if (!oqctl_tmp) {
				rc = -ENOMEM;
				goto out;
			}

			oqctl_tmp->qc_cmd = Q_GETOQUOTA;
			oqctl_tmp->qc_id = oqctl->qc_id;
			oqctl_tmp->qc_type = oqctl->qc_type;

			/* collect space usage from OSTs */
			oqctl_tmp->qc_dqblk.dqb_curspace = 0;
			rc = obd_quotactl(sbi->ll_dt_exp, oqctl_tmp);
			if (!rc || rc == -EREMOTEIO) {
				oqctl->qc_dqblk.dqb_curspace =
					oqctl_tmp->qc_dqblk.dqb_curspace;
				oqctl->qc_dqblk.dqb_valid |= QIF_SPACE;
			}

			/* collect space & inode usage from MDTs */
			oqctl_tmp->qc_dqblk.dqb_curspace = 0;
			oqctl_tmp->qc_dqblk.dqb_curinodes = 0;
			rc = obd_quotactl(sbi->ll_md_exp, oqctl_tmp);
			if (!rc || rc == -EREMOTEIO) {
				oqctl->qc_dqblk.dqb_curspace +=
					oqctl_tmp->qc_dqblk.dqb_curspace;
				oqctl->qc_dqblk.dqb_curinodes =
					oqctl_tmp->qc_dqblk.dqb_curinodes;
				oqctl->qc_dqblk.dqb_valid |= QIF_INODES;
			} else {
				oqctl->qc_dqblk.dqb_valid &= ~QIF_SPACE;
			}

			kfree(oqctl_tmp);
		}
out:
		QCTL_COPY(qctl, oqctl);
		kfree(oqctl);
	}

	return rc;
}

/* This function tries to get a single name component,
 * to send to the server. No actual path traversal is involved,
 * so we limit to NAME_MAX.
 */
static char *ll_getname(const char __user *filename)
{
	int ret = 0, len;
	char *tmp;

	tmp = kzalloc(NAME_MAX + 1, GFP_KERNEL);
	if (!tmp)
		return ERR_PTR(-ENOMEM);

	len = strncpy_from_user(tmp, filename, NAME_MAX + 1);
	if (len < 0)
		ret = len;
	else if (len == 0)
		ret = -ENOENT;
	else if (len > NAME_MAX && tmp[NAME_MAX] != 0)
		ret = -ENAMETOOLONG;

	if (ret) {
		kfree(tmp);
		tmp = ERR_PTR(ret);
	}
	return tmp;
}

#define ll_putname(filename) kfree(filename)

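/*
 * Main ioctl dispatcher for directory file descriptors: striping, quota,
 * HSM and changelog requests are decoded here and forwarded to the MD or
 * DT export as appropriate.
 */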
static long ll_dir_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct inode *inode = file_inode(file);
	struct ll_sb_info *sbi = ll_i2sbi(inode);
	struct obd_ioctl_data *data;
	int rc = 0;

	CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p), cmd=%#x\n",
	       inode->i_ino, inode->i_generation, inode, cmd);

	/* asm-ppc{,64} declares TCGETS, et al. as type 't' not 'T' */
	if (_IOC_TYPE(cmd) == 'T' || _IOC_TYPE(cmd) == 't') /* tty ioctls */
		return -ENOTTY;

	ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_IOCTL, 1);
	switch (cmd) {
	case FSFILT_IOC_GETFLAGS:
	case FSFILT_IOC_SETFLAGS:
		return ll_iocontrol(inode, file, cmd, arg);
	case FSFILT_IOC_GETVERSION_OLD:
	case FSFILT_IOC_GETVERSION:
		return put_user(inode->i_generation, (int *)arg);
	/* We need to special case any other ioctls we want to handle,
	 * to send them to the MDS/OST as appropriate and to properly
	 * network encode the arg field.
	case FSFILT_IOC_SETVERSION_OLD:
	case FSFILT_IOC_SETVERSION:
	*/
	case LL_IOC_GET_MDTIDX: {
		int mdtidx;

		mdtidx = ll_get_mdt_idx(inode);
		if (mdtidx < 0)
			return mdtidx;

		if (put_user((int)mdtidx, (int *)arg))
			return -EFAULT;

		return 0;
	}
	case IOC_MDC_LOOKUP: {
		struct ptlrpc_request *request = NULL;
		int namelen, len = 0;
		char *buf = NULL;
		char *filename;
		struct md_op_data *op_data;

		rc = obd_ioctl_getdata(&buf, &len, (void *)arg);
		if (rc)
			return rc;
		data = (void *)buf;

		filename = data->ioc_inlbuf1;
		namelen = strlen(filename);

		if (namelen < 1) {
			CDEBUG(D_INFO, "IOC_MDC_LOOKUP missing filename\n");
			rc = -EINVAL;
			goto out_free;
		}

		op_data = ll_prep_md_op_data(NULL, inode, NULL, filename, namelen,
					     0, LUSTRE_OPC_ANY, NULL);
		if (IS_ERR(op_data)) {
			rc = PTR_ERR(op_data);
			goto out_free;
		}

		op_data->op_valid = OBD_MD_FLID;
		rc = md_getattr_name(sbi->ll_md_exp, op_data, &request);
		ll_finish_md_op_data(op_data);
		if (rc < 0) {
			CDEBUG(D_INFO, "md_getattr_name: %d\n", rc);
			goto out_free;
		}
		ptlrpc_req_finished(request);
out_free:
		obd_ioctl_freedata(buf, len);
		return rc;
	}
	case LL_IOC_LMV_SETSTRIPE: {
		struct lmv_user_md  *lum;
		char		*buf = NULL;
		char		*filename;
		int		 namelen = 0;
		int		 lumlen = 0;
		int		 len;
		int		 rc;

		rc = obd_ioctl_getdata(&buf, &len, (void *)arg);
		if (rc)
			return rc;

		data = (void *)buf;
		if (data->ioc_inlbuf1 == NULL || data->ioc_inlbuf2 == NULL ||
		    data->ioc_inllen1 == 0 || data->ioc_inllen2 == 0) {
			rc = -EINVAL;
			goto lmv_out_free;
		}

		filename = data->ioc_inlbuf1;
		namelen = data->ioc_inllen1;

		if (namelen < 1) {
			CDEBUG(D_INFO, "IOC_MDC_LOOKUP missing filename\n");
			rc = -EINVAL;
			goto lmv_out_free;
		}
		lum = (struct lmv_user_md *)data->ioc_inlbuf2;
		lumlen = data->ioc_inllen2;

		if (lum->lum_magic != LMV_USER_MAGIC ||
		    lumlen != sizeof(*lum)) {
			CERROR("%s: wrong lum magic %x or size %d: rc = %d\n",
			       filename, lum->lum_magic, lumlen, -EFAULT);
			rc = -EINVAL;
			goto lmv_out_free;
		}

		/**
		 * ll_dir_setdirstripe() will be used to set the dir stripe:
		 *  mdc_create--->mdt_reint_create (with dirstripe)
		 */
		rc = ll_dir_setdirstripe(inode, lum, filename);
lmv_out_free:
		obd_ioctl_freedata(buf, len);
		return rc;

	}
	case LL_IOC_LOV_SETSTRIPE: {
		struct lov_user_md_v3 lumv3;
		struct lov_user_md_v1 *lumv1 = (struct lov_user_md_v1 *)&lumv3;
		struct lov_user_md_v1 *lumv1p = (struct lov_user_md_v1 *)arg;
		struct lov_user_md_v3 *lumv3p = (struct lov_user_md_v3 *)arg;

		int set_default = 0;

		LASSERT(sizeof(lumv3) == sizeof(*lumv3p));
		LASSERT(sizeof(lumv3.lmm_objects[0]) ==
			sizeof(lumv3p->lmm_objects[0]));
		/* first try with v1, which is smaller than v3 */
		if (copy_from_user(lumv1, lumv1p, sizeof(*lumv1)))
			return -EFAULT;

		if (lumv1->lmm_magic == LOV_USER_MAGIC_V3) {
			if (copy_from_user(&lumv3, lumv3p, sizeof(lumv3)))
				return -EFAULT;
		}

		if (is_root_inode(inode))
			set_default = 1;

		/* in both the v1 and v3 cases lumv1 points to the data */
		rc = ll_dir_setstripe(inode, lumv1, set_default);

		return rc;
	}
	case LL_IOC_LMV_GETSTRIPE: {
		struct lmv_user_md *lump = (struct lmv_user_md *)arg;
		struct lmv_user_md lum;
		struct lmv_user_md *tmp;
		int lum_size;
		int rc = 0;
		int mdtindex;

		if (copy_from_user(&lum, lump, sizeof(struct lmv_user_md)))
			return -EFAULT;

		if (lum.lum_magic != LMV_MAGIC_V1)
			return -EINVAL;

		lum_size = lmv_user_md_size(1, LMV_MAGIC_V1);
		tmp = kzalloc(lum_size, GFP_NOFS);
		if (!tmp) {
			rc = -ENOMEM;
			goto free_lmv;
		}

		*tmp = lum;
		tmp->lum_type = LMV_STRIPE_TYPE;
		tmp->lum_stripe_count = 1;
		mdtindex = ll_get_mdt_idx(inode);
		if (mdtindex < 0) {
			rc = -ENOMEM;
			goto free_lmv;
		}

		tmp->lum_stripe_offset = mdtindex;
		tmp->lum_objects[0].lum_mds = mdtindex;
		memcpy(&tmp->lum_objects[0].lum_fid, ll_inode2fid(inode),
		       sizeof(struct lu_fid));
		if (copy_to_user((void *)arg, tmp, lum_size)) {
			rc = -EFAULT;
			goto free_lmv;
		}
free_lmv:
		kfree(tmp);
		return rc;
	}
	case LL_IOC_LOV_SWAP_LAYOUTS:
		return -EPERM;
	case LL_IOC_OBD_STATFS:
		return ll_obd_statfs(inode, (void *)arg);
	case LL_IOC_LOV_GETSTRIPE:
	case LL_IOC_MDC_GETINFO:
	case IOC_MDC_GETFILEINFO:
	case IOC_MDC_GETFILESTRIPE: {
		struct ptlrpc_request *request = NULL;
		struct lov_user_md *lump;
		struct lov_mds_md *lmm = NULL;
		struct mdt_body *body;
		char *filename = NULL;
		int lmmsize;

		if (cmd == IOC_MDC_GETFILEINFO ||
		    cmd == IOC_MDC_GETFILESTRIPE) {
			filename = ll_getname((const char *)arg);
			if (IS_ERR(filename))
				return PTR_ERR(filename);

			rc = ll_lov_getstripe_ea_info(inode, filename, &lmm,
						      &lmmsize, &request);
		} else {
			rc = ll_dir_getstripe(inode, &lmm, &lmmsize, &request);
		}

		if (request) {
			body = req_capsule_server_get(&request->rq_pill,
						      &RMF_MDT_BODY);
			LASSERT(body != NULL);
		} else {
			goto out_req;
		}

		if (rc < 0) {
			if (rc == -ENODATA && (cmd == IOC_MDC_GETFILEINFO ||
					       cmd == LL_IOC_MDC_GETINFO)) {
				rc = 0;
				goto skip_lmm;
			} else
				goto out_req;
		}

		if (cmd == IOC_MDC_GETFILESTRIPE ||
		    cmd == LL_IOC_LOV_GETSTRIPE) {
			lump = (struct lov_user_md *)arg;
		} else {
			struct lov_user_mds_data *lmdp;

			lmdp = (struct lov_user_mds_data *)arg;
			lump = &lmdp->lmd_lmm;
		}
		if (copy_to_user(lump, lmm, lmmsize)) {
			if (copy_to_user(lump, lmm, sizeof(*lump))) {
				rc = -EFAULT;
				goto out_req;
			}
			rc = -EOVERFLOW;
		}
skip_lmm:
		if (cmd == IOC_MDC_GETFILEINFO || cmd == LL_IOC_MDC_GETINFO) {
			struct lov_user_mds_data *lmdp;
			lstat_t st = { 0 };

			st.st_dev     = inode->i_sb->s_dev;
			st.st_mode    = body->mode;
			st.st_nlink   = body->nlink;
			st.st_uid     = body->uid;
			st.st_gid     = body->gid;
			st.st_rdev    = body->rdev;
			st.st_size    = body->size;
			st.st_blksize = PAGE_CACHE_SIZE;
			st.st_blocks  = body->blocks;
			st.st_atime   = body->atime;
			st.st_mtime   = body->mtime;
			st.st_ctime   = body->ctime;
			st.st_ino     = inode->i_ino;

			lmdp = (struct lov_user_mds_data *)arg;
			if (copy_to_user(&lmdp->lmd_st, &st, sizeof(st))) {
				rc = -EFAULT;
				goto out_req;
			}
		}

out_req:
		ptlrpc_req_finished(request);
		if (filename)
			ll_putname(filename);
		return rc;
	}
	case IOC_LOV_GETINFO: {
		struct lov_user_mds_data *lumd;
		struct lov_stripe_md *lsm;
		struct lov_user_md *lum;
		struct lov_mds_md *lmm;
		int lmmsize;
		lstat_t st;

		lumd = (struct lov_user_mds_data *)arg;
		lum = &lumd->lmd_lmm;

		rc = ll_get_max_mdsize(sbi, &lmmsize);
		if (rc)
			return rc;

		lmm = libcfs_kvzalloc(lmmsize, GFP_NOFS);
		if (lmm == NULL)
			return -ENOMEM;
		if (copy_from_user(lmm, lum, lmmsize)) {
			rc = -EFAULT;
			goto free_lmm;
		}

		switch (lmm->lmm_magic) {
		case LOV_USER_MAGIC_V1:
			if (cpu_to_le32(LOV_USER_MAGIC_V1) == LOV_USER_MAGIC_V1)
				break;
			/* swab objects first so that the stripe count is sane */
			lustre_swab_lov_user_md_objects(
				((struct lov_user_md_v1 *)lmm)->lmm_objects,
				((struct lov_user_md_v1 *)lmm)->lmm_stripe_count);
			lustre_swab_lov_user_md_v1((struct lov_user_md_v1 *)lmm);
			break;
		case LOV_USER_MAGIC_V3:
			if (cpu_to_le32(LOV_USER_MAGIC_V3) == LOV_USER_MAGIC_V3)
				break;
			/* swab objects first so that the stripe count is sane */
			lustre_swab_lov_user_md_objects(
				((struct lov_user_md_v3 *)lmm)->lmm_objects,
				((struct lov_user_md_v3 *)lmm)->lmm_stripe_count);
			lustre_swab_lov_user_md_v3((struct lov_user_md_v3 *)lmm);
			break;
		default:
			rc = -EINVAL;
			goto free_lmm;
		}

		rc = obd_unpackmd(sbi->ll_dt_exp, &lsm, lmm, lmmsize);
		if (rc < 0) {
			rc = -ENOMEM;
			goto free_lmm;
		}

		/* Perform glimpse_size operation. */
		memset(&st, 0, sizeof(st));

		rc = ll_glimpse_ioctl(sbi, lsm, &st);
		if (rc)
			goto free_lsm;

		if (copy_to_user(&lumd->lmd_st, &st, sizeof(st))) {
			rc = -EFAULT;
			goto free_lsm;
		}

free_lsm:
		obd_free_memmd(sbi->ll_dt_exp, &lsm);
free_lmm:
		kvfree(lmm);
		return rc;
	}
	case OBD_IOC_LLOG_CATINFO: {
		return -EOPNOTSUPP;
	}
	case OBD_IOC_QUOTACHECK: {
		struct obd_quotactl *oqctl;
		int error = 0;

		if (!capable(CFS_CAP_SYS_ADMIN) ||
		    sbi->ll_flags & LL_SBI_RMT_CLIENT)
			return -EPERM;

		oqctl = kzalloc(sizeof(*oqctl), GFP_NOFS);
		if (!oqctl)
			return -ENOMEM;
		oqctl->qc_type = arg;
		rc = obd_quotacheck(sbi->ll_md_exp, oqctl);
		if (rc < 0) {
			CDEBUG(D_INFO, "md_quotacheck failed: rc %d\n", rc);
			error = rc;
		}

		rc = obd_quotacheck(sbi->ll_dt_exp, oqctl);
		if (rc < 0)
			CDEBUG(D_INFO, "obd_quotacheck failed: rc %d\n", rc);

		kfree(oqctl);
		return error ?: rc;
	}
	case OBD_IOC_POLL_QUOTACHECK: {
		struct if_quotacheck *check;

		if (!capable(CFS_CAP_SYS_ADMIN) ||
		    sbi->ll_flags & LL_SBI_RMT_CLIENT)
			return -EPERM;

		check = kzalloc(sizeof(*check), GFP_NOFS);
		if (!check)
			return -ENOMEM;

		rc = obd_iocontrol(cmd, sbi->ll_md_exp, 0, (void *)check,
				   NULL);
		if (rc) {
			CDEBUG(D_QUOTA, "mdc ioctl %d failed: %d\n", cmd, rc);
			if (copy_to_user((void *)arg, check,
					 sizeof(*check)))
				CDEBUG(D_QUOTA, "copy_to_user failed\n");
			goto out_poll;
		}

		rc = obd_iocontrol(cmd, sbi->ll_dt_exp, 0, (void *)check,
				   NULL);
		if (rc) {
			CDEBUG(D_QUOTA, "osc ioctl %d failed: %d\n", cmd, rc);
			if (copy_to_user((void *)arg, check,
					 sizeof(*check)))
				CDEBUG(D_QUOTA, "copy_to_user failed\n");
			goto out_poll;
		}
out_poll:
		kfree(check);
		return rc;
	}
	case LL_IOC_QUOTACTL: {
		struct if_quotactl *qctl;

		qctl = kzalloc(sizeof(*qctl), GFP_NOFS);
		if (!qctl)
			return -ENOMEM;

		if (copy_from_user(qctl, (void *)arg, sizeof(*qctl))) {
			rc = -EFAULT;
			goto out_quotactl;
		}

		rc = quotactl_ioctl(sbi, qctl);

		if (rc == 0 && copy_to_user((void *)arg, qctl, sizeof(*qctl)))
			rc = -EFAULT;

out_quotactl:
		kfree(qctl);
		return rc;
	}
	case OBD_IOC_GETDTNAME:
	case OBD_IOC_GETMDNAME:
		return ll_get_obd_name(inode, cmd, arg);
	case LL_IOC_FLUSHCTX:
		return ll_flush_ctx(inode);
#ifdef CONFIG_FS_POSIX_ACL
	case LL_IOC_RMTACL: {
		if (sbi->ll_flags & LL_SBI_RMT_CLIENT && is_root_inode(inode)) {
			struct ll_file_data *fd = LUSTRE_FPRIVATE(file);

			LASSERT(fd != NULL);
			rc = rct_add(&sbi->ll_rct, current_pid(), arg);
			if (!rc)
				fd->fd_flags |= LL_FILE_RMTACL;
			return rc;
		} else
			return 0;
	}
#endif
	case LL_IOC_GETOBDCOUNT: {
		int count, vallen;
		struct obd_export *exp;

		if (copy_from_user(&count, (int *)arg, sizeof(int)))
			return -EFAULT;

		/* get ost count when count is zero, get mdt count otherwise */
		exp = count ? sbi->ll_md_exp : sbi->ll_dt_exp;
		vallen = sizeof(count);
		rc = obd_get_info(NULL, exp, sizeof(KEY_TGT_COUNT),
				  KEY_TGT_COUNT, &vallen, &count, NULL);
		if (rc) {
			CERROR("get target count failed: %d\n", rc);
			return rc;
		}

		if (copy_to_user((int *)arg, &count, sizeof(int)))
			return -EFAULT;

		return 0;
	}
	case LL_IOC_PATH2FID:
		if (copy_to_user((void *)arg, ll_inode2fid(inode),
				 sizeof(struct lu_fid)))
			return -EFAULT;
		return 0;
	case LL_IOC_GET_CONNECT_FLAGS: {
		return obd_iocontrol(cmd, sbi->ll_md_exp, 0, NULL, (void *)arg);
	}
	case OBD_IOC_CHANGELOG_SEND:
	case OBD_IOC_CHANGELOG_CLEAR:
		if (!capable(CFS_CAP_SYS_ADMIN))
			return -EPERM;

		rc = copy_and_ioctl(cmd, sbi->ll_md_exp, (void *)arg,
				    sizeof(struct ioc_changelog));
		return rc;
	case OBD_IOC_FID2PATH:
		return ll_fid2path(inode, (void *)arg);
	case LL_IOC_HSM_REQUEST: {
		struct hsm_user_request	*hur;
		ssize_t			 totalsize;

		hur = memdup_user((void *)arg, sizeof(*hur));
		if (IS_ERR(hur))
			return PTR_ERR(hur);

		/* Compute the whole struct size */
		totalsize = hur_len(hur);
		kfree(hur);
		if (totalsize < 0)
			return -E2BIG;

		/* Final size will be more than double totalsize */
		if (totalsize >= MDS_MAXREQSIZE / 3)
			return -E2BIG;

		hur = libcfs_kvzalloc(totalsize, GFP_NOFS);
		if (hur == NULL)
			return -ENOMEM;

		/* Copy the whole struct */
		if (copy_from_user(hur, (void *)arg, totalsize)) {
			kvfree(hur);
			return -EFAULT;
		}

		if (hur->hur_request.hr_action == HUA_RELEASE) {
			const struct lu_fid *fid;
			struct inode *f;
			int i;

			for (i = 0; i < hur->hur_request.hr_itemcount; i++) {
				fid = &hur->hur_user_item[i].hui_fid;
				f = search_inode_for_lustre(inode->i_sb, fid);
				if (IS_ERR(f)) {
					rc = PTR_ERR(f);
					break;
				}

				rc = ll_hsm_release(f);
				iput(f);
				if (rc != 0)
					break;
			}
		} else {
			rc = obd_iocontrol(cmd, ll_i2mdexp(inode), totalsize,
					   hur, NULL);
		}

		kvfree(hur);

		return rc;
	}
	case LL_IOC_HSM_PROGRESS: {
		struct hsm_progress_kernel	hpk;
		struct hsm_progress		hp;

		if (copy_from_user(&hp, (void *)arg, sizeof(hp)))
			return -EFAULT;

		hpk.hpk_fid = hp.hp_fid;
		hpk.hpk_cookie = hp.hp_cookie;
		hpk.hpk_extent = hp.hp_extent;
		hpk.hpk_flags = hp.hp_flags;
		hpk.hpk_errval = hp.hp_errval;
		hpk.hpk_data_version = 0;

		/* The file may not exist in Lustre; all progress is
		 * reported to the Lustre root.
		 */
		rc = obd_iocontrol(cmd, sbi->ll_md_exp, sizeof(hpk), &hpk,
				   NULL);
		return rc;
	}
	case LL_IOC_HSM_CT_START:
		rc = copy_and_ioctl(cmd, sbi->ll_md_exp, (void *)arg,
				    sizeof(struct lustre_kernelcomm));
		return rc;

	case LL_IOC_HSM_COPY_START: {
		struct hsm_copy	*copy;
		int		 rc;

		copy = memdup_user((char *)arg, sizeof(*copy));
		if (IS_ERR(copy))
			return PTR_ERR(copy);

		rc = ll_ioc_copy_start(inode->i_sb, copy);
		if (copy_to_user((char *)arg, copy, sizeof(*copy)))
			rc = -EFAULT;

		kfree(copy);
		return rc;
	}
	case LL_IOC_HSM_COPY_END: {
		struct hsm_copy	*copy;
		int		 rc;

		copy = memdup_user((char *)arg, sizeof(*copy));
		if (IS_ERR(copy))
			return PTR_ERR(copy);

		rc = ll_ioc_copy_end(inode->i_sb, copy);
		if (copy_to_user((char *)arg, copy, sizeof(*copy)))
			rc = -EFAULT;

		kfree(copy);
		return rc;
	}
	default:
		return obd_iocontrol(cmd, sbi->ll_dt_exp, 0, NULL, (void *)arg);
	}
}

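/*
 * llseek for directories: the offset space is the name-hash space, so
 * validate the target against the 32-/64-bit end-of-directory sentinels
 * and remember the raw 64-bit hash in lfd_pos for the next readdir call.
 */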
static loff_t ll_dir_seek(struct file *file, loff_t offset, int origin)
{
	struct inode *inode = file->f_mapping->host;
	struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
	struct ll_sb_info *sbi = ll_i2sbi(inode);
	int api32 = ll_need_32bit_api(sbi);
	loff_t ret = -EINVAL;

	mutex_lock(&inode->i_mutex);
	switch (origin) {
	case SEEK_SET:
		break;
	case SEEK_CUR:
		offset += file->f_pos;
		break;
	case SEEK_END:
		if (offset > 0)
			goto out;
		if (api32)
			offset += LL_DIR_END_OFF_32BIT;
		else
			offset += LL_DIR_END_OFF;
		break;
	default:
		goto out;
	}

	if (offset >= 0 &&
	    ((api32 && offset <= LL_DIR_END_OFF_32BIT) ||
	     (!api32 && offset <= LL_DIR_END_OFF))) {
		if (offset != file->f_pos) {
			if ((api32 && offset == LL_DIR_END_OFF_32BIT) ||
			    (!api32 && offset == LL_DIR_END_OFF))
				fd->lfd_pos = MDS_DIR_END_OFF;
			else if (api32 && sbi->ll_flags & LL_SBI_64BIT_HASH)
				fd->lfd_pos = offset << 32;
			else
				fd->lfd_pos = offset;
			file->f_pos = offset;
			file->f_version = 0;
		}
		ret = offset;
	}
	goto out;

out:
	mutex_unlock(&inode->i_mutex);
	return ret;
}

static int ll_dir_open(struct inode *inode, struct file *file)
{
	return ll_file_open(inode, file);
}

static int ll_dir_release(struct inode *inode, struct file *file)
{
	return ll_file_release(inode, file);
}

const struct file_operations ll_dir_operations = {
	.llseek   = ll_dir_seek,
	.open     = ll_dir_open,
	.release  = ll_dir_release,
	.read     = generic_read_dir,
	.iterate  = ll_readdir,
	.unlocked_ioctl   = ll_dir_ioctl,
	.fsync    = ll_fsync,
};