/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2012, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * Implementation of cl_io for VVP layer.
 *
 *   Author: Nikita Danilov <nikita.danilov@sun.com>
 *   Author: Jinshan Xiong <jinshan.xiong@whamcloud.com>
 */

#define DEBUG_SUBSYSTEM S_LLITE

#include "../include/obd.h"
#include "../include/lustre_lite.h"

#include "vvp_internal.h"

static struct vvp_io *cl2vvp_io(const struct lu_env *env,
				const struct cl_io_slice *slice);

/**
 * True if \a io is a normal io, false for splice_{read,write}.
 */
int cl_is_normalio(const struct lu_env *env, const struct cl_io *io)
{
	struct vvp_io *vio = vvp_env_io(env);

	LASSERT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE);

	return vio->cui_io_subtype == IO_NORMAL;
}

/**
 * For layout swapping: the file's layout may have changed. To avoid
 * populating pages into the wrong stripe, we have to verify that the
 * layout is still correct. This works because a process swapping
 * layouts has to hold the group lock.
 */
static bool can_populate_pages(const struct lu_env *env, struct cl_io *io,
			       struct inode *inode)
{
	struct ll_inode_info	*lli = ll_i2info(inode);
	struct ccc_io		*cio = ccc_env_io(env);
	bool rc = true;

	switch (io->ci_type) {
	case CIT_READ:
	case CIT_WRITE:
		/* no lock is needed here to check lli_layout_gen: we hold
		 * the extent lock, and the GROUP lock must be held to swap
		 * the layout */
		if (ll_layout_version_get(lli) != cio->cui_layout_gen) {
			io->ci_need_restart = 1;
			/* this will return a short read/write to the
			 * application */
			io->ci_continue = 0;
			rc = false;
		}
		/* fall through */
	case CIT_FAULT:
		/* a fault is okay because we already have a page. */
	default:
		break;
	}

	return rc;
}

/*****************************************************************************
 *
 * io operations.
 *
 */

static int vvp_io_fault_iter_init(const struct lu_env *env,
				  const struct cl_io_slice *ios)
{
	struct vvp_io *vio   = cl2vvp_io(env, ios);
	struct inode  *inode = ccc_object_inode(ios->cis_obj);

	LASSERT(inode ==
		file_inode(cl2ccc_io(env, ios)->cui_fd->fd_file));
	vio->u.fault.ft_mtime = inode->i_mtime.tv_sec;
	return 0;
}

static void vvp_io_fini(const struct lu_env *env, const struct cl_io_slice *ios)
{
	struct cl_io     *io  = ios->cis_io;
	struct cl_object *obj = io->ci_obj;
	struct ccc_io    *cio = cl2ccc_io(env, ios);

	CLOBINVRNT(env, obj, ccc_object_invariant(obj));

	CDEBUG(D_VFSTRACE, DFID
	       " ignore/verify layout %d/%d, layout version %d restore needed %d\n",
	       PFID(lu_object_fid(&obj->co_lu)),
	       io->ci_ignore_layout, io->ci_verify_layout,
	       cio->cui_layout_gen, io->ci_restore_needed);

	if (io->ci_restore_needed == 1) {
		int	rc;

		/* the file was detected as released; we need to restore it
		 * before finishing the io
		 */
		rc = ll_layout_restore(ccc_object_inode(obj));
		/* if the restore registration failed, do not restart;
		 * we will return -ENODATA */
		/* The layout will change after the restore, so we need to
		 * block on the layout lock held by the MDT. As the MDT will
		 * not send the new layout in the lvb (see LU-3124), we have
		 * to fetch it explicitly; all of this is done by
		 * ll_layout_refresh()
		 */
		if (rc == 0) {
			io->ci_restore_needed = 0;
			io->ci_need_restart = 1;
			io->ci_verify_layout = 1;
		} else {
			io->ci_restore_needed = 1;
			io->ci_need_restart = 0;
			io->ci_verify_layout = 0;
			io->ci_result = rc;
		}
	}

	if (!io->ci_ignore_layout && io->ci_verify_layout) {
		__u32 gen = 0;

		/* check layout version */
		ll_layout_refresh(ccc_object_inode(obj), &gen);
		io->ci_need_restart = cio->cui_layout_gen != gen;
		if (io->ci_need_restart) {
			CDEBUG(D_VFSTRACE,
			       DFID" layout changed from %d to %d.\n",
			       PFID(lu_object_fid(&obj->co_lu)),
			       cio->cui_layout_gen, gen);
			/* today a successful restore is the only possible
			 * case */
			/* the restore was done, clear the restoring state */
			ll_i2info(ccc_object_inode(obj))->lli_flags &=
				~LLIF_FILE_RESTORING;
		}
	}
}

static void vvp_io_fault_fini(const struct lu_env *env,
			      const struct cl_io_slice *ios)
{
	struct cl_io   *io   = ios->cis_io;
	struct cl_page *page = io->u.ci_fault.ft_page;

	CLOBINVRNT(env, io->ci_obj, ccc_object_invariant(io->ci_obj));

	if (page != NULL) {
		lu_ref_del(&page->cp_reference, "fault", io);
		cl_page_put(env, page);
		io->u.ci_fault.ft_page = NULL;
	}
	vvp_io_fini(env, ios);
}

static enum cl_lock_mode vvp_mode_from_vma(struct vm_area_struct *vma)
{
	/*
	 * we only want to hold PW locks if the mmap() can generate
	 * writes back to the file and that only happens in shared
	 * writable vmas
	 */
	if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_WRITE))
		return CLM_WRITE;
	return CLM_READ;
}

static int vvp_mmap_locks(const struct lu_env *env,
			  struct ccc_io *vio, struct cl_io *io)
{
	struct ccc_thread_info *cti = ccc_env_info(env);
	struct mm_struct       *mm = current->mm;
	struct vm_area_struct  *vma;
	struct cl_lock_descr   *descr = &cti->cti_descr;
	ldlm_policy_data_t      policy;
	unsigned long           addr;
	ssize_t                 count;
	int                     result;
	struct iov_iter i;
	struct iovec iov;

	LASSERT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE);

	if (!cl_is_normalio(env, io))
		return 0;

	if (vio->cui_iter == NULL) /* NFS or loopback device write */
		return 0;

	/* No mm (e.g. NFS)? Then no VMAs either. */
	if (mm == NULL)
		return 0;

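	/*
	 * For each iovec, round the user buffer out to page boundaries and
	 * take a cl lock on every file region the buffer is mmapped from,
	 * one VMA at a time below. The intent (as this author reads it) is
	 * that a page fault on the buffer during the transfer then does
	 * not have to enqueue a conflicting DLM lock mid-IO.
	 */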
	iov_for_each(iov, i, *(vio->cui_iter)) {
		addr = (unsigned long)iov.iov_base;
		count = iov.iov_len;
		if (count == 0)
			continue;

		count += addr & (~CFS_PAGE_MASK);
		addr &= CFS_PAGE_MASK;

		down_read(&mm->mmap_sem);
		while ((vma = our_vma(mm, addr, count)) != NULL) {
			struct inode *inode = file_inode(vma->vm_file);
			int flags = CEF_MUST;

			if (ll_file_nolock(vma->vm_file)) {
				/*
				 * For the nolock case, a lockless lock will
				 * be generated.
				 */
				flags = CEF_NEVER;
			}

			/*
			 * XXX: The required lock mode can be weakened:
			 * CIT_WRITE io only ever reads the user-level
			 * buffer, and CIT_READ only writes to it.
			 */
			policy_from_vma(&policy, vma, addr, count);
			descr->cld_mode = vvp_mode_from_vma(vma);
			descr->cld_obj = ll_i2info(inode)->lli_clob;
			descr->cld_start = cl_index(descr->cld_obj,
						    policy.l_extent.start);
			descr->cld_end = cl_index(descr->cld_obj,
						  policy.l_extent.end);
			descr->cld_enq_flags = flags;
			result = cl_io_lock_alloc_add(env, io, descr);

			CDEBUG(D_VFSTRACE, "lock: %d: [%lu, %lu]\n",
			       descr->cld_mode, descr->cld_start,
			       descr->cld_end);

			if (result < 0) {
				up_read(&mm->mmap_sem);
				return result;
			}

			if (vma->vm_end - addr >= count)
				break;

			count -= vma->vm_end - addr;
			addr = vma->vm_end;
		}
		up_read(&mm->mmap_sem);
	}
	return 0;
}

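/*
 * Take the extent lock(s) for an ordinary read or write: first any locks
 * needed for mmapped user buffers (vvp_mmap_locks() above), then a single
 * lock covering [start, end] of the file, enqueued non-blocking when the
 * io requested it (crw_nonblock below).
 */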
static int vvp_io_rw_lock(const struct lu_env *env, struct cl_io *io,
			  enum cl_lock_mode mode, loff_t start, loff_t end)
{
	struct ccc_io *cio = ccc_env_io(env);
	int result;
	int ast_flags = 0;

	LASSERT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE);

	ccc_io_update_iov(env, cio, io);

	if (io->u.ci_rw.crw_nonblock)
		ast_flags |= CEF_NONBLOCK;
	result = vvp_mmap_locks(env, cio, io);
	if (result == 0)
		result = ccc_io_one_lock(env, io, ast_flags, mode, start, end);
	return result;
}

static int vvp_io_read_lock(const struct lu_env *env,
			    const struct cl_io_slice *ios)
{
	struct cl_io	       *io = ios->cis_io;
	struct cl_io_rw_common *rd = &io->u.ci_rd.rd;
	int result;

	result = vvp_io_rw_lock(env, io, CLM_READ, rd->crw_pos,
				rd->crw_pos + rd->crw_count - 1);

	return result;
}

static int vvp_io_fault_lock(const struct lu_env *env,
			     const struct cl_io_slice *ios)
{
	struct cl_io *io   = ios->cis_io;
	struct vvp_io *vio = cl2vvp_io(env, ios);
	/*
	 * XXX LDLM_FL_CBPENDING
	 */
	return ccc_io_one_lock_index
		(env, io, 0, vvp_mode_from_vma(vio->u.fault.ft_vma),
		 io->u.ci_fault.ft_index, io->u.ci_fault.ft_index);
}

static int vvp_io_write_lock(const struct lu_env *env,
			     const struct cl_io_slice *ios)
{
	struct cl_io *io = ios->cis_io;
	loff_t start;
	loff_t end;

	if (io->u.ci_wr.wr_append) {
		start = 0;
		end   = OBD_OBJECT_EOF;
	} else {
		start = io->u.ci_wr.wr.crw_pos;
		end   = start + io->u.ci_wr.wr.crw_count - 1;
	}
	return vvp_io_rw_lock(env, io, CLM_WRITE, start, end);
}

static int vvp_io_setattr_iter_init(const struct lu_env *env,
				    const struct cl_io_slice *ios)
{
	return 0;
}

/**
 * Implementation of the cl_io_operations::cio_lock() method for CIT_SETATTR
 * io.
 *
 * Handles the "lockless io" mode when extent locking is done by the server.
 */
static int vvp_io_setattr_lock(const struct lu_env *env,
			       const struct cl_io_slice *ios)
{
	struct ccc_io *cio = ccc_env_io(env);
	struct cl_io  *io  = ios->cis_io;
	__u64 new_size;
	__u32 enqflags = 0;

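	/*
	 * A truncate takes a CLM_WRITE lock from the new size through EOF;
	 * truncating to zero also discards cached data under the lock
	 * (CEF_DISCARD_DATA). For timestamp-only changes, the early return
	 * below skips the extent lock unless the new mtime/atime lies
	 * behind ctime, in which case the whole file [0, EOF] is locked.
	 */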
	if (cl_io_is_trunc(io)) {
		new_size = io->u.ci_setattr.sa_attr.lvb_size;
		if (new_size == 0)
			enqflags = CEF_DISCARD_DATA;
	} else {
		if ((io->u.ci_setattr.sa_attr.lvb_mtime >=
		     io->u.ci_setattr.sa_attr.lvb_ctime) ||
		    (io->u.ci_setattr.sa_attr.lvb_atime >=
		     io->u.ci_setattr.sa_attr.lvb_ctime))
			return 0;
		new_size = 0;
	}
	cio->u.setattr.cui_local_lock = SETATTR_EXTENT_LOCK;
	return ccc_io_one_lock(env, io, enqflags, CLM_WRITE,
			       new_size, OBD_OBJECT_EOF);
}

static int vvp_do_vmtruncate(struct inode *inode, size_t size)
{
	int     result;
	/*
	 * Only ll_inode_size_lock is taken at this level.
	 */
	ll_inode_size_lock(inode);
	result = inode_newsize_ok(inode, size);
	if (result < 0) {
		ll_inode_size_unlock(inode);
		return result;
	}
	truncate_setsize(inode, size);
	ll_inode_size_unlock(inode);
	return result;
}

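/*
 * Drain outstanding direct IO before the truncate proceeds:
 * inode_dio_wait() blocks until all in-flight DIO against the inode has
 * completed, so the truncate cannot race with it.
 */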
static int vvp_io_setattr_trunc(const struct lu_env *env,
				const struct cl_io_slice *ios,
				struct inode *inode, loff_t size)
{
	inode_dio_wait(inode);
	return 0;
}

static int vvp_io_setattr_time(const struct lu_env *env,
			       const struct cl_io_slice *ios)
{
	struct cl_io       *io    = ios->cis_io;
	struct cl_object   *obj   = io->ci_obj;
	struct cl_attr     *attr  = ccc_env_thread_attr(env);
	int result;
	unsigned valid = CAT_CTIME;

	cl_object_attr_lock(obj);
	attr->cat_ctime = io->u.ci_setattr.sa_attr.lvb_ctime;
	if (io->u.ci_setattr.sa_valid & ATTR_ATIME_SET) {
		attr->cat_atime = io->u.ci_setattr.sa_attr.lvb_atime;
		valid |= CAT_ATIME;
	}
	if (io->u.ci_setattr.sa_valid & ATTR_MTIME_SET) {
		attr->cat_mtime = io->u.ci_setattr.sa_attr.lvb_mtime;
		valid |= CAT_MTIME;
	}
	result = cl_object_attr_set(env, obj, attr, valid);
	cl_object_attr_unlock(obj);

	return result;
}

static int vvp_io_setattr_start(const struct lu_env *env,
				const struct cl_io_slice *ios)
{
	struct cl_io	*io    = ios->cis_io;
	struct inode	*inode = ccc_object_inode(io->ci_obj);
	int result = 0;

	mutex_lock(&inode->i_mutex);
	if (cl_io_is_trunc(io))
		result = vvp_io_setattr_trunc(env, ios, inode,
					io->u.ci_setattr.sa_attr.lvb_size);
	if (result == 0)
		result = vvp_io_setattr_time(env, ios);
	return result;
}

static void vvp_io_setattr_end(const struct lu_env *env,
			       const struct cl_io_slice *ios)
{
	struct cl_io *io    = ios->cis_io;
	struct inode *inode = ccc_object_inode(io->ci_obj);

	if (cl_io_is_trunc(io))
		/* Truncate the in-memory pages; they must be clean because
		 * osc has already been told to destroy the osc_extents. */
		vvp_do_vmtruncate(inode, io->u.ci_setattr.sa_attr.lvb_size);

	mutex_unlock(&inode->i_mutex);
}

static void vvp_io_setattr_fini(const struct lu_env *env,
				const struct cl_io_slice *ios)
{
	vvp_io_fini(env, ios);
}

static int vvp_io_read_start(const struct lu_env *env,
			     const struct cl_io_slice *ios)
{
	struct vvp_io     *vio   = cl2vvp_io(env, ios);
	struct ccc_io     *cio   = cl2ccc_io(env, ios);
	struct cl_io      *io    = ios->cis_io;
	struct cl_object  *obj   = io->ci_obj;
	struct inode      *inode = ccc_object_inode(obj);
	struct ll_ra_read *bead  = &vio->cui_bead;
	struct file       *file  = cio->cui_fd->fd_file;

	int     result;
	loff_t  pos = io->u.ci_rd.rd.crw_pos;
	long    cnt = io->u.ci_rd.rd.crw_count;
	long    tot = cio->cui_tot_count;
	int     exceed = 0;

	CLOBINVRNT(env, obj, ccc_object_invariant(obj));

	CDEBUG(D_VFSTRACE, "read: -> [%lli, %lli)\n", pos, pos + cnt);

	if (!can_populate_pages(env, io, inode))
		return 0;

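	/* ccc_prep_size() validates the region against the current file
	 * size; as used here it sets "exceed" when the start offset is
	 * already at or past EOF, in which case there is nothing to read.
	 */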
	result = ccc_prep_size(env, obj, io, pos, tot, &exceed);
	if (result != 0)
		return result;
	else if (exceed != 0)
		goto out;

	LU_OBJECT_HEADER(D_INODE, env, &obj->co_lu,
			"Read ino %lu, %lu bytes, offset %lld, size %llu\n",
			inode->i_ino, cnt, pos, i_size_read(inode));

	/* turn off the kernel's read-ahead */
	cio->cui_fd->fd_file->f_ra.ra_pages = 0;

	/* initialize the read-ahead window once per syscall */
	if (!vio->cui_ra_window_set) {
		vio->cui_ra_window_set = 1;
		bead->lrr_start = cl_index(obj, pos);
		/*
		 * XXX: explicit PAGE_CACHE_SIZE
		 */
		bead->lrr_count = cl_index(obj, tot + PAGE_CACHE_SIZE - 1);
		ll_ra_read_in(file, bead);
	}

	/* BUG: 5972 */
	file_accessed(file);
	switch (vio->cui_io_subtype) {
	case IO_NORMAL:
		LASSERT(cio->cui_iocb->ki_pos == pos);
		result = generic_file_read_iter(cio->cui_iocb, cio->cui_iter);
		break;
	case IO_SPLICE:
		result = generic_file_splice_read(file, &pos,
				vio->u.splice.cui_pipe, cnt,
				vio->u.splice.cui_flags);
		/* LU-1109: do the splice read stripe by stripe, otherwise it
		 * may make nfsd stuck if this read occupies all the internal
		 * pipe buffers. */
		io->ci_continue = 0;
		break;
	default:
		CERROR("Wrong IO type %u\n", vio->cui_io_subtype);
		LBUG();
	}

out:
	if (result >= 0) {
		if (result < cnt)
			io->ci_continue = 0;
		io->ci_nob += result;
		ll_rw_stats_tally(ll_i2sbi(inode), current->pid,
				  cio->cui_fd, pos, result, READ);
		result = 0;
	}
	return result;
}

static void vvp_io_read_fini(const struct lu_env *env,
			     const struct cl_io_slice *ios)
{
	struct vvp_io *vio = cl2vvp_io(env, ios);
	struct ccc_io *cio = cl2ccc_io(env, ios);

	if (vio->cui_ra_window_set)
		ll_ra_read_ex(cio->cui_fd->fd_file, &vio->cui_bead);

	vvp_io_fini(env, ios);
}

static int vvp_io_write_start(const struct lu_env *env,
			      const struct cl_io_slice *ios)
{
	struct ccc_io      *cio   = cl2ccc_io(env, ios);
	struct cl_io       *io    = ios->cis_io;
	struct cl_object   *obj   = io->ci_obj;
	struct inode       *inode = ccc_object_inode(obj);
	ssize_t result = 0;
	loff_t pos = io->u.ci_wr.wr.crw_pos;
	size_t cnt = io->u.ci_wr.wr.crw_count;

	if (!can_populate_pages(env, io, inode))
		return 0;

	if (cl_io_is_append(io)) {
		/*
		 * PARALLEL IO: this has to be changed for parallel IO doing
		 * out-of-order writes.
		 */
		pos = io->u.ci_wr.wr.crw_pos = i_size_read(inode);
		cio->cui_iocb->ki_pos = pos;
	} else {
		LASSERT(cio->cui_iocb->ki_pos == pos);
	}

	CDEBUG(D_VFSTRACE, "write: [%lli, %lli)\n", pos, pos + (long long)cnt);

	if (cio->cui_iter == NULL) /* from a temp io in ll_cl_init(). */
		result = 0;
	else
		result = generic_file_write_iter(cio->cui_iocb, cio->cui_iter);

	if (result > 0) {
		if (result < cnt)
			io->ci_continue = 0;
		io->ci_nob += result;
		ll_rw_stats_tally(ll_i2sbi(inode), current->pid,
				  cio->cui_fd, pos, result, WRITE);
		result = 0;
	}
	return result;
}

static int vvp_io_kernel_fault(struct vvp_fault_io *cfio)
{
	struct vm_fault *vmf = cfio->fault.ft_vmf;

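	/*
	 * Hand the fault to the kernel's generic page-cache fault handler
	 * and translate its VM_FAULT_* result below: a returned page means
	 * success (0), SIGBUS/SIGSEGV becomes -EFAULT, OOM becomes
	 * -ENOMEM, and RETRY becomes -EAGAIN.
	 */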
	cfio->fault.ft_flags = filemap_fault(cfio->ft_vma, vmf);
	cfio->fault.ft_flags_valid = 1;

	if (vmf->page) {
		CDEBUG(D_PAGE,
		       "page %p map %p index %lu flags %lx count %u priv %0lx: got addr %p type NOPAGE\n",
		       vmf->page, vmf->page->mapping, vmf->page->index,
		       (long)vmf->page->flags, page_count(vmf->page),
		       page_private(vmf->page), vmf->virtual_address);
		if (unlikely(!(cfio->fault.ft_flags & VM_FAULT_LOCKED))) {
			lock_page(vmf->page);
			cfio->fault.ft_flags |= VM_FAULT_LOCKED;
		}

		cfio->ft_vmpage = vmf->page;
		return 0;
	}

	if (cfio->fault.ft_flags & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV)) {
		CDEBUG(D_PAGE, "got addr %p - SIGBUS\n", vmf->virtual_address);
		return -EFAULT;
	}

	if (cfio->fault.ft_flags & VM_FAULT_OOM) {
		CDEBUG(D_PAGE, "got addr %p - OOM\n", vmf->virtual_address);
		return -ENOMEM;
	}

	if (cfio->fault.ft_flags & VM_FAULT_RETRY)
		return -EAGAIN;

	CERROR("Unknown error in page fault %d!\n", cfio->fault.ft_flags);
	return -EINVAL;
}

static int vvp_io_fault_start(const struct lu_env *env,
			      const struct cl_io_slice *ios)
{
	struct vvp_io       *vio     = cl2vvp_io(env, ios);
	struct cl_io        *io      = ios->cis_io;
	struct cl_object    *obj     = io->ci_obj;
	struct inode        *inode   = ccc_object_inode(obj);
	struct cl_fault_io  *fio     = &io->u.ci_fault;
	struct vvp_fault_io *cfio    = &vio->u.fault;
	loff_t               offset;
	int                  result  = 0;
	struct page         *vmpage  = NULL;
	struct cl_page      *page;
	loff_t               size;
	pgoff_t              last;   /* last page in a file data region */

	if (fio->ft_executable &&
	    inode->i_mtime.tv_sec != vio->u.fault.ft_mtime)
		CWARN("binary "DFID
		      " changed while waiting for the page fault lock\n",
		      PFID(lu_object_fid(&obj->co_lu)));

	/* offset of the last byte on the page */
	offset = cl_offset(obj, fio->ft_index + 1) - 1;
	LASSERT(cl_index(obj, offset) == fio->ft_index);
	result = ccc_prep_size(env, obj, io, 0, offset + 1, NULL);
	if (result != 0)
		return result;

	/* must return a locked page */
	if (fio->ft_mkwrite) {
		LASSERT(cfio->ft_vmpage != NULL);
		lock_page(cfio->ft_vmpage);
	} else {
		result = vvp_io_kernel_fault(cfio);
		if (result != 0)
			return result;
	}

	vmpage = cfio->ft_vmpage;
	LASSERT(PageLocked(vmpage));

	if (OBD_FAIL_CHECK(OBD_FAIL_LLITE_FAULT_TRUNC_RACE))
		ll_invalidate_page(vmpage);

	size = i_size_read(inode);
	/* Though we already hold a cl_lock on this page, it can still be
	 * truncated locally. */
	if (unlikely((vmpage->mapping != inode->i_mapping) ||
		     (page_offset(vmpage) > size))) {
		CDEBUG(D_PAGE, "llite: fault and truncate race happened!\n");

		/* return +1 to stop cl_io_loop(); ll_fault() will catch
		 * this and retry. */
		result = 1;
		goto out;
	}

	if (fio->ft_mkwrite) {
		pgoff_t last_index;
		/*
		 * Capture the size while holding the lli_trunc_sem from
		 * above; we want to make sure that we complete the mkwrite
		 * action while holding this lock. We need to make sure that
		 * we are not past the end of the file.
		 */
		last_index = cl_index(obj, size - 1);
		if (last_index < fio->ft_index) {
			CDEBUG(D_PAGE,
			       "llite: mkwrite and truncate race happened: %p: 0x%lx 0x%lx\n",
			       vmpage->mapping, fio->ft_index, last_index);
			/*
			 * We need to return if we are past the end of the
			 * file. This will propagate up the call stack to
			 * ll_page_mkwrite, where we will return
			 * VM_FAULT_NOPAGE. Any non-negative value returned
			 * here will be silently converted to 0. If
			 * vmpage->mapping is NULL, the error code would be
			 * converted back to ENODATA in ll_page_mkwrite0.
			 * Thus we return -ENODATA to handle both cases.
			 */
			result = -ENODATA;
			goto out;
		}
	}

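	/* find (or create) the cl_page in the clio layers that matches
	 * this vmpage at ft_index */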
	page = cl_page_find(env, obj, fio->ft_index, vmpage, CPT_CACHEABLE);
	if (IS_ERR(page)) {
		result = PTR_ERR(page);
		goto out;
	}

	/* if the page is going to be written, we should add it into the
	 * cache earlier. */
	if (fio->ft_mkwrite) {
		wait_on_page_writeback(vmpage);
		if (set_page_dirty(vmpage)) {
			struct ccc_page *cp;

			/* vvp_page_assume() calls wait_on_page_writeback(). */
			cl_page_assume(env, io, page);

			cp = cl2ccc_page(cl_page_at(page, &vvp_device_type));
			vvp_write_pending(cl2ccc(obj), cp);

			/* Do not set the Dirty bit here, so that if IO is
			 * started before the page is really made dirty, we
			 * still have a chance to detect it. */
			result = cl_page_cache_add(env, io, page, CRT_WRITE);
			LASSERT(cl_page_is_owned(page, io));

			vmpage = NULL;
			if (result < 0) {
				cl_page_unmap(env, io, page);
				cl_page_discard(env, io, page);
				cl_page_disown(env, io, page);

				cl_page_put(env, page);

				/* we're in big trouble, what can we do now? */
				if (result == -EDQUOT)
					result = -ENOSPC;
				goto out;
			} else
				cl_page_disown(env, io, page);
		}
	}

	last = cl_index(obj, size - 1);
	/*
	 * The ft_index is only used in the case of a mkwrite action. We
	 * need to check that our assertions are correct, since we should
	 * have caught this above.
	 */
	LASSERT(!fio->ft_mkwrite || fio->ft_index <= last);
	if (fio->ft_index == last)
		/*
		 * The last page is mapped partially.
		 */
		fio->ft_nob = size - cl_offset(obj, fio->ft_index);
	else
		fio->ft_nob = cl_page_size(obj);

	lu_ref_add(&page->cp_reference, "fault", io);
	fio->ft_page = page;

out:
	/* return an unlocked vmpage to avoid deadlocking */
	if (vmpage != NULL)
		unlock_page(vmpage);
	cfio->fault.ft_flags &= ~VM_FAULT_LOCKED;
	return result;
}


static int vvp_io_fsync_start(const struct lu_env *env,
			      const struct cl_io_slice *ios)
{
	/* we should set the TOWRITE bit on each dirty page in the radix
	 * tree to verify that the pages have been written out, but this
	 * is difficult to do without racing. */
	return 0;
}

static int vvp_io_read_page(const struct lu_env *env,
			    const struct cl_io_slice *ios,
			    const struct cl_page_slice *slice)
{
	struct cl_io              *io     = ios->cis_io;
	struct cl_object          *obj    = slice->cpl_obj;
	struct ccc_page           *cp     = cl2ccc_page(slice);
	struct cl_page            *page   = slice->cpl_page;
	struct inode              *inode  = ccc_object_inode(obj);
	struct ll_sb_info         *sbi    = ll_i2sbi(inode);
	struct ll_file_data       *fd     = cl2ccc_io(env, ios)->cui_fd;
	struct ll_readahead_state *ras    = &fd->fd_ras;
	struct page               *vmpage = cp->cpg_page;
	struct cl_2queue          *queue  = &io->ci_queue;
	int rc;

	CLOBINVRNT(env, obj, ccc_object_invariant(obj));
	LASSERT(slice->cpl_obj == obj);

	if (sbi->ll_ra_info.ra_max_pages_per_file &&
	    sbi->ll_ra_info.ra_max_pages)
		ras_update(sbi, inode, ras, page->cp_index,
			   cp->cpg_defer_uptodate);

	/* Sanity check whether the page is protected by a lock. */
	rc = cl_page_is_under_lock(env, io, page);
	if (rc != -EBUSY) {
		CL_PAGE_HEADER(D_WARNING, env, page, "%s: %d\n",
			       rc == -ENODATA ? "without a lock" :
			       "match failed", rc);
		if (rc != -ENODATA)
			return rc;
	}

	if (cp->cpg_defer_uptodate) {
		cp->cpg_ra_used = 1;
		cl_page_export(env, page, 1);
	}
	/*
	 * Add the page to the queue even when it is marked uptodate above;
	 * this will unlock it automatically as part of
	 * cl_page_list_disown().
	 */
	cl_2queue_add(queue, page);
	if (sbi->ll_ra_info.ra_max_pages_per_file &&
	    sbi->ll_ra_info.ra_max_pages)
		ll_readahead(env, io, ras,
			     vmpage->mapping, &queue->c2_qin, fd->fd_flags);

	return 0;
}

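/*
 * Submit a single page for synchronous IO and wait for it to complete.
 * For reads the page is disowned (and thereby unlocked) afterwards; for
 * writes it stays owned and locked even on error, as the comment inside
 * notes.
 */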
static int vvp_page_sync_io(const struct lu_env *env, struct cl_io *io,
			    struct cl_page *page, struct ccc_page *cp,
			    enum cl_req_type crt)
{
	struct cl_2queue  *queue;
	int result;

	LASSERT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE);

	queue = &io->ci_queue;
	cl_2queue_init_page(queue, page);

	result = cl_io_submit_sync(env, io, crt, queue, 0);
	LASSERT(cl_page_is_owned(page, io));

	if (crt == CRT_READ)
		/*
		 * in the CRT_WRITE case the page is left locked even in
		 * case of error.
		 */
		cl_page_list_disown(env, io, &queue->c2_qin);
	cl_2queue_fini(env, queue);

	return result;
}

/**
 * Prepare a partially written-to page for a write.
 */
static int vvp_io_prepare_partial(const struct lu_env *env, struct cl_io *io,
				  struct cl_object *obj, struct cl_page *pg,
				  struct ccc_page *cp,
				  unsigned from, unsigned to)
{
	struct cl_attr *attr   = ccc_env_thread_attr(env);
	loff_t          offset = cl_offset(obj, pg->cp_index);
	int             result;

	cl_object_attr_lock(obj);
	result = cl_object_attr_get(env, obj, attr);
	cl_object_attr_unlock(obj);
	if (result == 0) {
		/*
		 * If we are writing to a new page, there is no need to
		 * read the old data. The extent locking will have updated
		 * the KMS, and for our purposes here we can treat it like
		 * i_size.
		 */
		if (attr->cat_kms <= offset) {
			char *kaddr = kmap_atomic(cp->cpg_page);

			memset(kaddr, 0, cl_page_size(obj));
			kunmap_atomic(kaddr);
		} else if (cp->cpg_defer_uptodate)
			cp->cpg_ra_used = 1;
		else
			result = vvp_page_sync_io(env, io, pg, cp, CRT_READ);
		/*
		 * In older implementations, obdo_refresh_inode was called
		 * here to update the inode because the write might modify
		 * the object info at the OST. However, this has been proven
		 * useless, since LVB functions will be called when a user
		 * space program tries to retrieve the inode attributes.
		 * Also see bug 15909 for details. -jay
		 */
		if (result == 0)
			cl_page_export(env, pg, 1);
	}
	return result;
}

static int vvp_io_prepare_write(const struct lu_env *env,
				const struct cl_io_slice *ios,
				const struct cl_page_slice *slice,
				unsigned from, unsigned to)
{
	struct cl_object *obj    = slice->cpl_obj;
	struct ccc_page  *cp     = cl2ccc_page(slice);
	struct cl_page   *pg     = slice->cpl_page;
	struct page      *vmpage = cp->cpg_page;

	int result;

	LINVRNT(cl_page_is_vmlocked(env, pg));
	LASSERT(vmpage->mapping->host == ccc_object_inode(obj));

	result = 0;

	CL_PAGE_HEADER(D_PAGE, env, pg, "preparing: [%d, %d]\n", from, to);
	if (!PageUptodate(vmpage)) {
		/*
		 * We're completely overwriting an existing page, so _don't_
		 * set it up to date until commit_write
		 */
		if (from == 0 && to == PAGE_CACHE_SIZE) {
			CL_PAGE_HEADER(D_PAGE, env, pg, "full page write\n");
			POISON_PAGE(page, 0x11);
		} else
			result = vvp_io_prepare_partial(env, ios->cis_io, obj,
							pg, cp, from, to);
	} else
		CL_PAGE_HEADER(D_PAGE, env, pg, "uptodate\n");
	return result;
}

static int vvp_io_commit_write(const struct lu_env *env,
			       const struct cl_io_slice *ios,
			       const struct cl_page_slice *slice,
			       unsigned from, unsigned to)
{
	struct cl_object     *obj    = slice->cpl_obj;
	struct cl_io         *io     = ios->cis_io;
	struct ccc_page      *cp     = cl2ccc_page(slice);
	struct cl_page       *pg     = slice->cpl_page;
	struct inode         *inode  = ccc_object_inode(obj);
	struct ll_sb_info    *sbi    = ll_i2sbi(inode);
	struct ll_inode_info *lli    = ll_i2info(inode);
	struct page          *vmpage = cp->cpg_page;

	int    result;
	int    tallyop;
	loff_t size;

	LINVRNT(cl_page_is_vmlocked(env, pg));
	LASSERT(vmpage->mapping->host == inode);

	LU_OBJECT_HEADER(D_INODE, env, &obj->co_lu, "committing page write\n");
	CL_PAGE_HEADER(D_PAGE, env, pg, "committing: [%d, %d]\n", from, to);

	/*
	 * Queue a write for some time in the future the first time we
	 * dirty the page.
	 *
	 * This is different from what other file systems do: they usually
	 * just mark the page (and some of its buffers) dirty and rely on
	 * balance_dirty_pages() to start a write-back. Lustre wants
	 * write-back to be started earlier for the following reasons:
	 *
	 *     (1) with a large number of clients we need to limit the
	 *     amount of cached data on the clients a lot;
	 *
	 *     (2) large compute jobs generally want compute-only then
	 *     io-only, and the IO should complete as quickly as possible;
	 *
	 *     (3) IO is batched up to the RPC size and is async until the
	 *     client max cache is hit
	 *     (/proc/fs/lustre/osc/OSC.../max_dirty_mb)
	 *
	 */
	if (!PageDirty(vmpage)) {
		tallyop = LPROC_LL_DIRTY_MISSES;
		result = cl_page_cache_add(env, io, pg, CRT_WRITE);
		if (result == 0) {
			/* the page was added into the cache successfully. */
			set_page_dirty(vmpage);
			vvp_write_pending(cl2ccc(obj), cp);
		} else if (result == -EDQUOT) {
			pgoff_t last_index = i_size_read(inode) >> PAGE_CACHE_SHIFT;
			bool need_clip = true;

			/*
			 * The client ran out of disk space grant. Possible
			 * strategies are:
			 *
			 *     (a) do a sync write, renewing the grant;
			 *
			 *     (b) stop writing on this stripe, switch to
			 *     the next one.
			 *
			 * (b) is a part of the "parallel io" design that is
			 * the ultimate goal. (a) is what the "old" client
			 * did, and what the new code continues to do for
			 * the time being.
			 */
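			/*
			 * Decide how much of the page the sync write must
			 * cover: a page entirely below i_size is written
			 * whole, while for the page containing EOF, "to"
			 * is extended up to the EOF offset within the page
			 * so that no valid data is clipped away.
			 */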
			if (last_index > pg->cp_index) {
				to = PAGE_CACHE_SIZE;
				need_clip = false;
			} else if (last_index == pg->cp_index) {
				int size_to = i_size_read(inode) & ~CFS_PAGE_MASK;

				if (to < size_to)
					to = size_to;
			}
			if (need_clip)
				cl_page_clip(env, pg, 0, to);
			result = vvp_page_sync_io(env, io, pg, cp, CRT_WRITE);
			if (result)
				CERROR("Write page %lu of inode %p failed %d\n",
				       pg->cp_index, inode, result);
		}
	} else {
		tallyop = LPROC_LL_DIRTY_HITS;
		result = 0;
	}
	ll_stats_ops_tally(sbi, tallyop, 1);

	/* The inode should be marked DIRTY even if no new page was marked
	 * DIRTY, because the page could have gone unflushed between two
	 * modifications. It is important that the file is marked DIRTY as
	 * soon as the I/O is done; indeed, by the time the cache is
	 * flushed, the file could already be closed and it would be too
	 * late to warn the MDT. It is acceptable for the file to be marked
	 * DIRTY even if the I/O is dropped for some reason before being
	 * flushed to the OST.
	 */
	if (result == 0) {
		spin_lock(&lli->lli_lock);
		lli->lli_flags |= LLIF_DATA_MODIFIED;
		spin_unlock(&lli->lli_lock);
	}

	size = cl_offset(obj, pg->cp_index) + to;

	ll_inode_size_lock(inode);
	if (result == 0) {
		if (size > i_size_read(inode)) {
			cl_isize_write_nolock(inode, size);
			CDEBUG(D_VFSTRACE, DFID" updating i_size %lu\n",
			       PFID(lu_object_fid(&obj->co_lu)),
			       (unsigned long)size);
		}
		cl_page_export(env, pg, 1);
	} else {
		if (size > i_size_read(inode))
			cl_page_discard(env, io, pg);
	}
	ll_inode_size_unlock(inode);
	return result;
}

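/*
 * Per-io-type method table: cl_io dispatches into these slices based on
 * io->ci_type (read, write, setattr, fault, fsync, misc), plus the
 * page-level read/prepare/commit hooks shared by all io types.
 */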
static const struct cl_io_operations vvp_io_ops = {
	.op = {
		[CIT_READ] = {
			.cio_fini      = vvp_io_read_fini,
			.cio_lock      = vvp_io_read_lock,
			.cio_start     = vvp_io_read_start,
			.cio_advance   = ccc_io_advance
		},
		[CIT_WRITE] = {
			.cio_fini      = vvp_io_fini,
			.cio_lock      = vvp_io_write_lock,
			.cio_start     = vvp_io_write_start,
			.cio_advance   = ccc_io_advance
		},
		[CIT_SETATTR] = {
			.cio_fini       = vvp_io_setattr_fini,
			.cio_iter_init  = vvp_io_setattr_iter_init,
			.cio_lock       = vvp_io_setattr_lock,
			.cio_start      = vvp_io_setattr_start,
			.cio_end        = vvp_io_setattr_end
		},
		[CIT_FAULT] = {
			.cio_fini      = vvp_io_fault_fini,
			.cio_iter_init = vvp_io_fault_iter_init,
			.cio_lock      = vvp_io_fault_lock,
			.cio_start     = vvp_io_fault_start,
			.cio_end       = ccc_io_end
		},
		[CIT_FSYNC] = {
			.cio_start  = vvp_io_fsync_start,
			.cio_fini   = vvp_io_fini
		},
		[CIT_MISC] = {
			.cio_fini   = vvp_io_fini
		}
	},
	.cio_read_page     = vvp_io_read_page,
	.cio_prepare_write = vvp_io_prepare_write,
	.cio_commit_write  = vvp_io_commit_write
};

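/*
 * A rough calling sketch (an assumption based on the generic cl_io state
 * machine, not something defined in this file): a caller such as
 * ll_file_io_generic() builds an io and runs the loop, which reaches
 * vvp_io_init() through cl_io_init():
 *
 *	io = ccc_env_thread_io(env);
 *	io->ci_obj = ll_i2info(inode)->lli_clob;
 *	rc = cl_io_rw_init(env, io, CIT_READ, pos, count);
 *	if (rc == 0)
 *		rc = cl_io_loop(env, io);  [lock -> start -> end -> fini]
 *	cl_io_fini(env, io);
 */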
int vvp_io_init(const struct lu_env *env, struct cl_object *obj,
		struct cl_io *io)
{
	struct vvp_io *vio   = vvp_env_io(env);
	struct ccc_io *cio   = ccc_env_io(env);
	struct inode  *inode = ccc_object_inode(obj);
	int result;

	CLOBINVRNT(env, obj, ccc_object_invariant(obj));

	CDEBUG(D_VFSTRACE, DFID
	       " ignore/verify layout %d/%d, layout version %d restore needed %d\n",
	       PFID(lu_object_fid(&obj->co_lu)),
	       io->ci_ignore_layout, io->ci_verify_layout,
	       cio->cui_layout_gen, io->ci_restore_needed);

	CL_IO_SLICE_CLEAN(cio, cui_cl);
	cl_io_slice_add(io, &cio->cui_cl, obj, &vvp_io_ops);
	vio->cui_ra_window_set = 0;
	result = 0;
	if (io->ci_type == CIT_READ || io->ci_type == CIT_WRITE) {
		size_t count;
		struct ll_inode_info *lli = ll_i2info(inode);

		count = io->u.ci_rw.crw_count;
		/* "If nbyte is 0, read() will return 0 and have no other
		 *  results."  -- Single Unix Spec */
		if (count == 0)
			result = 1;
		else
			cio->cui_tot_count = count;

		/* for read/write, we store the jobid in the inode, and
		 * it'll be fetched by osc when building the RPC.
		 *
		 * it's not accurate if the file is shared by different
		 * jobs.
		 */
		lustre_get_jobid(lli->lli_jobid);
	} else if (io->ci_type == CIT_SETATTR) {
		if (!cl_io_is_trunc(io))
			io->ci_lockreq = CILR_MANDATORY;
	}

	/* Ignore layout changes for generic CIT_MISC, but not for glimpse;
	 * the io context for glimpse must set ci_verify_layout to true,
	 * see cl_glimpse_size0() for details. */
	if (io->ci_type == CIT_MISC && !io->ci_verify_layout)
		io->ci_ignore_layout = 1;

	/* Enqueue the layout lock and get the layout version. We need to
	 * do this even for operations that require opening the file, such
	 * as read and write, because the layout lock might not be granted
	 * in IT_OPEN. */
	if (result == 0 && !io->ci_ignore_layout) {
		result = ll_layout_refresh(inode, &cio->cui_layout_gen);
		if (result == -ENOENT)
			/* If the inode on the MDS has been removed, but the
			 * objects on the OSTs haven't been destroyed (async
			 * unlink), the layout fetch will return -ENOENT; we
			 * ignore this error and continue with the dirty
			 * flush. LU-3230. */
			result = 0;
		if (result < 0)
			CERROR("%s: refresh file layout " DFID " error %d.\n",
				ll_get_fsname(inode->i_sb, NULL, 0),
				PFID(lu_object_fid(&obj->co_lu)), result);
	}

	return result;
}

static struct vvp_io *cl2vvp_io(const struct lu_env *env,
				const struct cl_io_slice *slice)
{
	/* called just for the assertion */
	cl2ccc_io(env, slice);
	return vvp_env_io(env);
}