/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2012, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * Client IO.
 *
 * Author: Nikita Danilov <nikita.danilov@sun.com>
 */

#define DEBUG_SUBSYSTEM S_CLASS

#include "../include/obd_class.h"
#include "../include/obd_support.h"
#include "../include/lustre_fid.h"
#include <linux/list.h>
#include "../include/cl_object.h"
#include "cl_internal.h"

/*****************************************************************************
 *
 * cl_io interface.
 *
 */

#define cl_io_for_each(slice, io) \
	list_for_each_entry((slice), &io->ci_layers, cis_linkage)
#define cl_io_for_each_reverse(slice, io) \
	list_for_each_entry_reverse((slice), &io->ci_layers, cis_linkage)

static inline int cl_io_type_is_valid(enum cl_io_type type)
{
	return CIT_READ <= type && type < CIT_OP_NR;
}

static inline int cl_io_is_loopable(const struct cl_io *io)
{
	return cl_io_type_is_valid(io->ci_type) && io->ci_type != CIT_MISC;
}

/**
 * Returns true iff there is an IO ongoing in the given environment.
 */
int cl_io_is_going(const struct lu_env *env)
{
	return cl_env_info(env)->clt_current_io != NULL;
}
EXPORT_SYMBOL(cl_io_is_going);

/**
 * cl_io invariant that holds at all times when exported cl_io_*() functions
 * are entered and left.
 */
static int cl_io_invariant(const struct cl_io *io)
{
	struct cl_io *up;

	up = io->ci_parent;
	return
		/*
		 * io can own pages only when it is ongoing. Sub-io might
		 * still be in CIS_LOCKED state when top-io is in
		 * CIS_IO_GOING.
		 */
		ergo(io->ci_owned_nr > 0, io->ci_state == CIS_IO_GOING ||
		     (io->ci_state == CIS_LOCKED && up != NULL));
}

/**
 * Finalize \a io, by calling cl_io_operations::cio_fini() bottom-to-top.
 */
void cl_io_fini(const struct lu_env *env, struct cl_io *io)
{
	struct cl_io_slice *slice;
	struct cl_thread_info *info;

	LINVRNT(cl_io_type_is_valid(io->ci_type));
	LINVRNT(cl_io_invariant(io));

	while (!list_empty(&io->ci_layers)) {
		slice = container_of(io->ci_layers.prev, struct cl_io_slice,
				     cis_linkage);
		list_del_init(&slice->cis_linkage);
		if (slice->cis_iop->op[io->ci_type].cio_fini != NULL)
			slice->cis_iop->op[io->ci_type].cio_fini(env, slice);
		/*
		 * Invalidate slice to catch use after free. This assumes that
		 * slices are allocated within session and can be touched
		 * after ->cio_fini() returns.
		 */
		slice->cis_io = NULL;
	}
	io->ci_state = CIS_FINI;
	info = cl_env_info(env);
	if (info->clt_current_io == io)
		info->clt_current_io = NULL;

	/* sanity check for layout change */
	switch (io->ci_type) {
	case CIT_READ:
	case CIT_WRITE:
		break;
	case CIT_FAULT:
	case CIT_FSYNC:
		LASSERT(!io->ci_need_restart);
		break;
	case CIT_SETATTR:
	case CIT_MISC:
		/* Check ignore layout change conf */
		LASSERT(ergo(io->ci_ignore_layout || !io->ci_verify_layout,
			     !io->ci_need_restart));
		break;
	default:
		LBUG();
	}
}
EXPORT_SYMBOL(cl_io_fini);

static int cl_io_init0(const struct lu_env *env, struct cl_io *io,
		       enum cl_io_type iot, struct cl_object *obj)
{
	struct cl_object *scan;
	int result;

	LINVRNT(io->ci_state == CIS_ZERO || io->ci_state == CIS_FINI);
	LINVRNT(cl_io_type_is_valid(iot));
	LINVRNT(cl_io_invariant(io));

	io->ci_type = iot;
	INIT_LIST_HEAD(&io->ci_lockset.cls_todo);
	INIT_LIST_HEAD(&io->ci_lockset.cls_curr);
	INIT_LIST_HEAD(&io->ci_lockset.cls_done);
	INIT_LIST_HEAD(&io->ci_layers);

	result = 0;
	cl_object_for_each(scan, obj) {
		if (scan->co_ops->coo_io_init != NULL) {
			result = scan->co_ops->coo_io_init(env, scan, io);
			if (result != 0)
				break;
		}
	}
	if (result == 0)
		io->ci_state = CIS_INIT;
	return result;
}

/**
 * Initialize sub-io, by calling cl_io_operations::cio_init() top-to-bottom.
 *
 * \pre obj != cl_object_top(obj)
 */
int cl_io_sub_init(const struct lu_env *env, struct cl_io *io,
		   enum cl_io_type iot, struct cl_object *obj)
{
	struct cl_thread_info *info = cl_env_info(env);

	LASSERT(obj != cl_object_top(obj));
	if (info->clt_current_io == NULL)
		info->clt_current_io = io;
	return cl_io_init0(env, io, iot, obj);
}
EXPORT_SYMBOL(cl_io_sub_init);

/**
 * Initialize \a io, by calling cl_io_operations::cio_init() top-to-bottom.
 *
 * Caller has to call cl_io_fini() after a call to cl_io_init(), no matter
 * what the latter returned.
 *
 * \pre obj == cl_object_top(obj)
 * \pre cl_io_type_is_valid(iot)
 * \post cl_io_type_is_valid(io->ci_type) && io->ci_type == iot
 */
int cl_io_init(const struct lu_env *env, struct cl_io *io,
	       enum cl_io_type iot, struct cl_object *obj)
{
	struct cl_thread_info *info = cl_env_info(env);

	LASSERT(obj == cl_object_top(obj));
	LASSERT(info->clt_current_io == NULL);

	info->clt_current_io = io;
	return cl_io_init0(env, io, iot, obj);
}
EXPORT_SYMBOL(cl_io_init);

/**
 * Initialize read or write io.
 *
 * \pre iot == CIT_READ || iot == CIT_WRITE
 */
int cl_io_rw_init(const struct lu_env *env, struct cl_io *io,
		  enum cl_io_type iot, loff_t pos, size_t count)
{
	LINVRNT(iot == CIT_READ || iot == CIT_WRITE);
	LINVRNT(io->ci_obj != NULL);

	LU_OBJECT_HEADER(D_VFSTRACE, env, &io->ci_obj->co_lu,
			 "io range: %u [%llu, %llu) %u %u\n",
			 iot, (__u64)pos, (__u64)pos + count,
			 io->u.ci_rw.crw_nonblock, io->u.ci_wr.wr_append);
	io->u.ci_rw.crw_pos = pos;
	io->u.ci_rw.crw_count = count;
	return cl_io_init(env, io, iot, io->ci_obj);
}
EXPORT_SYMBOL(cl_io_rw_init);
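
/*
 * Illustrative sketch only (not part of this file): a read path caller is
 * expected to pair cl_io_rw_init() with cl_io_loop() and cl_io_fini(),
 * calling cl_io_fini() even when initialization fails. The "env", "io",
 * "pos" and "count" variables below are assumed to be set up by the caller.
 *
 *	int rc = cl_io_rw_init(env, io, CIT_READ, pos, count);
 *
 *	if (rc == 0)
 *		rc = cl_io_loop(env, io);
 *	cl_io_fini(env, io);
 */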

static inline const struct lu_fid *
cl_lock_descr_fid(const struct cl_lock_descr *descr)
{
	return lu_object_fid(&descr->cld_obj->co_lu);
}

static int cl_lock_descr_sort(const struct cl_lock_descr *d0,
			      const struct cl_lock_descr *d1)
{
	return lu_fid_cmp(cl_lock_descr_fid(d0), cl_lock_descr_fid(d1)) ?:
		__diff_normalize(d0->cld_start, d1->cld_start);
}

static int cl_lock_descr_cmp(const struct cl_lock_descr *d0,
			     const struct cl_lock_descr *d1)
{
	int ret;

	ret = lu_fid_cmp(cl_lock_descr_fid(d0), cl_lock_descr_fid(d1));
	if (ret)
		return ret;
	if (d0->cld_end < d1->cld_start)
		return -1;
	if (d0->cld_start > d1->cld_end)
		return 1;
	return 0;
}

static void cl_lock_descr_merge(struct cl_lock_descr *d0,
				const struct cl_lock_descr *d1)
{
	d0->cld_start = min(d0->cld_start, d1->cld_start);
	d0->cld_end = max(d0->cld_end, d1->cld_end);

	if (d1->cld_mode == CLM_WRITE && d0->cld_mode != CLM_WRITE)
		d0->cld_mode = CLM_WRITE;

	if (d1->cld_mode == CLM_GROUP && d0->cld_mode != CLM_GROUP)
		d0->cld_mode = CLM_GROUP;
}

/*
 * Sort locks in lexicographical order of their (fid, start-offset) pairs.
 */
static void cl_io_locks_sort(struct cl_io *io)
{
	int done = 0;

	/* hidden treasure: bubble sort for now. */
	do {
		struct cl_io_lock_link *curr;
		struct cl_io_lock_link *prev;
		struct cl_io_lock_link *temp;

		done = 1;
		prev = NULL;

		list_for_each_entry_safe(curr, temp,
					 &io->ci_lockset.cls_todo,
					 cill_linkage) {
			if (prev != NULL) {
				switch (cl_lock_descr_sort(&prev->cill_descr,
							   &curr->cill_descr)) {
				case 0:
					/*
					 * IMPOSSIBLE: Identical locks are
					 * already removed at
					 * this point.
					 */
				default:
					LBUG();
				case 1:
					list_move_tail(&curr->cill_linkage,
						       &prev->cill_linkage);
					done = 0;
					continue; /* don't change prev: it's
						   * still "previous" */
				case -1: /* already in order */
					break;
				}
			}
			prev = curr;
		}
	} while (!done);
}

/**
 * Check whether \a queue contains locks matching \a need.
 *
 * \retval +ve there is a matching lock in the \a queue
 * \retval 0 there are no matching locks in the \a queue
 */
int cl_queue_match(const struct list_head *queue,
		   const struct cl_lock_descr *need)
{
	struct cl_io_lock_link *scan;

	list_for_each_entry(scan, queue, cill_linkage) {
		if (cl_lock_descr_match(&scan->cill_descr, need))
			return 1;
	}
	return 0;
}
EXPORT_SYMBOL(cl_queue_match);

static int cl_queue_merge(const struct list_head *queue,
			  const struct cl_lock_descr *need)
{
	struct cl_io_lock_link *scan;

	list_for_each_entry(scan, queue, cill_linkage) {
		if (cl_lock_descr_cmp(&scan->cill_descr, need))
			continue;
		cl_lock_descr_merge(&scan->cill_descr, need);
		CDEBUG(D_VFSTRACE, "lock: %d: [%lu, %lu]\n",
		       scan->cill_descr.cld_mode, scan->cill_descr.cld_start,
		       scan->cill_descr.cld_end);
		return 1;
	}
	return 0;
}

static int cl_lockset_match(const struct cl_lockset *set,
			    const struct cl_lock_descr *need)
{
	return cl_queue_match(&set->cls_curr, need) ||
	       cl_queue_match(&set->cls_done, need);
}

static int cl_lockset_merge(const struct cl_lockset *set,
			    const struct cl_lock_descr *need)
{
	return cl_queue_merge(&set->cls_todo, need) ||
	       cl_lockset_match(set, need);
}

static int cl_lockset_lock_one(const struct lu_env *env,
			       struct cl_io *io, struct cl_lockset *set,
			       struct cl_io_lock_link *link)
{
	struct cl_lock *lock;
	int result;

	lock = cl_lock_request(env, io, &link->cill_descr, "io", io);

	if (!IS_ERR(lock)) {
		link->cill_lock = lock;
		list_move(&link->cill_linkage, &set->cls_curr);
		if (!(link->cill_descr.cld_enq_flags & CEF_ASYNC)) {
			result = cl_wait(env, lock);
			if (result == 0)
				list_move(&link->cill_linkage,
					  &set->cls_done);
		} else
			result = 0;
	} else
		result = PTR_ERR(lock);
	return result;
}

static void cl_lock_link_fini(const struct lu_env *env, struct cl_io *io,
			      struct cl_io_lock_link *link)
{
	struct cl_lock *lock = link->cill_lock;

	list_del_init(&link->cill_linkage);
	if (lock != NULL) {
		cl_lock_release(env, lock, "io", io);
		link->cill_lock = NULL;
	}
	if (link->cill_fini != NULL)
		link->cill_fini(env, link);
}

static int cl_lockset_lock(const struct lu_env *env, struct cl_io *io,
			   struct cl_lockset *set)
{
	struct cl_io_lock_link *link;
	struct cl_io_lock_link *temp;
	struct cl_lock *lock;
	int result;

	result = 0;
	list_for_each_entry_safe(link, temp, &set->cls_todo, cill_linkage) {
		if (!cl_lockset_match(set, &link->cill_descr)) {
			/* XXX some locking to guarantee that locks aren't
			 * expanded in between. */
			result = cl_lockset_lock_one(env, io, set, link);
			if (result != 0)
				break;
		} else
			cl_lock_link_fini(env, io, link);
	}
	if (result == 0) {
		list_for_each_entry_safe(link, temp,
					 &set->cls_curr, cill_linkage) {
			lock = link->cill_lock;
			result = cl_wait(env, lock);
			if (result == 0)
				list_move(&link->cill_linkage,
					  &set->cls_done);
			else
				break;
		}
	}
	return result;
}

/**
 * Takes locks necessary for the current iteration of io.
 *
 * Calls cl_io_operations::cio_lock() top-to-bottom to collect locks required
 * by layers for the current iteration, then sorts the locks (to avoid
 * deadlocks) and acquires them.
 */
int cl_io_lock(const struct lu_env *env, struct cl_io *io)
{
	const struct cl_io_slice *scan;
	int result = 0;

	LINVRNT(cl_io_is_loopable(io));
	LINVRNT(io->ci_state == CIS_IT_STARTED);
	LINVRNT(cl_io_invariant(io));

	cl_io_for_each(scan, io) {
		if (scan->cis_iop->op[io->ci_type].cio_lock == NULL)
			continue;
		result = scan->cis_iop->op[io->ci_type].cio_lock(env, scan);
		if (result != 0)
			break;
	}
	if (result == 0) {
		cl_io_locks_sort(io);
		result = cl_lockset_lock(env, io, &io->ci_lockset);
	}
	if (result != 0)
		cl_io_unlock(env, io);
	else
		io->ci_state = CIS_LOCKED;
	return result;
}
EXPORT_SYMBOL(cl_io_lock);

/**
 * Releases locks taken by io.
 */
void cl_io_unlock(const struct lu_env *env, struct cl_io *io)
{
	struct cl_lockset *set;
	struct cl_io_lock_link *link;
	struct cl_io_lock_link *temp;
	const struct cl_io_slice *scan;

	LASSERT(cl_io_is_loopable(io));
	LASSERT(CIS_IT_STARTED <= io->ci_state && io->ci_state < CIS_UNLOCKED);
	LINVRNT(cl_io_invariant(io));

	set = &io->ci_lockset;

	list_for_each_entry_safe(link, temp, &set->cls_todo, cill_linkage)
		cl_lock_link_fini(env, io, link);

	list_for_each_entry_safe(link, temp, &set->cls_curr, cill_linkage)
		cl_lock_link_fini(env, io, link);

	list_for_each_entry_safe(link, temp, &set->cls_done, cill_linkage) {
		cl_unuse(env, link->cill_lock);
		cl_lock_link_fini(env, io, link);
	}
	cl_io_for_each_reverse(scan, io) {
		if (scan->cis_iop->op[io->ci_type].cio_unlock != NULL)
			scan->cis_iop->op[io->ci_type].cio_unlock(env, scan);
	}
	io->ci_state = CIS_UNLOCKED;
	LASSERT(!cl_env_info(env)->clt_counters[CNL_TOP].ctc_nr_locks_acquired);
}
EXPORT_SYMBOL(cl_io_unlock);

/**
 * Prepares next iteration of io.
 *
 * Calls cl_io_operations::cio_iter_init() top-to-bottom. This exists to give
 * layers a chance to modify io parameters, e.g., so that lov can restrict io
 * to a single stripe.
 */
int cl_io_iter_init(const struct lu_env *env, struct cl_io *io)
{
	const struct cl_io_slice *scan;
	int result;

	LINVRNT(cl_io_is_loopable(io));
	LINVRNT(io->ci_state == CIS_INIT || io->ci_state == CIS_IT_ENDED);
	LINVRNT(cl_io_invariant(io));

	result = 0;
	cl_io_for_each(scan, io) {
		if (scan->cis_iop->op[io->ci_type].cio_iter_init == NULL)
			continue;
		result = scan->cis_iop->op[io->ci_type].cio_iter_init(env,
								      scan);
		if (result != 0)
			break;
	}
	if (result == 0)
		io->ci_state = CIS_IT_STARTED;
	return result;
}
EXPORT_SYMBOL(cl_io_iter_init);

/**
 * Finalizes io iteration.
 *
 * Calls cl_io_operations::cio_iter_fini() bottom-to-top.
 */
void cl_io_iter_fini(const struct lu_env *env, struct cl_io *io)
{
	const struct cl_io_slice *scan;

	LINVRNT(cl_io_is_loopable(io));
	LINVRNT(io->ci_state == CIS_UNLOCKED);
	LINVRNT(cl_io_invariant(io));

	cl_io_for_each_reverse(scan, io) {
		if (scan->cis_iop->op[io->ci_type].cio_iter_fini != NULL)
			scan->cis_iop->op[io->ci_type].cio_iter_fini(env, scan);
	}
	io->ci_state = CIS_IT_ENDED;
}
EXPORT_SYMBOL(cl_io_iter_fini);

/**
 * Records that read or write io progressed \a nob bytes forward.
 */
static void cl_io_rw_advance(const struct lu_env *env, struct cl_io *io,
			     size_t nob)
{
	const struct cl_io_slice *scan;

	LINVRNT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE ||
		nob == 0);
	LINVRNT(cl_io_is_loopable(io));
	LINVRNT(cl_io_invariant(io));

	io->u.ci_rw.crw_pos += nob;
	io->u.ci_rw.crw_count -= nob;

	/* layers have to be notified. */
	cl_io_for_each_reverse(scan, io) {
		if (scan->cis_iop->op[io->ci_type].cio_advance != NULL)
			scan->cis_iop->op[io->ci_type].cio_advance(env, scan,
								   nob);
	}
}

/**
 * Adds a lock to a lockset.
 */
int cl_io_lock_add(const struct lu_env *env, struct cl_io *io,
		   struct cl_io_lock_link *link)
{
	int result;

	if (cl_lockset_merge(&io->ci_lockset, &link->cill_descr))
		result = 1;
	else {
		list_add(&link->cill_linkage, &io->ci_lockset.cls_todo);
		result = 0;
	}
	return result;
}
EXPORT_SYMBOL(cl_io_lock_add);

static void cl_free_io_lock_link(const struct lu_env *env,
				 struct cl_io_lock_link *link)
{
	kfree(link);
}

/**
 * Allocates new lock link, and uses it to add a lock to a lockset.
 */
int cl_io_lock_alloc_add(const struct lu_env *env, struct cl_io *io,
			 struct cl_lock_descr *descr)
{
	struct cl_io_lock_link *link;
	int result;

	link = kzalloc(sizeof(*link), GFP_NOFS);
	if (link != NULL) {
		link->cill_descr = *descr;
		link->cill_fini = cl_free_io_lock_link;
		result = cl_io_lock_add(env, io, link);
		if (result) /* lock match */
			link->cill_fini(env, link);
	} else
		result = -ENOMEM;

	return result;
}
EXPORT_SYMBOL(cl_io_lock_alloc_add);
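
/*
 * Illustrative sketch only (not part of this file): a layer's ->cio_lock()
 * method would typically describe the extent it needs and queue it through
 * cl_io_lock_alloc_add(). The descriptor fields below are filled with
 * example values; "obj", "start" and "end" are assumed to come from the
 * surrounding layer code.
 *
 *	struct cl_lock_descr descr = {
 *		.cld_obj       = obj,
 *		.cld_start     = start,
 *		.cld_end       = end,
 *		.cld_mode      = CLM_READ,
 *		.cld_enq_flags = 0,
 *	};
 *
 *	return cl_io_lock_alloc_add(env, io, &descr);
 */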

/**
 * Starts io by calling cl_io_operations::cio_start() top-to-bottom.
 */
int cl_io_start(const struct lu_env *env, struct cl_io *io)
{
	const struct cl_io_slice *scan;
	int result = 0;

	LINVRNT(cl_io_is_loopable(io));
	LINVRNT(io->ci_state == CIS_LOCKED);
	LINVRNT(cl_io_invariant(io));

	io->ci_state = CIS_IO_GOING;
	cl_io_for_each(scan, io) {
		if (scan->cis_iop->op[io->ci_type].cio_start == NULL)
			continue;
		result = scan->cis_iop->op[io->ci_type].cio_start(env, scan);
		if (result != 0)
			break;
	}
	if (result >= 0)
		result = 0;
	return result;
}
EXPORT_SYMBOL(cl_io_start);

/**
 * Wait until current io iteration is finished by calling
 * cl_io_operations::cio_end() bottom-to-top.
 */
void cl_io_end(const struct lu_env *env, struct cl_io *io)
{
	const struct cl_io_slice *scan;

	LINVRNT(cl_io_is_loopable(io));
	LINVRNT(io->ci_state == CIS_IO_GOING);
	LINVRNT(cl_io_invariant(io));

	cl_io_for_each_reverse(scan, io) {
		if (scan->cis_iop->op[io->ci_type].cio_end != NULL)
			scan->cis_iop->op[io->ci_type].cio_end(env, scan);
		/* TODO: error handling. */
	}
	io->ci_state = CIS_IO_FINISHED;
}
EXPORT_SYMBOL(cl_io_end);

static const struct cl_page_slice *
cl_io_slice_page(const struct cl_io_slice *ios, struct cl_page *page)
{
	const struct cl_page_slice *slice;

	slice = cl_page_at(page, ios->cis_obj->co_lu.lo_dev->ld_type);
	LINVRNT(slice != NULL);
	return slice;
}

/**
 * True iff \a page is within \a io range.
 */
static int cl_page_in_io(const struct cl_page *page, const struct cl_io *io)
{
	int result = 1;
	loff_t start;
	loff_t end;
	pgoff_t idx;

	idx = page->cp_index;
	switch (io->ci_type) {
	case CIT_READ:
	case CIT_WRITE:
		/*
		 * check that [start, end) and [pos, pos + count) extents
		 * overlap.
		 */
		if (!cl_io_is_append(io)) {
			const struct cl_io_rw_common *crw = &(io->u.ci_rw);

			start = cl_offset(page->cp_obj, idx);
			end = cl_offset(page->cp_obj, idx + 1);
			result = crw->crw_pos < end &&
				 start < crw->crw_pos + crw->crw_count;
		}
		break;
	case CIT_FAULT:
		result = io->u.ci_fault.ft_index == idx;
		break;
	default:
		LBUG();
	}
	return result;
}

/**
 * Called by read io, when page has to be read from the server.
 *
 * \see cl_io_operations::cio_read_page()
 */
int cl_io_read_page(const struct lu_env *env, struct cl_io *io,
		    struct cl_page *page)
{
	const struct cl_io_slice *scan;
	struct cl_2queue *queue;
	int result = 0;

	LINVRNT(io->ci_type == CIT_READ || io->ci_type == CIT_FAULT);
	LINVRNT(cl_page_is_owned(page, io));
	LINVRNT(io->ci_state == CIS_IO_GOING || io->ci_state == CIS_LOCKED);
	LINVRNT(cl_page_in_io(page, io));
	LINVRNT(cl_io_invariant(io));

	queue = &io->ci_queue;

	cl_2queue_init(queue);
	/*
	 * ->cio_read_page() methods called in the loop below are not
	 * supposed to block waiting for the network (the only subtle point
	 * is the creation of new pages for read-ahead that might result in
	 * cache shrinking, but currently only clean pages are shrunk and
	 * this requires no network io).
	 *
	 * Should this ever start blocking, a retry loop would be needed for
	 * "parallel io" (see CLO_REPEAT loops in cl_lock.c).
	 */
	cl_io_for_each(scan, io) {
		if (scan->cis_iop->cio_read_page != NULL) {
			const struct cl_page_slice *slice;

			slice = cl_io_slice_page(scan, page);
			LINVRNT(slice != NULL);
			result = scan->cis_iop->cio_read_page(env, scan, slice);
			if (result != 0)
				break;
		}
	}
	if (result == 0)
		result = cl_io_submit_rw(env, io, CRT_READ, queue);
	/*
	 * Unlock unsent pages in case of error.
	 */
	cl_page_list_disown(env, io, &queue->c2_qin);
	cl_2queue_fini(env, queue);
	return result;
}
EXPORT_SYMBOL(cl_io_read_page);

/**
 * Called by write io to prepare page to receive data from user buffer.
 *
 * \see cl_io_operations::cio_prepare_write()
 */
int cl_io_prepare_write(const struct lu_env *env, struct cl_io *io,
			struct cl_page *page, unsigned from, unsigned to)
{
	const struct cl_io_slice *scan;
	int result = 0;

	LINVRNT(io->ci_type == CIT_WRITE);
	LINVRNT(cl_page_is_owned(page, io));
	LINVRNT(io->ci_state == CIS_IO_GOING || io->ci_state == CIS_LOCKED);
	LINVRNT(cl_io_invariant(io));
	LASSERT(cl_page_in_io(page, io));

	cl_io_for_each_reverse(scan, io) {
		if (scan->cis_iop->cio_prepare_write != NULL) {
			const struct cl_page_slice *slice;

			slice = cl_io_slice_page(scan, page);
			result = scan->cis_iop->cio_prepare_write(env, scan,
								  slice,
								  from, to);
			if (result != 0)
				break;
		}
	}
	return result;
}
EXPORT_SYMBOL(cl_io_prepare_write);

/**
 * Called by write io after user data were copied into a page.
 *
 * \see cl_io_operations::cio_commit_write()
 */
int cl_io_commit_write(const struct lu_env *env, struct cl_io *io,
		       struct cl_page *page, unsigned from, unsigned to)
{
	const struct cl_io_slice *scan;
	int result = 0;

	LINVRNT(io->ci_type == CIT_WRITE);
	LINVRNT(io->ci_state == CIS_IO_GOING || io->ci_state == CIS_LOCKED);
	LINVRNT(cl_io_invariant(io));
	/*
	 * XXX Uh... not nice. Top level cl_io_commit_write() call (vvp->lov)
	 * already called cl_page_cache_add(), moving page into CPS_CACHED
	 * state. Better (and more general) way of dealing with such situation
	 * is needed.
	 */
	LASSERT(cl_page_is_owned(page, io) || page->cp_parent != NULL);
	LASSERT(cl_page_in_io(page, io));

	cl_io_for_each(scan, io) {
		if (scan->cis_iop->cio_commit_write != NULL) {
			const struct cl_page_slice *slice;

			slice = cl_io_slice_page(scan, page);
			result = scan->cis_iop->cio_commit_write(env, scan,
								 slice,
								 from, to);
			if (result != 0)
				break;
		}
	}
	LINVRNT(result <= 0);
	return result;
}
EXPORT_SYMBOL(cl_io_commit_write);

/**
 * Submits a list of pages for immediate io.
 *
 * When this function returns, the submitted pages have been moved to the
 * queue->c2_qout queue; queue->c2_qin contains both the pages that did not
 * need to be submitted and the pages that failed to submit.
 *
 * \returns 0 if at least one page was submitted, error code otherwise.
 * \see cl_io_operations::cio_submit()
 */
int cl_io_submit_rw(const struct lu_env *env, struct cl_io *io,
		    enum cl_req_type crt, struct cl_2queue *queue)
{
	const struct cl_io_slice *scan;
	int result = 0;

	LINVRNT(crt < ARRAY_SIZE(scan->cis_iop->req_op));

	cl_io_for_each(scan, io) {
		if (scan->cis_iop->req_op[crt].cio_submit == NULL)
			continue;
		result = scan->cis_iop->req_op[crt].cio_submit(env, scan, crt,
							       queue);
		if (result != 0)
			break;
	}
	/*
	 * If ->cio_submit() failed, no pages were sent.
	 */
	LASSERT(ergo(result != 0, list_empty(&queue->c2_qout.pl_pages)));
	return result;
}
EXPORT_SYMBOL(cl_io_submit_rw);

/**
 * Submit a sync_io and wait for the IO to finish, or until an error happens.
 * If \a timeout is zero, it means to wait for the IO unconditionally.
 */
int cl_io_submit_sync(const struct lu_env *env, struct cl_io *io,
		      enum cl_req_type iot, struct cl_2queue *queue,
		      long timeout)
{
	struct cl_sync_io *anchor = &cl_env_info(env)->clt_anchor;
	struct cl_page *pg;
	int rc;

	cl_page_list_for_each(pg, &queue->c2_qin) {
		LASSERT(pg->cp_sync_io == NULL);
		pg->cp_sync_io = anchor;
	}

	cl_sync_io_init(anchor, queue->c2_qin.pl_nr);
	rc = cl_io_submit_rw(env, io, iot, queue);
	if (rc == 0) {
		/*
		 * If some pages weren't sent for any reason (e.g.,
		 * read found up-to-date pages in the cache, or write found
		 * clean pages), count them as completed to avoid infinite
		 * wait.
		 */
		cl_page_list_for_each(pg, &queue->c2_qin) {
			pg->cp_sync_io = NULL;
			cl_sync_io_note(anchor, 1);
		}

		/* wait for the IO to be finished. */
		rc = cl_sync_io_wait(env, io, &queue->c2_qout,
				     anchor, timeout);
	} else {
		LASSERT(list_empty(&queue->c2_qout.pl_pages));
		cl_page_list_for_each(pg, &queue->c2_qin)
			pg->cp_sync_io = NULL;
	}
	return rc;
}
EXPORT_SYMBOL(cl_io_submit_sync);
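
/*
 * Illustrative sketch only (not part of this file): a caller that wants to
 * read a single page synchronously could queue it in a 2-queue and submit
 * it through cl_io_submit_sync(). "env", "io" and "page" are assumed to be
 * set up by the caller, with "page" already owned by "io".
 *
 *	struct cl_2queue *queue = &io->ci_queue;
 *	int rc;
 *
 *	cl_2queue_init_page(queue, page);
 *	rc = cl_io_submit_sync(env, io, CRT_READ, queue, 0);
 *	cl_2queue_disown(env, io, queue);
 *	cl_2queue_fini(env, queue);
 */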

/**
 * Cancel an IO which has been submitted by cl_io_submit_rw.
 */
static int cl_io_cancel(const struct lu_env *env, struct cl_io *io,
			struct cl_page_list *queue)
{
	struct cl_page *page;
	int result = 0;

	CERROR("Canceling ongoing page transmission\n");
	cl_page_list_for_each(page, queue) {
		int rc;

		LINVRNT(cl_page_in_io(page, io));
		rc = cl_page_cancel(env, page);
		result = result ?: rc;
	}
	return result;
}

/**
 * Main io loop.
 *
 * Pumps io through iterations calling
 *
 *    - cl_io_iter_init()
 *
 *    - cl_io_lock()
 *
 *    - cl_io_start()
 *
 *    - cl_io_end()
 *
 *    - cl_io_unlock()
 *
 *    - cl_io_iter_fini()
 *
 * repeatedly until there is no more io to do.
 */
int cl_io_loop(const struct lu_env *env, struct cl_io *io)
{
	int result = 0;

	LINVRNT(cl_io_is_loopable(io));

	do {
		size_t nob;

		io->ci_continue = 0;
		result = cl_io_iter_init(env, io);
		if (result == 0) {
			nob = io->ci_nob;
			result = cl_io_lock(env, io);
			if (result == 0) {
				/*
				 * Notify layers that locks have been taken,
				 * and do actual i/o.
				 *
				 * - llite: kms, short read;
				 * - llite: generic_file_read();
				 */
				result = cl_io_start(env, io);
				/*
				 * Send any remaining pending
				 * io, etc.
				 *
				 * - llite: ll_rw_stats_tally.
				 */
				cl_io_end(env, io);
				cl_io_unlock(env, io);
				cl_io_rw_advance(env, io, io->ci_nob - nob);
			}
		}
		cl_io_iter_fini(env, io);
	} while (result == 0 && io->ci_continue);
	if (result == 0)
		result = io->ci_result;
	return result < 0 ? result : 0;
}
EXPORT_SYMBOL(cl_io_loop);

/**
 * Adds io slice to the cl_io.
 *
 * This is called by cl_object_operations::coo_io_init() methods to add a
 * per-layer state to the io. New state is added at the end of
 * cl_io::ci_layers list, that is, it is at the bottom of the stack.
 *
 * \see cl_lock_slice_add(), cl_req_slice_add(), cl_page_slice_add()
 */
void cl_io_slice_add(struct cl_io *io, struct cl_io_slice *slice,
		     struct cl_object *obj,
		     const struct cl_io_operations *ops)
{
	struct list_head *linkage = &slice->cis_linkage;

	LASSERT((linkage->prev == NULL && linkage->next == NULL) ||
		list_empty(linkage));

	list_add_tail(linkage, &io->ci_layers);
	slice->cis_io = io;
	slice->cis_obj = obj;
	slice->cis_iop = ops;
}
EXPORT_SYMBOL(cl_io_slice_add);
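
/*
 * Illustrative sketch only (not part of this file): a layer's
 * cl_object_operations::coo_io_init() method typically embeds a
 * cl_io_slice in its own per-io structure and registers it here. The
 * "my_io" structure, "my_io_ops" table and the way the per-io state is
 * obtained are hypothetical placeholders, not real symbols of this tree.
 *
 *	static int my_io_init(const struct lu_env *env,
 *			      struct cl_object *obj, struct cl_io *io)
 *	{
 *		struct my_io *mio = ...; // per-layer, per-io state
 *
 *		cl_io_slice_add(io, &mio->mi_cl, obj, &my_io_ops);
 *		return 0;
 *	}
 */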

/**
 * Initializes page list.
 */
void cl_page_list_init(struct cl_page_list *plist)
{
	plist->pl_nr = 0;
	INIT_LIST_HEAD(&plist->pl_pages);
	plist->pl_owner = current;
}
EXPORT_SYMBOL(cl_page_list_init);

/**
 * Adds a page to a page list.
 */
void cl_page_list_add(struct cl_page_list *plist, struct cl_page *page)
{
	/* it would be better to check that page is owned by "current" io, but
	 * it is not passed here. */
	LASSERT(page->cp_owner != NULL);
	LINVRNT(plist->pl_owner == current);

	lockdep_off();
	mutex_lock(&page->cp_mutex);
	lockdep_on();
	LASSERT(list_empty(&page->cp_batch));
	list_add_tail(&page->cp_batch, &plist->pl_pages);
	++plist->pl_nr;
	lu_ref_add_at(&page->cp_reference, &page->cp_queue_ref, "queue", plist);
	cl_page_get(page);
}
EXPORT_SYMBOL(cl_page_list_add);

/**
 * Removes a page from a page list.
 */
static void cl_page_list_del(const struct lu_env *env,
			     struct cl_page_list *plist, struct cl_page *page)
{
	LASSERT(plist->pl_nr > 0);
	LINVRNT(plist->pl_owner == current);

	list_del_init(&page->cp_batch);
	lockdep_off();
	mutex_unlock(&page->cp_mutex);
	lockdep_on();
	--plist->pl_nr;
	lu_ref_del_at(&page->cp_reference, &page->cp_queue_ref, "queue", plist);
	cl_page_put(env, page);
}

/**
 * Moves a page from one page list to another.
 */
void cl_page_list_move(struct cl_page_list *dst, struct cl_page_list *src,
		       struct cl_page *page)
{
	LASSERT(src->pl_nr > 0);
	LINVRNT(dst->pl_owner == current);
	LINVRNT(src->pl_owner == current);

	list_move_tail(&page->cp_batch, &dst->pl_pages);
	--src->pl_nr;
	++dst->pl_nr;
	lu_ref_set_at(&page->cp_reference, &page->cp_queue_ref, "queue",
		      src, dst);
}
EXPORT_SYMBOL(cl_page_list_move);

/**
 * Splices one page list onto another, like list_splice() does for list heads.
 */
void cl_page_list_splice(struct cl_page_list *list, struct cl_page_list *head)
{
	struct cl_page *page;
	struct cl_page *tmp;

	LINVRNT(list->pl_owner == current);
	LINVRNT(head->pl_owner == current);

	cl_page_list_for_each_safe(page, tmp, list)
		cl_page_list_move(head, list, page);
}
EXPORT_SYMBOL(cl_page_list_splice);

void cl_page_disown0(const struct lu_env *env,
		     struct cl_io *io, struct cl_page *pg);

/**
 * Disowns pages in a queue.
 */
void cl_page_list_disown(const struct lu_env *env,
			 struct cl_io *io, struct cl_page_list *plist)
{
	struct cl_page *page;
	struct cl_page *temp;

	LINVRNT(plist->pl_owner == current);

	cl_page_list_for_each_safe(page, temp, plist) {
		LASSERT(plist->pl_nr > 0);

		list_del_init(&page->cp_batch);
		lockdep_off();
		mutex_unlock(&page->cp_mutex);
		lockdep_on();
		--plist->pl_nr;
		/*
		 * cl_page_disown0 rather than usual cl_page_disown() is used,
		 * because pages are possibly in CPS_FREEING state already due
		 * to the call to cl_page_list_discard().
		 */
		/*
		 * XXX cl_page_disown0() will fail if page is not locked.
		 */
		cl_page_disown0(env, io, page);
		lu_ref_del_at(&page->cp_reference, &page->cp_queue_ref, "queue",
			      plist);
		cl_page_put(env, page);
	}
}
EXPORT_SYMBOL(cl_page_list_disown);

/**
 * Releases pages from queue.
 */
static void cl_page_list_fini(const struct lu_env *env,
			      struct cl_page_list *plist)
{
	struct cl_page *page;
	struct cl_page *temp;

	LINVRNT(plist->pl_owner == current);

	cl_page_list_for_each_safe(page, temp, plist)
		cl_page_list_del(env, plist, page);
	LASSERT(plist->pl_nr == 0);
}

/**
 * Assumes all pages in a queue.
 */
static void cl_page_list_assume(const struct lu_env *env,
				struct cl_io *io, struct cl_page_list *plist)
{
	struct cl_page *page;

	LINVRNT(plist->pl_owner == current);

	cl_page_list_for_each(page, plist)
		cl_page_assume(env, io, page);
}

/**
 * Discards all pages in a queue.
 */
static void cl_page_list_discard(const struct lu_env *env, struct cl_io *io,
				 struct cl_page_list *plist)
{
	struct cl_page *page;

	LINVRNT(plist->pl_owner == current);
	cl_page_list_for_each(page, plist)
		cl_page_discard(env, io, page);
}

/**
 * Initialize dual page queue.
 */
void cl_2queue_init(struct cl_2queue *queue)
{
	cl_page_list_init(&queue->c2_qin);
	cl_page_list_init(&queue->c2_qout);
}
EXPORT_SYMBOL(cl_2queue_init);

/**
 * Add a page to the incoming page list of 2-queue.
 */
void cl_2queue_add(struct cl_2queue *queue, struct cl_page *page)
{
	cl_page_list_add(&queue->c2_qin, page);
}
EXPORT_SYMBOL(cl_2queue_add);

/**
 * Disown pages in both lists of a 2-queue.
 */
void cl_2queue_disown(const struct lu_env *env,
		      struct cl_io *io, struct cl_2queue *queue)
{
	cl_page_list_disown(env, io, &queue->c2_qin);
	cl_page_list_disown(env, io, &queue->c2_qout);
}
EXPORT_SYMBOL(cl_2queue_disown);

/**
 * Discard (truncate) pages in both lists of a 2-queue.
 */
void cl_2queue_discard(const struct lu_env *env,
		       struct cl_io *io, struct cl_2queue *queue)
{
	cl_page_list_discard(env, io, &queue->c2_qin);
	cl_page_list_discard(env, io, &queue->c2_qout);
}
EXPORT_SYMBOL(cl_2queue_discard);

/**
 * Finalize both page lists of a 2-queue.
 */
void cl_2queue_fini(const struct lu_env *env, struct cl_2queue *queue)
{
	cl_page_list_fini(env, &queue->c2_qout);
	cl_page_list_fini(env, &queue->c2_qin);
}
EXPORT_SYMBOL(cl_2queue_fini);

/**
 * Initialize a 2-queue to contain \a page in its incoming page list.
 */
void cl_2queue_init_page(struct cl_2queue *queue, struct cl_page *page)
{
	cl_2queue_init(queue);
	cl_2queue_add(queue, page);
}
EXPORT_SYMBOL(cl_2queue_init_page);

/**
 * Returns top-level io.
 *
 * \see cl_object_top(), cl_page_top().
 */
struct cl_io *cl_io_top(struct cl_io *io)
{
	while (io->ci_parent != NULL)
		io = io->ci_parent;
	return io;
}
EXPORT_SYMBOL(cl_io_top);

/**
 * Adds request slice to the compound request.
 *
 * This is called by cl_device_operations::cdo_req_init() methods to add a
 * per-layer state to the request. New state is added at the end of
 * cl_req::crq_layers list, that is, it is at the bottom of the stack.
 *
 * \see cl_lock_slice_add(), cl_page_slice_add(), cl_io_slice_add()
 */
void cl_req_slice_add(struct cl_req *req, struct cl_req_slice *slice,
		      struct cl_device *dev,
		      const struct cl_req_operations *ops)
{
	list_add_tail(&slice->crs_linkage, &req->crq_layers);
	slice->crs_dev = dev;
	slice->crs_ops = ops;
	slice->crs_req = req;
}
EXPORT_SYMBOL(cl_req_slice_add);
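
/*
 * Illustrative sketch only (not part of this file): a device's
 * cl_device_operations::cdo_req_init() method typically embeds a
 * cl_req_slice in its own per-request structure and registers it here.
 * "my_req", "my_req_ops" and the allocation step are hypothetical
 * placeholders, not real symbols of this tree.
 *
 *	static int my_req_init(const struct lu_env *env,
 *			       struct cl_device *dev, struct cl_req *req)
 *	{
 *		struct my_req *mr = kzalloc(sizeof(*mr), GFP_NOFS);
 *
 *		if (mr == NULL)
 *			return -ENOMEM;
 *		cl_req_slice_add(req, &mr->mr_cl, dev, &my_req_ops);
 *		return 0;
 *	}
 */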

static void cl_req_free(const struct lu_env *env, struct cl_req *req)
{
	unsigned i;

	LASSERT(list_empty(&req->crq_pages));
	LASSERT(req->crq_nrpages == 0);
	LINVRNT(list_empty(&req->crq_layers));
	LINVRNT(equi(req->crq_nrobjs > 0, req->crq_o != NULL));

	if (req->crq_o != NULL) {
		for (i = 0; i < req->crq_nrobjs; ++i) {
			struct cl_object *obj = req->crq_o[i].ro_obj;

			if (obj != NULL) {
				lu_object_ref_del_at(&obj->co_lu,
						     &req->crq_o[i].ro_obj_ref,
						     "cl_req", req);
				cl_object_put(env, obj);
			}
		}
		kfree(req->crq_o);
	}
	kfree(req);
}

static int cl_req_init(const struct lu_env *env, struct cl_req *req,
		       struct cl_page *page)
{
	struct cl_device *dev;
	struct cl_page_slice *slice;
	int result;

	result = 0;
	page = cl_page_top(page);
	do {
		list_for_each_entry(slice, &page->cp_layers, cpl_linkage) {
			dev = lu2cl_dev(slice->cpl_obj->co_lu.lo_dev);
			if (dev->cd_ops->cdo_req_init != NULL) {
				result = dev->cd_ops->cdo_req_init(env,
								   dev, req);
				if (result != 0)
					break;
			}
		}
		page = page->cp_child;
	} while (page != NULL && result == 0);
	return result;
}

/**
 * Invokes per-request transfer completion call-backs
 * (cl_req_operations::cro_completion()) bottom-to-top.
 */
void cl_req_completion(const struct lu_env *env, struct cl_req *req, int rc)
{
	struct cl_req_slice *slice;

	/*
	 * for the lack of list_for_each_entry_reverse_safe()...
	 */
	while (!list_empty(&req->crq_layers)) {
		slice = list_entry(req->crq_layers.prev,
				   struct cl_req_slice, crs_linkage);
		list_del_init(&slice->crs_linkage);
		if (slice->crs_ops->cro_completion != NULL)
			slice->crs_ops->cro_completion(env, slice, rc);
	}
	cl_req_free(env, req);
}
EXPORT_SYMBOL(cl_req_completion);

/**
 * Allocates new transfer request.
 */
struct cl_req *cl_req_alloc(const struct lu_env *env, struct cl_page *page,
			    enum cl_req_type crt, int nr_objects)
{
	struct cl_req *req;

	LINVRNT(nr_objects > 0);

	req = kzalloc(sizeof(*req), GFP_NOFS);
	if (req != NULL) {
		int result;

		req->crq_type = crt;
		INIT_LIST_HEAD(&req->crq_pages);
		INIT_LIST_HEAD(&req->crq_layers);

		req->crq_o = kcalloc(nr_objects, sizeof(req->crq_o[0]),
				     GFP_NOFS);
		if (req->crq_o != NULL) {
			req->crq_nrobjs = nr_objects;
			result = cl_req_init(env, req, page);
		} else
			result = -ENOMEM;
		if (result != 0) {
			cl_req_completion(env, req, result);
			req = ERR_PTR(result);
		}
	} else
		req = ERR_PTR(-ENOMEM);
	return req;
}
EXPORT_SYMBOL(cl_req_alloc);

/**
 * Adds a page to a request.
 */
void cl_req_page_add(const struct lu_env *env,
		     struct cl_req *req, struct cl_page *page)
{
	struct cl_object *obj;
	struct cl_req_obj *rqo;
	int i;

	page = cl_page_top(page);

	LASSERT(list_empty(&page->cp_flight));
	LASSERT(page->cp_req == NULL);

	CL_PAGE_DEBUG(D_PAGE, env, page, "req %p, %d, %u\n",
		      req, req->crq_type, req->crq_nrpages);

	list_add_tail(&page->cp_flight, &req->crq_pages);
	++req->crq_nrpages;
	page->cp_req = req;
	obj = cl_object_top(page->cp_obj);
	for (i = 0, rqo = req->crq_o; obj != rqo->ro_obj; ++i, ++rqo) {
		if (rqo->ro_obj == NULL) {
			rqo->ro_obj = obj;
			cl_object_get(obj);
			lu_object_ref_add_at(&obj->co_lu, &rqo->ro_obj_ref,
					     "cl_req", req);
			break;
		}
	}
	LASSERT(i < req->crq_nrobjs);
}
EXPORT_SYMBOL(cl_req_page_add);

/**
 * Removes a page from a request.
 */
void cl_req_page_done(const struct lu_env *env, struct cl_page *page)
{
	struct cl_req *req = page->cp_req;

	page = cl_page_top(page);

	LASSERT(!list_empty(&page->cp_flight));
	LASSERT(req->crq_nrpages > 0);

	list_del_init(&page->cp_flight);
	--req->crq_nrpages;
	page->cp_req = NULL;
}
EXPORT_SYMBOL(cl_req_page_done);

/**
 * Notifies layers that request is about to depart by calling
 * cl_req_operations::cro_prep() top-to-bottom.
 */
int cl_req_prep(const struct lu_env *env, struct cl_req *req)
{
	int i;
	int result;
	const struct cl_req_slice *slice;

	/*
	 * Check that the caller of cl_req_alloc() didn't lie about the number
	 * of objects.
	 */
	for (i = 0; i < req->crq_nrobjs; ++i)
		LASSERT(req->crq_o[i].ro_obj != NULL);

	result = 0;
	list_for_each_entry(slice, &req->crq_layers, crs_linkage) {
		if (slice->crs_ops->cro_prep != NULL) {
			result = slice->crs_ops->cro_prep(env, slice);
			if (result != 0)
				break;
		}
	}
	return result;
}
EXPORT_SYMBOL(cl_req_prep);

/**
 * Fills in attributes that are passed to server together with transfer. Only
 * attributes from \a flags may be touched. This can be called multiple times
 * for the same request.
 */
void cl_req_attr_set(const struct lu_env *env, struct cl_req *req,
		     struct cl_req_attr *attr, u64 flags)
{
	const struct cl_req_slice *slice;
	struct cl_page *page;
	int i;

	LASSERT(!list_empty(&req->crq_pages));

	/* Take any page to use as a model. */
	page = list_entry(req->crq_pages.next, struct cl_page, cp_flight);

	for (i = 0; i < req->crq_nrobjs; ++i) {
		list_for_each_entry(slice, &req->crq_layers, crs_linkage) {
			const struct cl_page_slice *scan;
			const struct cl_object *obj;

			scan = cl_page_at(page,
					  slice->crs_dev->cd_lu_dev.ld_type);
			LASSERT(scan != NULL);
			obj = scan->cpl_obj;
			if (slice->crs_ops->cro_attr_set != NULL)
				slice->crs_ops->cro_attr_set(env, slice, obj,
							     attr + i, flags);
		}
	}
}
EXPORT_SYMBOL(cl_req_attr_set);

/* XXX complete(), init_completion(), and wait_for_completion(), until they are
 * implemented in libcfs. */
#include <linux/sched.h>

/**
 * Initialize synchronous io wait anchor, for transfer of \a nrpages pages.
 */
void cl_sync_io_init(struct cl_sync_io *anchor, int nrpages)
{
	init_waitqueue_head(&anchor->csi_waitq);
	atomic_set(&anchor->csi_sync_nr, nrpages);
	atomic_set(&anchor->csi_barrier, nrpages > 0);
	anchor->csi_sync_rc = 0;
}
EXPORT_SYMBOL(cl_sync_io_init);

/**
 * Wait until all transfers complete. The transfer completion routine has to
 * call cl_sync_io_note() for every page.
 */
int cl_sync_io_wait(const struct lu_env *env, struct cl_io *io,
		    struct cl_page_list *queue, struct cl_sync_io *anchor,
		    long timeout)
{
	struct l_wait_info lwi = LWI_TIMEOUT_INTR(cfs_time_seconds(timeout),
						  NULL, NULL, NULL);
	int rc;

	LASSERT(timeout >= 0);

	rc = l_wait_event(anchor->csi_waitq,
			  atomic_read(&anchor->csi_sync_nr) == 0,
			  &lwi);
	if (rc < 0) {
		CERROR("SYNC IO failed with error: %d, try to cancel %d remaining pages\n",
		       rc, atomic_read(&anchor->csi_sync_nr));

		(void)cl_io_cancel(env, io, queue);

		lwi = (struct l_wait_info) { 0 };
		(void)l_wait_event(anchor->csi_waitq,
				   atomic_read(&anchor->csi_sync_nr) == 0,
				   &lwi);
	} else {
		rc = anchor->csi_sync_rc;
	}
	LASSERT(atomic_read(&anchor->csi_sync_nr) == 0);
	cl_page_list_assume(env, io, queue);

	/* wait until cl_sync_io_note() has done wakeup */
	while (unlikely(atomic_read(&anchor->csi_barrier) != 0)) {
		cpu_relax();
	}

	POISON(anchor, 0x5a, sizeof(*anchor));
	return rc;
}
EXPORT_SYMBOL(cl_sync_io_wait);

/**
 * Indicate that transfer of a single page completed.
 */
void cl_sync_io_note(struct cl_sync_io *anchor, int ioret)
{
	if (anchor->csi_sync_rc == 0 && ioret < 0)
		anchor->csi_sync_rc = ioret;
	/*
	 * Synchronous IO done without releasing page lock (e.g., as a part
	 * of ->{prepare,commit}_write()). Completion is used to signal the
	 * end of IO.
	 */
	LASSERT(atomic_read(&anchor->csi_sync_nr) > 0);
	if (atomic_dec_and_test(&anchor->csi_sync_nr)) {
		wake_up_all(&anchor->csi_waitq);
		/* it's safe to nuke or reuse anchor now */
		atomic_set(&anchor->csi_barrier, 0);
	}
}
EXPORT_SYMBOL(cl_sync_io_note);
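
/*
 * Illustrative sketch only (not part of this file): the anchor above is
 * used by pairing cl_sync_io_init() with cl_sync_io_wait(), while the
 * transfer completion path calls cl_sync_io_note() once per page, as
 * cl_io_submit_sync() does. "env", "io" and "queue" are assumed to be a
 * prepared environment, io and 2-queue, with the pages in queue->c2_qin
 * already pointing their cp_sync_io at the anchor.
 *
 *	struct cl_sync_io *anchor = &cl_env_info(env)->clt_anchor;
 *	int rc;
 *
 *	cl_sync_io_init(anchor, queue->c2_qin.pl_nr);
 *	rc = cl_io_submit_rw(env, io, CRT_WRITE, queue);
 *	if (rc == 0)
 *		rc = cl_sync_io_wait(env, io, &queue->c2_qout, anchor, 0);
 */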