/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2012, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * Implementation of cl_object for LOV layer.
 *
 * Author: Nikita Danilov <nikita.danilov@sun.com>
 * Author: Jinshan Xiong <jinshan.xiong@whamcloud.com>
 */

#define DEBUG_SUBSYSTEM S_LOV

#include "lov_cl_internal.h"

/** \addtogroup lov
 * @{
 */

/*****************************************************************************
 *
 * Layout operations.
 *
 */
struct lov_layout_operations {
	int (*llo_init)(const struct lu_env *env, struct lov_device *dev,
			struct lov_object *lov,
			const struct cl_object_conf *conf,
			union lov_layout_state *state);
	int (*llo_delete)(const struct lu_env *env, struct lov_object *lov,
			  union lov_layout_state *state);
	void (*llo_fini)(const struct lu_env *env, struct lov_object *lov,
			 union lov_layout_state *state);
	void (*llo_install)(const struct lu_env *env, struct lov_object *lov,
			    union lov_layout_state *state);
	int (*llo_print)(const struct lu_env *env, void *cookie,
			 lu_printer_t p, const struct lu_object *o);
	int (*llo_page_init)(const struct lu_env *env, struct cl_object *obj,
			     struct cl_page *page, struct page *vmpage);
	int (*llo_lock_init)(const struct lu_env *env,
			     struct cl_object *obj, struct cl_lock *lock,
			     const struct cl_io *io);
	int (*llo_io_init)(const struct lu_env *env,
			   struct cl_object *obj, struct cl_io *io);
	int (*llo_getattr)(const struct lu_env *env, struct cl_object *obj,
			   struct cl_attr *attr);
};

static int lov_layout_wait(const struct lu_env *env, struct lov_object *lov);

/*****************************************************************************
 *
 * Lov object layout operations.
 *
 */

static void lov_install_empty(const struct lu_env *env,
			      struct lov_object *lov,
			      union lov_layout_state *state)
{
	/*
	 * File without objects.
	 */
}

static int lov_init_empty(const struct lu_env *env,
			  struct lov_device *dev, struct lov_object *lov,
			  const struct cl_object_conf *conf,
			  union lov_layout_state *state)
{
	return 0;
}

static void lov_install_raid0(const struct lu_env *env,
			      struct lov_object *lov,
			      union lov_layout_state *state)
{
}

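/**
 * Looks up (or creates) the cl_object for a single stripe, identified by
 * \a fid, on the lovsub device \a dev.
 */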
static struct cl_object *lov_sub_find(const struct lu_env *env,
				      struct cl_device *dev,
				      const struct lu_fid *fid,
				      const struct cl_object_conf *conf)
{
	struct lu_object *o;

	o = lu_object_find_at(env, cl2lu_dev(dev), fid, &conf->coc_lu);
	LASSERT(ergo(!IS_ERR(o), o->lo_dev->ld_type == &lovsub_device_type));
	return lu2cl(o);
}

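/**
 * Attaches the stripe object \a stripe at index \a idx to the raid0 state
 * of \a lov: sets the parent pointer in the stripe's header and records the
 * lovsub object in r0->lo_sub[]. If the stripe is already owned by another
 * lov object, the reference is dropped and an error is returned.
 */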
static int lov_init_sub(const struct lu_env *env, struct lov_object *lov,
			struct cl_object *stripe, struct lov_layout_raid0 *r0,
			int idx)
{
	struct cl_object_header *hdr;
	struct cl_object_header *subhdr;
	struct cl_object_header *parent;
	struct lov_oinfo *oinfo;
	int result;

	if (OBD_FAIL_CHECK(OBD_FAIL_LOV_INIT)) {
		/* For sanity:test_206.
		 * Do not leave the object in cache to avoid accessing
		 * freed memory. This is because osc_object is referring to
		 * lov_oinfo of lsm_stripe_data which will be freed due to
		 * this failure. */
		cl_object_kill(env, stripe);
		cl_object_put(env, stripe);
		return -EIO;
	}

	hdr = cl_object_header(lov2cl(lov));
	subhdr = cl_object_header(stripe);

	oinfo = lov->lo_lsm->lsm_oinfo[idx];
	CDEBUG(D_INODE, DFID"@%p[%d] -> "DFID"@%p: ostid: "DOSTID
	       " idx: %d gen: %d\n",
	       PFID(&subhdr->coh_lu.loh_fid), subhdr, idx,
	       PFID(&hdr->coh_lu.loh_fid), hdr, POSTID(&oinfo->loi_oi),
	       oinfo->loi_ost_idx, oinfo->loi_ost_gen);

	/* reuse ->coh_attr_guard to protect coh_parent change */
	spin_lock(&subhdr->coh_attr_guard);
	parent = subhdr->coh_parent;
	if (parent == NULL) {
		subhdr->coh_parent = hdr;
		spin_unlock(&subhdr->coh_attr_guard);
		subhdr->coh_nesting = hdr->coh_nesting + 1;
		lu_object_ref_add(&stripe->co_lu, "lov-parent", lov);
		r0->lo_sub[idx] = cl2lovsub(stripe);
		r0->lo_sub[idx]->lso_super = lov;
		r0->lo_sub[idx]->lso_index = idx;
		result = 0;
	} else {
		struct lu_object *old_obj;
		struct lov_object *old_lov;
		unsigned int mask = D_INODE;

		spin_unlock(&subhdr->coh_attr_guard);
		old_obj = lu_object_locate(&parent->coh_lu, &lov_device_type);
		LASSERT(old_obj != NULL);
		old_lov = cl2lov(lu2cl(old_obj));
		if (old_lov->lo_layout_invalid) {
			/* the object's layout has already changed but isn't
			 * refreshed */
			lu_object_unhash(env, &stripe->co_lu);
			result = -EAGAIN;
		} else {
			mask = D_ERROR;
			result = -EIO;
		}

		LU_OBJECT_DEBUG(mask, env, &stripe->co_lu,
				"stripe %d is already owned.\n", idx);
		LU_OBJECT_DEBUG(mask, env, old_obj, "owned.\n");
		LU_OBJECT_HEADER(mask, env, lov2lu(lov), "try to own.\n");
		cl_object_put(env, stripe);
	}
	return result;
}

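/**
 * Instantiates the raid0 layout: takes a reference on the striping
 * descriptor (lsm), allocates r0->lo_sub[] and creates one sub-object per
 * stripe via lov_sub_find()/lov_init_sub().
 */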
static int lov_init_raid0(const struct lu_env *env,
			  struct lov_device *dev, struct lov_object *lov,
			  const struct cl_object_conf *conf,
			  union lov_layout_state *state)
{
	int result;
	int i;

	struct cl_object *stripe;
	struct lov_thread_info *lti = lov_env_info(env);
	struct cl_object_conf *subconf = &lti->lti_stripe_conf;
	struct lov_stripe_md *lsm = conf->u.coc_md->lsm;
	struct lu_fid *ofid = &lti->lti_fid;
	struct lov_layout_raid0 *r0 = &state->raid0;

	if (lsm->lsm_magic != LOV_MAGIC_V1 && lsm->lsm_magic != LOV_MAGIC_V3) {
		dump_lsm(D_ERROR, lsm);
		LASSERTF(0, "magic mismatch, expected %d/%d, actual %d.\n",
			 LOV_MAGIC_V1, LOV_MAGIC_V3, lsm->lsm_magic);
	}

	LASSERT(lov->lo_lsm == NULL);
	lov->lo_lsm = lsm_addref(lsm);
	r0->lo_nr = lsm->lsm_stripe_count;
	LASSERT(r0->lo_nr <= lov_targets_nr(dev));

	r0->lo_sub = libcfs_kvzalloc(r0->lo_nr * sizeof(r0->lo_sub[0]),
				     GFP_NOFS);
	if (r0->lo_sub != NULL) {
		result = 0;
		subconf->coc_inode = conf->coc_inode;
		spin_lock_init(&r0->lo_sub_lock);
		/*
		 * Create stripe cl_objects.
		 */
		for (i = 0; i < r0->lo_nr && result == 0; ++i) {
			struct cl_device *subdev;
			struct lov_oinfo *oinfo = lsm->lsm_oinfo[i];
			int ost_idx = oinfo->loi_ost_idx;

			if (lov_oinfo_is_dummy(oinfo))
				continue;

			result = ostid_to_fid(ofid, &oinfo->loi_oi,
					      oinfo->loi_ost_idx);
			if (result != 0)
				goto out;

			subdev = lovsub2cl_dev(dev->ld_target[ost_idx]);
			subconf->u.coc_oinfo = oinfo;
			LASSERTF(subdev != NULL, "not init ost %d\n", ost_idx);
			/* In the function below, .hs_keycmp resolves to
			 * lu_obj_hop_keycmp() */
			/* coverity[overrun-buffer-val] */
			stripe = lov_sub_find(env, subdev, ofid, subconf);
			if (!IS_ERR(stripe)) {
				result = lov_init_sub(env, lov, stripe, r0, i);
				if (result == -EAGAIN) { /* try again */
					--i;
					result = 0;
				}
			} else {
				result = PTR_ERR(stripe);
			}
		}
	} else
		result = -ENOMEM;
out:
	return result;
}

static int lov_init_released(const struct lu_env *env,
			     struct lov_device *dev, struct lov_object *lov,
			     const struct cl_object_conf *conf,
			     union lov_layout_state *state)
{
	struct lov_stripe_md *lsm = conf->u.coc_md->lsm;

	LASSERT(lsm != NULL);
	LASSERT(lsm_is_released(lsm));
	LASSERT(lov->lo_lsm == NULL);

	lov->lo_lsm = lsm_addref(lsm);
	return 0;
}

static int lov_delete_empty(const struct lu_env *env, struct lov_object *lov,
			    union lov_layout_state *state)
{
	LASSERT(lov->lo_type == LLT_EMPTY || lov->lo_type == LLT_RELEASED);

	lov_layout_wait(env, lov);

	cl_object_prune(env, &lov->lo_cl);
	return 0;
}

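/**
 * Drops the stripe sub-object at index \a idx and waits until it has
 * actually been destroyed: lovsub_object_fini() clears the ->lo_sub[] slot,
 * and the lu_site wait-queue is signalled at the end of lu_object_free().
 */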
static void lov_subobject_kill(const struct lu_env *env, struct lov_object *lov,
			       struct lovsub_object *los, int idx)
{
	struct cl_object *sub;
	struct lov_layout_raid0 *r0;
	struct lu_site *site;
	struct lu_site_bkt_data *bkt;
	wait_queue_t *waiter;

	r0 = &lov->u.raid0;
	LASSERT(r0->lo_sub[idx] == los);

	sub = lovsub2cl(los);
	site = sub->co_lu.lo_dev->ld_site;
	bkt = lu_site_bkt_from_fid(site, &sub->co_lu.lo_header->loh_fid);

	cl_object_kill(env, sub);
	/* release a reference to the sub-object and ... */
	lu_object_ref_del(&sub->co_lu, "lov-parent", lov);
	cl_object_put(env, sub);

	/* ... wait until it is actually destroyed---sub-object clears its
	 * ->lo_sub[] slot in lovsub_object_fini() */
	if (r0->lo_sub[idx] == los) {
		waiter = &lov_env_info(env)->lti_waiter;
		init_waitqueue_entry(waiter, current);
		add_wait_queue(&bkt->lsb_marche_funebre, waiter);
		set_current_state(TASK_UNINTERRUPTIBLE);
		while (1) {
			/* this wait-queue is signaled at the end of
			 * lu_object_free(). */
			set_current_state(TASK_UNINTERRUPTIBLE);
			spin_lock(&r0->lo_sub_lock);
			if (r0->lo_sub[idx] == los) {
				spin_unlock(&r0->lo_sub_lock);
				schedule();
			} else {
				spin_unlock(&r0->lo_sub_lock);
				set_current_state(TASK_RUNNING);
				break;
			}
		}
		remove_wait_queue(&bkt->lsb_marche_funebre, waiter);
	}
	LASSERT(r0->lo_sub[idx] == NULL);
}

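/**
 * Deletes the raid0 layout: waits for in-flight IO to drain, prunes cached
 * locks and pages, and kills every stripe sub-object.
 */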
static int lov_delete_raid0(const struct lu_env *env, struct lov_object *lov,
			    union lov_layout_state *state)
{
	struct lov_layout_raid0 *r0 = &state->raid0;
	struct lov_stripe_md *lsm = lov->lo_lsm;
	int i;

	dump_lsm(D_INODE, lsm);

	lov_layout_wait(env, lov);
	if (r0->lo_sub != NULL) {
		for (i = 0; i < r0->lo_nr; ++i) {
			struct lovsub_object *los = r0->lo_sub[i];

			if (los != NULL) {
				cl_locks_prune(env, &los->lso_cl, 1);
				/*
				 * If top-level object is to be evicted from
				 * the cache, so are its sub-objects.
				 */
				lov_subobject_kill(env, lov, los, i);
			}
		}
	}
	cl_object_prune(env, &lov->lo_cl);
	return 0;
}

static void lov_fini_empty(const struct lu_env *env, struct lov_object *lov,
			   union lov_layout_state *state)
{
	LASSERT(lov->lo_type == LLT_EMPTY || lov->lo_type == LLT_RELEASED);
}

static void lov_fini_raid0(const struct lu_env *env, struct lov_object *lov,
			   union lov_layout_state *state)
{
	struct lov_layout_raid0 *r0 = &state->raid0;

	if (r0->lo_sub != NULL) {
		kvfree(r0->lo_sub);
		r0->lo_sub = NULL;
	}

	dump_lsm(D_INODE, lov->lo_lsm);
	lov_free_memmd(&lov->lo_lsm);
}

static void lov_fini_released(const struct lu_env *env, struct lov_object *lov,
			      union lov_layout_state *state)
{
	dump_lsm(D_INODE, lov->lo_lsm);
	lov_free_memmd(&lov->lo_lsm);
}

static int lov_print_empty(const struct lu_env *env, void *cookie,
			   lu_printer_t p, const struct lu_object *o)
{
	(*p)(env, cookie, "empty %d\n", lu2lov(o)->lo_layout_invalid);
	return 0;
}

static int lov_print_raid0(const struct lu_env *env, void *cookie,
			   lu_printer_t p, const struct lu_object *o)
{
	struct lov_object *lov = lu2lov(o);
	struct lov_layout_raid0 *r0 = lov_r0(lov);
	struct lov_stripe_md *lsm = lov->lo_lsm;
	int i;

	(*p)(env, cookie, "stripes: %d, %s, lsm{%p 0x%08X %d %u %u}:\n",
	     r0->lo_nr, lov->lo_layout_invalid ? "invalid" : "valid", lsm,
	     lsm->lsm_magic, atomic_read(&lsm->lsm_refc),
	     lsm->lsm_stripe_count, lsm->lsm_layout_gen);
	for (i = 0; i < r0->lo_nr; ++i) {
		struct lu_object *sub;

		if (r0->lo_sub[i] != NULL) {
			sub = lovsub2lu(r0->lo_sub[i]);
			lu_object_print(env, cookie, p, sub);
		} else {
			(*p)(env, cookie, "sub %d absent\n", i);
		}
	}
	return 0;
}

static int lov_print_released(const struct lu_env *env, void *cookie,
			      lu_printer_t p, const struct lu_object *o)
{
	struct lov_object *lov = lu2lov(o);
	struct lov_stripe_md *lsm = lov->lo_lsm;

	(*p)(env, cookie,
	     "released: %s, lsm{%p 0x%08X %d %u %u}:\n",
	     lov->lo_layout_invalid ? "invalid" : "valid", lsm,
	     lsm->lsm_magic, atomic_read(&lsm->lsm_refc),
	     lsm->lsm_stripe_count, lsm->lsm_layout_gen);
	return 0;
}

/**
 * Implements cl_object_operations::coo_attr_get() method for an object
 * without stripes (LLT_EMPTY layout type).
 *
 * The only attribute this layer is authoritative for in this case is
 * cl_attr::cat_blocks---it is 0.
 */
static int lov_attr_get_empty(const struct lu_env *env, struct cl_object *obj,
			      struct cl_attr *attr)
{
	attr->cat_blocks = 0;
	return 0;
}

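/**
 * Implements cl_object_operations::coo_attr_get() for the raid0 layout:
 * merges the per-stripe size, blocks, KMS and timestamps into \a attr,
 * recomputing them from the striping descriptor only when the cached
 * result in r0->lo_attr is stale.
 */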
static int lov_attr_get_raid0(const struct lu_env *env, struct cl_object *obj,
			      struct cl_attr *attr)
{
	struct lov_object *lov = cl2lov(obj);
	struct lov_layout_raid0 *r0 = lov_r0(lov);
	struct cl_attr *lov_attr = &r0->lo_attr;
	int result = 0;

	/* this is called w/o holding type guard mutex, so it must be inside
	 * an ongoing IO, otherwise lsm may be replaced.
	 * LU-2117: it turns out there exists one exception. For mmapped files,
	 * the lock of those files may be requested in the other file's IO
	 * context, and this function is called in ccc_lock_state(), so it
	 * will hit this assertion.
	 * Anyway, it's still okay to call attr_get w/o type guard as the
	 * layout can't change while locks exist. */
	/* LASSERT(atomic_read(&lsm->lsm_refc) > 1); */

	if (!r0->lo_attr_valid) {
		struct lov_stripe_md *lsm = lov->lo_lsm;
		struct ost_lvb *lvb = &lov_env_info(env)->lti_lvb;
		__u64 kms = 0;

		memset(lvb, 0, sizeof(*lvb));
		/* XXX: timestamps can be negative by sanity:test_39m,
		 * how can it be? */
		lvb->lvb_atime = LLONG_MIN;
		lvb->lvb_ctime = LLONG_MIN;
		lvb->lvb_mtime = LLONG_MIN;

		/*
		 * XXX that should be replaced with a loop over sub-objects,
		 * doing cl_object_attr_get() on them. But for now, let's
		 * reuse old lov code.
		 */

		/*
		 * XXX take lsm spin-lock to keep lov_merge_lvb_kms()
		 * happy. It's not needed, because new code uses
		 * ->coh_attr_guard spin-lock to protect consistency of
		 * sub-object attributes.
		 */
		lov_stripe_lock(lsm);
		result = lov_merge_lvb_kms(lsm, lvb, &kms);
		lov_stripe_unlock(lsm);
		if (result == 0) {
			cl_lvb2attr(lov_attr, lvb);
			lov_attr->cat_kms = kms;
			r0->lo_attr_valid = 1;
		}
	}
	if (result == 0) { /* merge results */
		attr->cat_blocks = lov_attr->cat_blocks;
		attr->cat_size = lov_attr->cat_size;
		attr->cat_kms = lov_attr->cat_kms;
		if (attr->cat_atime < lov_attr->cat_atime)
			attr->cat_atime = lov_attr->cat_atime;
		if (attr->cat_ctime < lov_attr->cat_ctime)
			attr->cat_ctime = lov_attr->cat_ctime;
		if (attr->cat_mtime < lov_attr->cat_mtime)
			attr->cat_mtime = lov_attr->cat_mtime;
	}
	return result;
}

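/*
 * Dispatch table: maps each lov_layout_type to the lov_layout_operations
 * implementing it. LLT_RELEASED reuses the "empty" callbacks wherever no
 * stripe data is involved.
 */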
static const struct lov_layout_operations lov_dispatch[] = {
	[LLT_EMPTY] = {
		.llo_init      = lov_init_empty,
		.llo_delete    = lov_delete_empty,
		.llo_fini      = lov_fini_empty,
		.llo_install   = lov_install_empty,
		.llo_print     = lov_print_empty,
		.llo_page_init = lov_page_init_empty,
		.llo_lock_init = lov_lock_init_empty,
		.llo_io_init   = lov_io_init_empty,
		.llo_getattr   = lov_attr_get_empty
	},
	[LLT_RAID0] = {
		.llo_init      = lov_init_raid0,
		.llo_delete    = lov_delete_raid0,
		.llo_fini      = lov_fini_raid0,
		.llo_install   = lov_install_raid0,
		.llo_print     = lov_print_raid0,
		.llo_page_init = lov_page_init_raid0,
		.llo_lock_init = lov_lock_init_raid0,
		.llo_io_init   = lov_io_init_raid0,
		.llo_getattr   = lov_attr_get_raid0
	},
	[LLT_RELEASED] = {
		.llo_init      = lov_init_released,
		.llo_delete    = lov_delete_empty,
		.llo_fini      = lov_fini_released,
		.llo_install   = lov_install_empty,
		.llo_print     = lov_print_released,
		.llo_page_init = lov_page_init_empty,
		.llo_lock_init = lov_lock_init_empty,
		.llo_io_init   = lov_io_init_released,
		.llo_getattr   = lov_attr_get_empty
	}
};

/**
 * Performs a double-dispatch based on the layout type of an object.
 */
#define LOV_2DISPATCH_NOLOCK(obj, op, ...)				\
({									\
	struct lov_object *__obj = (obj);				\
	enum lov_layout_type __llt;					\
									\
	__llt = __obj->lo_type;						\
	LASSERT(0 <= __llt && __llt < ARRAY_SIZE(lov_dispatch));	\
	lov_dispatch[__llt].op(__VA_ARGS__);				\
})

/**
 * Return lov_layout_type associated with a given lsm
 */
static enum lov_layout_type lov_type(struct lov_stripe_md *lsm)
{
	if (lsm == NULL)
		return LLT_EMPTY;
	if (lsm_is_released(lsm))
		return LLT_RELEASED;
	return LLT_RAID0;
}

static inline void lov_conf_freeze(struct lov_object *lov)
{
	if (lov->lo_owner != current)
		down_read(&lov->lo_type_guard);
}

static inline void lov_conf_thaw(struct lov_object *lov)
{
	if (lov->lo_owner != current)
		up_read(&lov->lo_type_guard);
}

#define LOV_2DISPATCH_MAYLOCK(obj, op, lock, ...)			\
({									\
	struct lov_object *__obj = (obj);				\
	int __lock = !!(lock);						\
	typeof(lov_dispatch[0].op(__VA_ARGS__)) __result;		\
									\
	if (__lock)							\
		lov_conf_freeze(__obj);					\
	__result = LOV_2DISPATCH_NOLOCK(obj, op, __VA_ARGS__);		\
	if (__lock)							\
		lov_conf_thaw(__obj);					\
	__result;							\
})

/**
 * Performs a locked double-dispatch based on the layout type of an object.
 */
#define LOV_2DISPATCH(obj, op, ...)					\
	LOV_2DISPATCH_MAYLOCK(obj, op, 1, __VA_ARGS__)

#define LOV_2DISPATCH_VOID(obj, op, ...)				\
do {									\
	struct lov_object *__obj = (obj);				\
	enum lov_layout_type __llt;					\
									\
	lov_conf_freeze(__obj);						\
	__llt = __obj->lo_type;						\
	LASSERT(0 <= __llt && __llt < ARRAY_SIZE(lov_dispatch));	\
	lov_dispatch[__llt].op(__VA_ARGS__);				\
	lov_conf_thaw(__obj);						\
} while (0)

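/**
 * Takes the layout configuration lock exclusively and records the current
 * task as the owner, so that lov_conf_freeze()/lov_conf_thaw() become no-ops
 * for nested calls made while the configuration is being changed.
 */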
static void lov_conf_lock(struct lov_object *lov)
{
	LASSERT(lov->lo_owner != current);
	down_write(&lov->lo_type_guard);
	LASSERT(lov->lo_owner == NULL);
	lov->lo_owner = current;
}

static void lov_conf_unlock(struct lov_object *lov)
{
	lov->lo_owner = NULL;
	up_write(&lov->lo_type_guard);
}

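/**
 * Blocks until all IO against \a lov has finished (lo_active_ios drops to
 * zero); called before the layout may be deleted or changed.
 */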
static int lov_layout_wait(const struct lu_env *env, struct lov_object *lov)
{
	struct l_wait_info lwi = { 0 };

	while (atomic_read(&lov->lo_active_ios) > 0) {
		CDEBUG(D_INODE, "file:"DFID" wait for active IO, now: %d.\n",
		       PFID(lu_object_fid(lov2lu(lov))),
		       atomic_read(&lov->lo_active_ios));

		l_wait_event(lov->lo_waitq,
			     atomic_read(&lov->lo_active_ios) == 0, &lwi);
	}
	return 0;
}

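/**
 * Switches \a lov from its current layout type to the one described by
 * \a conf: the old layout is deleted and finalized, then the new one is
 * initialized and installed. On failure the object is left as LLT_EMPTY.
 */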
static int lov_layout_change(const struct lu_env *unused,
			     struct lov_object *lov,
			     const struct cl_object_conf *conf)
{
	int result;
	enum lov_layout_type llt = LLT_EMPTY;
	union lov_layout_state *state = &lov->u;
	const struct lov_layout_operations *old_ops;
	const struct lov_layout_operations *new_ops;

	struct cl_object_header *hdr = cl_object_header(&lov->lo_cl);
	void *cookie;
	struct lu_env *env;
	int refcheck;

	LASSERT(0 <= lov->lo_type && lov->lo_type < ARRAY_SIZE(lov_dispatch));

	if (conf->u.coc_md != NULL)
		llt = lov_type(conf->u.coc_md->lsm);
	LASSERT(0 <= llt && llt < ARRAY_SIZE(lov_dispatch));

	cookie = cl_env_reenter();
	env = cl_env_get(&refcheck);
	if (IS_ERR(env)) {
		cl_env_reexit(cookie);
		return PTR_ERR(env);
	}

	CDEBUG(D_INODE, DFID" from %s to %s\n",
	       PFID(lu_object_fid(lov2lu(lov))),
	       llt2str(lov->lo_type), llt2str(llt));

	old_ops = &lov_dispatch[lov->lo_type];
	new_ops = &lov_dispatch[llt];

	result = old_ops->llo_delete(env, lov, &lov->u);
	if (result == 0) {
		old_ops->llo_fini(env, lov, &lov->u);

		LASSERT(atomic_read(&lov->lo_active_ios) == 0);
		LASSERT(hdr->coh_tree.rnode == NULL);
		LASSERT(hdr->coh_pages == 0);

		lov->lo_type = LLT_EMPTY;
		result = new_ops->llo_init(env,
					   lu2lov_dev(lov->lo_cl.co_lu.lo_dev),
					   lov, conf, state);
		if (result == 0) {
			new_ops->llo_install(env, lov, state);
			lov->lo_type = llt;
		} else {
			new_ops->llo_delete(env, lov, state);
			new_ops->llo_fini(env, lov, state);
			/* this file becomes an EMPTY file. */
		}
	}

	cl_env_put(env, &refcheck);
	cl_env_reexit(cookie);
	return result;
}

/*****************************************************************************
 *
 * Lov object operations.
 *
 */
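
/**
 * Implements lu_object_operations::loo_object_init(): determines the layout
 * type from the configuration and delegates to the matching llo_init() and
 * llo_install() methods.
 */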
int lov_object_init(const struct lu_env *env, struct lu_object *obj,
		    const struct lu_object_conf *conf)
{
	struct lov_device *dev = lu2lov_dev(obj->lo_dev);
	struct lov_object *lov = lu2lov(obj);
	const struct cl_object_conf *cconf = lu2cl_conf(conf);
	union lov_layout_state *set = &lov->u;
	const struct lov_layout_operations *ops;
	int result;

	init_rwsem(&lov->lo_type_guard);
	atomic_set(&lov->lo_active_ios, 0);
	init_waitqueue_head(&lov->lo_waitq);

	cl_object_page_init(lu2cl(obj), sizeof(struct lov_page));

	/* no locking is necessary, as object is being created */
	lov->lo_type = lov_type(cconf->u.coc_md->lsm);
	ops = &lov_dispatch[lov->lo_type];
	result = ops->llo_init(env, dev, lov, cconf, set);
	if (result == 0)
		ops->llo_install(env, lov, set);
	return result;
}

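/**
 * Implements cl_object_operations::coo_conf_set(): handles layout
 * invalidation (OBJECT_CONF_INVALIDATE), waiting for active IO to finish
 * (OBJECT_CONF_WAIT) and switching to a new layout (OBJECT_CONF_SET).
 */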
static int lov_conf_set(const struct lu_env *env, struct cl_object *obj,
			const struct cl_object_conf *conf)
{
	struct lov_stripe_md *lsm = NULL;
	struct lov_object *lov = cl2lov(obj);
	int result = 0;

	lov_conf_lock(lov);
	if (conf->coc_opc == OBJECT_CONF_INVALIDATE) {
		lov->lo_layout_invalid = true;
		result = 0;
		goto out;
	}

	if (conf->coc_opc == OBJECT_CONF_WAIT) {
		if (lov->lo_layout_invalid &&
		    atomic_read(&lov->lo_active_ios) > 0) {
			lov_conf_unlock(lov);
			result = lov_layout_wait(env, lov);
			lov_conf_lock(lov);
		}
		goto out;
	}

	LASSERT(conf->coc_opc == OBJECT_CONF_SET);

	if (conf->u.coc_md != NULL)
		lsm = conf->u.coc_md->lsm;
	if ((lsm == NULL && lov->lo_lsm == NULL) ||
	    ((lsm != NULL && lov->lo_lsm != NULL) &&
	     (lov->lo_lsm->lsm_layout_gen == lsm->lsm_layout_gen) &&
	     (lov->lo_lsm->lsm_pattern == lsm->lsm_pattern))) {
		/* same version of layout */
		lov->lo_layout_invalid = false;
		result = 0;
		goto out;
	}

	/* will change layout - check if there still exists active IO. */
	if (atomic_read(&lov->lo_active_ios) > 0) {
		lov->lo_layout_invalid = true;
		result = -EBUSY;
		goto out;
	}

	lov->lo_layout_invalid = lov_layout_change(env, lov, conf);

out:
	lov_conf_unlock(lov);
	CDEBUG(D_INODE, DFID" lo_layout_invalid=%d\n",
	       PFID(lu_object_fid(lov2lu(lov))), lov->lo_layout_invalid);
	return result;
}

static void lov_object_delete(const struct lu_env *env, struct lu_object *obj)
{
	struct lov_object *lov = lu2lov(obj);

	LOV_2DISPATCH_VOID(lov, llo_delete, env, lov, &lov->u);
}

static void lov_object_free(const struct lu_env *env, struct lu_object *obj)
{
	struct lov_object *lov = lu2lov(obj);

	LOV_2DISPATCH_VOID(lov, llo_fini, env, lov, &lov->u);
	lu_object_fini(obj);
	kmem_cache_free(lov_object_kmem, lov);
}

static int lov_object_print(const struct lu_env *env, void *cookie,
			    lu_printer_t p, const struct lu_object *o)
{
	return LOV_2DISPATCH_NOLOCK(lu2lov(o), llo_print, env, cookie, p, o);
}

int lov_page_init(const struct lu_env *env, struct cl_object *obj,
		  struct cl_page *page, struct page *vmpage)
{
	return LOV_2DISPATCH_NOLOCK(cl2lov(obj),
				    llo_page_init, env, obj, page, vmpage);
}

/**
 * Implements cl_object_operations::coo_io_init() method for the lov
 * layer. Dispatches to the appropriate layout io initialization method.
 */
int lov_io_init(const struct lu_env *env, struct cl_object *obj,
		struct cl_io *io)
{
	CL_IO_SLICE_CLEAN(lov_env_io(env), lis_cl);
	return LOV_2DISPATCH_MAYLOCK(cl2lov(obj), llo_io_init,
				     !io->ci_ignore_layout, env, obj, io);
}

/**
 * An implementation of cl_object_operations::coo_attr_get() method for the
 * lov layer. For the raid0 layout this collects and merges attributes of
 * all sub-objects.
 */
static int lov_attr_get(const struct lu_env *env, struct cl_object *obj,
			struct cl_attr *attr)
{
	/* do not take lock, as this function is called under a
	 * spin-lock. Layout is protected from changing by ongoing IO. */
	return LOV_2DISPATCH_NOLOCK(cl2lov(obj), llo_getattr, env, obj, attr);
}

static int lov_attr_set(const struct lu_env *env, struct cl_object *obj,
			const struct cl_attr *attr, unsigned valid)
{
	/*
	 * No dispatch is required here, as no layout implements this.
	 */
	return 0;
}

int lov_lock_init(const struct lu_env *env, struct cl_object *obj,
		  struct cl_lock *lock, const struct cl_io *io)
{
	/* No need to lock because we've taken one refcount of layout. */
	return LOV_2DISPATCH_NOLOCK(cl2lov(obj), llo_lock_init, env, obj, lock,
				    io);
}

static const struct cl_object_operations lov_ops = {
	.coo_page_init = lov_page_init,
	.coo_lock_init = lov_lock_init,
	.coo_io_init   = lov_io_init,
	.coo_attr_get  = lov_attr_get,
	.coo_attr_set  = lov_attr_set,
	.coo_conf_set  = lov_conf_set
};

static const struct lu_object_operations lov_lu_obj_ops = {
	.loo_object_init      = lov_object_init,
	.loo_object_delete    = lov_object_delete,
	.loo_object_release   = NULL,
	.loo_object_free      = lov_object_free,
	.loo_object_print     = lov_object_print,
	.loo_object_invariant = NULL
};

struct lu_object *lov_object_alloc(const struct lu_env *env,
				   const struct lu_object_header *unused,
				   struct lu_device *dev)
{
	struct lov_object *lov;
	struct lu_object *obj;

	lov = kmem_cache_alloc(lov_object_kmem, GFP_NOFS | __GFP_ZERO);
	if (lov != NULL) {
		obj = lov2lu(lov);
		lu_object_init(obj, NULL, dev);
		lov->lo_cl.co_ops = &lov_ops;
		lov->lo_type = -1; /* invalid, to catch uninitialized type */
		/*
		 * object io operation vector (cl_object::co_iop) is installed
		 * later in lov_object_init(), as different vectors are used
		 * for objects with different layouts.
		 */
		obj->lo_ops = &lov_lu_obj_ops;
	} else
		obj = NULL;
	return obj;
}

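/**
 * Returns a new reference on the object's striping descriptor (lsm), or
 * NULL if the object has none, with the layout configuration frozen for
 * the duration of the lookup.
 */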
static struct lov_stripe_md *lov_lsm_addref(struct lov_object *lov)
{
	struct lov_stripe_md *lsm = NULL;

	lov_conf_freeze(lov);
	if (lov->lo_lsm != NULL) {
		lsm = lsm_addref(lov->lo_lsm);
		CDEBUG(D_INODE, "lsm %p addref %d/%d by %p.\n",
		       lsm, atomic_read(&lsm->lsm_refc),
		       lov->lo_layout_invalid, current);
	}
	lov_conf_thaw(lov);
	return lsm;
}

struct lov_stripe_md *lov_lsm_get(struct cl_object *clobj)
{
	struct lu_object *luobj;
	struct lov_stripe_md *lsm = NULL;

	if (clobj == NULL)
		return NULL;

	luobj = lu_object_locate(&cl_object_header(clobj)->coh_lu,
				 &lov_device_type);
	if (luobj != NULL)
		lsm = lov_lsm_addref(lu2lov(luobj));
	return lsm;
}
EXPORT_SYMBOL(lov_lsm_get);

void lov_lsm_put(struct cl_object *unused, struct lov_stripe_md *lsm)
{
	if (lsm != NULL)
		lov_free_memmd(&lsm);
}
EXPORT_SYMBOL(lov_lsm_put);

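/**
 * Collects the first non-zero asynchronous IO error (loi_ar.ar_rc) recorded
 * on any stripe of \a clob and clears the per-stripe error fields.
 */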
int lov_read_and_clear_async_rc(struct cl_object *clob)
{
	struct lu_object *luobj;
	int rc = 0;

	luobj = lu_object_locate(&cl_object_header(clob)->coh_lu,
				 &lov_device_type);
	if (luobj != NULL) {
		struct lov_object *lov = lu2lov(luobj);

		lov_conf_freeze(lov);
		switch (lov->lo_type) {
		case LLT_RAID0: {
			struct lov_stripe_md *lsm;
			int i;

			lsm = lov->lo_lsm;
			LASSERT(lsm != NULL);
			for (i = 0; i < lsm->lsm_stripe_count; i++) {
				struct lov_oinfo *loi = lsm->lsm_oinfo[i];

				if (lov_oinfo_is_dummy(loi))
					continue;

				if (loi->loi_ar.ar_rc && !rc)
					rc = loi->loi_ar.ar_rc;
				loi->loi_ar.ar_rc = 0;
			}
		}
		case LLT_RELEASED:
		case LLT_EMPTY:
			break;
		default:
			LBUG();
		}
		lov_conf_thaw(lov);
	}
	return rc;
}
EXPORT_SYMBOL(lov_read_and_clear_async_rc);

/** @} lov */