/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2012, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * cl_object implementation for VVP layer.
 *
 * Author: Nikita Danilov <nikita.danilov@sun.com>
 */

#define DEBUG_SUBSYSTEM S_LLITE

#include "../../include/linux/libcfs/libcfs.h"

#include "../include/obd.h"
#include "../include/lustre_lite.h"

#include "vvp_internal.h"

/*****************************************************************************
 *
 * Object operations.
 *
 */

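/* Debugging helper: print the state of a VVP object (pending list,
 * transient pages, mmap count) and, when present, its backing inode. */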
static int vvp_object_print(const struct lu_env *env, void *cookie,
			    lu_printer_t p, const struct lu_object *o)
{
	struct ccc_object *obj = lu2ccc(o);
	struct inode *inode = obj->cob_inode;
	struct ll_inode_info *lli;

	(*p)(env, cookie, "(%s %d %d) inode: %p ",
	     list_empty(&obj->cob_pending_list) ? "-" : "+",
	     obj->cob_transient_pages, atomic_read(&obj->cob_mmap_cnt),
	     inode);
	if (inode) {
		lli = ll_i2info(inode);
		(*p)(env, cookie, "%lu/%u %o %u %d %p "DFID,
		     inode->i_ino, inode->i_generation, inode->i_mode,
		     inode->i_nlink, atomic_read(&inode->i_count),
		     lli->lli_clob, PFID(&lli->lli_fid));
	}
	return 0;
}

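/* Fill the cl_attr from the attributes currently cached in the VFS inode. */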
static int vvp_attr_get(const struct lu_env *env, struct cl_object *obj,
			struct cl_attr *attr)
{
	struct inode *inode = ccc_object_inode(obj);

	/*
	 * lov overwrites most of these fields in
	 * lov_attr_get()->...lov_merge_lvb_kms(), except when inode
	 * attributes are newer.
	 */

	attr->cat_size = i_size_read(inode);
	attr->cat_mtime = inode->i_mtime.tv_sec;
	attr->cat_atime = inode->i_atime.tv_sec;
	attr->cat_ctime = inode->i_ctime.tv_sec;
	attr->cat_blocks = inode->i_blocks;
	attr->cat_uid = from_kuid(&init_user_ns, inode->i_uid);
	attr->cat_gid = from_kgid(&init_user_ns, inode->i_gid);
	/* KMS is not known by this layer */
	return 0; /* layers below have to fill in the rest */
}

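/* Apply the cl_attr fields selected by the valid mask to the VFS inode. */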
static int vvp_attr_set(const struct lu_env *env, struct cl_object *obj,
			const struct cl_attr *attr, unsigned valid)
{
	struct inode *inode = ccc_object_inode(obj);

	if (valid & CAT_UID)
		inode->i_uid = make_kuid(&init_user_ns, attr->cat_uid);
	if (valid & CAT_GID)
		inode->i_gid = make_kgid(&init_user_ns, attr->cat_gid);
	if (valid & CAT_ATIME)
		inode->i_atime.tv_sec = attr->cat_atime;
	if (valid & CAT_MTIME)
		inode->i_mtime.tv_sec = attr->cat_mtime;
	if (valid & CAT_CTIME)
		inode->i_ctime.tv_sec = attr->cat_ctime;
	if (0 && valid & CAT_SIZE)
		cl_isize_write_nolock(inode, attr->cat_size);
	/* not currently necessary */
	if (0 && valid & (CAT_UID|CAT_GID|CAT_SIZE))
		mark_inode_dirty(inode);
	return 0;
}

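/* React to object configuration changes: drop cached layout state when the
 * layout lock is lost (OBJECT_CONF_INVALIDATE), or record the new layout
 * generation when a layout is set (OBJECT_CONF_SET). */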
static int vvp_conf_set(const struct lu_env *env, struct cl_object *obj,
			const struct cl_object_conf *conf)
{
	struct ll_inode_info *lli = ll_i2info(conf->coc_inode);

	if (conf->coc_opc == OBJECT_CONF_INVALIDATE) {
		CDEBUG(D_VFSTRACE, DFID ": losing layout lock\n",
		       PFID(&lli->lli_fid));

		ll_layout_version_set(lli, LL_LAYOUT_GEN_NONE);

		/* Clean up the mmapped pages of this inode.  Once a page
		 * has been installed into a process address space, the
		 * process can access it without going through Lustre, so
		 * the page may be stale after a layout change and the
		 * process would never be notified.
		 * This operation is expensive, but that is a price mmap
		 * users have to pay. */
		unmap_mapping_range(conf->coc_inode->i_mapping,
				    0, OBD_OBJECT_EOF, 0);

		return 0;
	}

	if (conf->coc_opc != OBJECT_CONF_SET)
		return 0;

	if (conf->u.coc_md != NULL && conf->u.coc_md->lsm != NULL) {
		CDEBUG(D_VFSTRACE, DFID ": layout version change: %u -> %u\n",
		       PFID(&lli->lli_fid), lli->lli_layout_gen,
		       conf->u.coc_md->lsm->lsm_layout_gen);

		lli->lli_has_smd = lsm_has_objects(conf->u.coc_md->lsm);
		ll_layout_version_set(lli, conf->u.coc_md->lsm->lsm_layout_gen);
	} else {
		CDEBUG(D_VFSTRACE, DFID ": layout nuked: %u.\n",
		       PFID(&lli->lli_fid), lli->lli_layout_gen);

		lli->lli_has_smd = false;
		ll_layout_version_set(lli, LL_LAYOUT_GEN_EMPTY);
	}
	return 0;
}

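/* cl_object operations implemented by the VVP layer. */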
static const struct cl_object_operations vvp_ops = {
	.coo_page_init = vvp_page_init,
	.coo_lock_init = vvp_lock_init,
	.coo_io_init = vvp_io_init,
	.coo_attr_get = vvp_attr_get,
	.coo_attr_set = vvp_attr_set,
	.coo_conf_set = vvp_conf_set,
	.coo_glimpse = ccc_object_glimpse
};

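/* lu_object operations implemented by the VVP layer. */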
static const struct lu_object_operations vvp_lu_obj_ops = {
	.loo_object_init = ccc_object_init,
	.loo_object_free = ccc_object_free,
	.loo_object_print = vvp_object_print
};

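/* Return the VVP (ccc_object) slice of the cl_object attached to an inode.
 * The inode is expected to already have a cl_object. */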
struct ccc_object *cl_inode2ccc(struct inode *inode)
{
	struct cl_inode_info *lli = cl_i2info(inode);
	struct cl_object *obj = lli->lli_clob;
	struct lu_object *lu;

	LASSERT(obj != NULL);
	lu = lu_object_locate(obj->co_lu.lo_header, &vvp_device_type);
	LASSERT(lu != NULL);
	return lu2ccc(lu);
}

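/* Allocate a VVP-layer lu_object, wiring in the operation vectors above. */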
struct lu_object *vvp_object_alloc(const struct lu_env *env,
				   const struct lu_object_header *hdr,
				   struct lu_device *dev)
{
	return ccc_object_alloc(env, hdr, dev, &vvp_ops, &vvp_lu_obj_ops);
}