/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2012, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * Definitions shared between vvp and liblustre, and other clients in the
 * future.
 *
 * Author: Oleg Drokin <oleg.drokin@sun.com>
 * Author: Nikita Danilov <nikita.danilov@sun.com>
 */

#ifndef LCLIENT_H
#define LCLIENT_H
blkcnt_t dirty_cnt(struct inode *inode);

int cl_glimpse_size0(struct inode *inode, int agl);
int cl_glimpse_lock(const struct lu_env *env, struct cl_io *io,
		    struct inode *inode, struct cl_object *clob, int agl);

static inline int cl_glimpse_size(struct inode *inode)
{
	return cl_glimpse_size0(inode, 0);
}

static inline int cl_agl(struct inode *inode)
{
	return cl_glimpse_size0(inode, 1);
}
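
/*
 * Usage sketch (illustrative, not part of the original header): a caller
 * that needs an up-to-date i_size, e.g. a stat(2) path, issues a glimpse
 * first; cl_agl() is the asynchronous-glimpse-lock variant.
 *
 *	rc = cl_glimpse_size(inode);
 *	if (rc != 0)
 *		return rc;
 *	size = i_size_read(inode);
 */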

/**
 * Locking policy for setattr.
 */
enum ccc_setattr_lock_type {
	/** Locking is done by server */
	SETATTR_NOLOCK,
	/** Extent lock is enqueued */
	SETATTR_EXTENT_LOCK,
	/** Existing local extent lock is used */
	SETATTR_MATCH_LOCK
};
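
/*
 * Illustrative sketch (assumed logic, not from the original header): a
 * setattr path records its locking policy in the io state before the io
 * starts; "cio" and "server_locks" here are hypothetical:
 *
 *	cio->u.setattr.cui_local_lock = server_locks ?
 *					SETATTR_NOLOCK : SETATTR_EXTENT_LOCK;
 */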

/**
 * IO state private to vvp or slp layers.
 */
struct ccc_io {
	/** super class */
	struct cl_io_slice	cui_cl;
	struct cl_io_lock_link	cui_link;
	/**
	 * I/O vector information from or to which the read/write is going.
	 */
	struct iov_iter		*cui_iter;
	/**
	 * Total size of the remaining IO.
	 */
	size_t			cui_tot_count;

	union {
		struct {
			enum ccc_setattr_lock_type cui_local_lock;
		} setattr;
	} u;
	/**
	 * True iff io is processing glimpse right now.
	 */
	int			cui_glimpse;
	/**
	 * Layout version when this IO was initialized.
	 */
	__u32			cui_layout_gen;
	/**
	 * File descriptor against which IO is done.
	 */
	struct ll_file_data	*cui_fd;
	struct kiocb		*cui_iocb;
};
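
/*
 * Initialization sketch (assumed; "fd", "iter", "iocb" and "count" are
 * hypothetical locals of a read/write entry point) showing how the fields
 * above are typically filled in before the io loop runs:
 *
 *	struct ccc_io *cio = ccc_env_io(env);
 *
 *	cio->cui_fd	   = fd;
 *	cio->cui_iter	   = iter;
 *	cio->cui_iocb	   = iocb;
 *	cio->cui_tot_count = count;
 */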

/**
 * True if \a io is a normal io, false for splice_{read,write}.
 * Must be implemented in arch-specific code.
 */
int cl_is_normalio(const struct lu_env *env, const struct cl_io *io);

extern struct lu_context_key ccc_key;
extern struct lu_context_key ccc_session_key;

struct ccc_thread_info {
	struct cl_lock_descr	cti_descr;
	struct cl_io		cti_io;
	struct cl_attr		cti_attr;
};

static inline struct ccc_thread_info *ccc_env_info(const struct lu_env *env)
{
	struct ccc_thread_info *info;

	info = lu_context_key_get(&env->le_ctx, &ccc_key);
	LASSERT(info != NULL);
	return info;
}

static inline struct cl_attr *ccc_env_thread_attr(const struct lu_env *env)
{
	struct cl_attr *attr = &ccc_env_info(env)->cti_attr;

	memset(attr, 0, sizeof(*attr));
	return attr;
}

static inline struct cl_io *ccc_env_thread_io(const struct lu_env *env)
{
	struct cl_io *io = &ccc_env_info(env)->cti_io;

	memset(io, 0, sizeof(*io));
	return io;
}
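
/*
 * A condensed sketch of how the per-thread io slot is used (error
 * handling abbreviated; "clob" is the cl_object being operated on):
 *
 *	struct cl_io *io = ccc_env_thread_io(env);
 *	int result;
 *
 *	io->ci_obj = clob;
 *	result = cl_io_init(env, io, CIT_MISC, clob);
 *	if (result == 0)
 *		result = cl_io_loop(env, io);
 *	cl_io_fini(env, io);
 */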

struct ccc_session {
	struct ccc_io cs_ios;
};

static inline struct ccc_session *ccc_env_session(const struct lu_env *env)
{
	struct ccc_session *ses;

	ses = lu_context_key_get(env->le_ses, &ccc_session_key);
	LASSERT(ses != NULL);
	return ses;
}

static inline struct ccc_io *ccc_env_io(const struct lu_env *env)
{
	return &ccc_env_session(env)->cs_ios;
}
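
/*
 * io methods of the vvp/slp layers recover their private state through
 * the session; a minimal sketch:
 *
 *	struct ccc_io *cio = ccc_env_io(env);
 *	size_t left = cio->cui_tot_count;
 */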

/**
 * ccc-private object state.
 */
struct ccc_object {
	struct cl_object_header	cob_header;
	struct cl_object	cob_cl;
	struct inode		*cob_inode;

	/**
	 * A list of dirty pages pending IO in the cache. Used by
	 * SOM. Protected by ll_inode_info::lli_lock.
	 *
	 * \see ccc_page::cpg_pending_linkage
	 */
	struct list_head	cob_pending_list;

	/**
	 * Access to this counter is protected by inode->i_sem. Now that
	 * the lifetime of transient pages must be covered by the inode
	 * semaphore, we don't need to hold any additional lock.
	 */
	int			cob_transient_pages;
	/**
	 * Number of outstanding mmaps on this file.
	 *
	 * \see ll_vm_open(), ll_vm_close().
	 */
	atomic_t		cob_mmap_cnt;

	/**
	 * Various flags.
	 *
	 * cob_discard_page_warned
	 *     If pages belonging to this object are discarded when a client
	 *     is evicted, some debug info is printed. This flag is set while
	 *     processing the first discarded page, to avoid flooding the
	 *     debug log with messages for every subsequent discarded page.
	 *
	 * \see ll_dirty_page_discard_warn.
	 */
	unsigned int		cob_discard_page_warned:1;
};

/**
 * ccc-private page state.
 */
struct ccc_page {
	struct cl_page_slice	cpg_cl;
	int			cpg_defer_uptodate;
	int			cpg_ra_used;
	int			cpg_write_queued;
	/**
	 * Non-empty iff this page is already counted in
	 * ccc_object::cob_pending_list. Protected by
	 * ccc_object::cob_pending_guard. This list is only used as a flag,
	 * that is, never iterated through, only checked for list_empty(), but
	 * having a list is useful for debugging.
	 */
	struct list_head	cpg_pending_linkage;
	/** VM page */
	struct page		*cpg_page;
};

static inline struct ccc_page *cl2ccc_page(const struct cl_page_slice *slice)
{
	return container_of(slice, struct ccc_page, cpg_cl);
}
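
/*
 * Usage sketch: a page operation that receives the generic
 * cl_page_slice converts it back to the ccc/VM state this way:
 *
 *	struct ccc_page *cp = cl2ccc_page(slice);
 *	struct page *vmpage = cp->cpg_page;
 */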

struct ccc_device {
	struct cl_device	cdv_cl;
	struct super_block	*cdv_sb;
	struct cl_device	*cdv_next;
};

struct ccc_lock {
	struct cl_lock_slice clk_cl;
};

struct ccc_req {
	struct cl_req_slice crq_cl;
};

void *ccc_key_init(const struct lu_context *ctx,
		   struct lu_context_key *key);
void ccc_key_fini(const struct lu_context *ctx,
		  struct lu_context_key *key, void *data);
void *ccc_session_key_init(const struct lu_context *ctx,
			   struct lu_context_key *key);
void ccc_session_key_fini(const struct lu_context *ctx,
			  struct lu_context_key *key, void *data);

int ccc_device_init(const struct lu_env *env,
		    struct lu_device *d,
		    const char *name, struct lu_device *next);
struct lu_device *ccc_device_fini(const struct lu_env *env,
				  struct lu_device *d);
struct lu_device *ccc_device_alloc(const struct lu_env *env,
				   struct lu_device_type *t,
				   struct lustre_cfg *cfg,
				   const struct lu_device_operations *luops,
				   const struct cl_device_operations *clops);
struct lu_device *ccc_device_free(const struct lu_env *env,
				  struct lu_device *d);
struct lu_object *ccc_object_alloc(const struct lu_env *env,
				   const struct lu_object_header *hdr,
				   struct lu_device *dev,
				   const struct cl_object_operations *clops,
				   const struct lu_object_operations *luops);

int ccc_req_init(const struct lu_env *env, struct cl_device *dev,
		 struct cl_req *req);
void ccc_umount(const struct lu_env *env, struct cl_device *dev);
int ccc_global_init(struct lu_device_type *device_type);
void ccc_global_fini(struct lu_device_type *device_type);
int ccc_object_init0(const struct lu_env *env, struct ccc_object *vob,
		     const struct cl_object_conf *conf);
int ccc_object_init(const struct lu_env *env, struct lu_object *obj,
		    const struct lu_object_conf *conf);
void ccc_object_free(const struct lu_env *env, struct lu_object *obj);
int ccc_lock_init(const struct lu_env *env, struct cl_object *obj,
		  struct cl_lock *lock, const struct cl_io *io,
		  const struct cl_lock_operations *lkops);
int ccc_object_glimpse(const struct lu_env *env,
		       const struct cl_object *obj, struct ost_lvb *lvb);
struct page *ccc_page_vmpage(const struct lu_env *env,
			     const struct cl_page_slice *slice);
int ccc_page_is_under_lock(const struct lu_env *env,
			   const struct cl_page_slice *slice, struct cl_io *io);
int ccc_fail(const struct lu_env *env, const struct cl_page_slice *slice);
int ccc_transient_page_prep(const struct lu_env *env,
			    const struct cl_page_slice *slice,
			    struct cl_io *io);
void ccc_lock_delete(const struct lu_env *env,
		     const struct cl_lock_slice *slice);
void ccc_lock_fini(const struct lu_env *env, struct cl_lock_slice *slice);
int ccc_lock_enqueue(const struct lu_env *env,
		     const struct cl_lock_slice *slice,
		     struct cl_io *io, __u32 enqflags);
int ccc_lock_use(const struct lu_env *env, const struct cl_lock_slice *slice);
int ccc_lock_unuse(const struct lu_env *env, const struct cl_lock_slice *slice);
int ccc_lock_wait(const struct lu_env *env, const struct cl_lock_slice *slice);
int ccc_lock_fits_into(const struct lu_env *env,
		       const struct cl_lock_slice *slice,
		       const struct cl_lock_descr *need,
		       const struct cl_io *io);
void ccc_lock_state(const struct lu_env *env,
		    const struct cl_lock_slice *slice,
		    enum cl_lock_state state);

int ccc_io_one_lock_index(const struct lu_env *env, struct cl_io *io,
			  __u32 enqflags, enum cl_lock_mode mode,
			  pgoff_t start, pgoff_t end);
int ccc_io_one_lock(const struct lu_env *env, struct cl_io *io,
		    __u32 enqflags, enum cl_lock_mode mode,
		    loff_t start, loff_t end);
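
/*
 * Example (a hedged sketch): an io that must cover the whole file can
 * request the maximal byte range, with OBD_OBJECT_EOF denoting
 * end-of-object as elsewhere in Lustre:
 *
 *	rc = ccc_io_one_lock(env, io, 0, CLM_WRITE, 0, OBD_OBJECT_EOF);
 */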
void ccc_io_end(const struct lu_env *env, const struct cl_io_slice *ios);
void ccc_io_advance(const struct lu_env *env, const struct cl_io_slice *ios,
		    size_t nob);
void ccc_io_update_iov(const struct lu_env *env, struct ccc_io *cio,
		       struct cl_io *io);
int ccc_prep_size(const struct lu_env *env, struct cl_object *obj,
		  struct cl_io *io, loff_t start, size_t count, int *exceed);
void ccc_req_completion(const struct lu_env *env,
			const struct cl_req_slice *slice, int ioret);
void ccc_req_attr_set(const struct lu_env *env,
		      const struct cl_req_slice *slice,
		      const struct cl_object *obj,
		      struct cl_req_attr *oa, u64 flags);

struct lu_device  *ccc2lu_dev(struct ccc_device *vdv);
struct lu_object  *ccc2lu(struct ccc_object *vob);
struct ccc_device *lu2ccc_dev(const struct lu_device *d);
struct ccc_device *cl2ccc_dev(const struct cl_device *d);
struct ccc_object *lu2ccc(const struct lu_object *obj);
struct ccc_object *cl2ccc(const struct cl_object *obj);
struct ccc_lock   *cl2ccc_lock(const struct cl_lock_slice *slice);
struct ccc_io     *cl2ccc_io(const struct lu_env *env,
			     const struct cl_io_slice *slice);
struct ccc_req    *cl2ccc_req(const struct cl_req_slice *slice);
struct page       *cl2vm_page(const struct cl_page_slice *slice);
struct inode      *ccc_object_inode(const struct cl_object *obj);
struct ccc_object *cl_inode2ccc(struct inode *inode);

int cl_setattr_ost(struct inode *inode, const struct iattr *attr);

int ccc_object_invariant(const struct cl_object *obj);
int cl_file_inode_init(struct inode *inode, struct lustre_md *md);
void cl_inode_fini(struct inode *inode);
int cl_local_size(struct inode *inode);

__u16 ll_dirent_type_get(struct lu_dirent *ent);
__u64 cl_fid_build_ino(const struct lu_fid *fid, int api32);
__u32 cl_fid_build_gen(const struct lu_fid *fid);

# define CLOBINVRNT(env, clob, expr)					\
	((void)sizeof(env), (void)sizeof(clob), (void)sizeof(!!(expr)))
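
/*
 * With invariant checking compiled out, CLOBINVRNT() only type-checks its
 * arguments through sizeof and generates no code. A typical call site:
 *
 *	CLOBINVRNT(env, obj, ccc_object_invariant(obj));
 */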

int cl_init_ea_size(struct obd_export *md_exp, struct obd_export *dt_exp);
int cl_ocd_update(struct obd_device *host,
		  struct obd_device *watched,
		  enum obd_notify_event ev, void *owner, void *data);

struct ccc_grouplock {
	struct lu_env	*cg_env;
	struct cl_io	*cg_io;
	struct cl_lock	*cg_lock;
	unsigned long	 cg_gid;
};

int cl_get_grouplock(struct cl_object *obj, unsigned long gid, int nonblock,
		     struct ccc_grouplock *cg);
void cl_put_grouplock(struct ccc_grouplock *cg);
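
/*
 * Get/put usage sketch (error handling abbreviated; "clob", "gid" and
 * "nonblock" are hypothetical locals of an ioctl-style caller):
 *
 *	struct ccc_grouplock cg;
 *
 *	rc = cl_get_grouplock(clob, gid, nonblock, &cg);
 *	if (rc == 0) {
 *		... io under the group lock ...
 *		cl_put_grouplock(&cg);
 *	}
 */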

/**
 * New interfaces to get and put lov_stripe_md from the lov layer. This
 * violates layering, because lov_stripe_md is supposed to be private to lov.
 *
 * NB: if you find you have to use these interfaces for your new code, please
 * think about it again. These interfaces may be removed in the future for
 * better layering.
 */
struct lov_stripe_md *lov_lsm_get(struct cl_object *clobj);
void lov_lsm_put(struct cl_object *clobj, struct lov_stripe_md *lsm);
int lov_read_and_clear_async_rc(struct cl_object *clob);

struct lov_stripe_md *ccc_inode_lsm_get(struct inode *inode);
void ccc_inode_lsm_put(struct inode *inode, struct lov_stripe_md *lsm);
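
/*
 * These follow the usual get/put discipline; a minimal sketch:
 *
 *	struct lov_stripe_md *lsm = ccc_inode_lsm_get(inode);
 *
 *	if (lsm != NULL) {
 *		... inspect striping ...
 *		ccc_inode_lsm_put(inode, lsm);
 *	}
 */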

/**
 * Data structure managing a client's cached clean pages. An LRU of
 * pages is maintained, along with other statistics.
 */
struct cl_client_cache {
	atomic_t	 ccc_users;	    /* # of users (OSCs) of this data */
	struct list_head ccc_lru;	    /* LRU list of cached clean pages */
	spinlock_t	 ccc_lru_lock;	    /* lock for list */
	atomic_t	 ccc_lru_left;	    /* # of LRU entries available */
	unsigned long	 ccc_lru_max;	    /* max # of LRU entries possible */
	unsigned int	 ccc_lru_shrinkers; /* # of threads reclaiming */
};
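
/*
 * Initialization sketch (assumed from the field semantics above; "cache"
 * and "lru_max" are hypothetical):
 *
 *	atomic_set(&cache->ccc_users, 0);
 *	INIT_LIST_HEAD(&cache->ccc_lru);
 *	spin_lock_init(&cache->ccc_lru_lock);
 *	cache->ccc_lru_max = lru_max;
 *	atomic_set(&cache->ccc_lru_left, lru_max);
 */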

#endif /* LCLIENT_H */