1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.gnu.org/licenses/gpl-2.0.html
19  *
20  * GPL HEADER END
21  */
22 /*
23  * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
24  * Use is subject to license terms.
25  *
26  * Copyright (c) 2011, 2015, Intel Corporation.
27  */
28 /*
29  * This file is part of Lustre, http://www.lustre.org/
30  * Lustre is a trademark of Sun Microsystems, Inc.
31  *
32  * lustre/include/lustre/lustre_idl.h
33  *
34  * Lustre wire protocol definitions.
35  */
36 
37 /** \defgroup lustreidl lustreidl
38  *
39  * Lustre wire protocol definitions.
40  *
41  * ALL structs passing over the wire should be declared here.  Structs
42  * that are used in interfaces with userspace should go in lustre_user.h.
43  *
44  * All structs being declared here should be built from simple fixed-size
45  * types (__u8, __u16, __u32, __u64) or be built from other types or
46  * structs also declared in this file.  Similarly, all flags and magic
47  * values in those structs should also be declared here.  This ensures
48  * that the Lustre wire protocol is not influenced by external dependencies.
49  *
50  * The only other acceptable items in this file are VERY SIMPLE accessor
51  * functions to avoid callers grubbing inside the structures, and the
52  * prototypes of the swabber functions for each struct.  Nothing that
53  * depends on external functions or definitions should be in here.
54  *
55  * Structs must be properly aligned to put 64-bit values on an 8-byte
56  * boundary.  Any structs being added here must also be added to
57  * utils/wirecheck.c and "make newwiretest" run to regenerate the
58  * utils/wiretest.c sources.  This allows us to verify that wire structs
59  * have the proper alignment/size on all architectures.
60  *
61  * DO NOT CHANGE any of the structs, flags, values declared here and used
62  * in released Lustre versions.  Some structs may have padding fields that
63  * can be used.  Some structs might allow addition at the end (verify this
64  * in the code to ensure that new/old clients that see this larger struct
65  * do not fail, otherwise you need to implement protocol compatibility).
66  *
67  * We assume all nodes are either little-endian or big-endian, and we
68  * always send messages in the sender's native format.  The receiver
69  * detects the message format by checking the 'magic' field of the message
70  * (see lustre_msg_swabbed() below).
71  *
72  * Each wire type has corresponding 'lustre_swab_xxxtypexxx()' routines,
73  * implemented either here, inline (trivial implementations) or in
74  * ptlrpc/pack_generic.c.  These 'swabbers' convert the type from "other"
75  * endian, in-place in the message buffer.
76  *
77  * A swabber takes a single pointer argument.  The caller must already have
78  * verified that the length of the message buffer >= sizeof (type).
79  *
80  * For variable length types, a second 'lustre_swab_v_xxxtypexxx()' routine
81  * may be defined that swabs just the variable part, after the caller has
82  * verified that the message buffer is large enough.
83  *
84  * @{
85  */
86 
87 #ifndef _LUSTRE_IDL_H_
88 #define _LUSTRE_IDL_H_
89 
90 #include "../../../include/linux/libcfs/libcfs.h"
91 #include "../../../include/linux/lnet/types.h"
92 
93 /* Defn's shared with user-space. */
94 #include "lustre_user.h"
95 #include "lustre_errno.h"
96 #include "../lustre_ver.h"
97 
98 /*
99  *  GENERAL STUFF
100  */
101 /* FOO_REQUEST_PORTAL is for incoming requests on the FOO
102  * FOO_REPLY_PORTAL   is for incoming replies on the FOO
103  * FOO_BULK_PORTAL    is for incoming bulk on the FOO
104  */
105 
106 /* Lustre service names follow the format:
107  * service name + MDT + seq name
108  */
109 #define LUSTRE_MDT_MAXNAMELEN	80
110 
111 #define CONNMGR_REQUEST_PORTAL	  1
112 #define CONNMGR_REPLY_PORTAL	    2
113 /*#define OSC_REQUEST_PORTAL	    3 */
114 #define OSC_REPLY_PORTAL		4
115 /*#define OSC_BULK_PORTAL	       5 */
116 #define OST_IO_PORTAL		   6
117 #define OST_CREATE_PORTAL	       7
118 #define OST_BULK_PORTAL		 8
119 /*#define MDC_REQUEST_PORTAL	    9 */
120 #define MDC_REPLY_PORTAL	       10
121 /*#define MDC_BULK_PORTAL	      11 */
122 #define MDS_REQUEST_PORTAL	     12
123 /*#define MDS_REPLY_PORTAL	     13 */
124 #define MDS_BULK_PORTAL		14
125 #define LDLM_CB_REQUEST_PORTAL	 15
126 #define LDLM_CB_REPLY_PORTAL	   16
127 #define LDLM_CANCEL_REQUEST_PORTAL     17
128 #define LDLM_CANCEL_REPLY_PORTAL       18
129 /*#define PTLBD_REQUEST_PORTAL	   19 */
130 /*#define PTLBD_REPLY_PORTAL	     20 */
131 /*#define PTLBD_BULK_PORTAL	      21 */
132 #define MDS_SETATTR_PORTAL	     22
133 #define MDS_READPAGE_PORTAL	    23
134 #define OUT_PORTAL		    24
135 
136 #define MGC_REPLY_PORTAL	       25
137 #define MGS_REQUEST_PORTAL	     26
138 #define MGS_REPLY_PORTAL	       27
139 #define OST_REQUEST_PORTAL	     28
140 #define FLD_REQUEST_PORTAL	     29
141 #define SEQ_METADATA_PORTAL	    30
142 #define SEQ_DATA_PORTAL		31
143 #define SEQ_CONTROLLER_PORTAL	  32
144 #define MGS_BULK_PORTAL		33
145 
146 /* Portal 63 is reserved for the Cray Inc DVS - nic@cray.com, roe@cray.com,
147  *						n8851@cray.com
148  */
149 
150 /* packet types */
151 #define PTL_RPC_MSG_REQUEST 4711
152 #define PTL_RPC_MSG_ERR     4712
153 #define PTL_RPC_MSG_REPLY   4713
154 
155 /* DON'T use swabbed values of MAGIC as magic! */
156 #define LUSTRE_MSG_MAGIC_V2 0x0BD00BD3
157 #define LUSTRE_MSG_MAGIC_V2_SWABBED 0xD30BD00B
158 
159 #define LUSTRE_MSG_MAGIC LUSTRE_MSG_MAGIC_V2
160 
161 #define PTLRPC_MSG_VERSION  0x00000003
162 #define LUSTRE_VERSION_MASK 0xffff0000
163 #define LUSTRE_OBD_VERSION  0x00010000
164 #define LUSTRE_MDS_VERSION  0x00020000
165 #define LUSTRE_OST_VERSION  0x00030000
166 #define LUSTRE_DLM_VERSION  0x00040000
167 #define LUSTRE_LOG_VERSION  0x00050000
168 #define LUSTRE_MGS_VERSION  0x00060000
169 
170 /**
171  * Describes a range of sequence numbers: lsr_start is included in the
172  * range but lsr_end is not.
173  * The same structure is used in the fld module, where the lsr_index
174  * field holds the mdt id of the home mdt.
175  */
176 struct lu_seq_range {
177 	__u64 lsr_start;
178 	__u64 lsr_end;
179 	__u32 lsr_index;
180 	__u32 lsr_flags;
181 };
182 
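
/*
 * Illustrative sketch only (the helper name below is hypothetical): a
 * trivial in-place swabber for struct lu_seq_range, following the
 * convention described at the top of this file -- the caller has already
 * verified that the buffer is at least sizeof(*range) bytes.  The real
 * prototype, lustre_swab_lu_seq_range(), is declared later in this file.
 */
static inline void example_swab_lu_seq_range(struct lu_seq_range *range)
{
	__swab64s(&range->lsr_start);
	__swab64s(&range->lsr_end);
	__swab32s(&range->lsr_index);
	__swab32s(&range->lsr_flags);
}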
183 struct lu_seq_range_array {
184 	__u32 lsra_count;
185 	__u32 lsra_padding;
186 	struct lu_seq_range lsra_lsr[0];
187 };
188 
189 #define LU_SEQ_RANGE_MDT	0x0
190 #define LU_SEQ_RANGE_OST	0x1
191 #define LU_SEQ_RANGE_ANY	0x3
192 
193 #define LU_SEQ_RANGE_MASK	0x3
194 
195 static inline unsigned fld_range_type(const struct lu_seq_range *range)
196 {
197 	return range->lsr_flags & LU_SEQ_RANGE_MASK;
198 }
199 
200 static inline bool fld_range_is_ost(const struct lu_seq_range *range)
201 {
202 	return fld_range_type(range) == LU_SEQ_RANGE_OST;
203 }
204 
205 static inline bool fld_range_is_mdt(const struct lu_seq_range *range)
206 {
207 	return fld_range_type(range) == LU_SEQ_RANGE_MDT;
208 }
209 
210 /**
211  * The ANY range type is only used when the fld client sends an fld query
212  * request but does not know whether the seq is MDT or OST; the request is
213  * sent with the ANY type, which means either seq type may be returned by
214  * the lookup.
215  */
216 static inline unsigned fld_range_is_any(const struct lu_seq_range *range)
217 {
218 	return fld_range_type(range) == LU_SEQ_RANGE_ANY;
219 }
220 
221 static inline void fld_range_set_type(struct lu_seq_range *range,
222 				      unsigned flags)
223 {
224 	range->lsr_flags |= flags;
225 }
226 
227 static inline void fld_range_set_mdt(struct lu_seq_range *range)
228 {
229 	fld_range_set_type(range, LU_SEQ_RANGE_MDT);
230 }
231 
232 static inline void fld_range_set_ost(struct lu_seq_range *range)
233 {
234 	fld_range_set_type(range, LU_SEQ_RANGE_OST);
235 }
236 
237 static inline void fld_range_set_any(struct lu_seq_range *range)
238 {
239 	fld_range_set_type(range, LU_SEQ_RANGE_ANY);
240 }
241 
242 /**
243  * returns the width of the given range \a range
244  */
245 
246 static inline __u64 range_space(const struct lu_seq_range *range)
247 {
248 	return range->lsr_end - range->lsr_start;
249 }
250 
251 /**
252  * initialize range to zero
253  */
254 
255 static inline void range_init(struct lu_seq_range *range)
256 {
257 	memset(range, 0, sizeof(*range));
258 }
259 
260 /**
261  * check if given seq id \a s is within given range \a r
262  */
263 
264 static inline bool range_within(const struct lu_seq_range *range,
265 				__u64 s)
266 {
267 	return s >= range->lsr_start && s < range->lsr_end;
268 }
269 
270 static inline bool range_is_sane(const struct lu_seq_range *range)
271 {
272 	return (range->lsr_end >= range->lsr_start);
273 }
274 
275 static inline bool range_is_zero(const struct lu_seq_range *range)
276 {
277 	return (range->lsr_start == 0 && range->lsr_end == 0);
278 }
279 
280 static inline bool range_is_exhausted(const struct lu_seq_range *range)
281 
282 {
283 	return range_space(range) == 0;
284 }
285 
286 /* return 0 if the two ranges have the same location */
287 static inline int range_compare_loc(const struct lu_seq_range *r1,
288 				    const struct lu_seq_range *r2)
289 {
290 	return r1->lsr_index != r2->lsr_index ||
291 	       r1->lsr_flags != r2->lsr_flags;
292 }
293 
294 #define DRANGE "[%#16.16Lx-%#16.16Lx):%x:%s"
295 
296 #define PRANGE(range)		\
297 	(range)->lsr_start,	\
298 	(range)->lsr_end,	\
299 	(range)->lsr_index,	\
300 	fld_range_is_mdt(range) ? "mdt" : "ost"
301 
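
/*
 * Illustrative usage sketch (hypothetical helper): DRANGE supplies the
 * format string and PRANGE() the matching argument list, so they are meant
 * to be used together, e.g. with the libcfs debug macros.
 */
static inline void example_print_range(const struct lu_seq_range *range)
{
	CDEBUG(D_INFO, "range "DRANGE"\n", PRANGE(range));
}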
302 /** \defgroup lu_fid lu_fid
303  * @{
304  */
305 
306 /**
307  * Flags for lustre_mdt_attrs::lma_compat and lustre_mdt_attrs::lma_incompat.
308  * Deprecated since HSM and SOM attributes are now stored in separate on-disk
309  * xattr.
310  */
311 enum lma_compat {
312 	LMAC_HSM	= 0x00000001,
313 	LMAC_SOM	= 0x00000002,
314 	LMAC_NOT_IN_OI	= 0x00000004, /* the object does NOT need OI mapping */
315 	LMAC_FID_ON_OST = 0x00000008, /* For OST-object, its OI mapping is
316 				       * under /O/<seq>/d<x>.
317 				       */
318 };
319 
320 /**
321  * Masks for all features that should be supported by a Lustre version to
322  * access a specific file.
323  * This information is stored in lustre_mdt_attrs::lma_incompat.
324  */
325 enum lma_incompat {
326 	LMAI_RELEASED		= 0x00000001, /* file is released */
327 	LMAI_AGENT		= 0x00000002, /* agent inode */
328 	LMAI_REMOTE_PARENT	= 0x00000004, /* the parent of the object
329 					       * is on the remote MDT
330 					       */
331 };
332 
333 #define LMA_INCOMPAT_SUPP	(LMAI_AGENT | LMAI_REMOTE_PARENT)
334 
335 /**
336  * fid constants
337  */
338 enum {
339 	/** LASTID file has zero OID */
340 	LUSTRE_FID_LASTID_OID = 0UL,
341 	/** initial fid id value */
342 	LUSTRE_FID_INIT_OID  = 1UL
343 };
344 
345 /** returns fid object sequence */
346 static inline __u64 fid_seq(const struct lu_fid *fid)
347 {
348 	return fid->f_seq;
349 }
350 
351 /** returns fid object id */
352 static inline __u32 fid_oid(const struct lu_fid *fid)
353 {
354 	return fid->f_oid;
355 }
356 
357 /** returns fid object version */
358 static inline __u32 fid_ver(const struct lu_fid *fid)
359 {
360 	return fid->f_ver;
361 }
362 
363 static inline void fid_zero(struct lu_fid *fid)
364 {
365 	memset(fid, 0, sizeof(*fid));
366 }
367 
368 static inline __u64 fid_ver_oid(const struct lu_fid *fid)
369 {
370 	return ((__u64)fid_ver(fid) << 32 | fid_oid(fid));
371 }
372 
373 /* The copytool uses a 32-bit bitmask field to encode archive IDs when
374  * registering with the MDT through KUC.
375  * archive num = 0 => all
376  * archive num from 1 to 32
377  */
378 #define LL_HSM_MAX_ARCHIVE (sizeof(__u32) * 8)
379 
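
/*
 * Illustrative sketch (hypothetical helper, not part of the wire protocol):
 * how an archive number could map onto the 32-bit registration bitmask
 * described above, assuming bit (n - 1) represents archive n and 0 means
 * "all archives".
 */
static inline __u32 example_hsm_archive_to_mask(unsigned int archive_num)
{
	if (archive_num == 0)			/* 0 => all archives */
		return 0xffffffff;
	return 1U << (archive_num - 1);		/* archives 1..LL_HSM_MAX_ARCHIVE */
}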
380 /**
381  * Note that reserved SEQ numbers below 12 will conflict with ldiskfs
382  * inodes in the IGIF namespace, so these reserved SEQ numbers can be
383  * used for other purposes and not risk collisions with existing inodes.
384  *
385  * Different FID Format
386  * http://wiki.old.lustre.org/index.php/Architecture_-_Interoperability_fids_zfs
387  */
388 enum fid_seq {
389 	FID_SEQ_OST_MDT0	= 0,
390 	FID_SEQ_LLOG		= 1, /* unnamed llogs */
391 	FID_SEQ_ECHO		= 2,
392 	FID_SEQ_OST_MDT1	= 3,
393 	FID_SEQ_OST_MAX		= 9, /* Max MDT count before OST_on_FID */
394 	FID_SEQ_LLOG_NAME	= 10, /* named llogs */
395 	FID_SEQ_RSVD		= 11,
396 	FID_SEQ_IGIF		= 12,
397 	FID_SEQ_IGIF_MAX	= 0x0ffffffffULL,
398 	FID_SEQ_IDIF		= 0x100000000ULL,
399 	FID_SEQ_IDIF_MAX	= 0x1ffffffffULL,
400 	/* Normal FID sequence starts from this value, i.e. 1<<33 */
401 	FID_SEQ_START		= 0x200000000ULL,
402 	/* sequence for local pre-defined FIDs listed in local_oid */
403 	FID_SEQ_LOCAL_FILE	= 0x200000001ULL,
404 	FID_SEQ_DOT_LUSTRE	= 0x200000002ULL,
405 	/* sequence is used for local named objects FIDs generated
406 	 * by local_object_storage library
407 	 */
408 	FID_SEQ_LOCAL_NAME	= 0x200000003ULL,
409 	/* Because the current FLD only caches the fid sequence (not the
410 	 * oid) on the client side, any FID that needs to be exposed to
411 	 * clients must ensure that all fids under one sequence are
412 	 * located on one MDT.
413 	 */
414 	FID_SEQ_SPECIAL		= 0x200000004ULL,
415 	FID_SEQ_QUOTA		= 0x200000005ULL,
416 	FID_SEQ_QUOTA_GLB	= 0x200000006ULL,
417 	FID_SEQ_ROOT		= 0x200000007ULL,  /* Located on MDT0 */
418 	FID_SEQ_NORMAL		= 0x200000400ULL,
419 	FID_SEQ_LOV_DEFAULT	= 0xffffffffffffffffULL
420 };
421 
422 #define OBIF_OID_MAX_BITS	   32
423 #define OBIF_MAX_OID		(1ULL << OBIF_OID_MAX_BITS)
424 #define OBIF_OID_MASK	       ((1ULL << OBIF_OID_MAX_BITS) - 1)
425 #define IDIF_OID_MAX_BITS	   48
426 #define IDIF_MAX_OID		(1ULL << IDIF_OID_MAX_BITS)
427 #define IDIF_OID_MASK	       ((1ULL << IDIF_OID_MAX_BITS) - 1)
428 
429 /** OID for FID_SEQ_SPECIAL */
430 enum special_oid {
431 	/* Big Filesystem Lock to serialize rename operations */
432 	FID_OID_SPECIAL_BFL     = 1UL,
433 };
434 
435 /** OID for FID_SEQ_DOT_LUSTRE */
436 enum dot_lustre_oid {
437 	FID_OID_DOT_LUSTRE  = 1UL,
438 	FID_OID_DOT_LUSTRE_OBF = 2UL,
439 };
440 
441 static inline bool fid_seq_is_mdt0(__u64 seq)
442 {
443 	return (seq == FID_SEQ_OST_MDT0);
444 }
445 
446 static inline bool fid_seq_is_mdt(__u64 seq)
447 {
448 	return seq == FID_SEQ_OST_MDT0 || seq >= FID_SEQ_NORMAL;
449 };
450 
451 static inline bool fid_seq_is_echo(__u64 seq)
452 {
453 	return (seq == FID_SEQ_ECHO);
454 }
455 
456 static inline bool fid_is_echo(const struct lu_fid *fid)
457 {
458 	return fid_seq_is_echo(fid_seq(fid));
459 }
460 
461 static inline bool fid_seq_is_llog(__u64 seq)
462 {
463 	return (seq == FID_SEQ_LLOG);
464 }
465 
466 static inline bool fid_is_llog(const struct lu_fid *fid)
467 {
468 	/* file with OID == 0 is not llog but contains last oid */
469 	return fid_seq_is_llog(fid_seq(fid)) && fid_oid(fid) > 0;
470 }
471 
472 static inline bool fid_seq_is_rsvd(__u64 seq)
473 {
474 	return (seq > FID_SEQ_OST_MDT0 && seq <= FID_SEQ_RSVD);
475 };
476 
477 static inline bool fid_seq_is_special(__u64 seq)
478 {
479 	return seq == FID_SEQ_SPECIAL;
480 };
481 
482 static inline bool fid_seq_is_local_file(__u64 seq)
483 {
484 	return seq == FID_SEQ_LOCAL_FILE ||
485 	       seq == FID_SEQ_LOCAL_NAME;
486 };
487 
488 static inline bool fid_seq_is_root(__u64 seq)
489 {
490 	return seq == FID_SEQ_ROOT;
491 }
492 
493 static inline bool fid_seq_is_dot(__u64 seq)
494 {
495 	return seq == FID_SEQ_DOT_LUSTRE;
496 }
497 
498 static inline bool fid_seq_is_default(__u64 seq)
499 {
500 	return seq == FID_SEQ_LOV_DEFAULT;
501 }
502 
503 static inline bool fid_is_mdt0(const struct lu_fid *fid)
504 {
505 	return fid_seq_is_mdt0(fid_seq(fid));
506 }
507 
508 static inline void lu_root_fid(struct lu_fid *fid)
509 {
510 	fid->f_seq = FID_SEQ_ROOT;
511 	fid->f_oid = 1;
512 	fid->f_ver = 0;
513 }
514 
515 /**
516  * Check if a fid is igif or not.
517  * \param fid the fid to be tested.
518  * \return true if the fid is an igif; otherwise false.
519  */
520 static inline bool fid_seq_is_igif(__u64 seq)
521 {
522 	return seq >= FID_SEQ_IGIF && seq <= FID_SEQ_IGIF_MAX;
523 }
524 
525 static inline bool fid_is_igif(const struct lu_fid *fid)
526 {
527 	return fid_seq_is_igif(fid_seq(fid));
528 }
529 
530 /**
531  * Check if a fid is idif or not.
532  * \param fid the fid to be tested.
533  * \return true if the fid is an idif; otherwise false.
534  */
535 static inline bool fid_seq_is_idif(__u64 seq)
536 {
537 	return seq >= FID_SEQ_IDIF && seq <= FID_SEQ_IDIF_MAX;
538 }
539 
540 static inline bool fid_is_idif(const struct lu_fid *fid)
541 {
542 	return fid_seq_is_idif(fid_seq(fid));
543 }
544 
545 static inline bool fid_is_local_file(const struct lu_fid *fid)
546 {
547 	return fid_seq_is_local_file(fid_seq(fid));
548 }
549 
550 static inline bool fid_seq_is_norm(__u64 seq)
551 {
552 	return (seq >= FID_SEQ_NORMAL);
553 }
554 
555 static inline bool fid_is_norm(const struct lu_fid *fid)
556 {
557 	return fid_seq_is_norm(fid_seq(fid));
558 }
559 
560 /* convert an OST objid into an IDIF FID SEQ number */
561 static inline __u64 fid_idif_seq(__u64 id, __u32 ost_idx)
562 {
563 	return FID_SEQ_IDIF | (ost_idx << 16) | ((id >> 32) & 0xffff);
564 }
565 
566 /* convert a packed IDIF FID into an OST objid */
567 static inline __u64 fid_idif_id(__u64 seq, __u32 oid, __u32 ver)
568 {
569 	return ((__u64)ver << 48) | ((seq & 0xffff) << 32) | oid;
570 }
571 
572 /* extract ost index from IDIF FID */
573 static inline __u32 fid_idif_ost_idx(const struct lu_fid *fid)
574 {
575 	return (fid_seq(fid) >> 16) & 0xffff;
576 }
577 
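
/*
 * Illustrative sketch (hypothetical helper, example values only): how a
 * legacy OST object id and OST index are packed into and recovered from an
 * IDIF FID using the helpers above.
 */
static inline void example_idif_roundtrip(void)
{
	__u64 objid = 0x123456789ULL;	/* legacy OST object id, < 2^48 */
	__u32 ost_idx = 7;		/* OST index, < 2^16 */
	struct lu_fid fid;
	__u64 objid2;

	fid.f_seq = fid_idif_seq(objid, ost_idx);
	fid.f_oid = objid;		/* truncated to the low 32 bits */
	fid.f_ver = objid >> 48;	/* zero for any id below 2^48 */

	objid2 = fid_idif_id(fid_seq(&fid), fid_oid(&fid), fid_ver(&fid));
	/* here objid2 == objid and fid_idif_ost_idx(&fid) == ost_idx */
}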
578 /* extract OST sequence (group) from a wire ost_id (id/seq) pair */
579 static inline __u64 ostid_seq(const struct ost_id *ostid)
580 {
581 	if (fid_seq_is_mdt0(ostid->oi.oi_seq))
582 		return FID_SEQ_OST_MDT0;
583 
584 	if (unlikely(fid_seq_is_default(ostid->oi.oi_seq)))
585 		return FID_SEQ_LOV_DEFAULT;
586 
587 	if (fid_is_idif(&ostid->oi_fid))
588 		return FID_SEQ_OST_MDT0;
589 
590 	return fid_seq(&ostid->oi_fid);
591 }
592 
593 /* extract OST objid from a wire ost_id (id/seq) pair */
594 static inline __u64 ostid_id(const struct ost_id *ostid)
595 {
596 	if (fid_seq_is_mdt0(ostid->oi.oi_seq))
597 		return ostid->oi.oi_id & IDIF_OID_MASK;
598 
599 	if (unlikely(fid_seq_is_default(ostid->oi.oi_seq)))
600 		return ostid->oi.oi_id;
601 
602 	if (fid_is_idif(&ostid->oi_fid))
603 		return fid_idif_id(fid_seq(&ostid->oi_fid),
604 				   fid_oid(&ostid->oi_fid), 0);
605 
606 	return fid_oid(&ostid->oi_fid);
607 }
608 
609 static inline void ostid_set_seq(struct ost_id *oi, __u64 seq)
610 {
611 	if (fid_seq_is_mdt0(seq) || fid_seq_is_default(seq)) {
612 		oi->oi.oi_seq = seq;
613 	} else {
614 		oi->oi_fid.f_seq = seq;
615 		/* Note: if f_oid + f_ver is zero, we need init it
616 		 * to be 1, otherwise, ostid_seq will treat this
617 		 * as old ostid (oi_seq == 0)
618 		 */
619 		if (oi->oi_fid.f_oid == 0 && oi->oi_fid.f_ver == 0)
620 			oi->oi_fid.f_oid = LUSTRE_FID_INIT_OID;
621 	}
622 }
623 
624 static inline void ostid_set_seq_mdt0(struct ost_id *oi)
625 {
626 	ostid_set_seq(oi, FID_SEQ_OST_MDT0);
627 }
628 
629 static inline void ostid_set_seq_echo(struct ost_id *oi)
630 {
631 	ostid_set_seq(oi, FID_SEQ_ECHO);
632 }
633 
634 static inline void ostid_set_seq_llog(struct ost_id *oi)
635 {
636 	ostid_set_seq(oi, FID_SEQ_LLOG);
637 }
638 
639 /**
640  * Note: we need to check oi_seq to decide where to set oi_id,
641  * so oi_seq should always be set ahead of oi_id.
642  */
643 static inline void ostid_set_id(struct ost_id *oi, __u64 oid)
644 {
645 	if (fid_seq_is_mdt0(oi->oi.oi_seq)) {
646 		if (oid >= IDIF_MAX_OID) {
647 			CERROR("Bad %llu to set " DOSTID "\n", oid, POSTID(oi));
648 			return;
649 		}
650 		oi->oi.oi_id = oid;
651 	} else if (fid_is_idif(&oi->oi_fid)) {
652 		if (oid >= IDIF_MAX_OID) {
653 			CERROR("Bad %llu to set "DOSTID"\n",
654 			       oid, POSTID(oi));
655 			return;
656 		}
657 		oi->oi_fid.f_seq = fid_idif_seq(oid,
658 						fid_idif_ost_idx(&oi->oi_fid));
659 		oi->oi_fid.f_oid = oid;
660 		oi->oi_fid.f_ver = oid >> 48;
661 	} else {
662 		if (oid >= OBIF_MAX_OID) {
663 			CERROR("Bad %llu to set " DOSTID "\n", oid, POSTID(oi));
664 			return;
665 		}
666 		oi->oi_fid.f_oid = oid;
667 	}
668 }
669 
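
/*
 * Illustrative sketch (hypothetical helper): oi_seq must be stored before
 * oi_id, because ostid_set_id() inspects the already-stored sequence to
 * decide whether the ost_id uses the legacy id/seq layout or the FID layout.
 */
static inline void example_ostid_fill(struct ost_id *oi, __u64 seq, __u64 oid)
{
	ostid_set_seq(oi, seq);		/* first: pick the layout */
	ostid_set_id(oi, oid);		/* then: store the object id */
}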
670 static inline int fid_set_id(struct lu_fid *fid, __u64 oid)
671 {
672 	if (unlikely(fid_seq_is_igif(fid->f_seq))) {
673 		CERROR("bad IGIF, "DFID"\n", PFID(fid));
674 		return -EBADF;
675 	}
676 
677 	if (fid_is_idif(fid)) {
678 		if (oid >= IDIF_MAX_OID) {
679 			CERROR("Too large OID %#llx to set IDIF "DFID"\n",
680 			       (unsigned long long)oid, PFID(fid));
681 			return -EBADF;
682 		}
683 		fid->f_seq = fid_idif_seq(oid, fid_idif_ost_idx(fid));
684 		fid->f_oid = oid;
685 		fid->f_ver = oid >> 48;
686 	} else {
687 		if (oid >= OBIF_MAX_OID) {
688 			CERROR("Too large OID %#llx to set REG "DFID"\n",
689 			       (unsigned long long)oid, PFID(fid));
690 			return -EBADF;
691 		}
692 		fid->f_oid = oid;
693 	}
694 	return 0;
695 }
696 
697 /**
698  * Unpack an OST object id/seq (group) into a FID.  This is needed for
699  * converting all obdo, lmm, lsm, etc. 64-bit id/seq pairs into proper
700  * FIDs.  Note that if an id/seq is already in FID/IDIF format it will
701  * be passed through unchanged.  Only legacy OST objects in "group 0"
702  * will be mapped into the IDIF namespace so that they can fit into the
703  * struct lu_fid fields without loss.  For reference see:
704  * http://wiki.old.lustre.org/index.php/Architecture_-_Interoperability_fids_zfs
705  */
706 static inline int ostid_to_fid(struct lu_fid *fid, struct ost_id *ostid,
707 			       __u32 ost_idx)
708 {
709 	__u64 seq = ostid_seq(ostid);
710 
711 	if (ost_idx > 0xffff) {
712 		CERROR("bad ost_idx, "DOSTID" ost_idx:%u\n", POSTID(ostid),
713 		       ost_idx);
714 		return -EBADF;
715 	}
716 
717 	if (fid_seq_is_mdt0(seq)) {
718 		__u64 oid = ostid_id(ostid);
719 
720 		/* This is a "legacy" (old 1.x/2.early) OST object in "group 0"
721 		 * that we map into the IDIF namespace.  It allows up to 2^48
722 		 * objects per OST, as this is the object namespace that has
723 		 * been in production for years.  This can handle create rates
724 		 * of 1M objects/s/OST for 9 years, or combinations thereof.
725 		 */
726 		if (oid >= IDIF_MAX_OID) {
727 			CERROR("bad MDT0 id, " DOSTID " ost_idx:%u\n",
728 			       POSTID(ostid), ost_idx);
729 			return -EBADF;
730 		}
731 		fid->f_seq = fid_idif_seq(oid, ost_idx);
732 		/* truncate to 32 bits by assignment */
733 		fid->f_oid = oid;
734 		/* in theory, not currently used */
735 		fid->f_ver = oid >> 48;
736 	} else if (likely(!fid_seq_is_default(seq))) {
737 	       /* This is either an IDIF object, which identifies objects across
738 		* all OSTs, or a regular FID.  The IDIF namespace maps legacy
739 		* OST objects into the FID namespace.  In both cases, we just
740 		* pass the FID through, no conversion needed.
741 		*/
742 		if (ostid->oi_fid.f_ver != 0) {
743 			CERROR("bad MDT0 id, " DOSTID " ost_idx:%u\n",
744 			       POSTID(ostid), ost_idx);
745 			return -EBADF;
746 		}
747 		*fid = ostid->oi_fid;
748 	}
749 
750 	return 0;
751 }
752 
753 /* pack any OST FID into an ostid (id/seq) for the wire/disk */
754 static inline int fid_to_ostid(const struct lu_fid *fid, struct ost_id *ostid)
755 {
756 	if (unlikely(fid_seq_is_igif(fid->f_seq))) {
757 		CERROR("bad IGIF, "DFID"\n", PFID(fid));
758 		return -EBADF;
759 	}
760 
761 	if (fid_is_idif(fid)) {
762 		ostid_set_seq_mdt0(ostid);
763 		ostid_set_id(ostid, fid_idif_id(fid_seq(fid), fid_oid(fid),
764 						fid_ver(fid)));
765 	} else {
766 		ostid->oi_fid = *fid;
767 	}
768 
769 	return 0;
770 }
771 
772 /* Check whether the fid is for LAST_ID */
773 static inline bool fid_is_last_id(const struct lu_fid *fid)
774 {
775 	return (fid_oid(fid) == 0);
776 }
777 
778 /**
779  * Get inode number from a igif.
780  * \param fid a igif to get inode number from.
781  * \return inode number for the igif.
782  */
783 static inline ino_t lu_igif_ino(const struct lu_fid *fid)
784 {
785 	return fid_seq(fid);
786 }
787 
788 void lustre_swab_ost_id(struct ost_id *oid);
789 
790 /**
791  * Get inode generation from a igif.
792  * \param fid a igif to get inode generation from.
793  * \return inode generation for the igif.
794  */
795 static inline __u32 lu_igif_gen(const struct lu_fid *fid)
796 {
797 	return fid_oid(fid);
798 }
799 
800 /**
801  * Build igif from the inode number/generation.
802  */
803 static inline void lu_igif_build(struct lu_fid *fid, __u32 ino, __u32 gen)
804 {
805 	fid->f_seq = ino;
806 	fid->f_oid = gen;
807 	fid->f_ver = 0;
808 }
809 
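
/*
 * Illustrative sketch (hypothetical helper, example values only): building
 * an IGIF from an (inode, generation) pair and reading it back with the
 * accessors above.
 */
static inline void example_igif_roundtrip(void)
{
	struct lu_fid fid;

	lu_igif_build(&fid, 12345, 2);
	/* now lu_igif_ino(&fid) == 12345 and lu_igif_gen(&fid) == 2 */
}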
810 /*
811  * Fids are transmitted across network (in the sender byte-ordering),
812  * and stored on disk in big-endian order.
813  */
814 static inline void fid_cpu_to_le(struct lu_fid *dst, const struct lu_fid *src)
815 {
816 	dst->f_seq = cpu_to_le64(fid_seq(src));
817 	dst->f_oid = cpu_to_le32(fid_oid(src));
818 	dst->f_ver = cpu_to_le32(fid_ver(src));
819 }
820 
821 static inline void fid_le_to_cpu(struct lu_fid *dst, const struct lu_fid *src)
822 {
823 	dst->f_seq = le64_to_cpu(fid_seq(src));
824 	dst->f_oid = le32_to_cpu(fid_oid(src));
825 	dst->f_ver = le32_to_cpu(fid_ver(src));
826 }
827 
828 static inline void fid_cpu_to_be(struct lu_fid *dst, const struct lu_fid *src)
829 {
830 	dst->f_seq = cpu_to_be64(fid_seq(src));
831 	dst->f_oid = cpu_to_be32(fid_oid(src));
832 	dst->f_ver = cpu_to_be32(fid_ver(src));
833 }
834 
835 static inline void fid_be_to_cpu(struct lu_fid *dst, const struct lu_fid *src)
836 {
837 	dst->f_seq = be64_to_cpu(fid_seq(src));
838 	dst->f_oid = be32_to_cpu(fid_oid(src));
839 	dst->f_ver = be32_to_cpu(fid_ver(src));
840 }
841 
842 static inline bool fid_is_sane(const struct lu_fid *fid)
843 {
844 	return fid &&
845 	       ((fid_seq(fid) >= FID_SEQ_START && fid_ver(fid) == 0) ||
846 		fid_is_igif(fid) || fid_is_idif(fid) ||
847 		fid_seq_is_rsvd(fid_seq(fid)));
848 }
849 
850 void lustre_swab_lu_fid(struct lu_fid *fid);
851 void lustre_swab_lu_seq_range(struct lu_seq_range *range);
852 
853 static inline bool lu_fid_eq(const struct lu_fid *f0, const struct lu_fid *f1)
854 {
855 	return memcmp(f0, f1, sizeof(*f0)) == 0;
856 }
857 
858 #define __diff_normalize(val0, val1)			    \
859 ({							      \
860 	typeof(val0) __val0 = (val0);			   \
861 	typeof(val1) __val1 = (val1);			   \
862 								\
863 	(__val0 == __val1 ? 0 : __val0 > __val1 ? 1 : -1);     \
864 })
865 
866 static inline int lu_fid_cmp(const struct lu_fid *f0,
867 			     const struct lu_fid *f1)
868 {
869 	return
870 		__diff_normalize(fid_seq(f0), fid_seq(f1)) ?:
871 		__diff_normalize(fid_oid(f0), fid_oid(f1)) ?:
872 		__diff_normalize(fid_ver(f0), fid_ver(f1));
873 }
874 
875 static inline void ostid_cpu_to_le(const struct ost_id *src_oi,
876 				   struct ost_id *dst_oi)
877 {
878 	if (fid_seq_is_mdt0(ostid_seq(src_oi))) {
879 		dst_oi->oi.oi_id = cpu_to_le64(src_oi->oi.oi_id);
880 		dst_oi->oi.oi_seq = cpu_to_le64(src_oi->oi.oi_seq);
881 	} else {
882 		fid_cpu_to_le(&dst_oi->oi_fid, &src_oi->oi_fid);
883 	}
884 }
885 
886 static inline void ostid_le_to_cpu(const struct ost_id *src_oi,
887 				   struct ost_id *dst_oi)
888 {
889 	if (fid_seq_is_mdt0(ostid_seq(src_oi))) {
890 		dst_oi->oi.oi_id = le64_to_cpu(src_oi->oi.oi_id);
891 		dst_oi->oi.oi_seq = le64_to_cpu(src_oi->oi.oi_seq);
892 	} else {
893 		fid_le_to_cpu(&dst_oi->oi_fid, &src_oi->oi_fid);
894 	}
895 }
896 
897 /** @} lu_fid */
898 
899 /** \defgroup lu_dir lu_dir
900  * @{
901  */
902 
903 /**
904  * Enumeration of possible directory entry attributes.
905  *
906  * Attributes follow directory entry header in the order they appear in this
907  * enumeration.
908  */
909 enum lu_dirent_attrs {
910 	LUDA_FID		= 0x0001,
911 	LUDA_TYPE		= 0x0002,
912 	LUDA_64BITHASH		= 0x0004,
913 };
914 
915 /**
916  * Layout of readdir pages, as transmitted on wire.
917  */
918 struct lu_dirent {
919 	/** valid if LUDA_FID is set. */
920 	struct lu_fid lde_fid;
921 	/** a unique entry identifier: a hash or an offset. */
922 	__u64	 lde_hash;
923 	/** total record length, including all attributes. */
924 	__u16	 lde_reclen;
925 	/** name length */
926 	__u16	 lde_namelen;
927 	/** optional variable size attributes following this entry.
928 	 *  taken from enum lu_dirent_attrs.
929 	 */
930 	__u32	 lde_attrs;
931 	/** name is followed by the attributes indicated in ->lde_attrs, in
932 	 *  their natural order. After the last attribute, padding bytes are
933 	 *  added to make ->lde_reclen a multiple of 8.
934 	 */
935 	char	  lde_name[0];
936 };
937 
938 /*
939  * Definitions of optional directory entry attribute formats.
940  *
941  * Individual attributes do not have their length encoded in a generic way. It
942  * is assumed that the consumer of an attribute knows its format. This means that
943  * it is impossible to skip over an unknown attribute, except by skipping over all
944  * remaining attributes (by using ->lde_reclen), which is not too
945  * constraining, because new server versions will append new attributes at
946  * the end of an entry.
947  */
948 
949 /**
950  * Fid directory attribute: a fid of an object referenced by the entry. This
951  * will be almost always requested by the client and supplied by the server.
952  *
953  * Aligned to 8 bytes.
954  */
955 /* To have compatibility with 1.8, lets have fid in lu_dirent struct. */
956 
957 /**
958  * File type.
959  *
960  * Aligned to 2 bytes.
961  */
962 struct luda_type {
963 	__u16 lt_type;
964 };
965 
966 #ifndef IFSHIFT
967 #define IFSHIFT                 12
968 #endif
969 
970 #ifndef IFTODT
971 #define IFTODT(type)		(((type) & S_IFMT) >> IFSHIFT)
972 #endif
973 #ifndef DTTOIF
974 #define DTTOIF(dirtype)		((dirtype) << IFSHIFT)
975 #endif
976 
977 struct lu_dirpage {
978 	__u64	    ldp_hash_start;
979 	__u64	    ldp_hash_end;
980 	__u32	    ldp_flags;
981 	__u32	    ldp_pad0;
982 	struct lu_dirent ldp_entries[0];
983 };
984 
985 enum lu_dirpage_flags {
986 	/**
987 	 * dirpage contains no entry.
988 	 */
989 	LDF_EMPTY   = 1 << 0,
990 	/**
991 	 * last entry's lde_hash equals ldp_hash_end.
992 	 */
993 	LDF_COLLIDE = 1 << 1
994 };
995 
996 static inline struct lu_dirent *lu_dirent_start(struct lu_dirpage *dp)
997 {
998 	if (le32_to_cpu(dp->ldp_flags) & LDF_EMPTY)
999 		return NULL;
1000 	else
1001 		return dp->ldp_entries;
1002 }
1003 
1004 static inline struct lu_dirent *lu_dirent_next(struct lu_dirent *ent)
1005 {
1006 	struct lu_dirent *next;
1007 
1008 	if (le16_to_cpu(ent->lde_reclen) != 0)
1009 		next = ((void *)ent) + le16_to_cpu(ent->lde_reclen);
1010 	else
1011 		next = NULL;
1012 
1013 	return next;
1014 }
1015 
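
/*
 * Illustrative sketch (hypothetical helper): walking all entries of one
 * lu_dirpage with the iterators above; lu_dirent_next() returns NULL once
 * it is called on the entry whose lde_reclen is 0, which terminates the page.
 */
static inline unsigned int example_count_dirents(struct lu_dirpage *dp)
{
	struct lu_dirent *ent;
	unsigned int count = 0;

	for (ent = lu_dirent_start(dp); ent; ent = lu_dirent_next(ent))
		count++;

	return count;
}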
1016 static inline size_t lu_dirent_calc_size(size_t namelen, __u16 attr)
1017 {
1018 	size_t size;
1019 
1020 	if (attr & LUDA_TYPE) {
1021 		const size_t align = sizeof(struct luda_type) - 1;
1022 
1023 		size = (sizeof(struct lu_dirent) + namelen + align) & ~align;
1024 		size += sizeof(struct luda_type);
1025 	} else {
1026 		size = sizeof(struct lu_dirent) + namelen;
1027 	}
1028 
1029 	return (size + 7) & ~7;
1030 }
1031 
1032 #define MDS_DIR_END_OFF 0xfffffffffffffffeULL
1033 
1034 /**
1035  * MDS_READPAGE page size
1036  *
1037  * This is the directory page size packed in MDS_READPAGE RPCs.
1038  * It's different from PAGE_SIZE because the client needs to
1039  * access the struct lu_dirpage header packed at the beginning of
1040  * the "page"; without this there would be no way to know where the
1041  * lu_dirpage header is if the client and server PAGE_SIZE differ.
1042  */
1043 #define LU_PAGE_SHIFT 12
1044 #define LU_PAGE_SIZE  (1UL << LU_PAGE_SHIFT)
1045 #define LU_PAGE_MASK  (~(LU_PAGE_SIZE - 1))
1046 
1047 #define LU_PAGE_COUNT (1 << (PAGE_SHIFT - LU_PAGE_SHIFT))
1048 
1049 /** @} lu_dir */
1050 
1051 struct lustre_handle {
1052 	__u64 cookie;
1053 };
1054 
1055 #define DEAD_HANDLE_MAGIC 0xdeadbeefcafebabeULL
1056 
1057 static inline bool lustre_handle_is_used(const struct lustre_handle *lh)
1058 {
1059 	return lh->cookie != 0ull;
1060 }
1061 
1062 static inline bool lustre_handle_equal(const struct lustre_handle *lh1,
1063 				       const struct lustre_handle *lh2)
1064 {
1065 	return lh1->cookie == lh2->cookie;
1066 }
1067 
1068 static inline void lustre_handle_copy(struct lustre_handle *tgt,
1069 				      const struct lustre_handle *src)
1070 {
1071 	tgt->cookie = src->cookie;
1072 }
1073 
1074 /* flags for lm_flags */
1075 #define MSGHDR_AT_SUPPORT	       0x1
1076 #define MSGHDR_CKSUM_INCOMPAT18	 0x2
1077 
1078 #define lustre_msg lustre_msg_v2
1079 /* we depend on this structure to be 8-byte aligned */
1080 /* this type is only endian-adjusted in lustre_unpack_msg() */
1081 struct lustre_msg_v2 {
1082 	__u32 lm_bufcount;
1083 	__u32 lm_secflvr;
1084 	__u32 lm_magic;
1085 	__u32 lm_repsize;
1086 	__u32 lm_cksum;
1087 	__u32 lm_flags;
1088 	__u32 lm_padding_2;
1089 	__u32 lm_padding_3;
1090 	__u32 lm_buflens[0];
1091 };
1092 
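
/*
 * Illustrative sketch (hypothetical helper): the receiver detects a peer of
 * the opposite endianness by looking at lm_magic, which is sent in the
 * sender's byte order as described at the top of this file; a swabbed magic
 * means the message buffers must be byte-swapped by the unpacking code.
 */
static inline bool example_msg_needs_swab(const struct lustre_msg_v2 *msg)
{
	return msg->lm_magic == LUSTRE_MSG_MAGIC_V2_SWABBED;
}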
1093 /* without gss, ptlrpc_body is put at the first buffer. */
1094 #define PTLRPC_NUM_VERSIONS     4
1095 
1096 struct ptlrpc_body_v3 {
1097 	struct lustre_handle pb_handle;
1098 	__u32 pb_type;
1099 	__u32 pb_version;
1100 	__u32 pb_opc;
1101 	__u32 pb_status;
1102 	__u64 pb_last_xid;
1103 	__u64 pb_last_seen;
1104 	__u64 pb_last_committed;
1105 	__u64 pb_transno;
1106 	__u32 pb_flags;
1107 	__u32 pb_op_flags;
1108 	__u32 pb_conn_cnt;
1109 	__u32 pb_timeout;  /* for req, the deadline, for rep, the service est */
1110 	__u32 pb_service_time; /* for rep, actual service time */
1111 	__u32 pb_limit;
1112 	__u64 pb_slv;
1113 	/* VBR: pre-versions */
1114 	__u64 pb_pre_versions[PTLRPC_NUM_VERSIONS];
1115 	/* padding for future needs */
1116 	__u64 pb_padding[4];
1117 	char  pb_jobid[LUSTRE_JOBID_SIZE];
1118 };
1119 
1120 #define ptlrpc_body     ptlrpc_body_v3
1121 
1122 struct ptlrpc_body_v2 {
1123 	struct lustre_handle pb_handle;
1124 	__u32 pb_type;
1125 	__u32 pb_version;
1126 	__u32 pb_opc;
1127 	__u32 pb_status;
1128 	__u64 pb_last_xid;
1129 	__u64 pb_last_seen;
1130 	__u64 pb_last_committed;
1131 	__u64 pb_transno;
1132 	__u32 pb_flags;
1133 	__u32 pb_op_flags;
1134 	__u32 pb_conn_cnt;
1135 	__u32 pb_timeout;  /* for req, the deadline, for rep, the service est */
1136 	__u32 pb_service_time; /* for rep, actual service time, also used for
1137 				* net_latency of req
1138 				*/
1139 	__u32 pb_limit;
1140 	__u64 pb_slv;
1141 	/* VBR: pre-versions */
1142 	__u64 pb_pre_versions[PTLRPC_NUM_VERSIONS];
1143 	/* padding for future needs */
1144 	__u64 pb_padding[4];
1145 };
1146 
1147 void lustre_swab_ptlrpc_body(struct ptlrpc_body *pb);
1148 
1149 /* message body offset for lustre_msg_v2 */
1150 /* ptlrpc body offset in all request/reply messages */
1151 #define MSG_PTLRPC_BODY_OFF	     0
1152 
1153 /* normal request/reply message record offset */
1154 #define REQ_REC_OFF		     1
1155 #define REPLY_REC_OFF		   1
1156 
1157 /* ldlm request message body offset */
1158 #define DLM_LOCKREQ_OFF		 1 /* lockreq offset */
1159 #define DLM_REQ_REC_OFF		 2 /* normal dlm request record offset */
1160 
1161 /* ldlm intent lock message body offset */
1162 #define DLM_INTENT_IT_OFF	       2 /* intent lock it offset */
1163 #define DLM_INTENT_REC_OFF	      3 /* intent lock record offset */
1164 
1165 /* ldlm reply message body offset */
1166 #define DLM_LOCKREPLY_OFF	       1 /* lockrep offset */
1167 #define DLM_REPLY_REC_OFF	       2 /* reply record offset */
1168 
1169 /** only use in req->rq_{req,rep}_swab_mask */
1170 #define MSG_PTLRPC_HEADER_OFF	   31
1171 
1172 /* Flags that are operation-specific go in the top 16 bits. */
1173 #define MSG_OP_FLAG_MASK   0xffff0000
1174 #define MSG_OP_FLAG_SHIFT  16
1175 
1176 /* Flags that apply to all requests are in the bottom 16 bits */
1177 #define MSG_GEN_FLAG_MASK     0x0000ffff
1178 #define MSG_LAST_REPLAY	   0x0001
1179 #define MSG_RESENT		0x0002
1180 #define MSG_REPLAY		0x0004
1181 /* #define MSG_AT_SUPPORT	 0x0008
1182  * This was used in early prototypes of adaptive timeouts, and while there
1183  * shouldn't be any users of that code there also isn't a need for using
1184  * this bit. Defer usage until at least 1.10 to avoid potential conflict.
1185  */
1186 #define MSG_DELAY_REPLAY	  0x0010
1187 #define MSG_VERSION_REPLAY	0x0020
1188 #define MSG_REQ_REPLAY_DONE       0x0040
1189 #define MSG_LOCK_REPLAY_DONE      0x0080
1190 
1191 /*
1192  * Flags for all connect opcodes (MDS_CONNECT, OST_CONNECT)
1193  */
1194 
1195 #define MSG_CONNECT_RECOVERING  0x00000001
1196 #define MSG_CONNECT_RECONNECT   0x00000002
1197 #define MSG_CONNECT_REPLAYABLE  0x00000004
1198 /*#define MSG_CONNECT_PEER	0x8 */
1199 #define MSG_CONNECT_LIBCLIENT   0x00000010
1200 #define MSG_CONNECT_INITIAL     0x00000020
1201 #define MSG_CONNECT_ASYNC       0x00000040
1202 #define MSG_CONNECT_NEXT_VER    0x00000080 /* use next version of lustre_msg */
1203 #define MSG_CONNECT_TRANSNO     0x00000100 /* report transno */
1204 
1205 /* Connect flags */
1206 #define OBD_CONNECT_RDONLY		  0x1ULL /*client has read-only access*/
1207 #define OBD_CONNECT_INDEX		  0x2ULL /*connect specific LOV idx */
1208 #define OBD_CONNECT_MDS			  0x4ULL /*connect from MDT to OST */
1209 #define OBD_CONNECT_GRANT		  0x8ULL /*OSC gets grant at connect */
1210 #define OBD_CONNECT_SRVLOCK		 0x10ULL /*server takes locks for cli */
1211 #define OBD_CONNECT_VERSION		 0x20ULL /*Lustre versions in ocd */
1212 #define OBD_CONNECT_REQPORTAL		 0x40ULL /*Separate non-IO req portal */
1213 #define OBD_CONNECT_ACL			 0x80ULL /*access control lists */
1214 #define OBD_CONNECT_XATTR		0x100ULL /*client use extended attr */
1215 #define OBD_CONNECT_CROW		0x200ULL /*MDS+OST create obj on write*/
1216 #define OBD_CONNECT_TRUNCLOCK		0x400ULL /*locks on server for punch */
1217 #define OBD_CONNECT_TRANSNO		0x800ULL /*replay sends init transno */
1218 #define OBD_CONNECT_IBITS	       0x1000ULL /*support for inodebits locks*/
1219 #define OBD_CONNECT_JOIN	       0x2000ULL /*files can be concatenated.
1220 						  *We do not support JOIN FILE
1221 						  *anymore, reserve this flags
1222 						  *just for preventing such bit
1223 						  *to be reused.
1224 						  */
1225 #define OBD_CONNECT_ATTRFID	       0x4000ULL /*Server can GetAttr By Fid*/
1226 #define OBD_CONNECT_NODEVOH	       0x8000ULL /*No open hndl on specl nodes*/
1227 #define OBD_CONNECT_RMT_CLIENT	      0x10000ULL /* Remote client, never used
1228 						  * in production. Removed in
1229 						  * 2.9. Keep this flag to
1230 						  * avoid reuse.
1231 						  */
1232 #define OBD_CONNECT_RMT_CLIENT_FORCE  0x20000ULL /* Remote client by force,
1233 						  * never used in production.
1234 						  * Removed in 2.9. Keep this
1235 						  * flag to avoid reuse
1236 						  */
1237 #define OBD_CONNECT_BRW_SIZE	      0x40000ULL /*Max bytes per rpc */
1238 #define OBD_CONNECT_QUOTA64	      0x80000ULL /*Not used since 2.4 */
1239 #define OBD_CONNECT_MDS_CAPA	     0x100000ULL /*MDS capability */
1240 #define OBD_CONNECT_OSS_CAPA	     0x200000ULL /*OSS capability */
1241 #define OBD_CONNECT_CANCELSET	     0x400000ULL /*Early batched cancels. */
1242 #define OBD_CONNECT_SOM		     0x800000ULL /*Size on MDS */
1243 #define OBD_CONNECT_AT		    0x1000000ULL /*client uses AT */
1244 #define OBD_CONNECT_LRU_RESIZE      0x2000000ULL /*LRU resize feature. */
1245 #define OBD_CONNECT_MDS_MDS	    0x4000000ULL /*MDS-MDS connection */
1246 #define OBD_CONNECT_REAL	    0x8000000ULL /*real connection */
1247 #define OBD_CONNECT_CHANGE_QS      0x10000000ULL /*Not used since 2.4 */
1248 #define OBD_CONNECT_CKSUM	   0x20000000ULL /*support several cksum algos*/
1249 #define OBD_CONNECT_FID		   0x40000000ULL /*FID is supported by server */
1250 #define OBD_CONNECT_VBR		   0x80000000ULL /*version based recovery */
1251 #define OBD_CONNECT_LOV_V3	  0x100000000ULL /*client supports LOV v3 EA */
1252 #define OBD_CONNECT_GRANT_SHRINK  0x200000000ULL /* support grant shrink */
1253 #define OBD_CONNECT_SKIP_ORPHAN   0x400000000ULL /* don't reuse orphan objids */
1254 #define OBD_CONNECT_MAX_EASIZE    0x800000000ULL /* preserved for large EA */
1255 #define OBD_CONNECT_FULL20       0x1000000000ULL /* it is 2.0 client */
1256 #define OBD_CONNECT_LAYOUTLOCK   0x2000000000ULL /* client uses layout lock */
1257 #define OBD_CONNECT_64BITHASH    0x4000000000ULL /* client supports 64-bits
1258 						  * directory hash
1259 						  */
1260 #define OBD_CONNECT_MAXBYTES     0x8000000000ULL /* max stripe size */
1261 #define OBD_CONNECT_IMP_RECOV   0x10000000000ULL /* imp recovery support */
1262 #define OBD_CONNECT_JOBSTATS    0x20000000000ULL /* jobid in ptlrpc_body */
1263 #define OBD_CONNECT_UMASK       0x40000000000ULL /* create uses client umask */
1264 #define OBD_CONNECT_EINPROGRESS 0x80000000000ULL /* client handles -EINPROGRESS
1265 						  * RPC error properly
1266 						  */
1267 #define OBD_CONNECT_GRANT_PARAM 0x100000000000ULL/* extra grant params used for
1268 						  * finer space reservation
1269 						  */
1270 #define OBD_CONNECT_FLOCK_OWNER 0x200000000000ULL /* for the fixed 1.8
1271 						   * policy and 2.x server
1272 						   */
1273 #define OBD_CONNECT_LVB_TYPE	0x400000000000ULL /* variable type of LVB */
1274 #define OBD_CONNECT_NANOSEC_TIME 0x800000000000ULL /* nanosecond timestamps */
1275 #define OBD_CONNECT_LIGHTWEIGHT 0x1000000000000ULL/* lightweight connection */
1276 #define OBD_CONNECT_SHORTIO     0x2000000000000ULL/* short io */
1277 #define OBD_CONNECT_PINGLESS	0x4000000000000ULL/* pings not required */
1278 #define OBD_CONNECT_FLOCK_DEAD	0x8000000000000ULL/* flock deadlock detection */
1279 #define OBD_CONNECT_DISP_STRIPE 0x10000000000000ULL/*create stripe disposition*/
1280 #define OBD_CONNECT_OPEN_BY_FID	0x20000000000000ULL	/* open by fid won't pack
1281 							 * name in request
1282 							 */
1283 #define OBD_CONNECT_LFSCK	0x40000000000000ULL/* support online LFSCK */
1284 #define OBD_CONNECT_UNLINK_CLOSE 0x100000000000000ULL/* close file in unlink */
1285 #define OBD_CONNECT_DIR_STRIPE	 0x400000000000000ULL/* striped DNE dir */
1286 
1287 /* XXX README XXX:
1288  * Please DO NOT add flag values here before first ensuring that this same
1289  * flag value is not in use on some other branch.  Please clear any such
1290  * changes with senior engineers before starting to use a new flag.  Then,
1291  * submit a small patch against EVERY branch that ONLY adds the new flag,
1292  * updates obd_connect_names[] for lprocfs_rd_connect_flags(), adds the
1293  * flag to check_obd_connect_data(), and updates wiretests accordingly, so it
1294  * can be approved and landed easily to reserve the flag for future use.
1295  */
1296 
1297 /* The MNE_SWAB flag is overloading the MDS_MDS bit only for the MGS
1298  * connection.  It is a temporary bug fix for Imperative Recovery interop
1299  * between 2.2 and 2.3 x86/ppc nodes, and can be removed when interop for
1300  * 2.2 clients/servers is no longer needed.  LU-1252/LU-1644.
1301  */
1302 #define OBD_CONNECT_MNE_SWAB		 OBD_CONNECT_MDS_MDS
1303 
1304 #define OCD_HAS_FLAG(ocd, flg)  \
1305 	(!!((ocd)->ocd_connect_flags & OBD_CONNECT_##flg))
1306 
1307 /* Features required for this version of the client to work with server */
1308 #define CLIENT_CONNECT_MDT_REQD (OBD_CONNECT_IBITS | OBD_CONNECT_FID | \
1309 				 OBD_CONNECT_FULL20)
1310 
1311 /* This structure is used for both request and reply.
1312  *
1313  * If we eventually have separate connect data for different types, which we
1314  * almost certainly will, then perhaps we stick a union in here.
1315  */
1316 struct obd_connect_data_v1 {
1317 	__u64 ocd_connect_flags; /* OBD_CONNECT_* per above */
1318 	__u32 ocd_version;	 /* lustre release version number */
1319 	__u32 ocd_grant;	 /* initial cache grant amount (bytes) */
1320 	__u32 ocd_index;	 /* LOV index to connect to */
1321 	__u32 ocd_brw_size;	 /* Maximum BRW size in bytes, must be 2^n */
1322 	__u64 ocd_ibits_known;   /* inode bits this client understands */
1323 	__u8  ocd_blocksize;     /* log2 of the backend filesystem blocksize */
1324 	__u8  ocd_inodespace;    /* log2 of the per-inode space consumption */
1325 	__u16 ocd_grant_extent;  /* per-extent grant overhead, in 1K blocks */
1326 	__u32 ocd_unused;	/* also fix lustre_swab_connect */
1327 	__u64 ocd_transno;       /* first transno from client to be replayed */
1328 	__u32 ocd_group;	 /* MDS group on OST */
1329 	__u32 ocd_cksum_types;   /* supported checksum algorithms */
1330 	__u32 ocd_max_easize;    /* How big LOV EA can be on MDS */
1331 	__u32 ocd_instance;      /* also fix lustre_swab_connect */
1332 	__u64 ocd_maxbytes;      /* Maximum stripe size in bytes */
1333 };
1334 
1335 struct obd_connect_data {
1336 	__u64 ocd_connect_flags; /* OBD_CONNECT_* per above */
1337 	__u32 ocd_version;	 /* lustre release version number */
1338 	__u32 ocd_grant;	 /* initial cache grant amount (bytes) */
1339 	__u32 ocd_index;	 /* LOV index to connect to */
1340 	__u32 ocd_brw_size;	 /* Maximum BRW size in bytes */
1341 	__u64 ocd_ibits_known;   /* inode bits this client understands */
1342 	__u8  ocd_blocksize;     /* log2 of the backend filesystem blocksize */
1343 	__u8  ocd_inodespace;    /* log2 of the per-inode space consumption */
1344 	__u16 ocd_grant_extent;  /* per-extent grant overhead, in 1K blocks */
1345 	__u32 ocd_unused;	 /* also fix lustre_swab_connect */
1346 	__u64 ocd_transno;       /* first transno from client to be replayed */
1347 	__u32 ocd_group;	 /* MDS group on OST */
1348 	__u32 ocd_cksum_types;   /* supported checksum algorithms */
1349 	__u32 ocd_max_easize;    /* How big LOV EA can be on MDS */
1350 	__u32 ocd_instance;      /* instance # of this target */
1351 	__u64 ocd_maxbytes;      /* Maximum stripe size in bytes */
1352 	/* Fields after ocd_maxbytes are only accessible by the receiver
1353 	 * if the corresponding flag in ocd_connect_flags is set. Accessing
1354 	 * any field after ocd_maxbytes on the receiver without a valid flag
1355 	 * may result in out-of-bound memory access and kernel oops.
1356 	 */
1357 	__u64 padding1;	  /* added 2.1.0. also fix lustre_swab_connect */
1358 	__u64 padding2;	  /* added 2.1.0. also fix lustre_swab_connect */
1359 	__u64 padding3;	  /* added 2.1.0. also fix lustre_swab_connect */
1360 	__u64 padding4;	  /* added 2.1.0. also fix lustre_swab_connect */
1361 	__u64 padding5;	  /* added 2.1.0. also fix lustre_swab_connect */
1362 	__u64 padding6;	  /* added 2.1.0. also fix lustre_swab_connect */
1363 	__u64 padding7;	  /* added 2.1.0. also fix lustre_swab_connect */
1364 	__u64 padding8;	  /* added 2.1.0. also fix lustre_swab_connect */
1365 	__u64 padding9;	  /* added 2.1.0. also fix lustre_swab_connect */
1366 	__u64 paddingA;	  /* added 2.1.0. also fix lustre_swab_connect */
1367 	__u64 paddingB;	  /* added 2.1.0. also fix lustre_swab_connect */
1368 	__u64 paddingC;	  /* added 2.1.0. also fix lustre_swab_connect */
1369 	__u64 paddingD;	  /* added 2.1.0. also fix lustre_swab_connect */
1370 	__u64 paddingE;	  /* added 2.1.0. also fix lustre_swab_connect */
1371 	__u64 paddingF;	  /* added 2.1.0. also fix lustre_swab_connect */
1372 };
1373 
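
/*
 * Illustrative sketch (hypothetical helper): optional obd_connect_data
 * fields are only meaningful when the matching connect flag was negotiated,
 * so check the flag (e.g. with OCD_HAS_FLAG()) before trusting the field.
 */
static inline __u64 example_ocd_maxbytes(const struct obd_connect_data *ocd)
{
	return OCD_HAS_FLAG(ocd, MAXBYTES) ? ocd->ocd_maxbytes : 0;
}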
1374 /* XXX README XXX:
1375  * Please DO NOT use any fields here before first ensuring that this same
1376  * field is not in use on some other branch.  Please clear any such changes
1377  * with senior engineers before starting to use a new field.  Then, submit
1378  * a small patch against EVERY branch that ONLY adds the new field along with
1379  * the matching OBD_CONNECT flag, so that can be approved and landed easily to
1380  * reserve the flag for future use.
1381  */
1382 
1383 void lustre_swab_connect(struct obd_connect_data *ocd);
1384 
1385 /*
1386  * Supported checksum algorithms. Up to 32 checksum types are supported.
1387  * (32-bit mask stored in obd_connect_data::ocd_cksum_types)
1388  * Please update DECLARE_CKSUM_NAME/OBD_CKSUM_ALL in obd.h when adding a new
1389  * algorithm and also the OBD_FL_CKSUM* flags.
1390  */
1391 enum cksum_type {
1392 	OBD_CKSUM_CRC32  = 0x00000001,
1393 	OBD_CKSUM_ADLER  = 0x00000002,
1394 	OBD_CKSUM_CRC32C = 0x00000004,
1395 };
1396 
1397 /*
1398  *   OST requests: OBDO & OBD request records
1399  */
1400 
1401 /* opcodes */
1402 enum ost_cmd {
1403 	OST_REPLY      =  0,       /* reply ? */
1404 	OST_GETATTR    =  1,
1405 	OST_SETATTR    =  2,
1406 	OST_READ       =  3,
1407 	OST_WRITE      =  4,
1408 	OST_CREATE     =  5,
1409 	OST_DESTROY    =  6,
1410 	OST_GET_INFO   =  7,
1411 	OST_CONNECT    =  8,
1412 	OST_DISCONNECT =  9,
1413 	OST_PUNCH      = 10,
1414 	OST_OPEN       = 11,
1415 	OST_CLOSE      = 12,
1416 	OST_STATFS     = 13,
1417 	OST_SYNC       = 16,
1418 	OST_SET_INFO   = 17,
1419 	OST_QUOTACHECK = 18,
1420 	OST_QUOTACTL   = 19,
1421 	OST_QUOTA_ADJUST_QUNIT = 20, /* not used since 2.4 */
1422 	OST_LAST_OPC
1423 };
1424 #define OST_FIRST_OPC  OST_REPLY
1425 
1426 enum obdo_flags {
1427 	OBD_FL_INLINEDATA   = 0x00000001,
1428 	OBD_FL_OBDMDEXISTS  = 0x00000002,
1429 	OBD_FL_DELORPHAN    = 0x00000004, /* if set in o_flags delete orphans */
1430 	OBD_FL_NORPC	    = 0x00000008, /* set in o_flags do in OSC not OST */
1431 	OBD_FL_IDONLY       = 0x00000010, /* set in o_flags only adjust obj id*/
1432 	OBD_FL_RECREATE_OBJS = 0x00000020, /* recreate missing obj */
1433 	OBD_FL_DEBUG_CHECK  = 0x00000040, /* echo client/server debug check */
1434 	OBD_FL_NO_USRQUOTA  = 0x00000100, /* the object's owner is over quota */
1435 	OBD_FL_NO_GRPQUOTA  = 0x00000200, /* the object's group is over quota */
1436 	OBD_FL_CREATE_CROW  = 0x00000400, /* object should be created on write */
1437 	OBD_FL_SRVLOCK      = 0x00000800, /* delegate DLM locking to server */
1438 	OBD_FL_CKSUM_CRC32  = 0x00001000, /* CRC32 checksum type */
1439 	OBD_FL_CKSUM_ADLER  = 0x00002000, /* ADLER checksum type */
1440 	OBD_FL_CKSUM_CRC32C = 0x00004000, /* CRC32C checksum type */
1441 	OBD_FL_CKSUM_RSVD2  = 0x00008000, /* for future cksum types */
1442 	OBD_FL_CKSUM_RSVD3  = 0x00010000, /* for future cksum types */
1443 	OBD_FL_SHRINK_GRANT = 0x00020000, /* object shrink the grant */
1444 	OBD_FL_MMAP	    = 0x00040000, /* object is mmapped on the client.
1445 					   * XXX: obsoleted - reserved for old
1446 					   * clients prior to 2.2
1447 					   */
1448 	OBD_FL_RECOV_RESEND = 0x00080000, /* recoverable resent */
1449 	OBD_FL_NOSPC_BLK    = 0x00100000, /* no more block space on OST */
1450 	OBD_FL_FLUSH	    = 0x00200000, /* flush pages on the OST */
1451 	OBD_FL_SHORT_IO	    = 0x00400000, /* short io request */
1452 
1453 	/* Note that while these checksum values are currently separate bits,
1454 	 * in 2.x we can actually allow all values from 1-31 if we wanted.
1455 	 */
1456 	OBD_FL_CKSUM_ALL    = OBD_FL_CKSUM_CRC32 | OBD_FL_CKSUM_ADLER |
1457 			      OBD_FL_CKSUM_CRC32C,
1458 
1459 	/* mask for local-only flag, which won't be sent over network */
1460 	OBD_FL_LOCAL_MASK   = 0xF0000000,
1461 };
1462 
1463 /*
1464  * All LOV EA magics should have the same postfix: if some new version of
1465  * Lustre introduces a new LOV EA magic, then after a downgrade to an old
1466  * Lustre, even though the old version does not recognize the new magic,
1467  * it can still distinguish the corrupted cases by checking
1468  * the magic's postfix.
1469  */
1470 #define LOV_MAGIC_MAGIC 0x0BD0
1471 #define LOV_MAGIC_MASK  0xFFFF
1472 
1473 #define LOV_MAGIC_V1		(0x0BD10000 | LOV_MAGIC_MAGIC)
1474 #define LOV_MAGIC_JOIN_V1	(0x0BD20000 | LOV_MAGIC_MAGIC)
1475 #define LOV_MAGIC_V3		(0x0BD30000 | LOV_MAGIC_MAGIC)
1476 #define LOV_MAGIC_MIGRATE	(0x0BD40000 | LOV_MAGIC_MAGIC)
1477 /* reserved for specifying OSTs */
1478 #define LOV_MAGIC_SPECIFIC	(0x0BD50000 | LOV_MAGIC_MAGIC)
1479 #define LOV_MAGIC		LOV_MAGIC_V1
1480 
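
/*
 * Illustrative sketch (hypothetical helper): every LOV EA magic carries the
 * LOV_MAGIC_MAGIC postfix in its low 16 bits, so even a magic from a newer,
 * unknown layout can be told apart from plain corruption.
 */
static inline bool example_lov_magic_plausible(__u32 magic)
{
	return (magic & LOV_MAGIC_MASK) == LOV_MAGIC_MAGIC;
}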
1481 /*
1482  * magic for fully defined striping
1483  * The idea is that we should have different magics for striping "hints"
1484  * (struct lov_user_md_v[13]) and fully defined ready-to-use striping
1485  * (struct lov_mds_md_v[13]).  Since the magics are used in the wire
1486  * protocol we can't just change them without lengthy preparation, but we
1487  * still need a mechanism to allow LOD to differentiate hint versus ready
1488  * striping.  So, for the moment we do a trick: the MDT knows what to
1489  * expect from a request depending on the case (replay uses ready striping,
1490  * a non-replay request uses hints), so the MDT replaces the magic with the
1491  * appropriate one and now LOD can easily understand what's inside -bzzz
1492  */
1493 #define LOV_MAGIC_V1_DEF  0x0CD10BD0
1494 #define LOV_MAGIC_V3_DEF  0x0CD30BD0
1495 
1496 #define lov_pattern(pattern)		(pattern & ~LOV_PATTERN_F_MASK)
1497 #define lov_pattern_flags(pattern)	(pattern & LOV_PATTERN_F_MASK)
1498 
1499 #define lov_ost_data lov_ost_data_v1
1500 struct lov_ost_data_v1 {	  /* per-stripe data structure (little-endian)*/
1501 	struct ost_id l_ost_oi;	  /* OST object ID */
1502 	__u32 l_ost_gen;	  /* generation of this l_ost_idx */
1503 	__u32 l_ost_idx;	  /* OST index in LOV (lov_tgt_desc->tgts) */
1504 };
1505 
1506 #define lov_mds_md lov_mds_md_v1
1507 struct lov_mds_md_v1 {	    /* LOV EA mds/wire data (little-endian) */
1508 	__u32 lmm_magic;	  /* magic number = LOV_MAGIC_V1 */
1509 	__u32 lmm_pattern;	/* LOV_PATTERN_RAID0, LOV_PATTERN_RAID1 */
1510 	struct ost_id	lmm_oi;	  /* LOV object ID */
1511 	__u32 lmm_stripe_size;    /* size of stripe in bytes */
1512 	/* lmm_stripe_count used to be __u32 */
1513 	__u16 lmm_stripe_count;   /* num stripes in use for this object */
1514 	__u16 lmm_layout_gen;     /* layout generation number */
1515 	struct lov_ost_data_v1 lmm_objects[0]; /* per-stripe data */
1516 };
1517 
1518 /**
1519  * Sigh, because pre-2.4 uses
1520  * struct lov_mds_md_v1 {
1521  *	........
1522  *	__u64 lmm_object_id;
1523  *	__u64 lmm_object_seq;
1524  *      ......
1525  *      }
1526  * to identify the LOV(MDT) object, and lmm_object_seq will be a normal
1527  * FID sequence, which makes it hard to fold these conversions into the
1528  * common ostid-to-FID conversion, so we do lmm_oi/FID conversion separately.
1529  *
1530  * The lmm_oi can be told apart as follows:
1531  * 1.8: lmm_object_id = {inode}, lmm_object_gr = 0
1532  * 2.1: lmm_object_id = {oid < 128k}, lmm_object_seq = FID_SEQ_NORMAL
1533  * 2.4: lmm_oi.f_seq = FID_SEQ_NORMAL, lmm_oi.f_oid = {oid < 128k},
1534  *      lmm_oi.f_ver = 0
1535  *
1536  * But currently lmm_oi/lsm_oi has no "real" users except for printing
1537  * some information, the user can always get the real FID from the LMA,
1538  * and this multiple-case check would make swabbing more complicated.
1539  * So we keep using id/seq for lmm_oi.
1540  */
1541 
1542 static inline void fid_to_lmm_oi(const struct lu_fid *fid,
1543 				 struct ost_id *oi)
1544 {
1545 	oi->oi.oi_id = fid_oid(fid);
1546 	oi->oi.oi_seq = fid_seq(fid);
1547 }
1548 
1549 static inline void lmm_oi_set_seq(struct ost_id *oi, __u64 seq)
1550 {
1551 	oi->oi.oi_seq = seq;
1552 }
1553 
1554 static inline void lmm_oi_set_id(struct ost_id *oi, __u64 oid)
1555 {
1556 	oi->oi.oi_id = oid;
1557 }
1558 
1559 static inline __u64 lmm_oi_id(const struct ost_id *oi)
1560 {
1561 	return oi->oi.oi_id;
1562 }
1563 
1564 static inline __u64 lmm_oi_seq(const struct ost_id *oi)
1565 {
1566 	return oi->oi.oi_seq;
1567 }
1568 
1569 static inline void lmm_oi_le_to_cpu(struct ost_id *dst_oi,
1570 				    const struct ost_id *src_oi)
1571 {
1572 	dst_oi->oi.oi_id = le64_to_cpu(src_oi->oi.oi_id);
1573 	dst_oi->oi.oi_seq = le64_to_cpu(src_oi->oi.oi_seq);
1574 }
1575 
1576 static inline void lmm_oi_cpu_to_le(struct ost_id *dst_oi,
1577 				    const struct ost_id *src_oi)
1578 {
1579 	dst_oi->oi.oi_id = cpu_to_le64(src_oi->oi.oi_id);
1580 	dst_oi->oi.oi_seq = cpu_to_le64(src_oi->oi.oi_seq);
1581 }
1582 
1583 /* extern void lustre_swab_lov_mds_md(struct lov_mds_md *llm); */
1584 
1585 #define MAX_MD_SIZE							\
1586 	(sizeof(struct lov_mds_md) + 4 * sizeof(struct lov_ost_data))
1587 #define MIN_MD_SIZE							\
1588 	(sizeof(struct lov_mds_md) + 1 * sizeof(struct lov_ost_data))
1589 
1590 #define XATTR_NAME_ACL_ACCESS   "system.posix_acl_access"
1591 #define XATTR_NAME_ACL_DEFAULT  "system.posix_acl_default"
1592 #define XATTR_USER_PREFIX       "user."
1593 #define XATTR_TRUSTED_PREFIX    "trusted."
1594 #define XATTR_SECURITY_PREFIX   "security."
1595 #define XATTR_LUSTRE_PREFIX     "lustre."
1596 
1597 #define XATTR_NAME_LOV	  "trusted.lov"
1598 #define XATTR_NAME_LMA	  "trusted.lma"
1599 #define XATTR_NAME_LMV	  "trusted.lmv"
1600 #define XATTR_NAME_DEFAULT_LMV	"trusted.dmv"
1601 #define XATTR_NAME_LINK	 "trusted.link"
1602 #define XATTR_NAME_FID	  "trusted.fid"
1603 #define XATTR_NAME_VERSION      "trusted.version"
1604 #define XATTR_NAME_SOM		"trusted.som"
1605 #define XATTR_NAME_HSM		"trusted.hsm"
1606 #define XATTR_NAME_LFSCK_NAMESPACE "trusted.lfsck_namespace"
1607 
1608 struct lov_mds_md_v3 {	    /* LOV EA mds/wire data (little-endian) */
1609 	__u32 lmm_magic;	  /* magic number = LOV_MAGIC_V3 */
1610 	__u32 lmm_pattern;	/* LOV_PATTERN_RAID0, LOV_PATTERN_RAID1 */
1611 	struct ost_id	lmm_oi;	  /* LOV object ID */
1612 	__u32 lmm_stripe_size;    /* size of stripe in bytes */
1613 	/* lmm_stripe_count used to be __u32 */
1614 	__u16 lmm_stripe_count;   /* num stripes in use for this object */
1615 	__u16 lmm_layout_gen;     /* layout generation number */
1616 	char  lmm_pool_name[LOV_MAXPOOLNAME + 1]; /* must be 32bit aligned */
1617 	struct lov_ost_data_v1 lmm_objects[0]; /* per-stripe data */
1618 };
1619 
1620 static inline __u32 lov_mds_md_size(__u16 stripes, __u32 lmm_magic)
1621 {
1622 	if (lmm_magic == LOV_MAGIC_V3)
1623 		return sizeof(struct lov_mds_md_v3) +
1624 				stripes * sizeof(struct lov_ost_data_v1);
1625 	else
1626 		return sizeof(struct lov_mds_md_v1) +
1627 				stripes * sizeof(struct lov_ost_data_v1);
1628 }
1629 
1630 static inline __u32
1631 lov_mds_md_max_stripe_count(size_t buf_size, __u32 lmm_magic)
1632 {
1633 	switch (lmm_magic) {
1634 	case LOV_MAGIC_V1: {
1635 		struct lov_mds_md_v1 lmm;
1636 
1637 		if (buf_size < sizeof(lmm))
1638 			return 0;
1639 
1640 		return (buf_size - sizeof(lmm)) / sizeof(lmm.lmm_objects[0]);
1641 	}
1642 	case LOV_MAGIC_V3: {
1643 		struct lov_mds_md_v3 lmm;
1644 
1645 		if (buf_size < sizeof(lmm))
1646 			return 0;
1647 
1648 		return (buf_size - sizeof(lmm)) / sizeof(lmm.lmm_objects[0]);
1649 	}
1650 	default:
1651 		return 0;
1652 	}
1653 }
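/*
 * Example (editor's sketch): lov_mds_md_size() and
 * lov_mds_md_max_stripe_count() are inverses of each other -- one sizes
 * an EA buffer for a given stripe count, the other bounds the stripe
 * count that fits in a given buffer:
 *
 *	__u32 size = lov_mds_md_size(4, LOV_MAGIC_V3);
 *	__u32 max  = lov_mds_md_max_stripe_count(size, LOV_MAGIC_V3);
 *
 * Here max comes back as 4, since both helpers account for the same
 * struct lov_mds_md_v3 header plus one lov_ost_data_v1 per stripe.
 */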
1654 
1655 #define OBD_MD_FLID	   (0x00000001ULL) /* object ID */
1656 #define OBD_MD_FLATIME     (0x00000002ULL) /* access time */
1657 #define OBD_MD_FLMTIME     (0x00000004ULL) /* data modification time */
1658 #define OBD_MD_FLCTIME     (0x00000008ULL) /* change time */
1659 #define OBD_MD_FLSIZE      (0x00000010ULL) /* size */
1660 #define OBD_MD_FLBLOCKS    (0x00000020ULL) /* allocated blocks count */
1661 #define OBD_MD_FLBLKSZ     (0x00000040ULL) /* block size */
1662 #define OBD_MD_FLMODE      (0x00000080ULL) /* access bits (mode & ~S_IFMT) */
1663 #define OBD_MD_FLTYPE      (0x00000100ULL) /* object type (mode & S_IFMT) */
1664 #define OBD_MD_FLUID       (0x00000200ULL) /* user ID */
1665 #define OBD_MD_FLGID       (0x00000400ULL) /* group ID */
1666 #define OBD_MD_FLFLAGS     (0x00000800ULL) /* flags word */
1667 #define OBD_MD_FLNLINK     (0x00002000ULL) /* link count */
1668 #define OBD_MD_FLGENER     (0x00004000ULL) /* generation number */
1669 /*#define OBD_MD_FLINLINE    (0x00008000ULL)  inline data. used until 1.6.5 */
1670 #define OBD_MD_FLRDEV      (0x00010000ULL) /* device number */
1671 #define OBD_MD_FLEASIZE    (0x00020000ULL) /* extended attribute data */
1672 #define OBD_MD_LINKNAME    (0x00040000ULL) /* symbolic link target */
1673 #define OBD_MD_FLHANDLE    (0x00080000ULL) /* file/lock handle */
1674 #define OBD_MD_FLCKSUM     (0x00100000ULL) /* bulk data checksum */
1675 #define OBD_MD_FLQOS       (0x00200000ULL) /* quality of service stats */
1676 /*#define OBD_MD_FLOSCOPQ    (0x00400000ULL) osc opaque data, never used */
1677 #define OBD_MD_FLCOOKIE    (0x00800000ULL) /* log cancellation cookie */
1678 #define OBD_MD_FLGROUP     (0x01000000ULL) /* group */
1679 #define OBD_MD_FLFID       (0x02000000ULL) /* ->ost write inline fid */
1680 #define OBD_MD_FLEPOCH     (0x04000000ULL) /* ->ost write with ioepoch */
1681 					   /* ->mds if epoch opens or closes
1682 					    */
1683 #define OBD_MD_FLGRANT     (0x08000000ULL) /* ost preallocation space grant */
1684 #define OBD_MD_FLDIREA     (0x10000000ULL) /* dir's extended attribute data */
1685 #define OBD_MD_FLUSRQUOTA  (0x20000000ULL) /* over quota flags sent from ost */
1686 #define OBD_MD_FLGRPQUOTA  (0x40000000ULL) /* over quota flags sent from ost */
1687 #define OBD_MD_FLMODEASIZE (0x80000000ULL) /* EA size will be changed */
1688 
1689 #define OBD_MD_MDS	   (0x0000000100000000ULL) /* where an inode lives on */
1690 #define OBD_MD_REINT       (0x0000000200000000ULL) /* reintegrate oa */
1691 #define OBD_MD_MEA	   (0x0000000400000000ULL) /* CMD split EA  */
1692 #define OBD_MD_TSTATE      (0x0000000800000000ULL) /* transient state field */
1693 
1694 #define OBD_MD_FLXATTR       (0x0000001000000000ULL) /* xattr */
1695 #define OBD_MD_FLXATTRLS     (0x0000002000000000ULL) /* xattr list */
1696 #define OBD_MD_FLXATTRRM     (0x0000004000000000ULL) /* xattr remove */
1697 #define OBD_MD_FLACL	     (0x0000008000000000ULL) /* ACL */
1698 /*	OBD_MD_FLRMTPERM     (0x0000010000000000ULL) remote perm, obsolete */
1699 #define OBD_MD_FLMDSCAPA     (0x0000020000000000ULL) /* MDS capability */
1700 #define OBD_MD_FLOSSCAPA     (0x0000040000000000ULL) /* OSS capability */
1701 #define OBD_MD_FLCKSPLIT     (0x0000080000000000ULL) /* Check split on server */
1702 #define OBD_MD_FLCROSSREF    (0x0000100000000000ULL) /* Cross-ref case */
1703 #define OBD_MD_FLGETATTRLOCK (0x0000200000000000ULL) /* Get IOEpoch attributes
1704 						      * under lock; for xattr
1705 						      * requests means the
1706 						      * client holds the lock
1707 						      */
1708 #define OBD_MD_FLOBJCOUNT    (0x0000400000000000ULL) /* for multiple destroy */
1709 
1710 /*	OBD_MD_FLRMTLSETFACL (0x0001000000000000ULL) lfs lsetfacl, obsolete */
1711 /*	OBD_MD_FLRMTLGETFACL (0x0002000000000000ULL) lfs lgetfacl, obsolete */
1712 /*	OBD_MD_FLRMTRSETFACL (0x0004000000000000ULL) lfs rsetfacl, obsolete */
1713 /*	OBD_MD_FLRMTRGETFACL (0x0008000000000000ULL) lfs rgetfacl, obsolete */
1714 
1715 #define OBD_MD_FLDATAVERSION (0x0010000000000000ULL) /* iversion sum */
1716 #define OBD_MD_FLRELEASED    (0x0020000000000000ULL) /* file released */
1717 
1718 #define OBD_MD_DEFAULT_MEA   (0x0040000000000000ULL) /* default MEA */
1719 
1720 #define OBD_MD_FLGETATTR (OBD_MD_FLID    | OBD_MD_FLATIME | OBD_MD_FLMTIME | \
1721 			  OBD_MD_FLCTIME | OBD_MD_FLSIZE  | OBD_MD_FLBLKSZ | \
1722 			  OBD_MD_FLMODE  | OBD_MD_FLTYPE  | OBD_MD_FLUID   | \
1723 			  OBD_MD_FLGID   | OBD_MD_FLFLAGS | OBD_MD_FLNLINK | \
1724 			  OBD_MD_FLGENER | OBD_MD_FLRDEV  | OBD_MD_FLGROUP)
1725 
1726 #define OBD_MD_FLXATTRALL (OBD_MD_FLXATTR | OBD_MD_FLXATTRLS)
1727 
1728 /* don't forget obdo_fid which is way down at the bottom so it can
1729  * come after the definition of llog_cookie
1730  */
1731 
1732 enum hss_valid {
1733 	HSS_SETMASK	= 0x01,
1734 	HSS_CLEARMASK	= 0x02,
1735 	HSS_ARCHIVE_ID	= 0x04,
1736 };
1737 
1738 struct hsm_state_set {
1739 	__u32	hss_valid;
1740 	__u32	hss_archive_id;
1741 	__u64	hss_setmask;
1742 	__u64	hss_clearmask;
1743 };
1744 
1745 void lustre_swab_hsm_user_state(struct hsm_user_state *hus);
1746 void lustre_swab_hsm_state_set(struct hsm_state_set *hss);
1747 
1748 void lustre_swab_obd_statfs(struct obd_statfs *os);
1749 
1750 /* ost_body.data values for OST_BRW */
1751 
1752 #define OBD_BRW_READ		0x01
1753 #define OBD_BRW_WRITE		0x02
1754 #define OBD_BRW_RWMASK		(OBD_BRW_READ | OBD_BRW_WRITE)
1755 #define OBD_BRW_SYNC		0x08 /* this page is a part of synchronous
1756 				      * transfer and is not accounted in
1757 				      * the grant.
1758 				      */
1759 #define OBD_BRW_CHECK		0x10
1760 #define OBD_BRW_FROM_GRANT      0x20 /* the osc manages this under llite */
1761 #define OBD_BRW_GRANTED		0x40 /* the ost manages this */
1762 #define OBD_BRW_NOCACHE		0x80 /* this page is a part of non-cached IO */
1763 #define OBD_BRW_NOQUOTA	       0x100
1764 #define OBD_BRW_SRVLOCK	       0x200 /* Client holds no lock over this page */
1765 #define OBD_BRW_ASYNC	       0x400 /* Server may delay commit to disk */
1766 #define OBD_BRW_MEMALLOC       0x800 /* Client runs in the "kswapd" context */
1767 #define OBD_BRW_OVER_USRQUOTA 0x1000 /* Running out of user quota */
1768 #define OBD_BRW_OVER_GRPQUOTA 0x2000 /* Running out of group quota */
1769 #define OBD_BRW_SOFT_SYNC     0x4000 /* This flag notifies the server
1770 				      * that the client is running low on
1771 				      * space for unstable pages; asking
1772 				      * it to sync quickly
1773 				      */
1774 
1775 #define OBD_OBJECT_EOF	LUSTRE_EOF
1776 
1777 #define OST_MIN_PRECREATE 32
1778 #define OST_MAX_PRECREATE 20000
1779 
1780 struct obd_ioobj {
1781 	struct ost_id	ioo_oid;	/* object ID, if multi-obj BRW */
1782 	__u32		ioo_max_brw;	/* low 16 bits were o_mode before 2.4,
1783 					 * now (PTLRPC_BULK_OPS_COUNT - 1) in
1784 					 * high 16 bits in 2.4 and later
1785 					 */
1786 	__u32		ioo_bufcnt;	/* number of niobufs for this object */
1787 };
1788 
1789 #define IOOBJ_MAX_BRW_BITS	16
1790 #define IOOBJ_TYPE_MASK		((1U << IOOBJ_MAX_BRW_BITS) - 1)
1791 #define ioobj_max_brw_get(ioo)	(((ioo)->ioo_max_brw >> IOOBJ_MAX_BRW_BITS) + 1)
1792 #define ioobj_max_brw_set(ioo, num)					\
1793 do { (ioo)->ioo_max_brw = ((num) - 1) << IOOBJ_MAX_BRW_BITS; } while (0)
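/*
 * Example (editor's sketch): the maximum BRW count is stored biased by
 * one in the high 16 bits of ioo_max_brw, so a value round-trips through
 * the macros above:
 *
 *	struct obd_ioobj ioo = { 0 };
 *
 *	ioobj_max_brw_set(&ioo, 8);
 *
 * leaves ioo.ioo_max_brw == (7 << IOOBJ_MAX_BRW_BITS), and
 * ioobj_max_brw_get(&ioo) then returns 8 again.
 */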
1794 
1795 void lustre_swab_obd_ioobj(struct obd_ioobj *ioo);
1796 
1797 /* multiple of 8 bytes => can array */
1798 struct niobuf_remote {
1799 	__u64	rnb_offset;
1800 	__u32	rnb_len;
1801 	__u32	rnb_flags;
1802 };
1803 
1804 void lustre_swab_niobuf_remote(struct niobuf_remote *nbr);
1805 
1806 /* lock value block communicated between the filter and llite */
1807 
1808 /* OST_LVB_ERR_INIT is needed because the return code in rc is
1809  * negative, i.e. because ((MASK + rc) & MASK) != MASK.
1810  */
1811 #define OST_LVB_ERR_INIT 0xffbadbad80000000ULL
1812 #define OST_LVB_ERR_MASK 0xffbadbad00000000ULL
1813 #define OST_LVB_IS_ERR(blocks)					  \
1814 	((blocks & OST_LVB_ERR_MASK) == OST_LVB_ERR_MASK)
1815 #define OST_LVB_SET_ERR(blocks, rc)				     \
1816 	do { blocks = OST_LVB_ERR_INIT + rc; } while (0)
1817 #define OST_LVB_GET_ERR(blocks)    (int)(blocks - OST_LVB_ERR_INIT)
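/*
 * Example (editor's sketch): encoding a negative return code into
 * lvb_blocks and recovering it with the macros above:
 *
 *	__u64 blocks;
 *	int rc = -ENOSPC;
 *
 *	OST_LVB_SET_ERR(blocks, rc);
 *
 * OST_LVB_IS_ERR(blocks) is now true, and OST_LVB_GET_ERR(blocks) yields
 * -ENOSPC back, because blocks lands in the 0xffbadbad... range reserved
 * by OST_LVB_ERR_MASK.
 */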
1818 
1819 struct ost_lvb_v1 {
1820 	__u64		lvb_size;
1821 	__s64		lvb_mtime;
1822 	__s64		lvb_atime;
1823 	__s64		lvb_ctime;
1824 	__u64		lvb_blocks;
1825 };
1826 
1827 void lustre_swab_ost_lvb_v1(struct ost_lvb_v1 *lvb);
1828 
1829 struct ost_lvb {
1830 	__u64		lvb_size;
1831 	__s64		lvb_mtime;
1832 	__s64		lvb_atime;
1833 	__s64		lvb_ctime;
1834 	__u64		lvb_blocks;
1835 	__u32		lvb_mtime_ns;
1836 	__u32		lvb_atime_ns;
1837 	__u32		lvb_ctime_ns;
1838 	__u32		lvb_padding;
1839 };
1840 
1841 void lustre_swab_ost_lvb(struct ost_lvb *lvb);
1842 
1843 /*
1844  *   lquota data structures
1845  */
1846 
1847 /* The lquota_id structure is a union of all the possible identifier types
1848  * that can be used with quota; this includes:
1849  * - 64-bit user ID
1850  * - 64-bit group ID
1851  * - a FID which can be used for per-directory quota in the future
1852  */
1853 union lquota_id {
1854 	struct lu_fid	qid_fid; /* FID for per-directory quota */
1855 	__u64		qid_uid; /* user identifier */
1856 	__u64		qid_gid; /* group identifier */
1857 };
1858 
1859 /* quotactl management */
1860 struct obd_quotactl {
1861 	__u32			qc_cmd;
1862 	__u32			qc_type; /* see Q_* flag below */
1863 	__u32			qc_id;
1864 	__u32			qc_stat;
1865 	struct obd_dqinfo	qc_dqinfo;
1866 	struct obd_dqblk	qc_dqblk;
1867 };
1868 
1869 void lustre_swab_obd_quotactl(struct obd_quotactl *q);
1870 
1871 #define Q_COPY(out, in, member) (out)->member = (in)->member
1872 
1873 #define QCTL_COPY(out, in)		\
1874 do {					\
1875 	Q_COPY(out, in, qc_cmd);	\
1876 	Q_COPY(out, in, qc_type);	\
1877 	Q_COPY(out, in, qc_id);		\
1878 	Q_COPY(out, in, qc_stat);	\
1879 	Q_COPY(out, in, qc_dqinfo);	\
1880 	Q_COPY(out, in, qc_dqblk);	\
1881 } while (0)
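/*
 * Example (editor's sketch): QCTL_COPY() copies only the wire-visible
 * members of struct obd_quotactl, e.g. from a request buffer into a
 * local copy before use:
 *
 *	struct obd_quotactl *oqctl_req;    (assumed to point into the request)
 *	struct obd_quotactl oqctl;
 *
 *	QCTL_COPY(&oqctl, oqctl_req);
 */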
1882 
1883 /* Data structures associated with the quota locks */
1884 
1885 /* Glimpse descriptor used for the index & per-ID quota locks */
1886 struct ldlm_gl_lquota_desc {
1887 	union lquota_id	gl_id;    /* quota ID subject to the glimpse */
1888 	__u64		gl_flags; /* see LQUOTA_FL* below */
1889 	__u64		gl_ver;   /* new index version */
1890 	__u64		gl_hardlimit; /* new hardlimit or qunit value */
1891 	__u64		gl_softlimit; /* new softlimit */
1892 	__u64		gl_time;
1893 	__u64		gl_pad2;
1894 };
1895 
1896 /* quota glimpse flags */
1897 #define LQUOTA_FL_EDQUOT 0x1 /* user/group out of quota space on QMT */
1898 
1899 /* LVB used with quota (global and per-ID) locks */
1900 struct lquota_lvb {
1901 	__u64	lvb_flags;	/* see LQUOTA_FL* above */
1902 	__u64	lvb_id_may_rel; /* space that might be released later */
1903 	__u64	lvb_id_rel;     /* space released by the slave for this ID */
1904 	__u64	lvb_id_qunit;   /* current qunit value */
1905 	__u64	lvb_pad1;
1906 };
1907 
1908 void lustre_swab_lquota_lvb(struct lquota_lvb *lvb);
1909 
1910 /* op codes */
1911 enum quota_cmd {
1912 	QUOTA_DQACQ	= 601,
1913 	QUOTA_DQREL	= 602,
1914 	QUOTA_LAST_OPC
1915 };
1916 #define QUOTA_FIRST_OPC	QUOTA_DQACQ
1917 
1918 /*
1919  *   MDS REQ RECORDS
1920  */
1921 
1922 /* opcodes */
1923 enum mds_cmd {
1924 	MDS_GETATTR		= 33,
1925 	MDS_GETATTR_NAME	= 34,
1926 	MDS_CLOSE		= 35,
1927 	MDS_REINT		= 36,
1928 	MDS_READPAGE		= 37,
1929 	MDS_CONNECT		= 38,
1930 	MDS_DISCONNECT		= 39,
1931 	MDS_GETSTATUS		= 40,
1932 	MDS_STATFS		= 41,
1933 	MDS_PIN			= 42, /* obsolete, never used in a release */
1934 	MDS_UNPIN		= 43, /* obsolete, never used in a release */
1935 	MDS_SYNC		= 44,
1936 	MDS_DONE_WRITING	= 45,
1937 	MDS_SET_INFO		= 46,
1938 	MDS_QUOTACHECK		= 47,
1939 	MDS_QUOTACTL		= 48,
1940 	MDS_GETXATTR		= 49,
1941 	MDS_SETXATTR		= 50, /* obsolete, now it's MDS_REINT op */
1942 	MDS_WRITEPAGE		= 51,
1943 	MDS_IS_SUBDIR		= 52, /* obsolete, never used in a release */
1944 	MDS_GET_INFO		= 53,
1945 	MDS_HSM_STATE_GET	= 54,
1946 	MDS_HSM_STATE_SET	= 55,
1947 	MDS_HSM_ACTION		= 56,
1948 	MDS_HSM_PROGRESS	= 57,
1949 	MDS_HSM_REQUEST		= 58,
1950 	MDS_HSM_CT_REGISTER	= 59,
1951 	MDS_HSM_CT_UNREGISTER	= 60,
1952 	MDS_SWAP_LAYOUTS	= 61,
1953 	MDS_LAST_OPC
1954 };
1955 
1956 #define MDS_FIRST_OPC    MDS_GETATTR
1957 
1958 /*
1959  * Do not exceed 63
1960  */
1961 
1962 enum mdt_reint_cmd {
1963 	REINT_SETATTR  = 1,
1964 	REINT_CREATE   = 2,
1965 	REINT_LINK     = 3,
1966 	REINT_UNLINK   = 4,
1967 	REINT_RENAME   = 5,
1968 	REINT_OPEN     = 6,
1969 	REINT_SETXATTR = 7,
1970 	REINT_RMENTRY  = 8,
1971 	REINT_MIGRATE  = 9,
1972 	REINT_MAX
1973 };
1974 
1975 void lustre_swab_generic_32s(__u32 *val);
1976 
1977 /* the disposition of the intent outlines what was executed */
1978 #define DISP_IT_EXECD	0x00000001
1979 #define DISP_LOOKUP_EXECD    0x00000002
1980 #define DISP_LOOKUP_NEG      0x00000004
1981 #define DISP_LOOKUP_POS      0x00000008
1982 #define DISP_OPEN_CREATE     0x00000010
1983 #define DISP_OPEN_OPEN       0x00000020
1984 #define DISP_ENQ_COMPLETE    0x00400000		/* obsolete and unused */
1985 #define DISP_ENQ_OPEN_REF    0x00800000
1986 #define DISP_ENQ_CREATE_REF  0x01000000
1987 #define DISP_OPEN_LOCK       0x02000000
1988 #define DISP_OPEN_LEASE      0x04000000
1989 #define DISP_OPEN_STRIPE     0x08000000
1990 #define DISP_OPEN_DENY		0x10000000
1991 
1992 /* INODE LOCK PARTS */
1993 #define MDS_INODELOCK_LOOKUP 0x000001	/* For namespace, dentry etc, and also
1994 					 * was used to protect permission (mode,
1995 					 * owner, group etc) before 2.4.
1996 					 */
1997 #define MDS_INODELOCK_UPDATE 0x000002	/* size, links, timestamps */
1998 #define MDS_INODELOCK_OPEN   0x000004	/* For opened files */
1999 #define MDS_INODELOCK_LAYOUT 0x000008	/* for layout */
2000 
2001 /* The PERM bit was added in 2.4.  It is used to protect permissions (mode,
2002  * owner, group, ACL, etc.), separating them from the LOOKUP lock, because
2003  * for remote directories (in DNE) these locks are granted by different
2004  * MDTs (different LDLM namespaces).
2005  *
2006  * For a local directory, the MDT always grants UPDATE_LOCK|PERM_LOCK together.
2007  * For a remote directory, the master MDT, where the remote directory is, will
2008  * grant UPDATE_LOCK|PERM_LOCK, and the remote MDT, where the name entry is,
2009  * will grant LOOKUP_LOCK.
2010  */
2011 #define MDS_INODELOCK_PERM   0x000010
2012 #define MDS_INODELOCK_XATTR  0x000020	/* extended attributes */
2013 
2014 #define MDS_INODELOCK_MAXSHIFT 5
2015 /* This FULL lock is useful to take for unlink-type operations */
2016 #define MDS_INODELOCK_FULL ((1 << (MDS_INODELOCK_MAXSHIFT + 1)) - 1)
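/*
 * Example (editor's note): with MDS_INODELOCK_MAXSHIFT == 5,
 * MDS_INODELOCK_FULL is ((1 << 6) - 1) == 0x3F, i.e. the union of
 * LOOKUP | UPDATE | OPEN | LAYOUT | PERM | XATTR.
 */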
2017 
2018 /* NOTE: until Lustre 1.8.7/2.1.1 the fid_ver() was packed into name[2],
2019  * but was moved into name[1] along with the OID to avoid consuming the
2020  * name[2,3] fields that need to be used for the quota id (also a FID).
2021  */
2022 enum {
2023 	LUSTRE_RES_ID_SEQ_OFF = 0,
2024 	LUSTRE_RES_ID_VER_OID_OFF = 1,
2025 	LUSTRE_RES_ID_WAS_VER_OFF = 2, /* see note above */
2026 	LUSTRE_RES_ID_QUOTA_SEQ_OFF = 2,
2027 	LUSTRE_RES_ID_QUOTA_VER_OID_OFF = 3,
2028 	LUSTRE_RES_ID_HSH_OFF = 3
2029 };
2030 
2031 #define MDS_STATUS_CONN 1
2032 #define MDS_STATUS_LOV 2
2033 
2034 /* mdt_thread_info.mti_flags. */
2035 enum md_op_flags {
2036 	/* The flag indicates that Size-on-MDS attributes have changed. */
2037 	MF_SOM_CHANGE	   = (1 << 0),
2038 	/* Flags indicating that an epoch opens or closes. */
2039 	MF_EPOCH_OPEN	   = (1 << 1),
2040 	MF_EPOCH_CLOSE	  = (1 << 2),
2041 	MF_MDC_CANCEL_FID1      = (1 << 3),
2042 	MF_MDC_CANCEL_FID2      = (1 << 4),
2043 	MF_MDC_CANCEL_FID3      = (1 << 5),
2044 	MF_MDC_CANCEL_FID4      = (1 << 6),
2045 	/* There is a pending attribute update. */
2046 	MF_SOM_AU	       = (1 << 7),
2047 	/* Cancel OST locks while getting OST attributes. */
2048 	MF_GETATTR_LOCK	 = (1 << 8),
2049 	MF_GET_MDT_IDX	  = (1 << 9),
2050 };
2051 
2052 #define MF_SOM_LOCAL_FLAGS (MF_SOM_CHANGE | MF_EPOCH_OPEN | MF_EPOCH_CLOSE)
2053 
2054 #define LUSTRE_BFLAG_UNCOMMITTED_WRITES   0x1
2055 
2056 /* these should be identical to their EXT4_*_FL counterparts, they are
2057  * redefined here only to avoid dragging in fs/ext4/ext4.h
2058  */
2059 #define LUSTRE_SYNC_FL	 0x00000008 /* Synchronous updates */
2060 #define LUSTRE_IMMUTABLE_FL    0x00000010 /* Immutable file */
2061 #define LUSTRE_APPEND_FL       0x00000020 /* writes to file may only append */
2062 #define LUSTRE_NOATIME_FL      0x00000080 /* do not update atime */
2063 #define LUSTRE_DIRSYNC_FL      0x00010000 /* dirsync behaviour (dir only) */
2064 
2065 /* Convert wire LUSTRE_*_FL to corresponding client local VFS S_* values
2066  * for the client inode i_flags.  The LUSTRE_*_FL are the Lustre wire
2067  * protocol equivalents of LDISKFS_*_FL values stored on disk, while
2068  * the S_* flags are kernel-internal values that change between kernel
2069  * versions.  These flags are set/cleared via FSFILT_IOC_{GET,SET}_FLAGS.
2070  * See b=16526 for a full history.
2071  */
2072 static inline int ll_ext_to_inode_flags(int flags)
2073 {
2074 	return (((flags & LUSTRE_SYNC_FL)      ? S_SYNC      : 0) |
2075 		((flags & LUSTRE_NOATIME_FL)   ? S_NOATIME   : 0) |
2076 		((flags & LUSTRE_APPEND_FL)    ? S_APPEND    : 0) |
2077 		((flags & LUSTRE_DIRSYNC_FL)   ? S_DIRSYNC   : 0) |
2078 		((flags & LUSTRE_IMMUTABLE_FL) ? S_IMMUTABLE : 0));
2079 }
2080 
2081 static inline int ll_inode_to_ext_flags(int iflags)
2082 {
2083 	return (((iflags & S_SYNC)      ? LUSTRE_SYNC_FL      : 0) |
2084 		((iflags & S_NOATIME)   ? LUSTRE_NOATIME_FL   : 0) |
2085 		((iflags & S_APPEND)    ? LUSTRE_APPEND_FL    : 0) |
2086 		((iflags & S_DIRSYNC)   ? LUSTRE_DIRSYNC_FL   : 0) |
2087 		((iflags & S_IMMUTABLE) ? LUSTRE_IMMUTABLE_FL : 0));
2088 }
2089 
2090 /* 64 possible states */
2091 enum md_transient_state {
2092 	MS_RESTORE	= (1 << 0),	/* restore is running */
2093 };
2094 
2095 struct mdt_body {
2096 	struct lu_fid mbo_fid1;
2097 	struct lu_fid mbo_fid2;
2098 	struct lustre_handle mbo_handle;
2099 	__u64	mbo_valid;
2100 	__u64	mbo_size;	/* Offset, in the case of MDS_READPAGE */
2101 	__s64	mbo_mtime;
2102 	__s64	mbo_atime;
2103 	__s64	mbo_ctime;
2104 	__u64	mbo_blocks;	/* XID, in the case of MDS_READPAGE */
2105 	__u64	mbo_ioepoch;
2106 	__u64	mbo_t_state;	/* transient file state defined in
2107 				 * enum md_transient_state
2108 				 * was "ino" until 2.4.0
2109 				 */
2110 	__u32	mbo_fsuid;
2111 	__u32	mbo_fsgid;
2112 	__u32	mbo_capability;
2113 	__u32	mbo_mode;
2114 	__u32	mbo_uid;
2115 	__u32	mbo_gid;
2116 	__u32	mbo_flags;
2117 	__u32	mbo_rdev;
2118 	__u32	mbo_nlink;	/* #bytes to read in the case of MDS_READPAGE */
2119 	__u32	mbo_unused2;	/* was "generation" until 2.4.0 */
2120 	__u32	mbo_suppgid;
2121 	__u32	mbo_eadatasize;
2122 	__u32	mbo_aclsize;
2123 	__u32	mbo_max_mdsize;
2124 	__u32	mbo_max_cookiesize;
2125 	__u32	mbo_uid_h;	/* high 32-bits of uid, for FUID */
2126 	__u32	mbo_gid_h;	/* high 32-bits of gid, for FUID */
2127 	__u32	mbo_padding_5;	/* also fix lustre_swab_mdt_body */
2128 	__u64	mbo_padding_6;
2129 	__u64	mbo_padding_7;
2130 	__u64	mbo_padding_8;
2131 	__u64	mbo_padding_9;
2132 	__u64	mbo_padding_10;
2133 }; /* 216 */
2134 
2135 void lustre_swab_mdt_body(struct mdt_body *b);
2136 
2137 struct mdt_ioepoch {
2138 	struct lustre_handle handle;
2139 	__u64  ioepoch;
2140 	__u32  flags;
2141 	__u32  padding;
2142 };
2143 
2144 void lustre_swab_mdt_ioepoch(struct mdt_ioepoch *b);
2145 
2146 /* permissions for md_perm.mp_perm */
2147 enum {
2148 	CFS_SETUID_PERM = 0x01,
2149 	CFS_SETGID_PERM = 0x02,
2150 	CFS_SETGRP_PERM = 0x04,
2151 };
2152 
2153 struct mdt_rec_setattr {
2154 	__u32	   sa_opcode;
2155 	__u32	   sa_cap;
2156 	__u32	   sa_fsuid;
2157 	__u32	   sa_fsuid_h;
2158 	__u32	   sa_fsgid;
2159 	__u32	   sa_fsgid_h;
2160 	__u32	   sa_suppgid;
2161 	__u32	   sa_suppgid_h;
2162 	__u32	   sa_padding_1;
2163 	__u32	   sa_padding_1_h;
2164 	struct lu_fid   sa_fid;
2165 	__u64	   sa_valid;
2166 	__u32	   sa_uid;
2167 	__u32	   sa_gid;
2168 	__u64	   sa_size;
2169 	__u64	   sa_blocks;
2170 	__s64	   sa_mtime;
2171 	__s64	   sa_atime;
2172 	__s64	   sa_ctime;
2173 	__u32	   sa_attr_flags;
2174 	__u32	   sa_mode;
2175 	__u32	   sa_bias;      /* some operation flags */
2176 	__u32	   sa_padding_3;
2177 	__u32	   sa_padding_4;
2178 	__u32	   sa_padding_5;
2179 };
2180 
2181 void lustre_swab_mdt_rec_setattr(struct mdt_rec_setattr *sa);
2182 
2183 /*
2184  * Attribute flags used in mdt_rec_setattr::sa_valid.
2185  * The kernel's #defines for ATTR_* should not be used over the network
2186  * since the client and MDS may run different kernels (see bug 13828)
2187  * Therefore, we should only use MDS_ATTR_* attributes for sa_valid.
2188  */
2189 #define MDS_ATTR_MODE	       0x1ULL /* = 1 */
2190 #define MDS_ATTR_UID	       0x2ULL /* = 2 */
2191 #define MDS_ATTR_GID	       0x4ULL /* = 4 */
2192 #define MDS_ATTR_SIZE	       0x8ULL /* = 8 */
2193 #define MDS_ATTR_ATIME	      0x10ULL /* = 16 */
2194 #define MDS_ATTR_MTIME	      0x20ULL /* = 32 */
2195 #define MDS_ATTR_CTIME	      0x40ULL /* = 64 */
2196 #define MDS_ATTR_ATIME_SET    0x80ULL /* = 128 */
2197 #define MDS_ATTR_MTIME_SET   0x100ULL /* = 256 */
2198 #define MDS_ATTR_FORCE       0x200ULL /* = 512, not a change, but force it */
2199 #define MDS_ATTR_ATTR_FLAG   0x400ULL /* = 1024 */
2200 #define MDS_ATTR_KILL_SUID   0x800ULL /* = 2048 */
2201 #define MDS_ATTR_KILL_SGID  0x1000ULL /* = 4096 */
2202 #define MDS_ATTR_CTIME_SET  0x2000ULL /* = 8192 */
2203 #define MDS_ATTR_FROM_OPEN  0x4000ULL /* = 16384, called from open path,
2204 				       * ie O_TRUNC
2205 				       */
2206 #define MDS_ATTR_BLOCKS     0x8000ULL /* = 32768 */
2207 
2208 #define MDS_FMODE_CLOSED	 00000000
2209 #define MDS_FMODE_EXEC	   00000004
2210 /* IO Epoch is opened on a closed file. */
2211 #define MDS_FMODE_EPOCH	  01000000
2212 /* IO Epoch is opened on a file truncate. */
2213 #define MDS_FMODE_TRUNC	  02000000
2214 /* Size-on-MDS Attribute Update is pending. */
2215 #define MDS_FMODE_SOM	    04000000
2216 
2217 #define MDS_OPEN_CREATED	 00000010
2218 #define MDS_OPEN_CROSS	   00000020
2219 
2220 #define MDS_OPEN_CREAT	   00000100
2221 #define MDS_OPEN_EXCL	    00000200
2222 #define MDS_OPEN_TRUNC	   00001000
2223 #define MDS_OPEN_APPEND	  00002000
2224 #define MDS_OPEN_SYNC	    00010000
2225 #define MDS_OPEN_DIRECTORY       00200000
2226 
2227 #define MDS_OPEN_BY_FID		040000000 /* open_by_fid for known object */
2228 #define MDS_OPEN_DELAY_CREATE  0100000000 /* delay initial object create */
2229 #define MDS_OPEN_OWNEROVERRIDE 0200000000 /* NFSD rw-reopen ro file for owner */
2230 #define MDS_OPEN_JOIN_FILE     0400000000 /* open for join file.
2231 					   * We do not support JOIN FILE
2232 					   * anymore; this flag is reserved
2233 					   * just to prevent the bit from
2234 					   * being reused.
2235 					   */
2236 
2237 #define MDS_OPEN_LOCK	      04000000000 /* This open requires open lock */
2238 #define MDS_OPEN_HAS_EA      010000000000 /* specify object create pattern */
2239 #define MDS_OPEN_HAS_OBJS    020000000000 /* Just set the EA the obj exist */
2240 #define MDS_OPEN_NORESTORE  0100000000000ULL /* Do not restore file at open */
2241 #define MDS_OPEN_NEWSTRIPE  0200000000000ULL /* New stripe needed (restripe or
2242 					      * hsm restore) */
2243 #define MDS_OPEN_VOLATILE   0400000000000ULL /* File is volatile = created
2244 						unlinked */
2245 #define MDS_OPEN_LEASE	   01000000000000ULL /* Open the file and grant lease
2246 					      * delegation, succeed if it's not
2247 					      * being opened with conflict mode.
2248 					      */
2249 #define MDS_OPEN_RELEASE   02000000000000ULL /* Open the file for HSM release */
2250 
2251 #define MDS_OPEN_FL_INTERNAL (MDS_OPEN_HAS_EA | MDS_OPEN_HAS_OBJS |	\
2252 			      MDS_OPEN_OWNEROVERRIDE | MDS_OPEN_LOCK |	\
2253 			      MDS_OPEN_BY_FID | MDS_OPEN_LEASE |	\
2254 			      MDS_OPEN_RELEASE)
2255 
2256 enum mds_op_bias {
2257 	MDS_CHECK_SPLIT		= 1 << 0,
2258 	MDS_CROSS_REF		= 1 << 1,
2259 	MDS_VTX_BYPASS		= 1 << 2,
2260 	MDS_PERM_BYPASS		= 1 << 3,
2261 	MDS_SOM			= 1 << 4,
2262 	MDS_QUOTA_IGNORE	= 1 << 5,
2263 	MDS_CLOSE_CLEANUP	= 1 << 6,
2264 	MDS_KEEP_ORPHAN		= 1 << 7,
2265 	MDS_RECOV_OPEN		= 1 << 8,
2266 	MDS_DATA_MODIFIED	= 1 << 9,
2267 	MDS_CREATE_VOLATILE	= 1 << 10,
2268 	MDS_OWNEROVERRIDE	= 1 << 11,
2269 	MDS_HSM_RELEASE		= 1 << 12,
2270 	MDS_RENAME_MIGRATE	= BIT(13),
2271 };
2272 
2273 /* instance of mdt_reint_rec */
2274 struct mdt_rec_create {
2275 	__u32	   cr_opcode;
2276 	__u32	   cr_cap;
2277 	__u32	   cr_fsuid;
2278 	__u32	   cr_fsuid_h;
2279 	__u32	   cr_fsgid;
2280 	__u32	   cr_fsgid_h;
2281 	__u32	   cr_suppgid1;
2282 	__u32	   cr_suppgid1_h;
2283 	__u32	   cr_suppgid2;
2284 	__u32	   cr_suppgid2_h;
2285 	struct lu_fid   cr_fid1;
2286 	struct lu_fid   cr_fid2;
2287 	struct lustre_handle cr_old_handle; /* handle in case of open replay */
2288 	__s64	   cr_time;
2289 	__u64	   cr_rdev;
2290 	__u64	   cr_ioepoch;
2291 	__u64	   cr_padding_1;   /* rr_blocks */
2292 	__u32	   cr_mode;
2293 	__u32	   cr_bias;
2294 	/* The set/get_mrc_cr_flags() helpers must be used to access the
2295 	 * 64-bit cr_flags [cr_flags_l, cr_flags_h]; this is done to
2296 	 * extend the cr_flags size without breaking 1.8 compatibility.
2297 	 */
2298 	__u32	   cr_flags_l;     /* for use with open, low  32 bits  */
2299 	__u32	   cr_flags_h;     /* for use with open, high 32 bits */
2300 	__u32	   cr_umask;       /* umask for create */
2301 	__u32	   cr_padding_4;   /* rr_padding_4 */
2302 };
2303 
2304 static inline void set_mrc_cr_flags(struct mdt_rec_create *mrc, __u64 flags)
2305 {
2306 	mrc->cr_flags_l = (__u32)(flags & 0xFFFFFFFFUll);
2307 	mrc->cr_flags_h = (__u32)(flags >> 32);
2308 }
2309 
2310 static inline __u64 get_mrc_cr_flags(struct mdt_rec_create *mrc)
2311 {
2312 	return ((__u64)(mrc->cr_flags_l) | ((__u64)mrc->cr_flags_h << 32));
2313 }
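/*
 * Example (editor's sketch): open flags wider than 32 bits round-trip
 * through the two helpers above without changing any 1.8-visible field:
 *
 *	__u64 flags = MDS_OPEN_LOCK | MDS_OPEN_LEASE;
 *
 *	set_mrc_cr_flags(rec, flags);
 *	flags = get_mrc_cr_flags(rec);
 *
 * where rec is an assumed pointer to the struct mdt_rec_create being
 * packed; MDS_OPEN_LEASE (bit 36) lands in cr_flags_h while
 * MDS_OPEN_LOCK (bit 29) stays in cr_flags_l.
 */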
2314 
2315 /* instance of mdt_reint_rec */
2316 struct mdt_rec_link {
2317 	__u32	   lk_opcode;
2318 	__u32	   lk_cap;
2319 	__u32	   lk_fsuid;
2320 	__u32	   lk_fsuid_h;
2321 	__u32	   lk_fsgid;
2322 	__u32	   lk_fsgid_h;
2323 	__u32	   lk_suppgid1;
2324 	__u32	   lk_suppgid1_h;
2325 	__u32	   lk_suppgid2;
2326 	__u32	   lk_suppgid2_h;
2327 	struct lu_fid   lk_fid1;
2328 	struct lu_fid   lk_fid2;
2329 	__s64	   lk_time;
2330 	__u64	   lk_padding_1;   /* rr_atime */
2331 	__u64	   lk_padding_2;   /* rr_ctime */
2332 	__u64	   lk_padding_3;   /* rr_size */
2333 	__u64	   lk_padding_4;   /* rr_blocks */
2334 	__u32	   lk_bias;
2335 	__u32	   lk_padding_5;   /* rr_mode */
2336 	__u32	   lk_padding_6;   /* rr_flags */
2337 	__u32	   lk_padding_7;   /* rr_padding_2 */
2338 	__u32	   lk_padding_8;   /* rr_padding_3 */
2339 	__u32	   lk_padding_9;   /* rr_padding_4 */
2340 };
2341 
2342 /* instance of mdt_reint_rec */
2343 struct mdt_rec_unlink {
2344 	__u32	   ul_opcode;
2345 	__u32	   ul_cap;
2346 	__u32	   ul_fsuid;
2347 	__u32	   ul_fsuid_h;
2348 	__u32	   ul_fsgid;
2349 	__u32	   ul_fsgid_h;
2350 	__u32	   ul_suppgid1;
2351 	__u32	   ul_suppgid1_h;
2352 	__u32	   ul_suppgid2;
2353 	__u32	   ul_suppgid2_h;
2354 	struct lu_fid   ul_fid1;
2355 	struct lu_fid   ul_fid2;
2356 	__s64	   ul_time;
2357 	__u64	   ul_padding_2;   /* rr_atime */
2358 	__u64	   ul_padding_3;   /* rr_ctime */
2359 	__u64	   ul_padding_4;   /* rr_size */
2360 	__u64	   ul_padding_5;   /* rr_blocks */
2361 	__u32	   ul_bias;
2362 	__u32	   ul_mode;
2363 	__u32	   ul_padding_6;   /* rr_flags */
2364 	__u32	   ul_padding_7;   /* rr_padding_2 */
2365 	__u32	   ul_padding_8;   /* rr_padding_3 */
2366 	__u32	   ul_padding_9;   /* rr_padding_4 */
2367 };
2368 
2369 /* instance of mdt_reint_rec */
2370 struct mdt_rec_rename {
2371 	__u32	   rn_opcode;
2372 	__u32	   rn_cap;
2373 	__u32	   rn_fsuid;
2374 	__u32	   rn_fsuid_h;
2375 	__u32	   rn_fsgid;
2376 	__u32	   rn_fsgid_h;
2377 	__u32	   rn_suppgid1;
2378 	__u32	   rn_suppgid1_h;
2379 	__u32	   rn_suppgid2;
2380 	__u32	   rn_suppgid2_h;
2381 	struct lu_fid   rn_fid1;
2382 	struct lu_fid   rn_fid2;
2383 	__s64	   rn_time;
2384 	__u64	   rn_padding_1;   /* rr_atime */
2385 	__u64	   rn_padding_2;   /* rr_ctime */
2386 	__u64	   rn_padding_3;   /* rr_size */
2387 	__u64	   rn_padding_4;   /* rr_blocks */
2388 	__u32	   rn_bias;	/* some operation flags */
2389 	__u32	   rn_mode;	/* cross-ref rename has mode */
2390 	__u32	   rn_padding_5;   /* rr_flags */
2391 	__u32	   rn_padding_6;   /* rr_padding_2 */
2392 	__u32	   rn_padding_7;   /* rr_padding_3 */
2393 	__u32	   rn_padding_8;   /* rr_padding_4 */
2394 };
2395 
2396 /* instance of mdt_reint_rec */
2397 struct mdt_rec_setxattr {
2398 	__u32	   sx_opcode;
2399 	__u32	   sx_cap;
2400 	__u32	   sx_fsuid;
2401 	__u32	   sx_fsuid_h;
2402 	__u32	   sx_fsgid;
2403 	__u32	   sx_fsgid_h;
2404 	__u32	   sx_suppgid1;
2405 	__u32	   sx_suppgid1_h;
2406 	__u32	   sx_suppgid2;
2407 	__u32	   sx_suppgid2_h;
2408 	struct lu_fid   sx_fid;
2409 	__u64	   sx_padding_1;   /* These three are rr_fid2 */
2410 	__u32	   sx_padding_2;
2411 	__u32	   sx_padding_3;
2412 	__u64	   sx_valid;
2413 	__s64	   sx_time;
2414 	__u64	   sx_padding_5;   /* rr_ctime */
2415 	__u64	   sx_padding_6;   /* rr_size */
2416 	__u64	   sx_padding_7;   /* rr_blocks */
2417 	__u32	   sx_size;
2418 	__u32	   sx_flags;
2419 	__u32	   sx_padding_8;   /* rr_flags */
2420 	__u32	   sx_padding_9;   /* rr_padding_2 */
2421 	__u32	   sx_padding_10;  /* rr_padding_3 */
2422 	__u32	   sx_padding_11;  /* rr_padding_4 */
2423 };
2424 
2425 /*
2426  * mdt_rec_reint is the template for all mdt_reint_xxx structures.
2427  * Do NOT change the size of various members, otherwise the value
2428  * will be broken in lustre_swab_mdt_rec_reint().
2429  *
2430  * If you add new members in other mdt_reint_xxx structures and need to use the
2431  * rr_padding_x fields, then update lustre_swab_mdt_rec_reint() also.
2432  */
2433 struct mdt_rec_reint {
2434 	__u32	   rr_opcode;
2435 	__u32	   rr_cap;
2436 	__u32	   rr_fsuid;
2437 	__u32	   rr_fsuid_h;
2438 	__u32	   rr_fsgid;
2439 	__u32	   rr_fsgid_h;
2440 	__u32	   rr_suppgid1;
2441 	__u32	   rr_suppgid1_h;
2442 	__u32	   rr_suppgid2;
2443 	__u32	   rr_suppgid2_h;
2444 	struct lu_fid   rr_fid1;
2445 	struct lu_fid   rr_fid2;
2446 	__s64	   rr_mtime;
2447 	__s64	   rr_atime;
2448 	__s64	   rr_ctime;
2449 	__u64	   rr_size;
2450 	__u64	   rr_blocks;
2451 	__u32	   rr_bias;
2452 	__u32	   rr_mode;
2453 	__u32	   rr_flags;
2454 	__u32	   rr_flags_h;
2455 	__u32	   rr_umask;
2456 	__u32	   rr_padding_4; /* also fix lustre_swab_mdt_rec_reint */
2457 };
2458 
2459 void lustre_swab_mdt_rec_reint(struct mdt_rec_reint *rr);
2460 
2461 /* lmv structures */
2462 struct lmv_desc {
2463 	__u32 ld_tgt_count;		/* how many MDS's */
2464 	__u32 ld_active_tgt_count;	 /* how many active */
2465 	__u32 ld_default_stripe_count;     /* how many objects are used */
2466 	__u32 ld_pattern;		  /* default hash pattern */
2467 	__u64 ld_default_hash_size;
2468 	__u64 ld_padding_1;		/* also fix lustre_swab_lmv_desc */
2469 	__u32 ld_padding_2;		/* also fix lustre_swab_lmv_desc */
2470 	__u32 ld_qos_maxage;	       /* in second */
2471 	__u32 ld_padding_3;		/* also fix lustre_swab_lmv_desc */
2472 	__u32 ld_padding_4;		/* also fix lustre_swab_lmv_desc */
2473 	struct obd_uuid ld_uuid;
2474 };
2475 
2476 /* LMV layout EA, and it will be stored both in master and slave object */
2477 struct lmv_mds_md_v1 {
2478 	__u32 lmv_magic;
2479 	__u32 lmv_stripe_count;
2480 	__u32 lmv_master_mdt_index;	/* On master object, it is master
2481 					 * MDT index, on slave object, it
2482 					 * is stripe index of the slave obj
2483 					 */
2484 	__u32 lmv_hash_type;		/* dir stripe policy, i.e. indicates
2485 					 * which hash function to use.
2486 					 * Note: only the lower 16 bits are
2487 					 * used for now. The higher 16 bits will
2488 					 * be used to mark the object status,
2489 					 * for example migrating or dead.
2490 					 */
2491 	__u32 lmv_layout_version;	/* Used for directory restriping */
2492 	__u32 lmv_padding1;
2493 	__u64 lmv_padding2;
2494 	__u64 lmv_padding3;
2495 	char lmv_pool_name[LOV_MAXPOOLNAME + 1];/* pool name */
2496 	struct lu_fid lmv_stripe_fids[0];	/* FIDs for each stripe */
2497 };
2498 
2499 #define LMV_MAGIC_V1	 0x0CD20CD0	/* normal stripe lmv magic */
2500 #define LMV_MAGIC	 LMV_MAGIC_V1
2501 
2502 /* #define LMV_USER_MAGIC 0x0CD30CD0 */
2503 #define LMV_MAGIC_STRIPE 0x0CD40CD0	/* magic for dir sub_stripe */
2504 
2505 /*
2506  * Right now only the lower part (bits 0-15) of lmv_hash_type is used,
2507  * and the higher part holds flags that indicate the status of the object,
2508  * for example that the object is being migrated.  The hash function
2509  * might be interpreted differently depending on those flags.
2510  */
2511 #define LMV_HASH_TYPE_MASK		0x0000ffff
2512 
2513 #define LMV_HASH_FLAG_MIGRATION		0x80000000
2514 #define LMV_HASH_FLAG_DEAD		0x40000000
2515 
2516 /**
2517  * The FNV-1a hash algorithm is as follows:
2518  *     hash = FNV_offset_basis
2519  *     for each octet_of_data to be hashed
2520  *             hash = hash XOR octet_of_data
2521  *             hash = hash × FNV_prime
2522  *     return hash
2523  * http://en.wikipedia.org/wiki/Fowler–Noll–Vo_hash_function#FNV-1a_hash
2524  *
2525  * http://www.isthe.com/chongo/tech/comp/fnv/index.html#FNV-reference-source
2526  * FNV_prime is 2^40 + 2^8 + 0xb3 = 0x100000001b3ULL
2527  **/
2528 #define LUSTRE_FNV_1A_64_PRIME		0x100000001b3ULL
2529 #define LUSTRE_FNV_1A_64_OFFSET_BIAS	0xcbf29ce484222325ULL
2530 static inline __u64 lustre_hash_fnv_1a_64(const void *buf, size_t size)
2531 {
2532 	__u64 hash = LUSTRE_FNV_1A_64_OFFSET_BIAS;
2533 	const unsigned char *p = buf;
2534 	size_t i;
2535 
2536 	for (i = 0; i < size; i++) {
2537 		hash ^= p[i];
2538 		hash *= LUSTRE_FNV_1A_64_PRIME;
2539 	}
2540 
2541 	return hash;
2542 }
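/*
 * Example (editor's sketch): hashing a directory entry name with the
 * FNV-1a helper above; mapping the hash onto a stripe index with a plain
 * modulo is only an illustration, not the exact directory-striping policy:
 *
 *	const char *name = "foo";
 *	__u64 hash = lustre_hash_fnv_1a_64(name, strlen(name));
 *	__u32 stripe = hash % stripe_count;
 *
 * where stripe_count is an assumed local variable holding the directory's
 * lmv_stripe_count.
 */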
2543 
2544 union lmv_mds_md {
2545 	__u32			lmv_magic;
2546 	struct lmv_mds_md_v1	lmv_md_v1;
2547 	struct lmv_user_md	lmv_user_md;
2548 };
2549 
2550 void lustre_swab_lmv_mds_md(union lmv_mds_md *lmm);
2551 
2552 static inline ssize_t lmv_mds_md_size(int stripe_count, unsigned int lmm_magic)
2553 {
2554 	ssize_t len = -EINVAL;
2555 
2556 	switch (lmm_magic) {
2557 	case LMV_MAGIC_V1: {
2558 		struct lmv_mds_md_v1 *lmm1;
2559 
2560 		len = sizeof(*lmm1);
2561 		len += stripe_count * sizeof(lmm1->lmv_stripe_fids[0]);
2562 		break; }
2563 	default:
2564 		break;
2565 	}
2566 	return len;
2567 }
2568 
2569 static inline int lmv_mds_md_stripe_count_get(const union lmv_mds_md *lmm)
2570 {
2571 	switch (le32_to_cpu(lmm->lmv_magic)) {
2572 	case LMV_MAGIC_V1:
2573 		return le32_to_cpu(lmm->lmv_md_v1.lmv_stripe_count);
2574 	case LMV_USER_MAGIC:
2575 		return le32_to_cpu(lmm->lmv_user_md.lum_stripe_count);
2576 	default:
2577 		return -EINVAL;
2578 	}
2579 }
2580 
2581 static inline int lmv_mds_md_stripe_count_set(union lmv_mds_md *lmm,
2582 					      unsigned int stripe_count)
2583 {
2584 	int rc = 0;
2585 
2586 	switch (le32_to_cpu(lmm->lmv_magic)) {
2587 	case LMV_MAGIC_V1:
2588 		lmm->lmv_md_v1.lmv_stripe_count = cpu_to_le32(stripe_count);
2589 		break;
2590 	case LMV_USER_MAGIC:
2591 		lmm->lmv_user_md.lum_stripe_count = cpu_to_le32(stripe_count);
2592 		break;
2593 	default:
2594 		rc = -EINVAL;
2595 		break;
2596 	}
2597 	return rc;
2598 }
2599 
2600 enum fld_rpc_opc {
2601 	FLD_QUERY	= 900,
2602 	FLD_READ	= 901,
2603 	FLD_LAST_OPC,
2604 	FLD_FIRST_OPC	= FLD_QUERY
2605 };
2606 
2607 enum seq_rpc_opc {
2608 	SEQ_QUERY		       = 700,
2609 	SEQ_LAST_OPC,
2610 	SEQ_FIRST_OPC		   = SEQ_QUERY
2611 };
2612 
2613 enum seq_op {
2614 	SEQ_ALLOC_SUPER = 0,
2615 	SEQ_ALLOC_META = 1
2616 };
2617 
2618 enum fld_op {
2619 	FLD_CREATE = 0,
2620 	FLD_DELETE = 1,
2621 	FLD_LOOKUP = 2,
2622 };
2623 
2624 /*
2625  *  LOV data structures
2626  */
2627 
2628 #define LOV_MAX_UUID_BUFFER_SIZE  8192
2629 /* The size of the buffer the lov/mdc reserves for the
2630  * array of UUIDs returned by the MDS.  With the current
2631  * protocol, this will limit the max number of OSTs per LOV
2632  */
2633 
2634 #define LOV_DESC_MAGIC 0xB0CCDE5C
2635 #define LOV_DESC_QOS_MAXAGE_DEFAULT 5  /* Seconds */
2636 #define LOV_DESC_STRIPE_SIZE_DEFAULT (1 << LNET_MTU_BITS)
2637 
2638 /* LOV settings descriptor (should only contain static info) */
2639 struct lov_desc {
2640 	__u32 ld_tgt_count;		/* how many OBD's */
2641 	__u32 ld_active_tgt_count;	/* how many active */
2642 	__u32 ld_default_stripe_count;  /* how many objects are used */
2643 	__u32 ld_pattern;		/* default PATTERN_RAID0 */
2644 	__u64 ld_default_stripe_size;   /* in bytes */
2645 	__u64 ld_default_stripe_offset; /* in bytes */
2646 	__u32 ld_padding_0;		/* unused */
2647 	__u32 ld_qos_maxage;		/* in second */
2648 	__u32 ld_padding_1;		/* also fix lustre_swab_lov_desc */
2649 	__u32 ld_padding_2;		/* also fix lustre_swab_lov_desc */
2650 	struct obd_uuid ld_uuid;
2651 };
2652 
2653 #define ld_magic ld_active_tgt_count       /* for swabbing from llogs */
2654 
2655 void lustre_swab_lov_desc(struct lov_desc *ld);
2656 
2657 /*
2658  *   LDLM requests:
2659  */
2660 /* opcodes -- MUST be distinct from OST/MDS opcodes */
2661 enum ldlm_cmd {
2662 	LDLM_ENQUEUE     = 101,
2663 	LDLM_CONVERT     = 102,
2664 	LDLM_CANCEL      = 103,
2665 	LDLM_BL_CALLBACK = 104,
2666 	LDLM_CP_CALLBACK = 105,
2667 	LDLM_GL_CALLBACK = 106,
2668 	LDLM_SET_INFO    = 107,
2669 	LDLM_LAST_OPC
2670 };
2671 #define LDLM_FIRST_OPC LDLM_ENQUEUE
2672 
2673 #define RES_NAME_SIZE 4
2674 struct ldlm_res_id {
2675 	__u64 name[RES_NAME_SIZE];
2676 };
2677 
2678 #define DLDLMRES	"[%#llx:%#llx:%#llx].%llx"
2679 #define PLDLMRES(res)	(res)->lr_name.name[0], (res)->lr_name.name[1], \
2680 			(res)->lr_name.name[2], (res)->lr_name.name[3]
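/*
 * Example (editor's sketch): DLDLMRES and PLDLMRES() pair up as a format
 * string and its argument list when printing a resource name, e.g.:
 *
 *	printk("resource "DLDLMRES"\n", PLDLMRES(res));
 *
 * where res is an assumed pointer to a structure that embeds the
 * ldlm_res_id as a member named lr_name, which is what the PLDLMRES()
 * expansion expects.
 */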
2681 
2682 static inline bool ldlm_res_eq(const struct ldlm_res_id *res0,
2683 			       const struct ldlm_res_id *res1)
2684 {
2685 	return !memcmp(res0, res1, sizeof(*res0));
2686 }
2687 
2688 /* lock types */
2689 enum ldlm_mode {
2690 	LCK_MINMODE = 0,
2691 	LCK_EX      = 1,
2692 	LCK_PW      = 2,
2693 	LCK_PR      = 4,
2694 	LCK_CW      = 8,
2695 	LCK_CR      = 16,
2696 	LCK_NL      = 32,
2697 	LCK_GROUP   = 64,
2698 	LCK_COS     = 128,
2699 	LCK_MAXMODE
2700 };
2701 
2702 #define LCK_MODE_NUM    8
2703 
2704 enum ldlm_type {
2705 	LDLM_PLAIN     = 10,
2706 	LDLM_EXTENT    = 11,
2707 	LDLM_FLOCK     = 12,
2708 	LDLM_IBITS     = 13,
2709 	LDLM_MAX_TYPE
2710 };
2711 
2712 #define LDLM_MIN_TYPE LDLM_PLAIN
2713 
2714 struct ldlm_extent {
2715 	__u64 start;
2716 	__u64 end;
2717 	__u64 gid;
2718 };
2719 
2720 static inline int ldlm_extent_overlap(const struct ldlm_extent *ex1,
2721 				      const struct ldlm_extent *ex2)
2722 {
2723 	return (ex1->start <= ex2->end) && (ex2->start <= ex1->end);
2724 }
2725 
2726 /* check if @ex1 contains @ex2 */
2727 static inline int ldlm_extent_contain(const struct ldlm_extent *ex1,
2728 				      const struct ldlm_extent *ex2)
2729 {
2730 	return (ex1->start <= ex2->start) && (ex1->end >= ex2->end);
2731 }
2732 
2733 struct ldlm_inodebits {
2734 	__u64 bits;
2735 };
2736 
2737 struct ldlm_flock_wire {
2738 	__u64 lfw_start;
2739 	__u64 lfw_end;
2740 	__u64 lfw_owner;
2741 	__u32 lfw_padding;
2742 	__u32 lfw_pid;
2743 };
2744 
2745 /* it's important that the fields of the ldlm_extent structure match
2746  * the first fields of the ldlm_flock structure because there is only
2747  * one ldlm_swab routine to process the ldlm_policy_data_t union. if
2748  * this ever changes we will need to swab the union differently based
2749  * on the resource type.
2750  */
2751 
2752 typedef union {
2753 	struct ldlm_extent l_extent;
2754 	struct ldlm_flock_wire l_flock;
2755 	struct ldlm_inodebits l_inodebits;
2756 } ldlm_wire_policy_data_t;
2757 
2758 union ldlm_gl_desc {
2759 	struct ldlm_gl_lquota_desc	lquota_desc;
2760 };
2761 
2762 void lustre_swab_gl_desc(union ldlm_gl_desc *);
2763 
2764 struct ldlm_intent {
2765 	__u64 opc;
2766 };
2767 
2768 void lustre_swab_ldlm_intent(struct ldlm_intent *i);
2769 
2770 struct ldlm_resource_desc {
2771 	enum ldlm_type lr_type;
2772 	__u32 lr_padding;       /* also fix lustre_swab_ldlm_resource_desc */
2773 	struct ldlm_res_id lr_name;
2774 };
2775 
2776 struct ldlm_lock_desc {
2777 	struct ldlm_resource_desc l_resource;
2778 	enum ldlm_mode l_req_mode;
2779 	enum ldlm_mode l_granted_mode;
2780 	ldlm_wire_policy_data_t l_policy_data;
2781 };
2782 
2783 #define LDLM_LOCKREQ_HANDLES 2
2784 #define LDLM_ENQUEUE_CANCEL_OFF 1
2785 
2786 struct ldlm_request {
2787 	__u32 lock_flags;
2788 	__u32 lock_count;
2789 	struct ldlm_lock_desc lock_desc;
2790 	struct lustre_handle lock_handle[LDLM_LOCKREQ_HANDLES];
2791 };
2792 
2793 void lustre_swab_ldlm_request(struct ldlm_request *rq);
2794 
2795 /* If LDLM_ENQUEUE, 1 slot is already occupied, 1 is available.
2796  * Otherwise, 2 are available.
2797  */
2798 #define ldlm_request_bufsize(count, type)				\
2799 ({								      \
2800 	int _avail = LDLM_LOCKREQ_HANDLES;			      \
2801 	_avail -= (type == LDLM_ENQUEUE ? LDLM_ENQUEUE_CANCEL_OFF : 0); \
2802 	sizeof(struct ldlm_request) +				   \
2803 	(count > _avail ? count - _avail : 0) *			 \
2804 	sizeof(struct lustre_handle);				   \
2805 })
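/*
 * Example (editor's sketch): sizing the request buffer for an enqueue
 * that also packs three cancel handles.  For LDLM_ENQUEUE one of the
 * LDLM_LOCKREQ_HANDLES slots is already occupied, so only one is free
 * and two extra handles must be appended:
 *
 *	int size = ldlm_request_bufsize(3, LDLM_ENQUEUE);
 *
 * which evaluates to sizeof(struct ldlm_request) +
 * 2 * sizeof(struct lustre_handle).
 */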
2806 
2807 struct ldlm_reply {
2808 	__u32 lock_flags;
2809 	__u32 lock_padding;     /* also fix lustre_swab_ldlm_reply */
2810 	struct ldlm_lock_desc lock_desc;
2811 	struct lustre_handle lock_handle;
2812 	__u64  lock_policy_res1;
2813 	__u64  lock_policy_res2;
2814 };
2815 
2816 void lustre_swab_ldlm_reply(struct ldlm_reply *r);
2817 
2818 #define ldlm_flags_to_wire(flags)    ((__u32)(flags))
2819 #define ldlm_flags_from_wire(flags)  ((__u64)(flags))
2820 
2821 /*
2822  * Opcodes for mountconf (mgs and mgc)
2823  */
2824 enum mgs_cmd {
2825 	MGS_CONNECT = 250,
2826 	MGS_DISCONNECT,
2827 	MGS_EXCEPTION,	 /* node died, etc. */
2828 	MGS_TARGET_REG,	/* whenever target starts up */
2829 	MGS_TARGET_DEL,
2830 	MGS_SET_INFO,
2831 	MGS_CONFIG_READ,
2832 	MGS_LAST_OPC
2833 };
2834 #define MGS_FIRST_OPC MGS_CONNECT
2835 
2836 #define MGS_PARAM_MAXLEN 1024
2837 #define KEY_SET_INFO "set_info"
2838 
2839 struct mgs_send_param {
2840 	char	     mgs_param[MGS_PARAM_MAXLEN];
2841 };
2842 
2843 /* We pass this info to the MGS so it can write config logs */
2844 #define MTI_NAME_MAXLEN  64
2845 #define MTI_PARAM_MAXLEN 4096
2846 #define MTI_NIDS_MAX     32
2847 struct mgs_target_info {
2848 	__u32	    mti_lustre_ver;
2849 	__u32	    mti_stripe_index;
2850 	__u32	    mti_config_ver;
2851 	__u32	    mti_flags;
2852 	__u32	    mti_nid_count;
2853 	__u32	    mti_instance; /* Running instance of target */
2854 	char	     mti_fsname[MTI_NAME_MAXLEN];
2855 	char	     mti_svname[MTI_NAME_MAXLEN];
2856 	char	     mti_uuid[sizeof(struct obd_uuid)];
2857 	__u64	    mti_nids[MTI_NIDS_MAX];     /* host nids (lnet_nid_t)*/
2858 	char	     mti_params[MTI_PARAM_MAXLEN];
2859 };
2860 
2861 void lustre_swab_mgs_target_info(struct mgs_target_info *oinfo);
2862 
2863 struct mgs_nidtbl_entry {
2864 	__u64	   mne_version;    /* table version of this entry */
2865 	__u32	   mne_instance;   /* target instance # */
2866 	__u32	   mne_index;      /* target index */
2867 	__u32	   mne_length;     /* length of this entry, in bytes */
2868 	__u8	    mne_type;       /* target type LDD_F_SV_TYPE_OST/MDT */
2869 	__u8	    mne_nid_type;   /* type of NID (must be zero); for IPv6 */
2870 	__u8	    mne_nid_size;   /* size of each NID, in bytes */
2871 	__u8	    mne_nid_count;  /* # of NIDs in buffer */
2872 	union {
2873 		lnet_nid_t nids[0];     /* variable size buffer for NIDs. */
2874 	} u;
2875 };
2876 
2877 void lustre_swab_mgs_nidtbl_entry(struct mgs_nidtbl_entry *oinfo);
2878 
2879 struct mgs_config_body {
2880 	char     mcb_name[MTI_NAME_MAXLEN]; /* logname */
2881 	__u64    mcb_offset;    /* next index of config log to request */
2882 	__u16    mcb_type;      /* type of log: CONFIG_T_[CONFIG|RECOVER] */
2883 	__u8     mcb_reserved;
2884 	__u8     mcb_bits;      /* bits unit size of config log */
2885 	__u32    mcb_units;     /* # of units for bulk transfer */
2886 };
2887 
2888 void lustre_swab_mgs_config_body(struct mgs_config_body *body);
2889 
2890 struct mgs_config_res {
2891 	__u64    mcr_offset;    /* index of last config log */
2892 	__u64    mcr_size;      /* size of the log */
2893 };
2894 
2895 void lustre_swab_mgs_config_res(struct mgs_config_res *body);
2896 
2897 /* Config marker flags (in config log) */
2898 #define CM_START       0x01
2899 #define CM_END	 0x02
2900 #define CM_SKIP	0x04
2901 #define CM_UPGRADE146  0x08
2902 #define CM_EXCLUDE     0x10
2903 #define CM_START_SKIP (CM_START | CM_SKIP)
2904 
2905 struct cfg_marker {
2906 	__u32	     cm_step;       /* aka config version */
2907 	__u32	     cm_flags;
2908 	__u32	     cm_vers;       /* lustre release version number */
2909 	__u32	     cm_padding;    /* 64 bit align */
2910 	__s64	     cm_createtime; /* when this record was first created */
2911 	__s64	     cm_canceltime; /* when this record is no longer valid */
2912 	char	      cm_tgtname[MTI_NAME_MAXLEN];
2913 	char	      cm_comment[MTI_NAME_MAXLEN];
2914 };
2915 
2916 void lustre_swab_cfg_marker(struct cfg_marker *marker, int swab, int size);
2917 
2918 /*
2919  * Opcodes for multiple servers.
2920  */
2921 
2922 enum obd_cmd {
2923 	OBD_PING = 400,
2924 	OBD_LOG_CANCEL,
2925 	OBD_QC_CALLBACK,
2926 	OBD_IDX_READ,
2927 	OBD_LAST_OPC
2928 };
2929 #define OBD_FIRST_OPC OBD_PING
2930 
2931 /**
2932  * llog contexts indices.
2933  *
2934  * There is a compatibility problem with the indices below: they are not
2935  * contiguous and must keep their numbers for compatibility reasons.
2936  * See LU-5218 for details.
2937  */
2938 enum llog_ctxt_id {
2939 	LLOG_CONFIG_ORIG_CTXT  =  0,
2940 	LLOG_CONFIG_REPL_CTXT = 1,
2941 	LLOG_MDS_OST_ORIG_CTXT = 2,
2942 	LLOG_MDS_OST_REPL_CTXT = 3, /* kept just to avoid re-assignment */
2943 	LLOG_SIZE_ORIG_CTXT = 4,
2944 	LLOG_SIZE_REPL_CTXT = 5,
2945 	LLOG_TEST_ORIG_CTXT = 8,
2946 	LLOG_TEST_REPL_CTXT = 9, /* kept just to avoid re-assignment */
2947 	LLOG_CHANGELOG_ORIG_CTXT = 12, /**< changelog generation on mdd */
2948 	LLOG_CHANGELOG_REPL_CTXT = 13, /**< changelog access on clients */
2949 	/* for multiple changelog consumers */
2950 	LLOG_CHANGELOG_USER_ORIG_CTXT = 14,
2951 	LLOG_AGENT_ORIG_CTXT = 15, /**< agent requests generation on cdt */
2952 	LLOG_MAX_CTXTS
2953 };
2954 
2955 /** Identifier for a single log object */
2956 struct llog_logid {
2957 	struct ost_id		lgl_oi;
2958 	__u32		   lgl_ogen;
2959 } __packed;
2960 
2961 /** Records written to the CATALOGS list */
2962 #define CATLIST "CATALOGS"
2963 struct llog_catid {
2964 	struct llog_logid       lci_logid;
2965 	__u32		   lci_padding1;
2966 	__u32		   lci_padding2;
2967 	__u32		   lci_padding3;
2968 } __packed;
2969 
2970 /* Log data record types - there is no specific reason that these need to
2971  * be related to the RPC opcodes, but no reason not to (may be handy later?)
2972  */
2973 #define LLOG_OP_MAGIC 0x10600000
2974 #define LLOG_OP_MASK  0xfff00000
2975 
2976 enum llog_op_type {
2977 	LLOG_PAD_MAGIC		= LLOG_OP_MAGIC | 0x00000,
2978 	OST_SZ_REC		= LLOG_OP_MAGIC | 0x00f00,
2979 	/* OST_RAID1_REC	= LLOG_OP_MAGIC | 0x01000, never used */
2980 	MDS_UNLINK_REC		= LLOG_OP_MAGIC | 0x10000 | (MDS_REINT << 8) |
2981 				  REINT_UNLINK, /* obsolete after 2.5.0 */
2982 	MDS_UNLINK64_REC	= LLOG_OP_MAGIC | 0x90000 | (MDS_REINT << 8) |
2983 				  REINT_UNLINK,
2984 	/* MDS_SETATTR_REC	= LLOG_OP_MAGIC | 0x12401, obsolete 1.8.0 */
2985 	MDS_SETATTR64_REC	= LLOG_OP_MAGIC | 0x90000 | (MDS_REINT << 8) |
2986 				  REINT_SETATTR,
2987 	OBD_CFG_REC		= LLOG_OP_MAGIC | 0x20000,
2988 	/* PTL_CFG_REC		= LLOG_OP_MAGIC | 0x30000, obsolete 1.4.0 */
2989 	LLOG_GEN_REC		= LLOG_OP_MAGIC | 0x40000,
2990 	/* LLOG_JOIN_REC	= LLOG_OP_MAGIC | 0x50000, obsolete  1.8.0 */
2991 	CHANGELOG_REC		= LLOG_OP_MAGIC | 0x60000,
2992 	CHANGELOG_USER_REC	= LLOG_OP_MAGIC | 0x70000,
2993 	HSM_AGENT_REC		= LLOG_OP_MAGIC | 0x80000,
2994 	LLOG_HDR_MAGIC		= LLOG_OP_MAGIC | 0x45539,
2995 	LLOG_LOGID_MAGIC	= LLOG_OP_MAGIC | 0x4553b,
2996 };
2997 
2998 #define LLOG_REC_HDR_NEEDS_SWABBING(r) \
2999 	(((r)->lrh_type & __swab32(LLOG_OP_MASK)) == __swab32(LLOG_OP_MAGIC))
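/*
 * Example (editor's note): a record written by a peer of the opposite
 * endianness has the LLOG_OP_MAGIC portion of lrh_type byte-swapped, so
 * masking with __swab32(LLOG_OP_MASK) and comparing against
 * __swab32(LLOG_OP_MAGIC) detects a record that still needs swabbing:
 *
 *	if (LLOG_REC_HDR_NEEDS_SWABBING(rec))
 *		swab_the_record(rec);
 *
 * where rec points at a struct llog_rec_hdr and swab_the_record() stands
 * in for whatever record swabber is appropriate (illustrative name only).
 */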
3000 
3001 /** Log record header - stored in little endian order.
3002  * Each record must start with this struct, end with a llog_rec_tail,
3003  * and be a multiple of 256 bits in size.
3004  */
3005 struct llog_rec_hdr {
3006 	__u32	lrh_len;
3007 	__u32	lrh_index;
3008 	__u32	lrh_type;
3009 	__u32	lrh_id;
3010 };
3011 
3012 struct llog_rec_tail {
3013 	__u32	lrt_len;
3014 	__u32	lrt_index;
3015 };
3016 
3017 /* Where data follow just after header */
3018 #define REC_DATA(ptr)						\
3019 	((void *)((char *)ptr + sizeof(struct llog_rec_hdr)))
3020 
3021 #define REC_DATA_LEN(rec)					\
3022 	(rec->lrh_len - sizeof(struct llog_rec_hdr) -		\
3023 	 sizeof(struct llog_rec_tail))
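/*
 * Example (editor's sketch): given a record header, REC_DATA() points at
 * the payload that follows it and REC_DATA_LEN() gives the payload size
 * with both the header and the trailing llog_rec_tail stripped off:
 *
 *	struct llog_rec_hdr *rec;	(assumed to point at a valid record)
 *	void *payload = REC_DATA(rec);
 *	int len = REC_DATA_LEN(rec);
 */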
3024 
3025 struct llog_logid_rec {
3026 	struct llog_rec_hdr	lid_hdr;
3027 	struct llog_logid	lid_id;
3028 	__u32			lid_padding1;
3029 	__u64			lid_padding2;
3030 	__u64			lid_padding3;
3031 	struct llog_rec_tail	lid_tail;
3032 } __packed;
3033 
3034 struct llog_unlink_rec {
3035 	struct llog_rec_hdr	lur_hdr;
3036 	__u64			lur_oid;
3037 	__u32			lur_oseq;
3038 	__u32			lur_count;
3039 	struct llog_rec_tail	lur_tail;
3040 } __packed;
3041 
3042 struct llog_unlink64_rec {
3043 	struct llog_rec_hdr	lur_hdr;
3044 	struct lu_fid		lur_fid;
3045 	__u32			lur_count; /* to destroy the lost precreated */
3046 	__u32			lur_padding1;
3047 	__u64			lur_padding2;
3048 	__u64			lur_padding3;
3049 	struct llog_rec_tail    lur_tail;
3050 } __packed;
3051 
3052 struct llog_setattr64_rec {
3053 	struct llog_rec_hdr	lsr_hdr;
3054 	struct ost_id		lsr_oi;
3055 	__u32			lsr_uid;
3056 	__u32			lsr_uid_h;
3057 	__u32			lsr_gid;
3058 	__u32			lsr_gid_h;
3059 	__u64			lsr_valid;
3060 	struct llog_rec_tail    lsr_tail;
3061 } __packed;
3062 
3063 struct llog_size_change_rec {
3064 	struct llog_rec_hdr	lsc_hdr;
3065 	struct ll_fid		lsc_fid;
3066 	__u32			lsc_ioepoch;
3067 	__u32			lsc_padding1;
3068 	__u64			lsc_padding2;
3069 	__u64			lsc_padding3;
3070 	struct llog_rec_tail	lsc_tail;
3071 } __packed;
3072 
3073 /* changelog llog name, needed by client replicators */
3074 #define CHANGELOG_CATALOG "changelog_catalog"
3075 
3076 struct changelog_setinfo {
3077 	__u64 cs_recno;
3078 	__u32 cs_id;
3079 } __packed;
3080 
3081 /** changelog record */
3082 struct llog_changelog_rec {
3083 	struct llog_rec_hdr	cr_hdr;
3084 	struct changelog_rec	cr;		/**< Variable length field */
3085 	struct llog_rec_tail	cr_do_not_use;	/**< for_sizeof_only */
3086 } __packed;
3087 
3088 struct llog_changelog_user_rec {
3089 	struct llog_rec_hdr   cur_hdr;
3090 	__u32		 cur_id;
3091 	__u32		 cur_padding;
3092 	__u64		 cur_endrec;
3093 	struct llog_rec_tail  cur_tail;
3094 } __packed;
3095 
3096 enum agent_req_status {
3097 	ARS_WAITING,
3098 	ARS_STARTED,
3099 	ARS_FAILED,
3100 	ARS_CANCELED,
3101 	ARS_SUCCEED,
3102 };
3103 
3104 static inline const char *agent_req_status2name(const enum agent_req_status ars)
3105 {
3106 	switch (ars) {
3107 	case ARS_WAITING:
3108 		return "WAITING";
3109 	case ARS_STARTED:
3110 		return "STARTED";
3111 	case ARS_FAILED:
3112 		return "FAILED";
3113 	case ARS_CANCELED:
3114 		return "CANCELED";
3115 	case ARS_SUCCEED:
3116 		return "SUCCEED";
3117 	default:
3118 		return "UNKNOWN";
3119 	}
3120 }
3121 
3122 static inline bool agent_req_in_final_state(enum agent_req_status ars)
3123 {
3124 	return ((ars == ARS_SUCCEED) || (ars == ARS_FAILED) ||
3125 		(ars == ARS_CANCELED));
3126 }
3127 
3128 struct llog_agent_req_rec {
3129 	struct llog_rec_hdr	arr_hdr;	/**< record header */
3130 	__u32			arr_status;	/**< status of the request */
3131 						/* must match enum
3132 						 * agent_req_status
3133 						 */
3134 	__u32			arr_archive_id;	/**< backend archive number */
3135 	__u64			arr_flags;	/**< req flags */
3136 	__u64			arr_compound_id;/**< compound cookie */
3137 	__u64			arr_req_create;	/**< req. creation time */
3138 	__u64			arr_req_change;	/**< req. status change time */
3139 	struct hsm_action_item	arr_hai;	/**< req. to the agent */
3140 	struct llog_rec_tail	arr_tail;   /**< record tail, for_sizeof_only */
3141 } __packed;
3142 
3143 /* Old llog gen for compatibility */
3144 struct llog_gen {
3145 	__u64 mnt_cnt;
3146 	__u64 conn_cnt;
3147 } __packed;
3148 
3149 struct llog_gen_rec {
3150 	struct llog_rec_hdr	lgr_hdr;
3151 	struct llog_gen		lgr_gen;
3152 	__u64			padding1;
3153 	__u64			padding2;
3154 	__u64			padding3;
3155 	struct llog_rec_tail	lgr_tail;
3156 };
3157 
3158 /* On-disk header structure of each log object, stored in little endian order */
3159 #define LLOG_CHUNK_SIZE	 8192
3160 #define LLOG_HEADER_SIZE	(96)
3161 #define LLOG_BITMAP_BYTES       (LLOG_CHUNK_SIZE - LLOG_HEADER_SIZE)
3162 
3163 #define LLOG_MIN_REC_SIZE       (24) /* round(llog_rec_hdr + llog_rec_tail) */
3164 
3165 /* flags for the logs */
3166 enum llog_flag {
3167 	LLOG_F_ZAP_WHEN_EMPTY	= 0x1,
3168 	LLOG_F_IS_CAT		= 0x2,
3169 	LLOG_F_IS_PLAIN		= 0x4,
3170 	LLOG_F_EXT_JOBID        = BIT(3),
3171 
3172 	LLOG_F_EXT_MASK = LLOG_F_EXT_JOBID,
3173 };
3174 
3175 struct llog_log_hdr {
3176 	struct llog_rec_hdr     llh_hdr;
3177 	__s64		   llh_timestamp;
3178 	__u32		   llh_count;
3179 	__u32		   llh_bitmap_offset;
3180 	__u32		   llh_size;
3181 	__u32		   llh_flags;
3182 	__u32		   llh_cat_idx;
3183 	/* for a catalog the first plain slot is next to it */
3184 	struct obd_uuid	 llh_tgtuuid;
3185 	__u32		   llh_reserved[LLOG_HEADER_SIZE / sizeof(__u32) - 23];
3186 	__u32		   llh_bitmap[LLOG_BITMAP_BYTES / sizeof(__u32)];
3187 	struct llog_rec_tail    llh_tail;
3188 } __packed;
3189 
3190 #define LLOG_BITMAP_SIZE(llh)  (__u32)((llh->llh_hdr.lrh_len -		\
3191 					llh->llh_bitmap_offset -	\
3192 					sizeof(llh->llh_tail)) * 8)
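
/*
 * Illustrative sketch only (hypothetical helper; assumes the header has
 * already been converted to host endianness): testing whether a record
 * index is marked used in the llog header bitmap.
 */
static inline int llog_example_bit_is_set(const struct llog_log_hdr *llh,
					  __u32 index)
{
	if (index >= LLOG_BITMAP_SIZE(llh))
		return 0;
	return (llh->llh_bitmap[index / 32] >> (index % 32)) & 1;
}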
3193 
3194 /** log cookies are used to reference a specific log file and a record
3195  * therein
3196  */
3197 struct llog_cookie {
3198 	struct llog_logid       lgc_lgl;
3199 	__u32		   lgc_subsys;
3200 	__u32		   lgc_index;
3201 	__u32		   lgc_padding;
3202 } __packed;
3203 
3204 /** llog protocol */
3205 enum llogd_rpc_ops {
3206 	LLOG_ORIGIN_HANDLE_CREATE       = 501,
3207 	LLOG_ORIGIN_HANDLE_NEXT_BLOCK   = 502,
3208 	LLOG_ORIGIN_HANDLE_READ_HEADER  = 503,
3209 	LLOG_ORIGIN_HANDLE_WRITE_REC    = 504,
3210 	LLOG_ORIGIN_HANDLE_CLOSE	= 505,
3211 	LLOG_ORIGIN_CONNECT		= 506,
3212 	LLOG_CATINFO			= 507,  /* deprecated */
3213 	LLOG_ORIGIN_HANDLE_PREV_BLOCK   = 508,
3214 	LLOG_ORIGIN_HANDLE_DESTROY      = 509,  /* for destroying llog objects */
3215 	LLOG_LAST_OPC,
3216 	LLOG_FIRST_OPC		  = LLOG_ORIGIN_HANDLE_CREATE
3217 };
3218 
3219 struct llogd_body {
3220 	struct llog_logid  lgd_logid;
3221 	__u32 lgd_ctxt_idx;
3222 	__u32 lgd_llh_flags;
3223 	__u32 lgd_index;
3224 	__u32 lgd_saved_index;
3225 	__u32 lgd_len;
3226 	__u64 lgd_cur_offset;
3227 } __packed;
3228 
3229 struct llogd_conn_body {
3230 	struct llog_gen	 lgdc_gen;
3231 	struct llog_logid       lgdc_logid;
3232 	__u32		   lgdc_ctxt_idx;
3233 } __packed;
3234 
3235 /* Note: 64-bit types are 64-bit aligned in this structure */
3236 struct obdo {
3237 	__u64		o_valid;	/* hot fields in this obdo */
3238 	struct ost_id	o_oi;
3239 	__u64		o_parent_seq;
3240 	__u64		o_size;	 /* o_size-o_blocks == ost_lvb */
3241 	__s64		o_mtime;
3242 	__s64		o_atime;
3243 	__s64		o_ctime;
3244 	__u64		o_blocks;       /* brw: cli sent cached bytes */
3245 	__u64		o_grant;
3246 
3247 	/* 32-bit fields start here: keep an even number of them via padding */
3248 	__u32		o_blksize;      /* optimal IO blocksize */
3249 	__u32		o_mode;	 /* brw: cli sent cache remain */
3250 	__u32		o_uid;
3251 	__u32		o_gid;
3252 	__u32		o_flags;
3253 	__u32		o_nlink;	/* brw: checksum */
3254 	__u32		o_parent_oid;
3255 	__u32		o_misc;		/* brw: o_dropped */
3256 
3257 	__u64		   o_ioepoch;      /* epoch in ost writes */
3258 	__u32		   o_stripe_idx;   /* holds stripe idx */
3259 	__u32		   o_parent_ver;
3260 	struct lustre_handle    o_handle;  /* brw: lock handle to prolong locks
3261 					    */
3262 	struct llog_cookie      o_lcookie; /* destroy: unlink cookie from MDS
3263 					    */
3264 	__u32			o_uid_h;
3265 	__u32			o_gid_h;
3266 
3267 	__u64			o_data_version; /* getattr: sum of iversion for
3268 						 * each stripe.
3269 						 * brw: grant space consumed on
3270 						 * the client for the write
3271 						 */
3272 	__u64			o_padding_4;
3273 	__u64			o_padding_5;
3274 	__u64			o_padding_6;
3275 };
3276 
3277 #define o_dirty   o_blocks
3278 #define o_undirty o_mode
3279 #define o_dropped o_misc
3280 #define o_cksum   o_nlink
3281 #define o_grant_used o_data_version
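
/*
 * Illustrative sketch only (hypothetical helper): the aliases above let bulk
 * I/O reuse obdo fields, e.g. the bulk checksum travels in o_nlink.  Real
 * users also set the matching OBD_MD_* bit in o_valid.
 */
static inline void obdo_example_set_cksum(struct obdo *oa, __u32 cksum)
{
	oa->o_cksum = cksum;	/* expands to oa->o_nlink */
}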
3282 
3283 static inline void lustre_set_wire_obdo(const struct obd_connect_data *ocd,
3284 					struct obdo *wobdo,
3285 					const struct obdo *lobdo)
3286 {
3287 	*wobdo = *lobdo;
3288 	wobdo->o_flags &= ~OBD_FL_LOCAL_MASK;
3289 	if (!ocd)
3290 		return;
3291 
3292 	if (unlikely(!(ocd->ocd_connect_flags & OBD_CONNECT_FID)) &&
3293 	    fid_seq_is_echo(ostid_seq(&lobdo->o_oi))) {
3294 		/* Currently OBD_FL_OSTID will only be used when a 2.4 echo
3295 		 * client communicates with a pre-2.4 server
3296 		 */
3297 		wobdo->o_oi.oi.oi_id = fid_oid(&lobdo->o_oi.oi_fid);
3298 		wobdo->o_oi.oi.oi_seq = fid_seq(&lobdo->o_oi.oi_fid);
3299 	}
3300 }
3301 
3302 static inline void lustre_get_wire_obdo(const struct obd_connect_data *ocd,
3303 					struct obdo *lobdo,
3304 					const struct obdo *wobdo)
3305 {
3306 	__u32 local_flags = 0;
3307 
3308 	if (lobdo->o_valid & OBD_MD_FLFLAGS)
3309 		local_flags = lobdo->o_flags & OBD_FL_LOCAL_MASK;
3310 
3311 	*lobdo = *wobdo;
3312 	if (local_flags != 0) {
3313 		lobdo->o_valid |= OBD_MD_FLFLAGS;
3314 		lobdo->o_flags &= ~OBD_FL_LOCAL_MASK;
3315 		lobdo->o_flags |= local_flags;
3316 	}
3317 	if (!ocd)
3318 		return;
3319 
3320 	if (unlikely(!(ocd->ocd_connect_flags & OBD_CONNECT_FID)) &&
3321 	    fid_seq_is_echo(wobdo->o_oi.oi.oi_seq)) {
3322 		/* see above */
3323 		lobdo->o_oi.oi_fid.f_seq = wobdo->o_oi.oi.oi_seq;
3324 		lobdo->o_oi.oi_fid.f_oid = wobdo->o_oi.oi.oi_id;
3325 		lobdo->o_oi.oi_fid.f_ver = 0;
3326 	}
3327 }
3328 
3329 /* request structure for OSTs */
3330 struct ost_body {
3331 	struct  obdo oa;
3332 };
3333 
3334 /* Key for FIEMAP to be used in get_info calls */
3335 struct ll_fiemap_info_key {
3336 	char    name[8];
3337 	struct  obdo oa;
3338 	struct  ll_user_fiemap fiemap;
3339 };
3340 
3341 void lustre_swab_ost_body(struct ost_body *b);
3342 void lustre_swab_ost_last_id(__u64 *id);
3343 void lustre_swab_fiemap(struct ll_user_fiemap *fiemap);
3344 
3345 void lustre_swab_lov_user_md_v1(struct lov_user_md_v1 *lum);
3346 void lustre_swab_lov_user_md_v3(struct lov_user_md_v3 *lum);
3347 void lustre_swab_lov_user_md_objects(struct lov_user_ost_data *lod,
3348 				     int stripe_count);
3349 void lustre_swab_lov_mds_md(struct lov_mds_md *lmm);
3350 
3351 /* llog_swab.c */
3352 void lustre_swab_llogd_body(struct llogd_body *d);
3353 void lustre_swab_llog_hdr(struct llog_log_hdr *h);
3354 void lustre_swab_llogd_conn_body(struct llogd_conn_body *d);
3355 void lustre_swab_llog_rec(struct llog_rec_hdr *rec);
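
/*
 * Illustrative sketch only (hypothetical wrapper): a reader that pulled a
 * record off the wire or from disk would swab it when the magic embedded in
 * lrh_type shows the other endianness (see LLOG_REC_HDR_NEEDS_SWABBING).
 */
static inline void llog_example_swab_rec_if_needed(struct llog_rec_hdr *rec)
{
	if (LLOG_REC_HDR_NEEDS_SWABBING(rec))
		lustre_swab_llog_rec(rec);
}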
3356 
3357 struct lustre_cfg;
3358 void lustre_swab_lustre_cfg(struct lustre_cfg *lcfg);
3359 
3360 /* Functions for dumping PTLRPC fields */
3361 void dump_rniobuf(struct niobuf_remote *rnb);
3362 void dump_ioo(struct obd_ioobj *nb);
3363 void dump_ost_body(struct ost_body *ob);
3364 void dump_rcs(__u32 *rc);
3365 
3366 /* security opcodes */
3367 enum sec_cmd {
3368 	SEC_CTX_INIT	    = 801,
3369 	SEC_CTX_INIT_CONT       = 802,
3370 	SEC_CTX_FINI	    = 803,
3371 	SEC_LAST_OPC,
3372 	SEC_FIRST_OPC	   = SEC_CTX_INIT
3373 };
3374 
3375 /*
3376  * capa related definitions
3377  */
3378 #define CAPA_HMAC_MAX_LEN       64
3379 #define CAPA_HMAC_KEY_MAX_LEN   56
3380 
3381 /* NB: take care when changing the sequence of elements in this struct,
3382  * because the offset info is used in find_capa()
3383  */
3384 struct lustre_capa {
3385 	struct lu_fid   lc_fid;	 /** fid */
3386 	__u64	   lc_opc;	 /** operations allowed */
3387 	__u64	   lc_uid;	 /** file owner */
3388 	__u64	   lc_gid;	 /** file group */
3389 	__u32	   lc_flags;       /** HMAC algorithm & flags */
3390 	__u32	   lc_keyid;       /** key# used for the capability */
3391 	__u32	   lc_timeout;     /** capa timeout value (sec) */
3392 /* FIXME: y2038 time_t overflow: */
3393 	__u32	   lc_expiry;      /** expiry time (sec) */
3394 	__u8	    lc_hmac[CAPA_HMAC_MAX_LEN];   /** HMAC */
3395 } __packed;
3396 
3397 void lustre_swab_lustre_capa(struct lustre_capa *c);
3398 
3399 /** lustre_capa::lc_opc */
3400 enum {
3401 	CAPA_OPC_BODY_WRITE   = 1 << 0,  /**< write object data */
3402 	CAPA_OPC_BODY_READ    = 1 << 1,  /**< read object data */
3403 	CAPA_OPC_INDEX_LOOKUP = 1 << 2,  /**< lookup object fid */
3404 	CAPA_OPC_INDEX_INSERT = 1 << 3,  /**< insert object fid */
3405 	CAPA_OPC_INDEX_DELETE = 1 << 4,  /**< delete object fid */
3406 	CAPA_OPC_OSS_WRITE    = 1 << 5,  /**< write oss object data */
3407 	CAPA_OPC_OSS_READ     = 1 << 6,  /**< read oss object data */
3408 	CAPA_OPC_OSS_TRUNC    = 1 << 7,  /**< truncate oss object */
3409 	CAPA_OPC_OSS_DESTROY  = 1 << 8,  /**< destroy oss object */
3410 	CAPA_OPC_META_WRITE   = 1 << 9,  /**< write object meta data */
3411 	CAPA_OPC_META_READ    = 1 << 10, /**< read object meta data */
3412 };
3413 
3414 #define CAPA_OPC_OSS_RW (CAPA_OPC_OSS_READ | CAPA_OPC_OSS_WRITE)
3415 #define CAPA_OPC_MDS_ONLY						   \
3416 	(CAPA_OPC_BODY_WRITE | CAPA_OPC_BODY_READ | CAPA_OPC_INDEX_LOOKUP | \
3417 	 CAPA_OPC_INDEX_INSERT | CAPA_OPC_INDEX_DELETE)
3418 #define CAPA_OPC_OSS_ONLY						   \
3419 	(CAPA_OPC_OSS_WRITE | CAPA_OPC_OSS_READ | CAPA_OPC_OSS_TRUNC |      \
3420 	 CAPA_OPC_OSS_DESTROY)
3421 #define CAPA_OPC_MDS_DEFAULT ~CAPA_OPC_OSS_ONLY
3422 #define CAPA_OPC_OSS_DEFAULT ~(CAPA_OPC_MDS_ONLY | CAPA_OPC_OSS_ONLY)
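
/*
 * Illustrative sketch only (hypothetical helper): checking whether a
 * capability grants every operation requested in @opc.
 */
static inline bool lustre_capa_example_allows(const struct lustre_capa *capa,
					      __u64 opc)
{
	return (capa->lc_opc & opc) == opc;
}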
3423 
3424 struct lustre_capa_key {
3425 	__u64   lk_seq;       /**< mds# */
3426 	__u32   lk_keyid;     /**< key# */
3427 	__u32   lk_padding;
3428 	__u8    lk_key[CAPA_HMAC_KEY_MAX_LEN];    /**< key */
3429 } __packed;
3430 
3431 /** The link ea holds 1 \a link_ea_entry for each hardlink */
3432 #define LINK_EA_MAGIC 0x11EAF1DFUL
3433 struct link_ea_header {
3434 	__u32 leh_magic;
3435 	__u32 leh_reccount;
3436 	__u64 leh_len;      /* total size */
3437 	/* future use */
3438 	__u32 padding1;
3439 	__u32 padding2;
3440 };
3441 
3442 /** Hardlink data is name and parent fid.
3443  * Stored in this crazy struct for maximum packing and endian-neutrality
3444  */
3445 struct link_ea_entry {
3446 	/** __u16 stored big-endian, unaligned */
3447 	unsigned char      lee_reclen[2];
3448 	unsigned char      lee_parent_fid[sizeof(struct lu_fid)];
3449 	char	       lee_name[0];
3450 } __packed;
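
/*
 * Illustrative sketch only (hypothetical helper): lee_reclen holds a __u16
 * in big-endian byte order regardless of host endianness, which is why it
 * is stored as two unsigned chars.
 */
static inline __u16 link_ea_entry_example_reclen(const struct link_ea_entry *lee)
{
	return ((__u16)lee->lee_reclen[0] << 8) | lee->lee_reclen[1];
}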
3451 
3452 /** fid2path request/reply structure */
3453 struct getinfo_fid2path {
3454 	struct lu_fid   gf_fid;
3455 	__u64	   gf_recno;
3456 	__u32	   gf_linkno;
3457 	__u32	   gf_pathlen;
3458 	char	    gf_path[0];
3459 } __packed;
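
/*
 * Illustrative sketch only (hypothetical helper): the reply buffer must hold
 * the fixed part of getinfo_fid2path plus gf_pathlen bytes for the path.
 */
static inline __u32 getinfo_fid2path_example_size(__u32 pathlen)
{
	return (__u32)sizeof(struct getinfo_fid2path) + pathlen;
}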
3460 
3461 void lustre_swab_fid2path(struct getinfo_fid2path *gf);
3462 
3463 /** path2parent request/reply structures */
3464 struct getparent {
3465 	struct lu_fid	gp_fid;		/**< parent FID */
3466 	__u32		gp_linkno;	/**< hardlink number */
3467 	__u32		gp_name_size;	/**< size of the name field */
3468 	char		gp_name[0];	/**< zero-terminated link name */
3469 } __packed;
3470 
3471 enum {
3472 	LAYOUT_INTENT_ACCESS    = 0,
3473 	LAYOUT_INTENT_READ      = 1,
3474 	LAYOUT_INTENT_WRITE     = 2,
3475 	LAYOUT_INTENT_GLIMPSE   = 3,
3476 	LAYOUT_INTENT_TRUNC     = 4,
3477 	LAYOUT_INTENT_RELEASE   = 5,
3478 	LAYOUT_INTENT_RESTORE   = 6
3479 };
3480 
3481 /* enqueue layout lock with intent */
3482 struct layout_intent {
3483 	__u32 li_opc; /* intent operation for enqueue, read, write etc */
3484 	__u32 li_flags;
3485 	__u64 li_start;
3486 	__u64 li_end;
3487 };
3488 
3489 void lustre_swab_layout_intent(struct layout_intent *li);
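
/*
 * Illustrative sketch only (hypothetical helper): a client enqueuing a
 * layout lock before a write would describe the byte range it intends to
 * touch with a layout_intent like this.
 */
static inline void layout_intent_example_init_write(struct layout_intent *li,
						    __u64 start, __u64 end)
{
	li->li_opc = LAYOUT_INTENT_WRITE;
	li->li_flags = 0;
	li->li_start = start;
	li->li_end = end;
}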
3490 
3491 /**
3492  * On the wire version of hsm_progress structure.
3493  *
3494  * Contains the userspace hsm_progress and some internal fields.
3495  */
3496 struct hsm_progress_kernel {
3497 	/* Field taken from struct hsm_progress */
3498 	struct lu_fid		hpk_fid;
3499 	__u64			hpk_cookie;
3500 	struct hsm_extent	hpk_extent;
3501 	__u16			hpk_flags;
3502 	__u16			hpk_errval; /* positive val */
3503 	__u32			hpk_padding1;
3504 	/* Additional fields */
3505 	__u64			hpk_data_version;
3506 	__u64			hpk_padding2;
3507 } __packed;
3508 
3509 void lustre_swab_hsm_user_state(struct hsm_user_state *hus);
3510 void lustre_swab_hsm_current_action(struct hsm_current_action *action);
3511 void lustre_swab_hsm_progress_kernel(struct hsm_progress_kernel *hpk);
3513 void lustre_swab_hsm_user_item(struct hsm_user_item *hui);
3514 void lustre_swab_hsm_request(struct hsm_request *hr);
3515 
3516 /** layout swap request structure
3517  * fid1 and fid2 are in mdt_body
3518  */
3519 struct mdc_swap_layouts {
3520 	__u64	   msl_flags;
3521 } __packed;
3522 
3523 void lustre_swab_swap_layouts(struct mdc_swap_layouts *msl);
3524 
3525 struct close_data {
3526 	struct lustre_handle	cd_handle;
3527 	struct lu_fid		cd_fid;
3528 	__u64			cd_data_version;
3529 	__u64			cd_reserved[8];
3530 };
3531 
3532 void lustre_swab_close_data(struct close_data *data);
3533 
3534 #endif
3535 /** @} lustreidl */
3536