/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2012, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/include/lustre/lustre_idl.h
 *
 * Lustre wire protocol definitions.
 */

/** \defgroup lustreidl lustreidl
 *
 * Lustre wire protocol definitions.
 *
 * ALL structs passing over the wire should be declared here.  Structs
 * that are used in interfaces with userspace should go in lustre_user.h.
 *
 * All structs being declared here should be built from simple fixed-size
 * types (__u8, __u16, __u32, __u64) or be built from other types or
 * structs also declared in this file.  Similarly, all flags and magic
 * values in those structs should also be declared here.  This ensures
 * that the Lustre wire protocol is not influenced by external dependencies.
 *
 * The only other acceptable items in this file are VERY SIMPLE accessor
 * functions to avoid callers grubbing inside the structures, and the
 * prototypes of the swabber functions for each struct.  Nothing that
 * depends on external functions or definitions should be in here.
 *
 * Structs must be properly aligned to put 64-bit values on an 8-byte
 * boundary.  Any structs being added here must also be added to
 * utils/wirecheck.c and "make newwiretest" run to regenerate the
 * utils/wiretest.c sources.  This allows us to verify that wire structs
 * have the proper alignment/size on all architectures.
 *
 * DO NOT CHANGE any of the structs, flags, values declared here and used
 * in released Lustre versions.  Some structs may have padding fields that
 * can be used.  Some structs might allow addition at the end (verify this
 * in the code to ensure that new/old clients that see this larger struct
 * do not fail, otherwise you need to implement protocol compatibility).
 *
 * We assume all nodes are either little-endian or big-endian, and we
 * always send messages in the sender's native format.  The receiver
 * detects the message format by checking the 'magic' field of the message
 * (see lustre_msg_swabbed() below).
 *
 * Each wire type has corresponding 'lustre_swab_xxxtypexxx()' routines,
 * implemented either here, inline (trivial implementations) or in
 * ptlrpc/pack_generic.c.  These 'swabbers' convert the type from "other"
 * endian, in-place in the message buffer.
 *
 * A swabber takes a single pointer argument.  The caller must already have
 * verified that the length of the message buffer >= sizeof (type).
 *
 * For variable length types, a second 'lustre_swab_v_xxxtypexxx()' routine
 * may be defined that swabs just the variable part, after the caller has
 * verified that the message buffer is large enough.
 *
 * @{
 */

#ifndef _LUSTRE_IDL_H_
#define _LUSTRE_IDL_H_

#include "../../../include/linux/libcfs/libcfs.h"
#include "../../../include/linux/lnet/types.h"

/* Defn's shared with user-space. */
#include "lustre_user.h"
#include "lustre_errno.h"

/*
 *  GENERAL STUFF
 */
/* FOO_REQUEST_PORTAL is for incoming requests on the FOO
 * FOO_REPLY_PORTAL   is for incoming replies on the FOO
 * FOO_BULK_PORTAL    is for incoming bulk on the FOO
 */

/* Lustre service names follow the format:
 * service name + MDT + seq name
 */
#define LUSTRE_MDT_MAXNAMELEN	80

#define CONNMGR_REQUEST_PORTAL		1
#define CONNMGR_REPLY_PORTAL		2
//#define OSC_REQUEST_PORTAL		3
#define OSC_REPLY_PORTAL		4
//#define OSC_BULK_PORTAL		5
#define OST_IO_PORTAL			6
#define OST_CREATE_PORTAL		7
#define OST_BULK_PORTAL			8
//#define MDC_REQUEST_PORTAL		9
#define MDC_REPLY_PORTAL		10
//#define MDC_BULK_PORTAL		11
#define MDS_REQUEST_PORTAL		12
//#define MDS_REPLY_PORTAL		13
#define MDS_BULK_PORTAL			14
#define LDLM_CB_REQUEST_PORTAL		15
#define LDLM_CB_REPLY_PORTAL		16
#define LDLM_CANCEL_REQUEST_PORTAL	17
#define LDLM_CANCEL_REPLY_PORTAL	18
//#define PTLBD_REQUEST_PORTAL		19
//#define PTLBD_REPLY_PORTAL		20
//#define PTLBD_BULK_PORTAL		21
#define MDS_SETATTR_PORTAL		22
#define MDS_READPAGE_PORTAL		23
#define OUT_PORTAL			24

#define MGC_REPLY_PORTAL		25
#define MGS_REQUEST_PORTAL		26
#define MGS_REPLY_PORTAL		27
#define OST_REQUEST_PORTAL		28
#define FLD_REQUEST_PORTAL		29
#define SEQ_METADATA_PORTAL		30
#define SEQ_DATA_PORTAL			31
#define SEQ_CONTROLLER_PORTAL		32
#define MGS_BULK_PORTAL			33

/* Portal 63 is reserved for the Cray Inc DVS - nic@cray.com, roe@cray.com, n8851@cray.com */

/* packet types */
#define PTL_RPC_MSG_REQUEST	4711
#define PTL_RPC_MSG_ERR		4712
#define PTL_RPC_MSG_REPLY	4713

/* DON'T use swabbed values of MAGIC as magic! */
#define LUSTRE_MSG_MAGIC_V2		0x0BD00BD3
#define LUSTRE_MSG_MAGIC_V2_SWABBED	0xD30BD00B

#define LUSTRE_MSG_MAGIC LUSTRE_MSG_MAGIC_V2

#define PTLRPC_MSG_VERSION	0x00000003
#define LUSTRE_VERSION_MASK	0xffff0000
#define LUSTRE_OBD_VERSION	0x00010000
#define LUSTRE_MDS_VERSION	0x00020000
#define LUSTRE_OST_VERSION	0x00030000
#define LUSTRE_DLM_VERSION	0x00040000
#define LUSTRE_LOG_VERSION	0x00050000
#define LUSTRE_MGS_VERSION	0x00060000

/**
 * Describes a range of sequence numbers: lsr_start is included in the
 * range but lsr_end is not.
 * The same structure is used in the fld module, where the lsr_index
 * field holds the mdt id of the home mdt.
 */
struct lu_seq_range {
	__u64 lsr_start;
	__u64 lsr_end;
	__u32 lsr_index;
	__u32 lsr_flags;
};

#define LU_SEQ_RANGE_MDT	0x0
#define LU_SEQ_RANGE_OST	0x1
#define LU_SEQ_RANGE_ANY	0x3

#define LU_SEQ_RANGE_MASK	0x3

static inline unsigned fld_range_type(const struct lu_seq_range *range)
{
	return range->lsr_flags & LU_SEQ_RANGE_MASK;
}

static inline int fld_range_is_ost(const struct lu_seq_range *range)
{
	return fld_range_type(range) == LU_SEQ_RANGE_OST;
}

static inline int fld_range_is_mdt(const struct lu_seq_range *range)
{
	return fld_range_type(range) == LU_SEQ_RANGE_MDT;
}

/**
 * The ANY range type is used when the fld client sends an fld query
 * request but does not know whether the seq is on an MDT or an OST;
 * it sends the request with the ANY type, so either seq type returned
 * by the lookup is acceptable.
 */
static inline unsigned fld_range_is_any(const struct lu_seq_range *range)
{
	return fld_range_type(range) == LU_SEQ_RANGE_ANY;
}

static inline void fld_range_set_type(struct lu_seq_range *range,
				      unsigned flags)
{
	range->lsr_flags |= flags;
}

static inline void fld_range_set_mdt(struct lu_seq_range *range)
{
	fld_range_set_type(range, LU_SEQ_RANGE_MDT);
}

static inline void fld_range_set_ost(struct lu_seq_range *range)
{
	fld_range_set_type(range, LU_SEQ_RANGE_OST);
}

static inline void fld_range_set_any(struct lu_seq_range *range)
{
	fld_range_set_type(range, LU_SEQ_RANGE_ANY);
}

/**
 * returns width of given range \a range
 */
static inline __u64 range_space(const struct lu_seq_range *range)
{
	return range->lsr_end - range->lsr_start;
}

/**
 * initialize range to zero
 */
static inline void range_init(struct lu_seq_range *range)
{
	memset(range, 0, sizeof(*range));
}

/**
 * check if given seq id \a s is within given range \a range
 */
static inline int range_within(const struct lu_seq_range *range,
			       __u64 s)
{
	return s >= range->lsr_start && s < range->lsr_end;
}

static inline int range_is_sane(const struct lu_seq_range *range)
{
	return (range->lsr_end >= range->lsr_start);
}

static inline int range_is_zero(const struct lu_seq_range *range)
{
	return (range->lsr_start == 0 && range->lsr_end == 0);
}

static inline int range_is_exhausted(const struct lu_seq_range *range)
{
	return range_space(range) == 0;
}

/* return 0 if the two ranges have the same location */
static inline int range_compare_loc(const struct lu_seq_range *r1,
				    const struct lu_seq_range *r2)
{
	return r1->lsr_index != r2->lsr_index ||
	       r1->lsr_flags != r2->lsr_flags;
}

#define DRANGE "[%#16.16Lx-%#16.16Lx):%x:%s"

#define PRANGE(range)		\
	(range)->lsr_start,	\
	(range)->lsr_end,	\
	(range)->lsr_index,	\
	fld_range_is_mdt(range) ? "mdt" : "ost"

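/*
 * Minimal usage sketch (illustration only, not part of the original
 * header): DRANGE supplies the format string and PRANGE() the matching
 * argument list, so a range can be logged in one debug statement.  The
 * libcfs CDEBUG() macro is assumed to be available via the includes
 * above; the helper name is hypothetical.
 */
static inline void lu_seq_range_print_example(const struct lu_seq_range *r)
{
	CDEBUG(D_INFO, "seq range "DRANGE"\n", PRANGE(r));
}
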
/** \defgroup lu_fid lu_fid
 * @{ */

/**
 * Flags for lustre_mdt_attrs::lma_compat and lustre_mdt_attrs::lma_incompat.
 * Deprecated since HSM and SOM attributes are now stored in separate on-disk
 * xattr.
 */
enum lma_compat {
	LMAC_HSM	= 0x00000001,
	LMAC_SOM	= 0x00000002,
	LMAC_NOT_IN_OI	= 0x00000004, /* the object does NOT need OI mapping */
	LMAC_FID_ON_OST = 0x00000008, /* For OST-object, its OI mapping is
				       * under /O/<seq>/d<x>. */
};

/**
 * Masks for all features that should be supported by a Lustre version to
 * access a specific file.
 * This information is stored in lustre_mdt_attrs::lma_incompat.
 */
enum lma_incompat {
	LMAI_RELEASED		= 0x00000001, /* file is released */
	LMAI_AGENT		= 0x00000002, /* agent inode */
	LMAI_REMOTE_PARENT	= 0x00000004, /* the parent of the object
						 is on the remote MDT */
};

#define LMA_INCOMPAT_SUPP	(LMAI_AGENT | LMAI_REMOTE_PARENT)

/**
 * fid constants
 */
enum {
	/** LASTID file has zero OID */
	LUSTRE_FID_LASTID_OID = 0UL,
	/** initial fid id value */
	LUSTRE_FID_INIT_OID  = 1UL
};

/** returns fid object sequence */
static inline __u64 fid_seq(const struct lu_fid *fid)
{
	return fid->f_seq;
}

/** returns fid object id */
static inline __u32 fid_oid(const struct lu_fid *fid)
{
	return fid->f_oid;
}

/** returns fid object version */
static inline __u32 fid_ver(const struct lu_fid *fid)
{
	return fid->f_ver;
}

static inline void fid_zero(struct lu_fid *fid)
{
	memset(fid, 0, sizeof(*fid));
}

static inline __u64 fid_ver_oid(const struct lu_fid *fid)
{
	return ((__u64)fid_ver(fid) << 32 | fid_oid(fid));
}

/* The copytool uses a 32-bit bitmask field to encode the archive IDs it
 * serves when registering with the MDT through KUC.
 * archive num = 0 => all archives
 * archive num from 1 to 32
 */
#define LL_HSM_MAX_ARCHIVE (sizeof(__u32) * 8)

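/*
 * Hypothetical sketch (not in the original header) of how an archive
 * number maps onto the 32-bit registration bitmask described above:
 * archive 0 selects all archives, otherwise bit (num - 1) is set.
 */
static inline __u32 ll_hsm_archive_mask_example(unsigned int archive_num)
{
	if (archive_num == 0)			/* 0 => request all archives */
		return 0xffffffffU;
	return 1U << (archive_num - 1);		/* archives 1..LL_HSM_MAX_ARCHIVE */
}
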
/**
 * Note that reserved SEQ numbers below 12 will conflict with ldiskfs
 * inodes in the IGIF namespace, so these reserved SEQ numbers can be
 * used for other purposes and not risk collisions with existing inodes.
 *
 * Different FID Format
 * http://arch.lustre.org/index.php?title=Interoperability_fids_zfs#NEW.0
 */
enum fid_seq {
	FID_SEQ_OST_MDT0	= 0,
	FID_SEQ_LLOG		= 1, /* unnamed llogs */
	FID_SEQ_ECHO		= 2,
	FID_SEQ_OST_MDT1	= 3,
	FID_SEQ_OST_MAX		= 9, /* Max MDT count before OST_on_FID */
	FID_SEQ_LLOG_NAME	= 10, /* named llogs */
	FID_SEQ_RSVD		= 11,
	FID_SEQ_IGIF		= 12,
	FID_SEQ_IGIF_MAX	= 0x0ffffffffULL,
	FID_SEQ_IDIF		= 0x100000000ULL,
	FID_SEQ_IDIF_MAX	= 0x1ffffffffULL,
	/* Normal FID sequence starts from this value, i.e. 1<<33 */
	FID_SEQ_START		= 0x200000000ULL,
	/* sequence for local pre-defined FIDs listed in local_oid */
	FID_SEQ_LOCAL_FILE	= 0x200000001ULL,
	FID_SEQ_DOT_LUSTRE	= 0x200000002ULL,
	/* sequence is used for local named objects FIDs generated
	 * by local_object_storage library */
	FID_SEQ_LOCAL_NAME	= 0x200000003ULL,
	/* Because current FLD will only cache the fid sequence, instead
	 * of oid on the client side, if the FID needs to be exposed to
	 * clients sides, it needs to make sure all of fids under one
	 * sequence will be located in one MDT. */
	FID_SEQ_SPECIAL		= 0x200000004ULL,
	FID_SEQ_QUOTA		= 0x200000005ULL,
	FID_SEQ_QUOTA_GLB	= 0x200000006ULL,
	FID_SEQ_ROOT		= 0x200000007ULL,  /* Located on MDT0 */
	FID_SEQ_NORMAL		= 0x200000400ULL,
	FID_SEQ_LOV_DEFAULT	= 0xffffffffffffffffULL
};

#define OBIF_OID_MAX_BITS	32
#define OBIF_MAX_OID		(1ULL << OBIF_OID_MAX_BITS)
#define OBIF_OID_MASK		((1ULL << OBIF_OID_MAX_BITS) - 1)
#define IDIF_OID_MAX_BITS	48
#define IDIF_MAX_OID		(1ULL << IDIF_OID_MAX_BITS)
#define IDIF_OID_MASK		((1ULL << IDIF_OID_MAX_BITS) - 1)

/** OID for FID_SEQ_SPECIAL */
enum special_oid {
	/* Big Filesystem Lock to serialize rename operations */
	FID_OID_SPECIAL_BFL	= 1UL,
};

/** OID for FID_SEQ_DOT_LUSTRE */
enum dot_lustre_oid {
	FID_OID_DOT_LUSTRE	= 1UL,
	FID_OID_DOT_LUSTRE_OBF	= 2UL,
};

static inline int fid_seq_is_mdt0(__u64 seq)
{
	return (seq == FID_SEQ_OST_MDT0);
}

static inline int fid_seq_is_mdt(const __u64 seq)
{
	return seq == FID_SEQ_OST_MDT0 || seq >= FID_SEQ_NORMAL;
}

static inline int fid_seq_is_echo(__u64 seq)
{
	return (seq == FID_SEQ_ECHO);
}

static inline int fid_is_echo(const struct lu_fid *fid)
{
	return fid_seq_is_echo(fid_seq(fid));
}

static inline int fid_seq_is_llog(__u64 seq)
{
	return (seq == FID_SEQ_LLOG);
}

static inline int fid_is_llog(const struct lu_fid *fid)
{
	/* file with OID == 0 is not llog but contains last oid */
	return fid_seq_is_llog(fid_seq(fid)) && fid_oid(fid) > 0;
}

static inline int fid_seq_is_rsvd(const __u64 seq)
{
	return (seq > FID_SEQ_OST_MDT0 && seq <= FID_SEQ_RSVD);
}

static inline int fid_seq_is_special(const __u64 seq)
{
	return seq == FID_SEQ_SPECIAL;
}

static inline int fid_seq_is_local_file(const __u64 seq)
{
	return seq == FID_SEQ_LOCAL_FILE ||
	       seq == FID_SEQ_LOCAL_NAME;
}

static inline int fid_seq_is_root(const __u64 seq)
{
	return seq == FID_SEQ_ROOT;
}

static inline int fid_seq_is_dot(const __u64 seq)
{
	return seq == FID_SEQ_DOT_LUSTRE;
}

static inline int fid_seq_is_default(const __u64 seq)
{
	return seq == FID_SEQ_LOV_DEFAULT;
}

static inline int fid_is_mdt0(const struct lu_fid *fid)
{
	return fid_seq_is_mdt0(fid_seq(fid));
}

static inline void lu_root_fid(struct lu_fid *fid)
{
	fid->f_seq = FID_SEQ_ROOT;
	fid->f_oid = 1;
	fid->f_ver = 0;
}

/**
 * Check if a fid is igif or not.
 * \param fid the fid to be tested.
 * \return true if the fid is an igif; otherwise false.
 */
static inline int fid_seq_is_igif(const __u64 seq)
{
	return seq >= FID_SEQ_IGIF && seq <= FID_SEQ_IGIF_MAX;
}

static inline int fid_is_igif(const struct lu_fid *fid)
{
	return fid_seq_is_igif(fid_seq(fid));
}

/**
 * Check if a fid is idif or not.
 * \param fid the fid to be tested.
 * \return true if the fid is an idif; otherwise false.
 */
static inline int fid_seq_is_idif(const __u64 seq)
{
	return seq >= FID_SEQ_IDIF && seq <= FID_SEQ_IDIF_MAX;
}

static inline int fid_is_idif(const struct lu_fid *fid)
{
	return fid_seq_is_idif(fid_seq(fid));
}

static inline int fid_is_local_file(const struct lu_fid *fid)
{
	return fid_seq_is_local_file(fid_seq(fid));
}

static inline int fid_seq_is_norm(const __u64 seq)
{
	return (seq >= FID_SEQ_NORMAL);
}

static inline int fid_is_norm(const struct lu_fid *fid)
{
	return fid_seq_is_norm(fid_seq(fid));
}

/* convert an OST objid into an IDIF FID SEQ number */
static inline __u64 fid_idif_seq(__u64 id, __u32 ost_idx)
{
	return FID_SEQ_IDIF | (ost_idx << 16) | ((id >> 32) & 0xffff);
}

/* convert a packed IDIF FID into an OST objid */
static inline __u64 fid_idif_id(__u64 seq, __u32 oid, __u32 ver)
{
	return ((__u64)ver << 48) | ((seq & 0xffff) << 32) | oid;
}

/* extract ost index from IDIF FID */
static inline __u32 fid_idif_ost_idx(const struct lu_fid *fid)
{
	return (fid_seq(fid) >> 16) & 0xffff;
}

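/*
 * Worked example (illustration only): the IDIF SEQ packs 0x1 in the top
 * bits, the 16-bit OST index, and the top 16 bits of the 48-bit object
 * id; fid_idif_id() reassembles the objid from seq/oid/ver.  For
 * objid = 0x123456789ab on OST index 7:
 *
 *   fid_idif_seq(0x123456789ab, 7)
 *	= 0x100000000 | (7 << 16) | 0x0123	= 0x100070123
 *   oid (low 32 bits of objid)			= 0x456789ab
 *   fid_idif_id(0x100070123, 0x456789ab, 0)	= 0x123456789ab (round trip)
 *   fid_idif_ost_idx() on such a FID		= 7
 */
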
/* extract OST sequence (group) from a wire ost_id (id/seq) pair */
static inline __u64 ostid_seq(const struct ost_id *ostid)
{
	if (fid_seq_is_mdt0(ostid->oi.oi_seq))
		return FID_SEQ_OST_MDT0;

	if (fid_seq_is_default(ostid->oi.oi_seq))
		return FID_SEQ_LOV_DEFAULT;

	if (fid_is_idif(&ostid->oi_fid))
		return FID_SEQ_OST_MDT0;

	return fid_seq(&ostid->oi_fid);
}

/* extract OST objid from a wire ost_id (id/seq) pair */
static inline __u64 ostid_id(const struct ost_id *ostid)
{
	if (fid_seq_is_mdt0(ostid_seq(ostid)))
		return ostid->oi.oi_id & IDIF_OID_MASK;

	if (fid_is_idif(&ostid->oi_fid))
		return fid_idif_id(fid_seq(&ostid->oi_fid),
				   fid_oid(&ostid->oi_fid), 0);

	return fid_oid(&ostid->oi_fid);
}

static inline void ostid_set_seq(struct ost_id *oi, __u64 seq)
{
	if (fid_seq_is_mdt0(seq) || fid_seq_is_default(seq)) {
		oi->oi.oi_seq = seq;
	} else {
		oi->oi_fid.f_seq = seq;
		/* Note: if f_oid + f_ver is zero, we need to init it
		 * to 1, otherwise ostid_seq will treat this
		 * as an old ostid (oi_seq == 0) */
		if (oi->oi_fid.f_oid == 0 && oi->oi_fid.f_ver == 0)
			oi->oi_fid.f_oid = LUSTRE_FID_INIT_OID;
	}
}

static inline void ostid_set_seq_mdt0(struct ost_id *oi)
{
	ostid_set_seq(oi, FID_SEQ_OST_MDT0);
}

static inline void ostid_set_seq_echo(struct ost_id *oi)
{
	ostid_set_seq(oi, FID_SEQ_ECHO);
}

static inline void ostid_set_seq_llog(struct ost_id *oi)
{
	ostid_set_seq(oi, FID_SEQ_LLOG);
}

/**
 * Note: we need to check oi_seq to decide where to set oi_id,
 * so oi_seq should always be set ahead of oi_id.
 */
static inline void ostid_set_id(struct ost_id *oi, __u64 oid)
{
	if (fid_seq_is_mdt0(ostid_seq(oi))) {
		if (oid >= IDIF_MAX_OID) {
			CERROR("Bad %llu to set "DOSTID"\n",
			       oid, POSTID(oi));
			return;
		}
		oi->oi.oi_id = oid;
	} else {
		if (oid > OBIF_MAX_OID) {
			CERROR("Bad %llu to set "DOSTID"\n",
			       oid, POSTID(oi));
			return;
		}
		oi->oi_fid.f_oid = oid;
	}
}

static inline void ostid_inc_id(struct ost_id *oi)
{
	if (fid_seq_is_mdt0(ostid_seq(oi))) {
		if (unlikely(ostid_id(oi) + 1 > IDIF_MAX_OID)) {
			CERROR("Bad inc "DOSTID"\n", POSTID(oi));
			return;
		}
		oi->oi.oi_id++;
	} else {
		oi->oi_fid.f_oid++;
	}
}

static inline void ostid_dec_id(struct ost_id *oi)
{
	if (fid_seq_is_mdt0(ostid_seq(oi)))
		oi->oi.oi_id--;
	else
		oi->oi_fid.f_oid--;
}

/**
 * Unpack an OST object id/seq (group) into a FID.  This is needed for
 * converting all obdo, lmm, lsm, etc. 64-bit id/seq pairs into proper
 * FIDs.  Note that if an id/seq is already in FID/IDIF format it will
 * be passed through unchanged.  Only legacy OST objects in "group 0"
 * will be mapped into the IDIF namespace so that they can fit into the
 * struct lu_fid fields without loss.  For reference see:
 * http://arch.lustre.org/index.php?title=Interoperability_fids_zfs
 */
static inline int ostid_to_fid(struct lu_fid *fid, struct ost_id *ostid,
			       __u32 ost_idx)
{
	if (ost_idx > 0xffff) {
		CERROR("bad ost_idx, "DOSTID" ost_idx:%u\n", POSTID(ostid),
		       ost_idx);
		return -EBADF;
	}

	if (fid_seq_is_mdt0(ostid_seq(ostid))) {
		/* This is a "legacy" (old 1.x/2.early) OST object in "group 0"
		 * that we map into the IDIF namespace.  It allows up to 2^48
		 * objects per OST, as this is the object namespace that has
		 * been in production for years.  This can handle create rates
		 * of 1M objects/s/OST for 9 years, or combinations thereof. */
		if (ostid_id(ostid) >= IDIF_MAX_OID) {
			CERROR("bad MDT0 id, "DOSTID" ost_idx:%u\n",
			       POSTID(ostid), ost_idx);
			return -EBADF;
		}
		fid->f_seq = fid_idif_seq(ostid_id(ostid), ost_idx);
		/* truncate to 32 bits by assignment */
		fid->f_oid = ostid_id(ostid);
		/* in theory, not currently used */
		fid->f_ver = ostid_id(ostid) >> 48;
	} else /* if (fid_seq_is_idif(seq) || fid_seq_is_norm(seq)) */ {
		/* This is either an IDIF object, which identifies objects
		 * across all OSTs, or a regular FID.  The IDIF namespace
		 * maps legacy OST objects into the FID namespace.  In both
		 * cases, we just pass the FID through, no conversion
		 * needed. */
		if (ostid->oi_fid.f_ver != 0) {
			CERROR("bad MDT0 id, "DOSTID" ost_idx:%u\n",
			       POSTID(ostid), ost_idx);
			return -EBADF;
		}
		*fid = ostid->oi_fid;
	}

	return 0;
}

/* pack any OST FID into an ostid (id/seq) for the wire/disk */
static inline int fid_to_ostid(const struct lu_fid *fid, struct ost_id *ostid)
{
	if (unlikely(fid_seq_is_igif(fid->f_seq))) {
		CERROR("bad IGIF, "DFID"\n", PFID(fid));
		return -EBADF;
	}

	if (fid_is_idif(fid)) {
		ostid_set_seq_mdt0(ostid);
		ostid_set_id(ostid, fid_idif_id(fid_seq(fid), fid_oid(fid),
						fid_ver(fid)));
	} else {
		ostid->oi_fid = *fid;
	}

	return 0;
}

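/*
 * Usage sketch (illustration only, function name hypothetical):
 * converting a legacy "group 0" ost_id to a FID and back.  A pre-2.4
 * object id on OST index 3 maps into the IDIF namespace and
 * round-trips unchanged.
 */
static inline int ostid_fid_roundtrip_example(void)
{
	struct ost_id oi;
	struct lu_fid fid;
	int rc;

	ostid_set_seq_mdt0(&oi);		/* legacy "group 0" object */
	ostid_set_id(&oi, 42);
	rc = ostid_to_fid(&fid, &oi, 3);	/* now an IDIF FID for OST #3 */
	if (rc == 0)
		rc = fid_to_ostid(&fid, &oi);	/* back to id 42, seq 0 */
	return rc;
}
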
/* Check whether the fid is for LAST_ID */
static inline int fid_is_last_id(const struct lu_fid *fid)
{
	return (fid_oid(fid) == 0);
}

/**
 * Get inode number from an igif.
 * \param fid an igif to get inode number from.
 * \return inode number for the igif.
 */
static inline ino_t lu_igif_ino(const struct lu_fid *fid)
{
	return fid_seq(fid);
}

void lustre_swab_ost_id(struct ost_id *oid);

/**
 * Get inode generation from an igif.
 * \param fid an igif to get inode generation from.
 * \return inode generation for the igif.
 */
static inline __u32 lu_igif_gen(const struct lu_fid *fid)
{
	return fid_oid(fid);
}

/**
 * Build igif from the inode number/generation.
 */
static inline void lu_igif_build(struct lu_fid *fid, __u32 ino, __u32 gen)
{
	fid->f_seq = ino;
	fid->f_oid = gen;
	fid->f_ver = 0;
}

/*
 * Fids are transmitted across network (in the sender byte-ordering),
 * and stored on disk in big-endian order.
 */
static inline void fid_cpu_to_le(struct lu_fid *dst, const struct lu_fid *src)
{
	dst->f_seq = cpu_to_le64(fid_seq(src));
	dst->f_oid = cpu_to_le32(fid_oid(src));
	dst->f_ver = cpu_to_le32(fid_ver(src));
}

static inline void fid_le_to_cpu(struct lu_fid *dst, const struct lu_fid *src)
{
	dst->f_seq = le64_to_cpu(fid_seq(src));
	dst->f_oid = le32_to_cpu(fid_oid(src));
	dst->f_ver = le32_to_cpu(fid_ver(src));
}

static inline void fid_cpu_to_be(struct lu_fid *dst, const struct lu_fid *src)
{
	dst->f_seq = cpu_to_be64(fid_seq(src));
	dst->f_oid = cpu_to_be32(fid_oid(src));
	dst->f_ver = cpu_to_be32(fid_ver(src));
}

static inline void fid_be_to_cpu(struct lu_fid *dst, const struct lu_fid *src)
{
	dst->f_seq = be64_to_cpu(fid_seq(src));
	dst->f_oid = be32_to_cpu(fid_oid(src));
	dst->f_ver = be32_to_cpu(fid_ver(src));
}

static inline int fid_is_sane(const struct lu_fid *fid)
{
	return fid != NULL &&
	       ((fid_seq(fid) >= FID_SEQ_START && fid_ver(fid) == 0) ||
		fid_is_igif(fid) || fid_is_idif(fid) ||
		fid_seq_is_rsvd(fid_seq(fid)));
}

static inline int fid_is_zero(const struct lu_fid *fid)
{
	return fid_seq(fid) == 0 && fid_oid(fid) == 0;
}

void lustre_swab_lu_fid(struct lu_fid *fid);
void lustre_swab_lu_seq_range(struct lu_seq_range *range);

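/*
 * Sketch of a trivial swabber (the real implementations live in
 * ptlrpc/pack_generic.c; this is an illustrative approximation, and the
 * _sketch name is hypothetical): each fixed-size field is byte-swapped
 * in place, matching the in-place contract described at the top of this
 * file.
 */
static inline void lustre_swab_lu_fid_sketch(struct lu_fid *fid)
{
	__swab64s(&fid->f_seq);		/* 64-bit sequence */
	__swab32s(&fid->f_oid);		/* 32-bit object id */
	__swab32s(&fid->f_ver);		/* 32-bit version */
}
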
static inline int lu_fid_eq(const struct lu_fid *f0, const struct lu_fid *f1)
{
	return memcmp(f0, f1, sizeof(*f0)) == 0;
}

#define __diff_normalize(val0, val1)				\
({								\
	typeof(val0) __val0 = (val0);				\
	typeof(val1) __val1 = (val1);				\
								\
	(__val0 == __val1 ? 0 : __val0 > __val1 ? 1 : -1);	\
})

static inline int lu_fid_cmp(const struct lu_fid *f0,
			     const struct lu_fid *f1)
{
	return
		__diff_normalize(fid_seq(f0), fid_seq(f1)) ?:
		__diff_normalize(fid_oid(f0), fid_oid(f1)) ?:
		__diff_normalize(fid_ver(f0), fid_ver(f1));
}

static inline void ostid_cpu_to_le(const struct ost_id *src_oi,
				   struct ost_id *dst_oi)
{
	if (fid_seq_is_mdt0(ostid_seq(src_oi))) {
		dst_oi->oi.oi_id = cpu_to_le64(src_oi->oi.oi_id);
		dst_oi->oi.oi_seq = cpu_to_le64(src_oi->oi.oi_seq);
	} else {
		fid_cpu_to_le(&dst_oi->oi_fid, &src_oi->oi_fid);
	}
}

static inline void ostid_le_to_cpu(const struct ost_id *src_oi,
				   struct ost_id *dst_oi)
{
	if (fid_seq_is_mdt0(ostid_seq(src_oi))) {
		dst_oi->oi.oi_id = le64_to_cpu(src_oi->oi.oi_id);
		dst_oi->oi.oi_seq = le64_to_cpu(src_oi->oi.oi_seq);
	} else {
		fid_le_to_cpu(&dst_oi->oi_fid, &src_oi->oi_fid);
	}
}

/** @} lu_fid */

/** \defgroup lu_dir lu_dir
 * @{ */

/**
 * Enumeration of possible directory entry attributes.
 *
 * Attributes follow the directory entry header in the order they appear
 * in this enumeration.
 */
enum lu_dirent_attrs {
	LUDA_FID		= 0x0001,
	LUDA_TYPE		= 0x0002,
	LUDA_64BITHASH		= 0x0004,

	/* The following attrs are used for MDT internal only,
	 * not visible to client */

	/* Verify the dirent consistency */
	LUDA_VERIFY		= 0x8000,
	/* Only check but not repair the dirent inconsistency */
	LUDA_VERIFY_DRYRUN	= 0x4000,
	/* The dirent has been repaired, or is to be repaired (dryrun). */
	LUDA_REPAIR		= 0x2000,
	/* The system is upgraded, has been or is to be repaired (dryrun). */
	LUDA_UPGRADE		= 0x1000,
	/* Ignore this record, go to next directly. */
	LUDA_IGNORE		= 0x0800,
};

#define LU_DIRENT_ATTRS_MASK	0xf800

/**
 * Layout of readdir pages, as transmitted on wire.
 */
struct lu_dirent {
	/** valid if LUDA_FID is set. */
	struct lu_fid lde_fid;
	/** a unique entry identifier: a hash or an offset. */
	__u64	 lde_hash;
	/** total record length, including all attributes. */
	__u16	 lde_reclen;
	/** name length */
	__u16	 lde_namelen;
	/** optional variable size attributes following this entry.
	 *  taken from enum lu_dirent_attrs.
	 */
	__u32	 lde_attrs;
	/** name is followed by the attributes indicated in ->ldp_attrs, in
	 *  their natural order. After the last attribute, padding bytes are
	 *  added to make ->lde_reclen a multiple of 8.
	 */
	char	  lde_name[0];
};

/*
 * Definitions of optional directory entry attributes formats.
 *
 * Individual attributes do not have their length encoded in a generic way.
 * It is assumed that the consumer of an attribute knows its format. This
 * means that it is impossible to skip over an unknown attribute, except by
 * skipping over all remaining attributes (by using ->lde_reclen), which is
 * not too constraining, because new server versions will append new
 * attributes at the end of an entry.
 */

/**
 * Fid directory attribute: a fid of an object referenced by the entry. This
 * will almost always be requested by the client and supplied by the server.
 *
 * Aligned to 8 bytes.
 */
/* To have compatibility with 1.8, let's have fid in lu_dirent struct. */

/**
 * File type.
 *
 * Aligned to 2 bytes.
 */
struct luda_type {
	__u16 lt_type;
};

#ifndef IFSHIFT
#define IFSHIFT			12
#endif

#ifndef IFTODT
#define IFTODT(type)		(((type) & S_IFMT) >> IFSHIFT)
#endif
#ifndef DTTOIF
#define DTTOIF(dirtype)		((dirtype) << IFSHIFT)
#endif

struct lu_dirpage {
	__u64	    ldp_hash_start;
	__u64	    ldp_hash_end;
	__u32	    ldp_flags;
	__u32	    ldp_pad0;
	struct lu_dirent ldp_entries[0];
};

enum lu_dirpage_flags {
	/**
	 * dirpage contains no entry.
	 */
	LDF_EMPTY   = 1 << 0,
	/**
	 * last entry's lde_hash equals ldp_hash_end.
	 */
	LDF_COLLIDE = 1 << 1
};

static inline struct lu_dirent *lu_dirent_start(struct lu_dirpage *dp)
{
	if (le32_to_cpu(dp->ldp_flags) & LDF_EMPTY)
		return NULL;
	else
		return dp->ldp_entries;
}

static inline struct lu_dirent *lu_dirent_next(struct lu_dirent *ent)
{
	struct lu_dirent *next;

	if (le16_to_cpu(ent->lde_reclen) != 0)
		next = ((void *)ent) + le16_to_cpu(ent->lde_reclen);
	else
		next = NULL;

	return next;
}

static inline int lu_dirent_calc_size(int namelen, __u16 attr)
{
	int size;

	if (attr & LUDA_TYPE) {
		const unsigned align = sizeof(struct luda_type) - 1;

		size = (sizeof(struct lu_dirent) + namelen + align) & ~align;
		size += sizeof(struct luda_type);
	} else
		size = sizeof(struct lu_dirent) + namelen;

	return (size + 7) & ~7;
}

static inline int lu_dirent_size(struct lu_dirent *ent)
{
	if (le16_to_cpu(ent->lde_reclen) == 0) {
		return lu_dirent_calc_size(le16_to_cpu(ent->lde_namelen),
					   le32_to_cpu(ent->lde_attrs));
	}
	return le16_to_cpu(ent->lde_reclen);
}

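/*
 * Usage sketch (illustration only, function name hypothetical): walking
 * the entries of one lu_dirpage as received from the server.  Entries
 * are traversed with lu_dirent_start()/lu_dirent_next(); the entry
 * whose lde_reclen is 0 terminates the chain (real callers also bound
 * the walk by ldp_hash_end).
 */
static inline int lu_dirpage_count_example(struct lu_dirpage *dp)
{
	struct lu_dirent *ent;
	int count = 0;

	for (ent = lu_dirent_start(dp); ent != NULL;
	     ent = lu_dirent_next(ent))
		count++;		/* one wire-format entry */

	return count;
}
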
#define MDS_DIR_END_OFF 0xfffffffffffffffeULL

/**
 * MDS_READPAGE page size
 *
 * This is the directory page size packed in MDS_READPAGE RPC.
 * It's different from PAGE_CACHE_SIZE because the client needs to
 * access the struct lu_dirpage header packed at the beginning of
 * the "page"; without this fixed size there would be no way to find
 * the lu_dirpage headers if client and server PAGE_CACHE_SIZE differ.
 */
#define LU_PAGE_SHIFT 12
#define LU_PAGE_SIZE  (1UL << LU_PAGE_SHIFT)
#define LU_PAGE_MASK  (~(LU_PAGE_SIZE - 1))

#define LU_PAGE_COUNT (1 << (PAGE_CACHE_SHIFT - LU_PAGE_SHIFT))

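/*
 * Worked example: with LU_PAGE_SIZE fixed at 4096 bytes, a node using
 * 64KiB pages (PAGE_CACHE_SHIFT == 16) carries
 * LU_PAGE_COUNT = 1 << (16 - 12) = 16 lu_dirpages per cache page,
 * each starting with its own struct lu_dirpage header.
 */
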
/** @} lu_dir */

struct lustre_handle {
	__u64 cookie;
};

#define DEAD_HANDLE_MAGIC 0xdeadbeefcafebabeULL

static inline int lustre_handle_is_used(struct lustre_handle *lh)
{
	return lh->cookie != 0ull;
}

static inline int lustre_handle_equal(const struct lustre_handle *lh1,
				      const struct lustre_handle *lh2)
{
	return lh1->cookie == lh2->cookie;
}

static inline void lustre_handle_copy(struct lustre_handle *tgt,
				      struct lustre_handle *src)
{
	tgt->cookie = src->cookie;
}

/* flags for lm_flags */
#define MSGHDR_AT_SUPPORT		0x1
#define MSGHDR_CKSUM_INCOMPAT18		0x2

#define lustre_msg lustre_msg_v2
/* we depend on this structure to be 8-byte aligned */
/* this type is only endian-adjusted in lustre_unpack_msg() */
struct lustre_msg_v2 {
	__u32 lm_bufcount;
	__u32 lm_secflvr;
	__u32 lm_magic;
	__u32 lm_repsize;
	__u32 lm_cksum;
	__u32 lm_flags;
	__u32 lm_padding_2;
	__u32 lm_padding_3;
	__u32 lm_buflens[0];
};

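/*
 * Sketch (illustrative approximation, not the exact in-tree helper) of
 * the magic check described at the top of this file: a receiver sees
 * either the native magic or its byte-swapped image in lm_magic, which
 * tells it whether the message needs swabbing before use.
 */
static inline int lustre_msg_swabbed_sketch(const struct lustre_msg_v2 *msg)
{
	return msg->lm_magic == LUSTRE_MSG_MAGIC_V2_SWABBED;
}
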
/* without gss, ptlrpc_body is put at the first buffer. */
#define PTLRPC_NUM_VERSIONS	4
#define JOBSTATS_JOBID_SIZE	32  /* 32 bytes string */
struct ptlrpc_body_v3 {
	struct lustre_handle pb_handle;
	__u32 pb_type;
	__u32 pb_version;
	__u32 pb_opc;
	__u32 pb_status;
	__u64 pb_last_xid;
	__u64 pb_last_seen;
	__u64 pb_last_committed;
	__u64 pb_transno;
	__u32 pb_flags;
	__u32 pb_op_flags;
	__u32 pb_conn_cnt;
	__u32 pb_timeout;  /* for req, the deadline, for rep, the service est */
	__u32 pb_service_time; /* for rep, actual service time */
	__u32 pb_limit;
	__u64 pb_slv;
	/* VBR: pre-versions */
	__u64 pb_pre_versions[PTLRPC_NUM_VERSIONS];
	/* padding for future needs */
	__u64 pb_padding[4];
	char  pb_jobid[JOBSTATS_JOBID_SIZE];
};

#define ptlrpc_body     ptlrpc_body_v3

struct ptlrpc_body_v2 {
	struct lustre_handle pb_handle;
	__u32 pb_type;
	__u32 pb_version;
	__u32 pb_opc;
	__u32 pb_status;
	__u64 pb_last_xid;
	__u64 pb_last_seen;
	__u64 pb_last_committed;
	__u64 pb_transno;
	__u32 pb_flags;
	__u32 pb_op_flags;
	__u32 pb_conn_cnt;
	__u32 pb_timeout;  /* for req, the deadline, for rep, the service est */
	__u32 pb_service_time; /* for rep, actual service time, also used for
				  net_latency of req */
	__u32 pb_limit;
	__u64 pb_slv;
	/* VBR: pre-versions */
	__u64 pb_pre_versions[PTLRPC_NUM_VERSIONS];
	/* padding for future needs */
	__u64 pb_padding[4];
};

void lustre_swab_ptlrpc_body(struct ptlrpc_body *pb);

/* message body offset for lustre_msg_v2 */
/* ptlrpc body offset in all request/reply messages */
#define MSG_PTLRPC_BODY_OFF	0

/* normal request/reply message record offset */
#define REQ_REC_OFF		1
#define REPLY_REC_OFF		1

/* ldlm request message body offset */
#define DLM_LOCKREQ_OFF		1 /* lockreq offset */
#define DLM_REQ_REC_OFF		2 /* normal dlm request record offset */

/* ldlm intent lock message body offset */
#define DLM_INTENT_IT_OFF	2 /* intent lock it offset */
#define DLM_INTENT_REC_OFF	3 /* intent lock record offset */

/* ldlm reply message body offset */
#define DLM_LOCKREPLY_OFF	1 /* lockrep offset */
#define DLM_REPLY_REC_OFF	2 /* reply record offset */

/** only use in req->rq_{req,rep}_swab_mask */
#define MSG_PTLRPC_HEADER_OFF	31

/* Flags that are operation-specific go in the top 16 bits. */
#define MSG_OP_FLAG_MASK   0xffff0000
#define MSG_OP_FLAG_SHIFT  16

/* Flags that apply to all requests are in the bottom 16 bits */
#define MSG_GEN_FLAG_MASK	0x0000ffff
#define MSG_LAST_REPLAY		0x0001
#define MSG_RESENT		0x0002
#define MSG_REPLAY		0x0004
/* #define MSG_AT_SUPPORT	0x0008
 * This was used in early prototypes of adaptive timeouts, and while there
 * shouldn't be any users of that code there also isn't a need for using
 * these bits. Defer usage until at least 1.10 to avoid potential conflict. */
#define MSG_DELAY_REPLAY	0x0010
#define MSG_VERSION_REPLAY	0x0020
#define MSG_REQ_REPLAY_DONE	0x0040
#define MSG_LOCK_REPLAY_DONE	0x0080

/*
 * Flags for all connect opcodes (MDS_CONNECT, OST_CONNECT)
 */

#define MSG_CONNECT_RECOVERING	0x00000001
#define MSG_CONNECT_RECONNECT	0x00000002
#define MSG_CONNECT_REPLAYABLE	0x00000004
//#define MSG_CONNECT_PEER	0x8
#define MSG_CONNECT_LIBCLIENT	0x00000010
#define MSG_CONNECT_INITIAL	0x00000020
#define MSG_CONNECT_ASYNC	0x00000040
#define MSG_CONNECT_NEXT_VER	0x00000080 /* use next version of lustre_msg */
#define MSG_CONNECT_TRANSNO	0x00000100 /* report transno */

/* Connect flags */
#define OBD_CONNECT_RDONLY		  0x1ULL /*client has read-only access*/
#define OBD_CONNECT_INDEX		  0x2ULL /*connect specific LOV idx */
#define OBD_CONNECT_MDS			  0x4ULL /*connect from MDT to OST */
#define OBD_CONNECT_GRANT		  0x8ULL /*OSC gets grant at connect */
#define OBD_CONNECT_SRVLOCK		 0x10ULL /*server takes locks for cli */
#define OBD_CONNECT_VERSION		 0x20ULL /*Lustre versions in ocd */
#define OBD_CONNECT_REQPORTAL		 0x40ULL /*Separate non-IO req portal */
#define OBD_CONNECT_ACL			 0x80ULL /*access control lists */
#define OBD_CONNECT_XATTR		0x100ULL /*client use extended attr */
#define OBD_CONNECT_CROW		0x200ULL /*MDS+OST create obj on write*/
#define OBD_CONNECT_TRUNCLOCK		0x400ULL /*locks on server for punch */
#define OBD_CONNECT_TRANSNO		0x800ULL /*replay sends init transno */
#define OBD_CONNECT_IBITS	       0x1000ULL /*support for inodebits locks*/
#define OBD_CONNECT_JOIN	       0x2000ULL /*files can be concatenated.
						  *We do not support JOIN FILE
						  *anymore, reserve this flags
						  *just for preventing such bit
						  *to be reused.*/
#define OBD_CONNECT_ATTRFID	       0x4000ULL /*Server can GetAttr By Fid*/
#define OBD_CONNECT_NODEVOH	       0x8000ULL /*No open hndl on specl nodes*/
#define OBD_CONNECT_RMT_CLIENT	      0x10000ULL /*Remote client */
#define OBD_CONNECT_RMT_CLIENT_FORCE  0x20000ULL /*Remote client by force */
#define OBD_CONNECT_BRW_SIZE	      0x40000ULL /*Max bytes per rpc */
#define OBD_CONNECT_QUOTA64	      0x80000ULL /*Not used since 2.4 */
#define OBD_CONNECT_MDS_CAPA	     0x100000ULL /*MDS capability */
#define OBD_CONNECT_OSS_CAPA	     0x200000ULL /*OSS capability */
#define OBD_CONNECT_CANCELSET	     0x400000ULL /*Early batched cancels. */
#define OBD_CONNECT_SOM		     0x800000ULL /*Size on MDS */
#define OBD_CONNECT_AT		    0x1000000ULL /*client uses AT */
#define OBD_CONNECT_LRU_RESIZE	    0x2000000ULL /*LRU resize feature. */
#define OBD_CONNECT_MDS_MDS	    0x4000000ULL /*MDS-MDS connection */
#define OBD_CONNECT_REAL	    0x8000000ULL /*real connection */
#define OBD_CONNECT_CHANGE_QS	   0x10000000ULL /*Not used since 2.4 */
#define OBD_CONNECT_CKSUM	   0x20000000ULL /*support several cksum algos*/
#define OBD_CONNECT_FID		   0x40000000ULL /*FID is supported by server */
#define OBD_CONNECT_VBR		   0x80000000ULL /*version based recovery */
#define OBD_CONNECT_LOV_V3	  0x100000000ULL /*client supports LOV v3 EA */
#define OBD_CONNECT_GRANT_SHRINK  0x200000000ULL /* support grant shrink */
#define OBD_CONNECT_SKIP_ORPHAN   0x400000000ULL /* don't reuse orphan objids */
#define OBD_CONNECT_MAX_EASIZE    0x800000000ULL /* preserved for large EA */
#define OBD_CONNECT_FULL20       0x1000000000ULL /* it is 2.0 client */
#define OBD_CONNECT_LAYOUTLOCK   0x2000000000ULL /* client uses layout lock */
#define OBD_CONNECT_64BITHASH    0x4000000000ULL /* client supports 64-bits
						  * directory hash */
#define OBD_CONNECT_MAXBYTES     0x8000000000ULL /* max stripe size */
#define OBD_CONNECT_IMP_RECOV   0x10000000000ULL /* imp recovery support */
#define OBD_CONNECT_JOBSTATS    0x20000000000ULL /* jobid in ptlrpc_body */
#define OBD_CONNECT_UMASK       0x40000000000ULL /* create uses client umask */
#define OBD_CONNECT_EINPROGRESS 0x80000000000ULL /* client handles -EINPROGRESS
						  * RPC error properly */
#define OBD_CONNECT_GRANT_PARAM 0x100000000000ULL/* extra grant params used for
						  * finer space reservation */
#define OBD_CONNECT_FLOCK_OWNER 0x200000000000ULL /* for the fixed 1.8
						   * policy and 2.x server */
#define OBD_CONNECT_LVB_TYPE	0x400000000000ULL /* variable type of LVB */
#define OBD_CONNECT_NANOSEC_TIME 0x800000000000ULL /* nanosecond timestamps */
#define OBD_CONNECT_LIGHTWEIGHT 0x1000000000000ULL/* lightweight connection */
#define OBD_CONNECT_SHORTIO     0x2000000000000ULL/* short io */
#define OBD_CONNECT_PINGLESS	0x4000000000000ULL/* pings not required */
#define OBD_CONNECT_FLOCK_DEAD	0x8000000000000ULL/* flock deadlock detection */
#define OBD_CONNECT_DISP_STRIPE 0x10000000000000ULL/*create stripe disposition*/

/* XXX README XXX:
 * Please DO NOT add flag values here before first ensuring that this same
 * flag value is not in use on some other branch.  Please clear any such
 * changes with senior engineers before starting to use a new flag.  Then,
 * submit a small patch against EVERY branch that ONLY adds the new flag,
 * updates obd_connect_names[] for lprocfs_rd_connect_flags(), adds the
 * flag to check_obd_connect_data(), and updates wiretests accordingly, so it
 * can be approved and landed easily to reserve the flag for future use. */

/* The MNE_SWAB flag is overloading the MDS_MDS bit only for the MGS
 * connection.  It is a temporary bug fix for Imperative Recovery interop
 * between 2.2 and 2.3 x86/ppc nodes, and can be removed when interop for
 * 2.2 clients/servers is no longer needed.  LU-1252/LU-1644. */
#define OBD_CONNECT_MNE_SWAB		OBD_CONNECT_MDS_MDS

#define OCD_HAS_FLAG(ocd, flg)  \
	(!!((ocd)->ocd_connect_flags & OBD_CONNECT_##flg))

#define LRU_RESIZE_CONNECT_FLAG OBD_CONNECT_LRU_RESIZE

#define MDT_CONNECT_SUPPORTED  (OBD_CONNECT_RDONLY | OBD_CONNECT_VERSION | \
				OBD_CONNECT_ACL | OBD_CONNECT_XATTR | \
				OBD_CONNECT_IBITS | \
				OBD_CONNECT_NODEVOH | OBD_CONNECT_ATTRFID | \
				OBD_CONNECT_CANCELSET | OBD_CONNECT_AT | \
				OBD_CONNECT_RMT_CLIENT | \
				OBD_CONNECT_RMT_CLIENT_FORCE | \
				OBD_CONNECT_BRW_SIZE | OBD_CONNECT_MDS_CAPA | \
				OBD_CONNECT_OSS_CAPA | OBD_CONNECT_MDS_MDS | \
				OBD_CONNECT_FID | LRU_RESIZE_CONNECT_FLAG | \
				OBD_CONNECT_VBR | OBD_CONNECT_LOV_V3 | \
				OBD_CONNECT_SOM | OBD_CONNECT_FULL20 | \
				OBD_CONNECT_64BITHASH | OBD_CONNECT_JOBSTATS | \
				OBD_CONNECT_EINPROGRESS | \
				OBD_CONNECT_LIGHTWEIGHT | OBD_CONNECT_UMASK | \
				OBD_CONNECT_LVB_TYPE | OBD_CONNECT_LAYOUTLOCK |\
				OBD_CONNECT_PINGLESS | OBD_CONNECT_MAX_EASIZE |\
				OBD_CONNECT_FLOCK_DEAD | \
				OBD_CONNECT_DISP_STRIPE)

#define OST_CONNECT_SUPPORTED  (OBD_CONNECT_SRVLOCK | OBD_CONNECT_GRANT | \
				OBD_CONNECT_REQPORTAL | OBD_CONNECT_VERSION | \
				OBD_CONNECT_TRUNCLOCK | OBD_CONNECT_INDEX | \
				OBD_CONNECT_BRW_SIZE | OBD_CONNECT_OSS_CAPA | \
				OBD_CONNECT_CANCELSET | OBD_CONNECT_AT | \
				LRU_RESIZE_CONNECT_FLAG | OBD_CONNECT_CKSUM | \
				OBD_CONNECT_RMT_CLIENT | \
				OBD_CONNECT_RMT_CLIENT_FORCE | OBD_CONNECT_VBR | \
				OBD_CONNECT_MDS | OBD_CONNECT_SKIP_ORPHAN | \
				OBD_CONNECT_GRANT_SHRINK | OBD_CONNECT_FULL20 | \
				OBD_CONNECT_64BITHASH | OBD_CONNECT_MAXBYTES | \
				OBD_CONNECT_MAX_EASIZE | \
				OBD_CONNECT_EINPROGRESS | \
				OBD_CONNECT_JOBSTATS | \
				OBD_CONNECT_LIGHTWEIGHT | OBD_CONNECT_LVB_TYPE|\
				OBD_CONNECT_LAYOUTLOCK | OBD_CONNECT_FID | \
				OBD_CONNECT_PINGLESS)
#define ECHO_CONNECT_SUPPORTED (0)
#define MGS_CONNECT_SUPPORTED  (OBD_CONNECT_VERSION | OBD_CONNECT_AT | \
				OBD_CONNECT_FULL20 | OBD_CONNECT_IMP_RECOV | \
				OBD_CONNECT_MNE_SWAB | OBD_CONNECT_PINGLESS)

/* Features required for this version of the client to work with server */
#define CLIENT_CONNECT_MDT_REQD (OBD_CONNECT_IBITS | OBD_CONNECT_FID | \
				 OBD_CONNECT_FULL20)

#define OBD_OCD_VERSION(major, minor, patch, fix) (((major)<<24) + \
						   ((minor)<<16) + \
						   ((patch)<<8) + (fix))
#define OBD_OCD_VERSION_MAJOR(version) ((int)((version)>>24)&255)
#define OBD_OCD_VERSION_MINOR(version) ((int)((version)>>16)&255)
#define OBD_OCD_VERSION_PATCH(version) ((int)((version)>>8)&255)
#define OBD_OCD_VERSION_FIX(version)   ((int)(version)&255)

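/*
 * Worked example: OBD_OCD_VERSION(2, 5, 3, 0) encodes as
 * (2 << 24) + (5 << 16) + (3 << 8) + 0 = 0x02050300, and the accessor
 * macros recover 2, 5, 3 and 0 respectively.
 */
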
/* This structure is used for both request and reply.
 *
 * If we eventually have separate connect data for different types, which we
 * almost certainly will, then perhaps we stick a union in here. */
struct obd_connect_data_v1 {
	__u64 ocd_connect_flags; /* OBD_CONNECT_* per above */
	__u32 ocd_version;	 /* lustre release version number */
	__u32 ocd_grant;	 /* initial cache grant amount (bytes) */
	__u32 ocd_index;	 /* LOV index to connect to */
	__u32 ocd_brw_size;	 /* Maximum BRW size in bytes, must be 2^n */
	__u64 ocd_ibits_known;   /* inode bits this client understands */
	__u8  ocd_blocksize;     /* log2 of the backend filesystem blocksize */
	__u8  ocd_inodespace;    /* log2 of the per-inode space consumption */
	__u16 ocd_grant_extent;  /* per-extent grant overhead, in 1K blocks */
	__u32 ocd_unused;	 /* also fix lustre_swab_connect */
	__u64 ocd_transno;       /* first transno from client to be replayed */
	__u32 ocd_group;	 /* MDS group on OST */
	__u32 ocd_cksum_types;   /* supported checksum algorithms */
	__u32 ocd_max_easize;    /* How big LOV EA can be on MDS */
	__u32 ocd_instance;      /* also fix lustre_swab_connect */
	__u64 ocd_maxbytes;      /* Maximum stripe size in bytes */
};

struct obd_connect_data {
	__u64 ocd_connect_flags; /* OBD_CONNECT_* per above */
	__u32 ocd_version;	 /* lustre release version number */
	__u32 ocd_grant;	 /* initial cache grant amount (bytes) */
	__u32 ocd_index;	 /* LOV index to connect to */
	__u32 ocd_brw_size;	 /* Maximum BRW size in bytes */
	__u64 ocd_ibits_known;   /* inode bits this client understands */
	__u8  ocd_blocksize;     /* log2 of the backend filesystem blocksize */
	__u8  ocd_inodespace;    /* log2 of the per-inode space consumption */
	__u16 ocd_grant_extent;  /* per-extent grant overhead, in 1K blocks */
	__u32 ocd_unused;	 /* also fix lustre_swab_connect */
	__u64 ocd_transno;       /* first transno from client to be replayed */
	__u32 ocd_group;	 /* MDS group on OST */
	__u32 ocd_cksum_types;   /* supported checksum algorithms */
	__u32 ocd_max_easize;    /* How big LOV EA can be on MDS */
	__u32 ocd_instance;      /* instance # of this target */
	__u64 ocd_maxbytes;      /* Maximum stripe size in bytes */
	/* Fields after ocd_maxbytes are only accessible by the receiver
	 * if the corresponding flag in ocd_connect_flags is set. Accessing
	 * any field after ocd_maxbytes on the receiver without a valid flag
	 * may result in out-of-bound memory access and kernel oops. */
	__u64 padding1;		 /* added 2.1.0. also fix lustre_swab_connect */
	__u64 padding2;		 /* added 2.1.0. also fix lustre_swab_connect */
	__u64 padding3;		 /* added 2.1.0. also fix lustre_swab_connect */
	__u64 padding4;		 /* added 2.1.0. also fix lustre_swab_connect */
	__u64 padding5;		 /* added 2.1.0. also fix lustre_swab_connect */
	__u64 padding6;		 /* added 2.1.0. also fix lustre_swab_connect */
	__u64 padding7;		 /* added 2.1.0. also fix lustre_swab_connect */
	__u64 padding8;		 /* added 2.1.0. also fix lustre_swab_connect */
	__u64 padding9;		 /* added 2.1.0. also fix lustre_swab_connect */
	__u64 paddingA;		 /* added 2.1.0. also fix lustre_swab_connect */
	__u64 paddingB;		 /* added 2.1.0. also fix lustre_swab_connect */
	__u64 paddingC;		 /* added 2.1.0. also fix lustre_swab_connect */
	__u64 paddingD;		 /* added 2.1.0. also fix lustre_swab_connect */
	__u64 paddingE;		 /* added 2.1.0. also fix lustre_swab_connect */
	__u64 paddingF;		 /* added 2.1.0. also fix lustre_swab_connect */
};

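/*
 * Usage sketch (illustration only, function name hypothetical): testing
 * a negotiated feature on a filled-in obd_connect_data with the
 * OCD_HAS_FLAG() macro defined above, e.g. whether the peer granted
 * cache space at connect time.
 */
static inline int ocd_grants_example(const struct obd_connect_data *ocd)
{
	return OCD_HAS_FLAG(ocd, GRANT);	/* OBD_CONNECT_GRANT set? */
}
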
/* XXX README XXX:
 * Please DO NOT use any fields here before first ensuring that this same
 * field is not in use on some other branch.  Please clear any such changes
 * with senior engineers before starting to use a new field.  Then, submit
 * a small patch against EVERY branch that ONLY adds the new field along with
 * the matching OBD_CONNECT flag, so that can be approved and landed easily to
 * reserve the flag for future use. */

void lustre_swab_connect(struct obd_connect_data *ocd);

/*
 * Supported checksum algorithms. Up to 32 checksum types are supported.
 * (32-bit mask stored in obd_connect_data::ocd_cksum_types)
 * Please update DECLARE_CKSUM_NAME/OBD_CKSUM_ALL in obd.h when adding a new
 * algorithm and also the OBD_FL_CKSUM* flags.
 */
typedef enum {
	OBD_CKSUM_CRC32  = 0x00000001,
	OBD_CKSUM_ADLER  = 0x00000002,
	OBD_CKSUM_CRC32C = 0x00000004,
} cksum_type_t;

/*
 *   OST requests: OBDO & OBD request records
 */

/* opcodes */
typedef enum {
	OST_REPLY      =  0,       /* reply ? */
	OST_GETATTR    =  1,
	OST_SETATTR    =  2,
	OST_READ       =  3,
	OST_WRITE      =  4,
	OST_CREATE     =  5,
	OST_DESTROY    =  6,
	OST_GET_INFO   =  7,
	OST_CONNECT    =  8,
	OST_DISCONNECT =  9,
	OST_PUNCH      = 10,
	OST_OPEN       = 11,
	OST_CLOSE      = 12,
	OST_STATFS     = 13,
	OST_SYNC       = 16,
	OST_SET_INFO   = 17,
	OST_QUOTACHECK = 18,
	OST_QUOTACTL   = 19,
	OST_QUOTA_ADJUST_QUNIT = 20, /* not used since 2.4 */
	OST_LAST_OPC
} ost_cmd_t;
#define OST_FIRST_OPC  OST_REPLY

enum obdo_flags {
	OBD_FL_INLINEDATA   = 0x00000001,
	OBD_FL_OBDMDEXISTS  = 0x00000002,
	OBD_FL_DELORPHAN    = 0x00000004, /* if set in o_flags delete orphans */
	OBD_FL_NORPC	    = 0x00000008, /* set in o_flags do in OSC not OST */
	OBD_FL_IDONLY       = 0x00000010, /* set in o_flags only adjust obj id*/
	OBD_FL_RECREATE_OBJS = 0x00000020, /* recreate missing obj */
	OBD_FL_DEBUG_CHECK  = 0x00000040, /* echo client/server debug check */
	OBD_FL_NO_USRQUOTA  = 0x00000100, /* the object's owner is over quota */
	OBD_FL_NO_GRPQUOTA  = 0x00000200, /* the object's group is over quota */
	OBD_FL_CREATE_CROW  = 0x00000400, /* object should be create on write */
	OBD_FL_SRVLOCK      = 0x00000800, /* delegate DLM locking to server */
	OBD_FL_CKSUM_CRC32  = 0x00001000, /* CRC32 checksum type */
	OBD_FL_CKSUM_ADLER  = 0x00002000, /* ADLER checksum type */
	OBD_FL_CKSUM_CRC32C = 0x00004000, /* CRC32C checksum type */
	OBD_FL_CKSUM_RSVD2  = 0x00008000, /* for future cksum types */
	OBD_FL_CKSUM_RSVD3  = 0x00010000, /* for future cksum types */
	OBD_FL_SHRINK_GRANT = 0x00020000, /* object shrink the grant */
	OBD_FL_MMAP	    = 0x00040000, /* object is mmapped on the client.
					   * XXX: obsoleted - reserved for old
					   * clients prior than 2.2 */
	OBD_FL_RECOV_RESEND = 0x00080000, /* recoverable resent */
	OBD_FL_NOSPC_BLK    = 0x00100000, /* no more block space on OST */

	/* Note that while these checksum values are currently separate bits,
	 * in 2.x we can actually allow all values from 1-31 if we wanted. */
	OBD_FL_CKSUM_ALL    = OBD_FL_CKSUM_CRC32 | OBD_FL_CKSUM_ADLER |
			      OBD_FL_CKSUM_CRC32C,

	/* mask for local-only flag, which won't be sent over network */
	OBD_FL_LOCAL_MASK   = 0xF0000000,
};

#define LOV_MAGIC_V1      0x0BD10BD0
#define LOV_MAGIC	  LOV_MAGIC_V1
#define LOV_MAGIC_JOIN_V1 0x0BD20BD0
#define LOV_MAGIC_V3      0x0BD30BD0

1484 /*
1485  * Magic for fully defined striping.
1486  * The idea is that we should have different magics for striping "hints"
1487  * (struct lov_user_md_v[13]) and fully defined, ready-to-use striping
1488  * (struct lov_mds_md_v[13]).  The magics are currently part of the wire
1489  * protocol, so we cannot change them without lengthy preparation, but we
1490  * still need a mechanism to let LOD differentiate hint striping from
1491  * ready striping.  So, for the moment, we use a trick: the MDT knows what
1492  * to expect from a request depending on the case (replay uses ready
1493  * striping, a non-replay request uses hints), so the MDT replaces the
1494  * magic with the appropriate one and LOD can easily tell what is inside. -bzzz
1495  */
1496 #define LOV_MAGIC_V1_DEF  0x0CD10BD0
1497 #define LOV_MAGIC_V3_DEF  0x0CD30BD0
1498 
1499 #define LOV_PATTERN_RAID0	0x001   /* stripes are used round-robin */
1500 #define LOV_PATTERN_RAID1	0x002   /* stripes are mirrors of each other */
1501 #define LOV_PATTERN_FIRST	0x100   /* first stripe is not in round-robin */
1502 #define LOV_PATTERN_CMOBD	0x200
1503 
1504 #define LOV_PATTERN_F_MASK	0xffff0000
1505 #define LOV_PATTERN_F_RELEASED	0x80000000 /* HSM released file */
1506 
1507 #define lov_pattern(pattern)		(pattern & ~LOV_PATTERN_F_MASK)
1508 #define lov_pattern_flags(pattern)	(pattern & LOV_PATTERN_F_MASK)
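
/* Illustrative sketch (not part of the original header): the pattern word
 * mixes a pattern value in the low bits with flag bits under
 * LOV_PATTERN_F_MASK, so both halves can be recovered independently.
 * lov_pattern_is_released() is a hypothetical helper: */
static inline int lov_pattern_is_released(__u32 pattern)
{
	/* e.g. for (LOV_PATTERN_RAID0 | LOV_PATTERN_F_RELEASED), lov_pattern()
	 * yields LOV_PATTERN_RAID0 and this function returns non-zero. */
	return (lov_pattern_flags(pattern) & LOV_PATTERN_F_RELEASED) != 0;
}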
1509 
1510 #define lov_ost_data lov_ost_data_v1
1511 struct lov_ost_data_v1 {	  /* per-stripe data structure (little-endian)*/
1512 	struct ost_id l_ost_oi;	  /* OST object ID */
1513 	__u32 l_ost_gen;	  /* generation of this l_ost_idx */
1514 	__u32 l_ost_idx;	  /* OST index in LOV (lov_tgt_desc->tgts) */
1515 };
1516 
1517 #define lov_mds_md lov_mds_md_v1
1518 struct lov_mds_md_v1 {	    /* LOV EA mds/wire data (little-endian) */
1519 	__u32 lmm_magic;	  /* magic number = LOV_MAGIC_V1 */
1520 	__u32 lmm_pattern;	/* LOV_PATTERN_RAID0, LOV_PATTERN_RAID1 */
1521 	struct ost_id	lmm_oi;	  /* LOV object ID */
1522 	__u32 lmm_stripe_size;    /* size of stripe in bytes */
1523 	/* lmm_stripe_count used to be __u32 */
1524 	__u16 lmm_stripe_count;   /* num stripes in use for this object */
1525 	__u16 lmm_layout_gen;     /* layout generation number */
1526 	struct lov_ost_data_v1 lmm_objects[0]; /* per-stripe data */
1527 };
1528 
1529 /**
1530  * Sigh, because pre-2.4 uses
1531  * struct lov_mds_md_v1 {
1532  *	........
1533  *	__u64 lmm_object_id;
1534  *	__u64 lmm_object_seq;
1535  *      ......
1536  *      }
1537  * to identify the LOV(MDT) object, and lmm_object_seq will be a normal
1538  * FID sequence, which makes it hard to fold these conversions into the
1539  * generic ostid-to-FID path, so we do the lmm_oi/FID conversion separately.
1540  *
1541  * The lmm_oi format can be told apart as follows:
1542  * 1.8: lmm_object_id = {inode}, lmm_object_gr = 0
1543  * 2.1: lmm_object_id = {oid < 128k}, lmm_object_seq = FID_SEQ_NORMAL
1544  * 2.4: lmm_oi.f_seq = FID_SEQ_NORMAL, lmm_oi.f_oid = {oid < 128k},
1545  *      lmm_oi.f_ver = 0
1546  *
1547  * But currently lmm_oi/lsm_oi has no "real" users except for printing
1548  * some information, the user can always get the real FID from the LMA,
1549  * and this multiple-case check would make swabbing more complicated.
1550  * So we keep using id/seq for lmm_oi.
1551  */
1552 
1553 static inline void fid_to_lmm_oi(const struct lu_fid *fid,
1554 				 struct ost_id *oi)
1555 {
1556 	oi->oi.oi_id = fid_oid(fid);
1557 	oi->oi.oi_seq = fid_seq(fid);
1558 }
1559 
1560 static inline void lmm_oi_set_seq(struct ost_id *oi, __u64 seq)
1561 {
1562 	oi->oi.oi_seq = seq;
1563 }
1564 
1565 static inline __u64 lmm_oi_id(struct ost_id *oi)
1566 {
1567 	return oi->oi.oi_id;
1568 }
1569 
1570 static inline __u64 lmm_oi_seq(struct ost_id *oi)
1571 {
1572 	return oi->oi.oi_seq;
1573 }
1574 
1575 static inline void lmm_oi_le_to_cpu(struct ost_id *dst_oi,
1576 				    struct ost_id *src_oi)
1577 {
1578 	dst_oi->oi.oi_id = le64_to_cpu(src_oi->oi.oi_id);
1579 	dst_oi->oi.oi_seq = le64_to_cpu(src_oi->oi.oi_seq);
1580 }
1581 
1582 static inline void lmm_oi_cpu_to_le(struct ost_id *dst_oi,
1583 				    struct ost_id *src_oi)
1584 {
1585 	dst_oi->oi.oi_id = cpu_to_le64(src_oi->oi.oi_id);
1586 	dst_oi->oi.oi_seq = cpu_to_le64(src_oi->oi.oi_seq);
1587 }
1588 
1589 /* extern void lustre_swab_lov_mds_md(struct lov_mds_md *llm); */
1590 
1591 #define MAX_MD_SIZE							\
1592 	(sizeof(struct lov_mds_md) + 4 * sizeof(struct lov_ost_data))
1593 #define MIN_MD_SIZE							\
1594 	(sizeof(struct lov_mds_md) + 1 * sizeof(struct lov_ost_data))
1595 
1596 #define XATTR_NAME_ACL_ACCESS   "system.posix_acl_access"
1597 #define XATTR_NAME_ACL_DEFAULT  "system.posix_acl_default"
1598 #define XATTR_USER_PREFIX       "user."
1599 #define XATTR_TRUSTED_PREFIX    "trusted."
1600 #define XATTR_SECURITY_PREFIX   "security."
1601 #define XATTR_LUSTRE_PREFIX     "lustre."
1602 
1603 #define XATTR_NAME_LOV	  "trusted.lov"
1604 #define XATTR_NAME_LMA	  "trusted.lma"
1605 #define XATTR_NAME_LMV	  "trusted.lmv"
1606 #define XATTR_NAME_LINK	 "trusted.link"
1607 #define XATTR_NAME_FID	  "trusted.fid"
1608 #define XATTR_NAME_VERSION      "trusted.version"
1609 #define XATTR_NAME_SOM		"trusted.som"
1610 #define XATTR_NAME_HSM		"trusted.hsm"
1611 #define XATTR_NAME_LFSCK_NAMESPACE "trusted.lfsck_namespace"
1612 
1613 struct lov_mds_md_v3 {	    /* LOV EA mds/wire data (little-endian) */
1614 	__u32 lmm_magic;	  /* magic number = LOV_MAGIC_V3 */
1615 	__u32 lmm_pattern;	/* LOV_PATTERN_RAID0, LOV_PATTERN_RAID1 */
1616 	struct ost_id	lmm_oi;	  /* LOV object ID */
1617 	__u32 lmm_stripe_size;    /* size of stripe in bytes */
1618 	/* lmm_stripe_count used to be __u32 */
1619 	__u16 lmm_stripe_count;   /* num stripes in use for this object */
1620 	__u16 lmm_layout_gen;     /* layout generation number */
1621 	char  lmm_pool_name[LOV_MAXPOOLNAME]; /* must be 32bit aligned */
1622 	struct lov_ost_data_v1 lmm_objects[0]; /* per-stripe data */
1623 };
1624 
1625 static inline __u32 lov_mds_md_size(__u16 stripes, __u32 lmm_magic)
1626 {
1627 	if (lmm_magic == LOV_MAGIC_V3)
1628 		return sizeof(struct lov_mds_md_v3) +
1629 				stripes * sizeof(struct lov_ost_data_v1);
1630 	else
1631 		return sizeof(struct lov_mds_md_v1) +
1632 				stripes * sizeof(struct lov_ost_data_v1);
1633 }
1634 
1635 static inline __u32
1636 lov_mds_md_max_stripe_count(size_t buf_size, __u32 lmm_magic)
1637 {
1638 	switch (lmm_magic) {
1639 	case LOV_MAGIC_V1: {
1640 		struct lov_mds_md_v1 lmm;
1641 
1642 		if (buf_size < sizeof(lmm))
1643 			return 0;
1644 
1645 		return (buf_size - sizeof(lmm)) / sizeof(lmm.lmm_objects[0]);
1646 	}
1647 	case LOV_MAGIC_V3: {
1648 		struct lov_mds_md_v3 lmm;
1649 
1650 		if (buf_size < sizeof(lmm))
1651 			return 0;
1652 
1653 		return (buf_size - sizeof(lmm)) / sizeof(lmm.lmm_objects[0]);
1654 	}
1655 	default:
1656 		return 0;
1657 	}
1658 }
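
/* Illustrative sketch (not part of the original header): the two helpers
 * above are inverses, so a buffer sized for N stripes decodes back to N.
 * lov_md_size_roundtrip_ok() is a hypothetical sanity check that holds for
 * LOV_MAGIC_V1/V3: */
static inline int lov_md_size_roundtrip_ok(__u16 stripes, __u32 lmm_magic)
{
	return lov_mds_md_max_stripe_count(lov_mds_md_size(stripes, lmm_magic),
					   lmm_magic) == stripes;
}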
1659 
1660 #define OBD_MD_FLID	(0x00000001ULL) /* object ID */
1661 #define OBD_MD_FLATIME     (0x00000002ULL) /* access time */
1662 #define OBD_MD_FLMTIME     (0x00000004ULL) /* data modification time */
1663 #define OBD_MD_FLCTIME     (0x00000008ULL) /* change time */
1664 #define OBD_MD_FLSIZE      (0x00000010ULL) /* size */
1665 #define OBD_MD_FLBLOCKS    (0x00000020ULL) /* allocated blocks count */
1666 #define OBD_MD_FLBLKSZ     (0x00000040ULL) /* block size */
1667 #define OBD_MD_FLMODE      (0x00000080ULL) /* access bits (mode & ~S_IFMT) */
1668 #define OBD_MD_FLTYPE      (0x00000100ULL) /* object type (mode & S_IFMT) */
1669 #define OBD_MD_FLUID       (0x00000200ULL) /* user ID */
1670 #define OBD_MD_FLGID       (0x00000400ULL) /* group ID */
1671 #define OBD_MD_FLFLAGS     (0x00000800ULL) /* flags word */
1672 #define OBD_MD_FLNLINK     (0x00002000ULL) /* link count */
1673 #define OBD_MD_FLGENER     (0x00004000ULL) /* generation number */
1674 /*#define OBD_MD_FLINLINE    (0x00008000ULL)  inline data. used until 1.6.5 */
1675 #define OBD_MD_FLRDEV      (0x00010000ULL) /* device number */
1676 #define OBD_MD_FLEASIZE    (0x00020000ULL) /* extended attribute data */
1677 #define OBD_MD_LINKNAME    (0x00040000ULL) /* symbolic link target */
1678 #define OBD_MD_FLHANDLE    (0x00080000ULL) /* file/lock handle */
1679 #define OBD_MD_FLCKSUM     (0x00100000ULL) /* bulk data checksum */
1680 #define OBD_MD_FLQOS       (0x00200000ULL) /* quality of service stats */
1681 /*#define OBD_MD_FLOSCOPQ    (0x00400000ULL) osc opaque data, never used */
1682 #define OBD_MD_FLCOOKIE    (0x00800000ULL) /* log cancellation cookie */
1683 #define OBD_MD_FLGROUP     (0x01000000ULL) /* group */
1684 #define OBD_MD_FLFID       (0x02000000ULL) /* ->ost write inline fid */
1685 #define OBD_MD_FLEPOCH     (0x04000000ULL) /* ->ost write with ioepoch */
1686 					   /* ->mds if epoch opens or closes */
1687 #define OBD_MD_FLGRANT     (0x08000000ULL) /* ost preallocation space grant */
1688 #define OBD_MD_FLDIREA     (0x10000000ULL) /* dir's extended attribute data */
1689 #define OBD_MD_FLUSRQUOTA  (0x20000000ULL) /* over quota flags sent from ost */
1690 #define OBD_MD_FLGRPQUOTA  (0x40000000ULL) /* over quota flags sent from ost */
1691 #define OBD_MD_FLMODEASIZE (0x80000000ULL) /* EA size will be changed */
1692 
1693 #define OBD_MD_MDS	 (0x0000000100000000ULL) /* where an inode lives on */
1694 #define OBD_MD_REINT       (0x0000000200000000ULL) /* reintegrate oa */
1695 #define OBD_MD_MEA	 (0x0000000400000000ULL) /* CMD split EA  */
1696 #define OBD_MD_TSTATE      (0x0000000800000000ULL) /* transient state field */
1697 
1698 #define OBD_MD_FLXATTR       (0x0000001000000000ULL) /* xattr */
1699 #define OBD_MD_FLXATTRLS     (0x0000002000000000ULL) /* xattr list */
1700 #define OBD_MD_FLXATTRRM     (0x0000004000000000ULL) /* xattr remove */
1701 #define OBD_MD_FLACL	 (0x0000008000000000ULL) /* ACL */
1702 #define OBD_MD_FLRMTPERM     (0x0000010000000000ULL) /* remote permission */
1703 #define OBD_MD_FLMDSCAPA     (0x0000020000000000ULL) /* MDS capability */
1704 #define OBD_MD_FLOSSCAPA     (0x0000040000000000ULL) /* OSS capability */
1705 #define OBD_MD_FLCKSPLIT     (0x0000080000000000ULL) /* Check split on server */
1706 #define OBD_MD_FLCROSSREF    (0x0000100000000000ULL) /* Cross-ref case */
1707 #define OBD_MD_FLGETATTRLOCK (0x0000200000000000ULL) /* Get IOEpoch attributes
1708 						      * under lock; for xattr
1709 						      * requests means the
1710 						      * client holds the lock */
1711 #define OBD_MD_FLOBJCOUNT    (0x0000400000000000ULL) /* for multiple destroy */
1712 
1713 #define OBD_MD_FLRMTLSETFACL (0x0001000000000000ULL) /* lfs lsetfacl case */
1714 #define OBD_MD_FLRMTLGETFACL (0x0002000000000000ULL) /* lfs lgetfacl case */
1715 #define OBD_MD_FLRMTRSETFACL (0x0004000000000000ULL) /* lfs rsetfacl case */
1716 #define OBD_MD_FLRMTRGETFACL (0x0008000000000000ULL) /* lfs rgetfacl case */
1717 
1718 #define OBD_MD_FLDATAVERSION (0x0010000000000000ULL) /* iversion sum */
1719 #define OBD_MD_FLRELEASED    (0x0020000000000000ULL) /* file released */
1720 
1721 #define OBD_MD_FLGETATTR (OBD_MD_FLID    | OBD_MD_FLATIME | OBD_MD_FLMTIME | \
1722 			  OBD_MD_FLCTIME | OBD_MD_FLSIZE  | OBD_MD_FLBLKSZ | \
1723 			  OBD_MD_FLMODE  | OBD_MD_FLTYPE  | OBD_MD_FLUID   | \
1724 			  OBD_MD_FLGID   | OBD_MD_FLFLAGS | OBD_MD_FLNLINK | \
1725 			  OBD_MD_FLGENER | OBD_MD_FLRDEV  | OBD_MD_FLGROUP)
1726 
1727 #define OBD_MD_FLXATTRALL (OBD_MD_FLXATTR | OBD_MD_FLXATTRLS)
1728 
1729 /* don't forget obdo_fid which is way down at the bottom so it can
1730  * come after the definition of llog_cookie */
1731 
1732 enum hss_valid {
1733 	HSS_SETMASK	= 0x01,
1734 	HSS_CLEARMASK	= 0x02,
1735 	HSS_ARCHIVE_ID	= 0x04,
1736 };
1737 
1738 struct hsm_state_set {
1739 	__u32	hss_valid;
1740 	__u32	hss_archive_id;
1741 	__u64	hss_setmask;
1742 	__u64	hss_clearmask;
1743 };
1744 
1745 void lustre_swab_hsm_user_state(struct hsm_user_state *hus);
1746 void lustre_swab_hsm_state_set(struct hsm_state_set *hss);
1747 
1748 void lustre_swab_obd_statfs(struct obd_statfs *os);
1749 
1750 /* ost_body.data values for OST_BRW */
1751 
1752 #define OBD_BRW_READ	    0x01
1753 #define OBD_BRW_WRITE	   0x02
1754 #define OBD_BRW_RWMASK	  (OBD_BRW_READ | OBD_BRW_WRITE)
1755 #define OBD_BRW_SYNC	    0x08 /* this page is a part of synchronous
1756 				      * transfer and is not accounted in
1757 				      * the grant. */
1758 #define OBD_BRW_CHECK	   0x10
1759 #define OBD_BRW_FROM_GRANT      0x20 /* the osc manages this under llite */
1760 #define OBD_BRW_GRANTED	 0x40 /* the ost manages this */
1761 #define OBD_BRW_NOCACHE	 0x80 /* this page is a part of non-cached IO */
1762 #define OBD_BRW_NOQUOTA	0x100
1763 #define OBD_BRW_SRVLOCK	0x200 /* Client holds no lock over this page */
1764 #define OBD_BRW_ASYNC	  0x400 /* Server may delay commit to disk */
1765 #define OBD_BRW_MEMALLOC       0x800 /* Client runs in the "kswapd" context */
1766 #define OBD_BRW_OVER_USRQUOTA 0x1000 /* Running out of user quota */
1767 #define OBD_BRW_OVER_GRPQUOTA 0x2000 /* Running out of group quota */
1768 
1769 #define OBD_OBJECT_EOF 0xffffffffffffffffULL
1770 
1771 #define OST_MIN_PRECREATE 32
1772 #define OST_MAX_PRECREATE 20000
1773 
1774 struct obd_ioobj {
1775 	struct ost_id	ioo_oid;	/* object ID, if multi-obj BRW */
1776 	__u32		ioo_max_brw;	/* low 16 bits were o_mode before 2.4,
1777 					 * now (PTLRPC_BULK_OPS_COUNT - 1) in
1778 					 * high 16 bits in 2.4 and later */
1779 	__u32		ioo_bufcnt;	/* number of niobufs for this object */
1780 };
1781 
1782 #define IOOBJ_MAX_BRW_BITS	16
1783 #define IOOBJ_TYPE_MASK		((1U << IOOBJ_MAX_BRW_BITS) - 1)
1784 #define ioobj_max_brw_get(ioo)	(((ioo)->ioo_max_brw >> IOOBJ_MAX_BRW_BITS) + 1)
1785 #define ioobj_max_brw_set(ioo, num)					\
1786 do { (ioo)->ioo_max_brw = ((num) - 1) << IOOBJ_MAX_BRW_BITS; } while (0)
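
/* Illustrative sketch (not part of the original header): the count is stored
 * biased by one, so a field left zero by a pre-2.4 client decodes as a
 * max_brw of 1.  ioobj_max_brw_demo() is hypothetical: */
static inline void ioobj_max_brw_demo(struct obd_ioobj *ioo)
{
	ioobj_max_brw_set(ioo, 8);	/* stores (8 - 1) << 16 */
	/* ioobj_max_brw_get(ioo) now yields 8 again; a zeroed ioo_max_brw
	 * would decode as 1. */
}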
1787 
1788 void lustre_swab_obd_ioobj(struct obd_ioobj *ioo);
1789 
1790 /* multiple of 8 bytes => can be used in an array */
1791 struct niobuf_remote {
1792 	__u64 offset;
1793 	__u32 len;
1794 	__u32 flags;
1795 };
1796 
1797 void lustre_swab_niobuf_remote(struct niobuf_remote *nbr);
1798 
1799 /* lock value block communicated between the filter and llite */
1800 
1801 /* OST_LVB_ERR_INIT is needed because the return code in rc is
1802  * negative, i.e. because ((MASK + rc) & MASK) != MASK. */
1803 #define OST_LVB_ERR_INIT 0xffbadbad80000000ULL
1804 #define OST_LVB_ERR_MASK 0xffbadbad00000000ULL
1805 #define OST_LVB_IS_ERR(blocks)					  \
1806 	((blocks & OST_LVB_ERR_MASK) == OST_LVB_ERR_MASK)
1807 #define OST_LVB_SET_ERR(blocks, rc)				     \
1808 	do { blocks = OST_LVB_ERR_INIT + rc; } while (0)
1809 #define OST_LVB_GET_ERR(blocks)    (int)(blocks - OST_LVB_ERR_INIT)
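
/* Illustrative sketch (not part of the original header): a negative errno
 * folded into lvb_blocks survives the round trip, because adding any 32-bit
 * negative rc leaves the high OST_LVB_ERR_MASK bits intact.
 * ost_lvb_err_roundtrip() is hypothetical: */
static inline int ost_lvb_err_roundtrip(int rc)	/* rc < 0, e.g. -28 (ENOSPC) */
{
	__u64 blocks;

	OST_LVB_SET_ERR(blocks, rc);	/* blocks = OST_LVB_ERR_INIT + rc */
	/* OST_LVB_IS_ERR(blocks) is true here */
	return OST_LVB_GET_ERR(blocks);	/* returns rc unchanged */
}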
1810 
1811 struct ost_lvb_v1 {
1812 	__u64		lvb_size;
1813 	__s64		lvb_mtime;
1814 	__s64		lvb_atime;
1815 	__s64		lvb_ctime;
1816 	__u64		lvb_blocks;
1817 };
1818 
1819 void lustre_swab_ost_lvb_v1(struct ost_lvb_v1 *lvb);
1820 
1821 struct ost_lvb {
1822 	__u64		lvb_size;
1823 	__s64		lvb_mtime;
1824 	__s64		lvb_atime;
1825 	__s64		lvb_ctime;
1826 	__u64		lvb_blocks;
1827 	__u32		lvb_mtime_ns;
1828 	__u32		lvb_atime_ns;
1829 	__u32		lvb_ctime_ns;
1830 	__u32		lvb_padding;
1831 };
1832 
1833 void lustre_swab_ost_lvb(struct ost_lvb *lvb);
1834 
1835 /*
1836  *   lquota data structures
1837  */
1838 
1839 #ifndef QUOTABLOCK_BITS
1840 #define QUOTABLOCK_BITS 10
1841 #endif
1842 
1843 #ifndef QUOTABLOCK_SIZE
1844 #define QUOTABLOCK_SIZE (1 << QUOTABLOCK_BITS)
1845 #endif
1846 
1847 #ifndef toqb
1848 #define toqb(x) (((x) + QUOTABLOCK_SIZE - 1) >> QUOTABLOCK_BITS)
1849 #endif
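
/* Worked example (not part of the original header): toqb() converts a byte
 * count to 1 KiB quota blocks, rounding up:
 *	toqb(0)    == 0
 *	toqb(1)    == 1
 *	toqb(1024) == 1
 *	toqb(1025) == 2
 */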
1850 
1851 /* The lquota_id structure is a union of all the possible identifier types that
1852  * can be used with quota; this includes:
1853  * - 64-bit user ID
1854  * - 64-bit group ID
1855  * - a FID which can be used for per-directory quota in the future */
1856 union lquota_id {
1857 	struct lu_fid	qid_fid; /* FID for per-directory quota */
1858 	__u64		qid_uid; /* user identifier */
1859 	__u64		qid_gid; /* group identifier */
1860 };
1861 
1862 /* quotactl management */
1863 struct obd_quotactl {
1864 	__u32			qc_cmd;
1865 	__u32			qc_type; /* see Q_* flag below */
1866 	__u32			qc_id;
1867 	__u32			qc_stat;
1868 	struct obd_dqinfo	qc_dqinfo;
1869 	struct obd_dqblk	qc_dqblk;
1870 };
1871 
1872 void lustre_swab_obd_quotactl(struct obd_quotactl *q);
1873 
1874 #define Q_QUOTACHECK	0x800100 /* deprecated as of 2.4 */
1875 #define Q_INITQUOTA	0x800101 /* deprecated as of 2.4  */
1876 #define Q_GETOINFO	0x800102 /* get obd quota info */
1877 #define Q_GETOQUOTA	0x800103 /* get obd quotas */
1878 #define Q_FINVALIDATE	0x800104 /* deprecated as of 2.4 */
1879 
1880 #define Q_COPY(out, in, member) (out)->member = (in)->member
1881 
1882 #define QCTL_COPY(out, in)		\
1883 do {					\
1884 	Q_COPY(out, in, qc_cmd);	\
1885 	Q_COPY(out, in, qc_type);	\
1886 	Q_COPY(out, in, qc_id);		\
1887 	Q_COPY(out, in, qc_stat);	\
1888 	Q_COPY(out, in, qc_dqinfo);	\
1889 	Q_COPY(out, in, qc_dqblk);	\
1890 } while (0)
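
/* Illustrative sketch (not part of the original header): QCTL_COPY() copies
 * exactly the six qc_* members and nothing else, e.g. when forwarding a
 * control request.  qctl_forward_demo() is hypothetical: */
static inline void qctl_forward_demo(struct obd_quotactl *dst,
				     const struct obd_quotactl *src)
{
	QCTL_COPY(dst, src);	/* member-wise copy via Q_COPY() */
}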
1891 
1892 /* Body of quota request used for quota acquire/release RPCs between quota
1893  * master (aka QMT) and slaves (aka QSD). */
1894 struct quota_body {
1895 	struct lu_fid	qb_fid;     /* FID of global index packing the pool ID
1896 				      * and type (data or metadata) as well as
1897 				      * the quota type (user or group). */
1898 	union lquota_id	qb_id;      /* uid or gid or directory FID */
1899 	__u32		qb_flags;   /* see below */
1900 	__u32		qb_padding;
1901 	__u64		qb_count;   /* acquire/release count (kbytes/inodes) */
1902 	__u64		qb_usage;   /* current slave usage (kbytes/inodes) */
1903 	__u64		qb_slv_ver; /* slave index file version */
1904 	struct lustre_handle	qb_lockh;     /* per-ID lock handle */
1905 	struct lustre_handle	qb_glb_lockh; /* global lock handle */
1906 	__u64		qb_padding1[4];
1907 };
1908 
1909 /* When the quota_body is used in the reply to the quota global intent
1910  * lock (IT_QUOTA_CONN), qb_fid contains the slave index file FID. */
1911 #define qb_slv_fid	qb_fid
1912 /* qb_usage is the current qunit (in kbytes/inodes) when quota_body is used in
1913  * quota reply */
1914 #define qb_qunit	qb_usage
1915 
1916 #define QUOTA_DQACQ_FL_ACQ	0x1  /* acquire quota */
1917 #define QUOTA_DQACQ_FL_PREACQ	0x2  /* pre-acquire */
1918 #define QUOTA_DQACQ_FL_REL	0x4  /* release quota */
1919 #define QUOTA_DQACQ_FL_REPORT	0x8  /* report usage */
1920 
1921 void lustre_swab_quota_body(struct quota_body *b);
1922 
1923 /* Quota types currently supported */
1924 enum {
1925 	LQUOTA_TYPE_USR	= 0x00, /* maps to USRQUOTA */
1926 	LQUOTA_TYPE_GRP	= 0x01, /* maps to GRPQUOTA */
1927 	LQUOTA_TYPE_MAX
1928 };
1929 
1930 /* There are 2 different resource types on which a quota limit can be enforced:
1931  * - inodes on the MDTs
1932  * - blocks on the OSTs */
1933 enum {
1934 	LQUOTA_RES_MD		= 0x01, /* skip 0 to avoid null oid in FID */
1935 	LQUOTA_RES_DT		= 0x02,
1936 	LQUOTA_LAST_RES,
1937 	LQUOTA_FIRST_RES	= LQUOTA_RES_MD
1938 };
1939 
1940 #define LQUOTA_NR_RES (LQUOTA_LAST_RES - LQUOTA_FIRST_RES + 1)
1941 
1942 /*
1943  * Space accounting support
1944  * Format of an accounting record, providing disk usage information for a given
1945  * user or group
1946  */
1947 struct lquota_acct_rec { /* 16 bytes */
1948 	__u64 bspace;  /* current space in use */
1949 	__u64 ispace;  /* current # inodes in use */
1950 };
1951 
1952 /*
1953  * Global quota index support
1954  * Format of a global record, providing global quota settings for a given quota
1955  * identifier
1956  */
1957 struct lquota_glb_rec { /* 32 bytes */
1958 	__u64 qbr_hardlimit; /* quota hard limit, in #inodes or kbytes */
1959 	__u64 qbr_softlimit; /* quota soft limit, in #inodes or kbytes */
1960 	__u64 qbr_time;      /* grace time, in seconds */
1961 	__u64 qbr_granted;   /* how much is granted to slaves, in #inodes or
1962 			      * kbytes */
1963 };
1964 
1965 /*
1966  * Slave index support
1967  * Format of a slave record, recording how much space is granted to a given
1968  * slave
1969  */
1970 struct lquota_slv_rec { /* 8 bytes */
1971 	__u64 qsr_granted; /* space granted to the slave for the key=ID,
1972 			    * in #inodes or kbytes */
1973 };
1974 
1975 /* Data structures associated with the quota locks */
1976 
1977 /* Glimpse descriptor used for the index & per-ID quota locks */
1978 struct ldlm_gl_lquota_desc {
1979 	union lquota_id	gl_id;    /* quota ID subject to the glimpse */
1980 	__u64		gl_flags; /* see LQUOTA_FL* below */
1981 	__u64		gl_ver;   /* new index version */
1982 	__u64		gl_hardlimit; /* new hardlimit or qunit value */
1983 	__u64		gl_softlimit; /* new softlimit */
1984 	__u64		gl_time;
1985 	__u64		gl_pad2;
1986 };
1987 
1988 #define gl_qunit	gl_hardlimit /* current qunit value used when
1989 				      * glimpsing per-ID quota locks */
1990 
1991 /* quota glimpse flags */
1992 #define LQUOTA_FL_EDQUOT 0x1 /* user/group out of quota space on QMT */
1993 
1994 /* LVB used with quota (global and per-ID) locks */
1995 struct lquota_lvb {
1996 	__u64	lvb_flags;	/* see LQUOTA_FL* above */
1997 	__u64	lvb_id_may_rel; /* space that might be released later */
1998 	__u64	lvb_id_rel;     /* space released by the slave for this ID */
1999 	__u64	lvb_id_qunit;   /* current qunit value */
2000 	__u64	lvb_pad1;
2001 };
2002 
2003 void lustre_swab_lquota_lvb(struct lquota_lvb *lvb);
2004 
2005 /* LVB used with global quota lock */
2006 #define lvb_glb_ver  lvb_id_may_rel /* current version of the global index */
2007 
2008 /* op codes */
2009 typedef enum {
2010 	QUOTA_DQACQ	= 601,
2011 	QUOTA_DQREL	= 602,
2012 	QUOTA_LAST_OPC
2013 } quota_cmd_t;
2014 #define QUOTA_FIRST_OPC	QUOTA_DQACQ
2015 
2016 /*
2017  *   MDS REQ RECORDS
2018  */
2019 
2020 /* opcodes */
2021 typedef enum {
2022 	MDS_GETATTR		= 33,
2023 	MDS_GETATTR_NAME	= 34,
2024 	MDS_CLOSE		= 35,
2025 	MDS_REINT		= 36,
2026 	MDS_READPAGE		= 37,
2027 	MDS_CONNECT		= 38,
2028 	MDS_DISCONNECT		= 39,
2029 	MDS_GETSTATUS		= 40,
2030 	MDS_STATFS		= 41,
2031 	MDS_PIN			= 42,
2032 	MDS_UNPIN		= 43,
2033 	MDS_SYNC		= 44,
2034 	MDS_DONE_WRITING	= 45,
2035 	MDS_SET_INFO		= 46,
2036 	MDS_QUOTACHECK		= 47,
2037 	MDS_QUOTACTL		= 48,
2038 	MDS_GETXATTR		= 49,
2039 	MDS_SETXATTR		= 50, /* obsolete, now it's MDS_REINT op */
2040 	MDS_WRITEPAGE		= 51,
2041 	MDS_IS_SUBDIR		= 52,
2042 	MDS_GET_INFO		= 53,
2043 	MDS_HSM_STATE_GET	= 54,
2044 	MDS_HSM_STATE_SET	= 55,
2045 	MDS_HSM_ACTION		= 56,
2046 	MDS_HSM_PROGRESS	= 57,
2047 	MDS_HSM_REQUEST		= 58,
2048 	MDS_HSM_CT_REGISTER	= 59,
2049 	MDS_HSM_CT_UNREGISTER	= 60,
2050 	MDS_SWAP_LAYOUTS	= 61,
2051 	MDS_LAST_OPC
2052 } mds_cmd_t;
2053 
2054 #define MDS_FIRST_OPC    MDS_GETATTR
2055 
2056 /* opcodes for object update */
2057 typedef enum {
2058 	UPDATE_OBJ	= 1000,
2059 	UPDATE_LAST_OPC
2060 } update_cmd_t;
2061 
2062 #define UPDATE_FIRST_OPC    UPDATE_OBJ
2063 
2064 /*
2065  * Do not exceed 63
2066  */
2067 
2068 typedef enum {
2069 	REINT_SETATTR  = 1,
2070 	REINT_CREATE   = 2,
2071 	REINT_LINK     = 3,
2072 	REINT_UNLINK   = 4,
2073 	REINT_RENAME   = 5,
2074 	REINT_OPEN     = 6,
2075 	REINT_SETXATTR = 7,
2076 	REINT_RMENTRY  = 8,
2077 //      REINT_WRITE    = 9,
2078 	REINT_MAX
2079 } mds_reint_t, mdt_reint_t;
2080 
2081 void lustre_swab_generic_32s(__u32 *val);
2082 
2083 /* the disposition of the intent outlines what was executed */
2084 #define DISP_IT_EXECD	0x00000001
2085 #define DISP_LOOKUP_EXECD    0x00000002
2086 #define DISP_LOOKUP_NEG      0x00000004
2087 #define DISP_LOOKUP_POS      0x00000008
2088 #define DISP_OPEN_CREATE     0x00000010
2089 #define DISP_OPEN_OPEN       0x00000020
2090 #define DISP_ENQ_COMPLETE    0x00400000		/* obsolete and unused */
2091 #define DISP_ENQ_OPEN_REF    0x00800000
2092 #define DISP_ENQ_CREATE_REF  0x01000000
2093 #define DISP_OPEN_LOCK       0x02000000
2094 #define DISP_OPEN_LEASE      0x04000000
2095 #define DISP_OPEN_STRIPE     0x08000000
2096 
2097 /* INODE LOCK PARTS */
2098 #define MDS_INODELOCK_LOOKUP 0x000001	/* For namespace, dentry, etc.; also
2099 					 * used to protect permissions (mode,
2100 					 * owner, group, etc.) before 2.4. */
2101 #define MDS_INODELOCK_UPDATE 0x000002	/* size, links, timestamps */
2102 #define MDS_INODELOCK_OPEN   0x000004	/* For opened files */
2103 #define MDS_INODELOCK_LAYOUT 0x000008	/* for layout */
2104 
2105 /* The PERM bit was added in 2.4; it is used to protect permissions (mode,
2106  * owner, group, ACL, etc.) separately from the LOOKUP lock, because for
2107  * remote directories (in DNE) these locks are granted by different MDTs
2108  * (different ldlm namespaces).
2109  *
2110  * For a local directory, the MDT always grants UPDATE_LOCK|PERM_LOCK together.
2111  * For a remote directory, the master MDT, where the remote directory is, will
2112  * grant UPDATE_LOCK|PERM_LOCK, and the remote MDT, where the name entry is,
2113  * will grant LOOKUP_LOCK. */
2114 #define MDS_INODELOCK_PERM   0x000010
2115 #define MDS_INODELOCK_XATTR  0x000020	/* extended attributes */
2116 
2117 #define MDS_INODELOCK_MAXSHIFT 5
2118 /* This FULL lock is useful to take on unlink sort of operations */
2119 #define MDS_INODELOCK_FULL ((1<<(MDS_INODELOCK_MAXSHIFT+1))-1)
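
/* Worked out (not part of the original header): with MAXSHIFT == 5,
 * MDS_INODELOCK_FULL == (1 << 6) - 1 == 0x3f, i.e. exactly
 * LOOKUP|UPDATE|OPEN|LAYOUT|PERM|XATTR -- every bit defined above. */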
2120 
2121 /* NOTE: until Lustre 1.8.7/2.1.1 the fid_ver() was packed into name[2],
2122  * but was moved into name[1] along with the OID to avoid consuming the
2123  * name[2,3] fields that need to be used for the quota id (also a FID). */
2124 enum {
2125 	LUSTRE_RES_ID_SEQ_OFF = 0,
2126 	LUSTRE_RES_ID_VER_OID_OFF = 1,
2127 	LUSTRE_RES_ID_WAS_VER_OFF = 2, /* see note above */
2128 	LUSTRE_RES_ID_QUOTA_SEQ_OFF = 2,
2129 	LUSTRE_RES_ID_QUOTA_VER_OID_OFF = 3,
2130 	LUSTRE_RES_ID_HSH_OFF = 3
2131 };
2132 
2133 #define MDS_STATUS_CONN 1
2134 #define MDS_STATUS_LOV 2
2135 
2136 /* mdt_thread_info.mti_flags. */
2137 enum md_op_flags {
2138 	/* The flag indicates that Size-on-MDS attributes have changed. */
2139 	MF_SOM_CHANGE	   = (1 << 0),
2140 	/* Flags indicating that an epoch opens or closes. */
2141 	MF_EPOCH_OPEN	   = (1 << 1),
2142 	MF_EPOCH_CLOSE	  = (1 << 2),
2143 	MF_MDC_CANCEL_FID1      = (1 << 3),
2144 	MF_MDC_CANCEL_FID2      = (1 << 4),
2145 	MF_MDC_CANCEL_FID3      = (1 << 5),
2146 	MF_MDC_CANCEL_FID4      = (1 << 6),
2147 	/* There is a pending attribute update. */
2148 	MF_SOM_AU	       = (1 << 7),
2149 	/* Cancel OST locks while getting OST attributes. */
2150 	MF_GETATTR_LOCK	 = (1 << 8),
2151 	MF_GET_MDT_IDX	  = (1 << 9),
2152 };
2153 
2154 #define MF_SOM_LOCAL_FLAGS (MF_SOM_CHANGE | MF_EPOCH_OPEN | MF_EPOCH_CLOSE)
2155 
2156 #define LUSTRE_BFLAG_UNCOMMITTED_WRITES   0x1
2157 
2158 /* these should be identical to their EXT4_*_FL counterparts, they are
2159  * redefined here only to avoid dragging in fs/ext4/ext4.h */
2160 #define LUSTRE_SYNC_FL	 0x00000008 /* Synchronous updates */
2161 #define LUSTRE_IMMUTABLE_FL    0x00000010 /* Immutable file */
2162 #define LUSTRE_APPEND_FL       0x00000020 /* writes to file may only append */
2163 #define LUSTRE_NOATIME_FL      0x00000080 /* do not update atime */
2164 #define LUSTRE_DIRSYNC_FL      0x00010000 /* dirsync behaviour (dir only) */
2165 
2166 /* Convert wire LUSTRE_*_FL to corresponding client local VFS S_* values
2167  * for the client inode i_flags.  The LUSTRE_*_FL are the Lustre wire
2168  * protocol equivalents of LDISKFS_*_FL values stored on disk, while
2169  * the S_* flags are kernel-internal values that change between kernel
2170  * versions.  These flags are set/cleared via FSFILT_IOC_{GET,SET}_FLAGS.
2171  * See b=16526 for a full history. */
2172 static inline int ll_ext_to_inode_flags(int flags)
2173 {
2174 	return (((flags & LUSTRE_SYNC_FL)      ? S_SYNC      : 0) |
2175 		((flags & LUSTRE_NOATIME_FL)   ? S_NOATIME   : 0) |
2176 		((flags & LUSTRE_APPEND_FL)    ? S_APPEND    : 0) |
2177 #if defined(S_DIRSYNC)
2178 		((flags & LUSTRE_DIRSYNC_FL)   ? S_DIRSYNC   : 0) |
2179 #endif
2180 		((flags & LUSTRE_IMMUTABLE_FL) ? S_IMMUTABLE : 0));
2181 }
2182 
2183 static inline int ll_inode_to_ext_flags(int iflags)
2184 {
2185 	return (((iflags & S_SYNC)      ? LUSTRE_SYNC_FL      : 0) |
2186 		((iflags & S_NOATIME)   ? LUSTRE_NOATIME_FL   : 0) |
2187 		((iflags & S_APPEND)    ? LUSTRE_APPEND_FL    : 0) |
2188 #if defined(S_DIRSYNC)
2189 		((iflags & S_DIRSYNC)   ? LUSTRE_DIRSYNC_FL   : 0) |
2190 #endif
2191 		((iflags & S_IMMUTABLE) ? LUSTRE_IMMUTABLE_FL : 0));
2192 }
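
/* Note (not part of the original header): restricted to the five
 * LUSTRE_*_FL bits above, the two converters are inverses, e.g.
 *	ll_inode_to_ext_flags(ll_ext_to_inode_flags(LUSTRE_APPEND_FL |
 *						    LUSTRE_NOATIME_FL))
 * yields (LUSTRE_APPEND_FL | LUSTRE_NOATIME_FL) again (assuming a kernel
 * where S_DIRSYNC is defined whenever LUSTRE_DIRSYNC_FL is in use). */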
2193 
2194 /* 64 possible states */
2195 enum md_transient_state {
2196 	MS_RESTORE	= (1 << 0),	/* restore is running */
2197 };
2198 
2199 struct mdt_body {
2200 	struct lu_fid  fid1;
2201 	struct lu_fid  fid2;
2202 	struct lustre_handle handle;
2203 	__u64	  valid;
2204 	__u64	  size;   /* Offset, in the case of MDS_READPAGE */
2205 	__s64	  mtime;
2206 	__s64	  atime;
2207 	__s64	  ctime;
2208 	__u64	  blocks; /* XID, in the case of MDS_READPAGE */
2209 	__u64	  ioepoch;
2210 	__u64	       t_state; /* transient file state defined in
2211 				 * enum md_transient_state
2212 				 * was "ino" until 2.4.0 */
2213 	__u32	  fsuid;
2214 	__u32	  fsgid;
2215 	__u32	  capability;
2216 	__u32	  mode;
2217 	__u32	  uid;
2218 	__u32	  gid;
2219 	__u32	  flags; /* from vfs for pin/unpin, LUSTRE_BFLAG close */
2220 	__u32	  rdev;
2221 	__u32	  nlink; /* #bytes to read in the case of MDS_READPAGE */
2222 	__u32	       unused2; /* was "generation" until 2.4.0 */
2223 	__u32	  suppgid;
2224 	__u32	  eadatasize;
2225 	__u32	  aclsize;
2226 	__u32	  max_mdsize;
2227 	__u32	  max_cookiesize;
2228 	__u32	  uid_h; /* high 32-bits of uid, for FUID */
2229 	__u32	  gid_h; /* high 32-bits of gid, for FUID */
2230 	__u32	  padding_5; /* also fix lustre_swab_mdt_body */
2231 	__u64	  padding_6;
2232 	__u64	  padding_7;
2233 	__u64	  padding_8;
2234 	__u64	  padding_9;
2235 	__u64	  padding_10;
2236 }; /* 216 */
2237 
2238 void lustre_swab_mdt_body(struct mdt_body *b);
2239 
2240 struct mdt_ioepoch {
2241 	struct lustre_handle handle;
2242 	__u64  ioepoch;
2243 	__u32  flags;
2244 	__u32  padding;
2245 };
2246 
2247 void lustre_swab_mdt_ioepoch(struct mdt_ioepoch *b);
2248 
2249 /* permissions for md_perm.mp_perm */
2250 enum {
2251 	CFS_SETUID_PERM = 0x01,
2252 	CFS_SETGID_PERM = 0x02,
2253 	CFS_SETGRP_PERM = 0x04,
2254 	CFS_RMTACL_PERM = 0x08,
2255 	CFS_RMTOWN_PERM = 0x10
2256 };
2257 
2258 /* inode access permission for a remote user; the inode info is omitted,
2259  * since the client already knows it. */
2260 struct mdt_remote_perm {
2261 	__u32	   rp_uid;
2262 	__u32	   rp_gid;
2263 	__u32	   rp_fsuid;
2264 	__u32	   rp_fsuid_h;
2265 	__u32	   rp_fsgid;
2266 	__u32	   rp_fsgid_h;
2267 	__u32	   rp_access_perm; /* MAY_READ/WRITE/EXEC */
2268 	__u32	   rp_padding;
2269 };
2270 
2271 void lustre_swab_mdt_remote_perm(struct mdt_remote_perm *p);
2272 
2273 struct mdt_rec_setattr {
2274 	__u32	   sa_opcode;
2275 	__u32	   sa_cap;
2276 	__u32	   sa_fsuid;
2277 	__u32	   sa_fsuid_h;
2278 	__u32	   sa_fsgid;
2279 	__u32	   sa_fsgid_h;
2280 	__u32	   sa_suppgid;
2281 	__u32	   sa_suppgid_h;
2282 	__u32	   sa_padding_1;
2283 	__u32	   sa_padding_1_h;
2284 	struct lu_fid   sa_fid;
2285 	__u64	   sa_valid;
2286 	__u32	   sa_uid;
2287 	__u32	   sa_gid;
2288 	__u64	   sa_size;
2289 	__u64	   sa_blocks;
2290 	__s64	   sa_mtime;
2291 	__s64	   sa_atime;
2292 	__s64	   sa_ctime;
2293 	__u32	   sa_attr_flags;
2294 	__u32	   sa_mode;
2295 	__u32	   sa_bias;      /* some operation flags */
2296 	__u32	   sa_padding_3;
2297 	__u32	   sa_padding_4;
2298 	__u32	   sa_padding_5;
2299 };
2300 
2301 void lustre_swab_mdt_rec_setattr(struct mdt_rec_setattr *sa);
2302 
2303 /*
2304  * Attribute flags used in mdt_rec_setattr::sa_valid.
2305  * The kernel's #defines for ATTR_* should not be used over the network
2306  * since the client and MDS may run different kernels (see bug 13828)
2307  * Therefore, we should only use MDS_ATTR_* attributes for sa_valid.
2308  */
2309 #define MDS_ATTR_MODE	  0x1ULL /* = 1 */
2310 #define MDS_ATTR_UID	   0x2ULL /* = 2 */
2311 #define MDS_ATTR_GID	   0x4ULL /* = 4 */
2312 #define MDS_ATTR_SIZE	  0x8ULL /* = 8 */
2313 #define MDS_ATTR_ATIME	0x10ULL /* = 16 */
2314 #define MDS_ATTR_MTIME	0x20ULL /* = 32 */
2315 #define MDS_ATTR_CTIME	0x40ULL /* = 64 */
2316 #define MDS_ATTR_ATIME_SET    0x80ULL /* = 128 */
2317 #define MDS_ATTR_MTIME_SET   0x100ULL /* = 256 */
2318 #define MDS_ATTR_FORCE       0x200ULL /* = 512, not a change itself, but force the change */
2319 #define MDS_ATTR_ATTR_FLAG   0x400ULL /* = 1024 */
2320 #define MDS_ATTR_KILL_SUID   0x800ULL /* = 2048 */
2321 #define MDS_ATTR_KILL_SGID  0x1000ULL /* = 4096 */
2322 #define MDS_ATTR_CTIME_SET  0x2000ULL /* = 8192 */
2323 #define MDS_ATTR_FROM_OPEN  0x4000ULL /* = 16384, called from open path, i.e. O_TRUNC */
2324 #define MDS_ATTR_BLOCKS     0x8000ULL /* = 32768 */
2325 
2326 #ifndef FMODE_READ
2327 #define FMODE_READ	       00000001
2328 #define FMODE_WRITE	      00000002
2329 #endif
2330 
2331 #define MDS_FMODE_CLOSED	 00000000
2332 #define MDS_FMODE_EXEC	   00000004
2333 /* IO Epoch is opened on a closed file. */
2334 #define MDS_FMODE_EPOCH	  01000000
2335 /* IO Epoch is opened on a file truncate. */
2336 #define MDS_FMODE_TRUNC	  02000000
2337 /* Size-on-MDS Attribute Update is pending. */
2338 #define MDS_FMODE_SOM	    04000000
2339 
2340 #define MDS_OPEN_CREATED	 00000010
2341 #define MDS_OPEN_CROSS	   00000020
2342 
2343 #define MDS_OPEN_CREAT	   00000100
2344 #define MDS_OPEN_EXCL	    00000200
2345 #define MDS_OPEN_TRUNC	   00001000
2346 #define MDS_OPEN_APPEND	  00002000
2347 #define MDS_OPEN_SYNC	    00010000
2348 #define MDS_OPEN_DIRECTORY       00200000
2349 
2350 #define MDS_OPEN_BY_FID		040000000 /* open_by_fid for known object */
2351 #define MDS_OPEN_DELAY_CREATE  0100000000 /* delay initial object create */
2352 #define MDS_OPEN_OWNEROVERRIDE 0200000000 /* NFSD rw-reopen ro file for owner */
2353 #define MDS_OPEN_JOIN_FILE     0400000000 /* open for join file.
2354 					   * We do not support JOIN FILE
2355 					   * anymore; this flag is reserved
2356 					   * just to prevent the bit from
2357 					   * being reused. */
2358 
2359 #define MDS_OPEN_LOCK	 04000000000 /* This open requires open lock */
2360 #define MDS_OPEN_HAS_EA      010000000000 /* specify object create pattern */
2361 #define MDS_OPEN_HAS_OBJS    020000000000 /* Just set the EA, the objects exist */
2362 #define MDS_OPEN_NORESTORE  0100000000000ULL /* Do not restore file at open */
2363 #define MDS_OPEN_NEWSTRIPE  0200000000000ULL /* New stripe needed (restripe or
2364 					      * hsm restore) */
2365 #define MDS_OPEN_VOLATILE   0400000000000ULL /* File is volatile = created
2366 						unlinked */
2367 #define MDS_OPEN_LEASE	   01000000000000ULL /* Open the file and grant lease
2368 					      * delegation, succeed if it's not
2369 					      * being opened with conflict mode.
2370 					      */
2371 #define MDS_OPEN_RELEASE   02000000000000ULL /* Open the file for HSM release */
2372 
2373 enum mds_op_bias {
2374 	MDS_CHECK_SPLIT		= 1 << 0,
2375 	MDS_CROSS_REF		= 1 << 1,
2376 	MDS_VTX_BYPASS		= 1 << 2,
2377 	MDS_PERM_BYPASS		= 1 << 3,
2378 	MDS_SOM			= 1 << 4,
2379 	MDS_QUOTA_IGNORE	= 1 << 5,
2380 	MDS_CLOSE_CLEANUP	= 1 << 6,
2381 	MDS_KEEP_ORPHAN		= 1 << 7,
2382 	MDS_RECOV_OPEN		= 1 << 8,
2383 	MDS_DATA_MODIFIED	= 1 << 9,
2384 	MDS_CREATE_VOLATILE	= 1 << 10,
2385 	MDS_OWNEROVERRIDE	= 1 << 11,
2386 	MDS_HSM_RELEASE		= 1 << 12,
2387 };
2388 
2389 /* instance of mdt_reint_rec */
2390 struct mdt_rec_create {
2391 	__u32	   cr_opcode;
2392 	__u32	   cr_cap;
2393 	__u32	   cr_fsuid;
2394 	__u32	   cr_fsuid_h;
2395 	__u32	   cr_fsgid;
2396 	__u32	   cr_fsgid_h;
2397 	__u32	   cr_suppgid1;
2398 	__u32	   cr_suppgid1_h;
2399 	__u32	   cr_suppgid2;
2400 	__u32	   cr_suppgid2_h;
2401 	struct lu_fid   cr_fid1;
2402 	struct lu_fid   cr_fid2;
2403 	struct lustre_handle cr_old_handle; /* handle in case of open replay */
2404 	__s64	   cr_time;
2405 	__u64	   cr_rdev;
2406 	__u64	   cr_ioepoch;
2407 	__u64	   cr_padding_1;   /* rr_blocks */
2408 	__u32	   cr_mode;
2409 	__u32	   cr_bias;
2410 	/* use of helpers set/get_mrc_cr_flags() is needed to access
2411 	 * 64 bits cr_flags [cr_flags_l, cr_flags_h], this is done to
2412 	 * extend cr_flags size without breaking 1.8 compat */
2413 	__u32	   cr_flags_l;     /* for use with open, low  32 bits  */
2414 	__u32	   cr_flags_h;     /* for use with open, high 32 bits */
2415 	__u32	   cr_umask;       /* umask for create */
2416 	__u32	   cr_padding_4;   /* rr_padding_4 */
2417 };
2418 
2419 static inline void set_mrc_cr_flags(struct mdt_rec_create *mrc, __u64 flags)
2420 {
2421 	mrc->cr_flags_l = (__u32)(flags & 0xFFFFFFFFUll);
2422 	mrc->cr_flags_h = (__u32)(flags >> 32);
2423 }
2424 
2425 static inline __u64 get_mrc_cr_flags(struct mdt_rec_create *mrc)
2426 {
2427 	return ((__u64)(mrc->cr_flags_l) | ((__u64)mrc->cr_flags_h << 32));
2428 }
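
/* Illustrative sketch (not part of the original header): the split keeps the
 * wire layout 1.8-compatible while still carrying 64 bits of open flags;
 * set followed by get is an identity.  The roundtrip check below is
 * hypothetical: */
static inline int mrc_cr_flags_roundtrip_ok(struct mdt_rec_create *mrc,
					    __u64 flags)
{
	set_mrc_cr_flags(mrc, flags);	/* low word -> cr_flags_l, high -> cr_flags_h */
	return get_mrc_cr_flags(mrc) == flags;
}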
2429 
2430 /* instance of mdt_reint_rec */
2431 struct mdt_rec_link {
2432 	__u32	   lk_opcode;
2433 	__u32	   lk_cap;
2434 	__u32	   lk_fsuid;
2435 	__u32	   lk_fsuid_h;
2436 	__u32	   lk_fsgid;
2437 	__u32	   lk_fsgid_h;
2438 	__u32	   lk_suppgid1;
2439 	__u32	   lk_suppgid1_h;
2440 	__u32	   lk_suppgid2;
2441 	__u32	   lk_suppgid2_h;
2442 	struct lu_fid   lk_fid1;
2443 	struct lu_fid   lk_fid2;
2444 	__s64	   lk_time;
2445 	__u64	   lk_padding_1;   /* rr_atime */
2446 	__u64	   lk_padding_2;   /* rr_ctime */
2447 	__u64	   lk_padding_3;   /* rr_size */
2448 	__u64	   lk_padding_4;   /* rr_blocks */
2449 	__u32	   lk_bias;
2450 	__u32	   lk_padding_5;   /* rr_mode */
2451 	__u32	   lk_padding_6;   /* rr_flags */
2452 	__u32	   lk_padding_7;   /* rr_padding_2 */
2453 	__u32	   lk_padding_8;   /* rr_padding_3 */
2454 	__u32	   lk_padding_9;   /* rr_padding_4 */
2455 };
2456 
2457 /* instance of mdt_reint_rec */
2458 struct mdt_rec_unlink {
2459 	__u32	   ul_opcode;
2460 	__u32	   ul_cap;
2461 	__u32	   ul_fsuid;
2462 	__u32	   ul_fsuid_h;
2463 	__u32	   ul_fsgid;
2464 	__u32	   ul_fsgid_h;
2465 	__u32	   ul_suppgid1;
2466 	__u32	   ul_suppgid1_h;
2467 	__u32	   ul_suppgid2;
2468 	__u32	   ul_suppgid2_h;
2469 	struct lu_fid   ul_fid1;
2470 	struct lu_fid   ul_fid2;
2471 	__s64	   ul_time;
2472 	__u64	   ul_padding_2;   /* rr_atime */
2473 	__u64	   ul_padding_3;   /* rr_ctime */
2474 	__u64	   ul_padding_4;   /* rr_size */
2475 	__u64	   ul_padding_5;   /* rr_blocks */
2476 	__u32	   ul_bias;
2477 	__u32	   ul_mode;
2478 	__u32	   ul_padding_6;   /* rr_flags */
2479 	__u32	   ul_padding_7;   /* rr_padding_2 */
2480 	__u32	   ul_padding_8;   /* rr_padding_3 */
2481 	__u32	   ul_padding_9;   /* rr_padding_4 */
2482 };
2483 
2484 /* instance of mdt_reint_rec */
2485 struct mdt_rec_rename {
2486 	__u32	   rn_opcode;
2487 	__u32	   rn_cap;
2488 	__u32	   rn_fsuid;
2489 	__u32	   rn_fsuid_h;
2490 	__u32	   rn_fsgid;
2491 	__u32	   rn_fsgid_h;
2492 	__u32	   rn_suppgid1;
2493 	__u32	   rn_suppgid1_h;
2494 	__u32	   rn_suppgid2;
2495 	__u32	   rn_suppgid2_h;
2496 	struct lu_fid   rn_fid1;
2497 	struct lu_fid   rn_fid2;
2498 	__s64	   rn_time;
2499 	__u64	   rn_padding_1;   /* rr_atime */
2500 	__u64	   rn_padding_2;   /* rr_ctime */
2501 	__u64	   rn_padding_3;   /* rr_size */
2502 	__u64	   rn_padding_4;   /* rr_blocks */
2503 	__u32	   rn_bias;	/* some operation flags */
2504 	__u32	   rn_mode;	/* cross-ref rename has mode */
2505 	__u32	   rn_padding_5;   /* rr_flags */
2506 	__u32	   rn_padding_6;   /* rr_padding_2 */
2507 	__u32	   rn_padding_7;   /* rr_padding_3 */
2508 	__u32	   rn_padding_8;   /* rr_padding_4 */
2509 };
2510 
2511 /* instance of mdt_reint_rec */
2512 struct mdt_rec_setxattr {
2513 	__u32	   sx_opcode;
2514 	__u32	   sx_cap;
2515 	__u32	   sx_fsuid;
2516 	__u32	   sx_fsuid_h;
2517 	__u32	   sx_fsgid;
2518 	__u32	   sx_fsgid_h;
2519 	__u32	   sx_suppgid1;
2520 	__u32	   sx_suppgid1_h;
2521 	__u32	   sx_suppgid2;
2522 	__u32	   sx_suppgid2_h;
2523 	struct lu_fid   sx_fid;
2524 	__u64	   sx_padding_1;   /* These three are rr_fid2 */
2525 	__u32	   sx_padding_2;
2526 	__u32	   sx_padding_3;
2527 	__u64	   sx_valid;
2528 	__s64	   sx_time;
2529 	__u64	   sx_padding_5;   /* rr_ctime */
2530 	__u64	   sx_padding_6;   /* rr_size */
2531 	__u64	   sx_padding_7;   /* rr_blocks */
2532 	__u32	   sx_size;
2533 	__u32	   sx_flags;
2534 	__u32	   sx_padding_8;   /* rr_flags */
2535 	__u32	   sx_padding_9;   /* rr_padding_2 */
2536 	__u32	   sx_padding_10;  /* rr_padding_3 */
2537 	__u32	   sx_padding_11;  /* rr_padding_4 */
2538 };
2539 
2540 /*
2541  * mdt_rec_reint is the template for all mdt_reint_xxx structures.
2542  * Do NOT change the size of various members, otherwise the value
2543  * will be broken in lustre_swab_mdt_rec_reint().
2544  *
2545  * If you add new members in other mdt_reint_xxx structures and need to use the
2546  * rr_padding_x fields, then update lustre_swab_mdt_rec_reint() also.
2547  */
2548 struct mdt_rec_reint {
2549 	__u32	   rr_opcode;
2550 	__u32	   rr_cap;
2551 	__u32	   rr_fsuid;
2552 	__u32	   rr_fsuid_h;
2553 	__u32	   rr_fsgid;
2554 	__u32	   rr_fsgid_h;
2555 	__u32	   rr_suppgid1;
2556 	__u32	   rr_suppgid1_h;
2557 	__u32	   rr_suppgid2;
2558 	__u32	   rr_suppgid2_h;
2559 	struct lu_fid   rr_fid1;
2560 	struct lu_fid   rr_fid2;
2561 	__s64	   rr_mtime;
2562 	__s64	   rr_atime;
2563 	__s64	   rr_ctime;
2564 	__u64	   rr_size;
2565 	__u64	   rr_blocks;
2566 	__u32	   rr_bias;
2567 	__u32	   rr_mode;
2568 	__u32	   rr_flags;
2569 	__u32	   rr_flags_h;
2570 	__u32	   rr_umask;
2571 	__u32	   rr_padding_4; /* also fix lustre_swab_mdt_rec_reint */
2572 };
2573 
2574 void lustre_swab_mdt_rec_reint(struct mdt_rec_reint *rr);
2575 
2576 struct lmv_desc {
2577 	__u32 ld_tgt_count;		/* how many MDS's */
2578 	__u32 ld_active_tgt_count;	 /* how many active */
2579 	__u32 ld_default_stripe_count;     /* how many objects are used */
2580 	__u32 ld_pattern;		  /* default MEA_MAGIC_* */
2581 	__u64 ld_default_hash_size;
2582 	__u64 ld_padding_1;		/* also fix lustre_swab_lmv_desc */
2583 	__u32 ld_padding_2;		/* also fix lustre_swab_lmv_desc */
2584 	__u32 ld_qos_maxage;	       /* in seconds */
2585 	__u32 ld_padding_3;		/* also fix lustre_swab_lmv_desc */
2586 	__u32 ld_padding_4;		/* also fix lustre_swab_lmv_desc */
2587 	struct obd_uuid ld_uuid;
2588 };
2589 
2590 /* TODO: lmv_stripe_md should contain mds capabilities for all slave fids */
2591 struct lmv_stripe_md {
2592 	__u32	 mea_magic;
2593 	__u32	 mea_count;
2594 	__u32	 mea_master;
2595 	__u32	 mea_padding;
2596 	char	  mea_pool_name[LOV_MAXPOOLNAME];
2597 	struct lu_fid mea_ids[0];
2598 };
2599 
2600 /* lmv structures */
2601 #define MEA_MAGIC_LAST_CHAR      0xb2221ca1
2602 #define MEA_MAGIC_ALL_CHARS      0xb222a11c
2603 #define MEA_MAGIC_HASH_SEGMENT   0xb222a11b
2604 
2605 #define MAX_HASH_SIZE_32	 0x7fffffffUL
2606 #define MAX_HASH_SIZE	    0x7fffffffffffffffULL
2607 #define MAX_HASH_HIGHEST_BIT     0x1000000000000000ULL
2608 
2609 enum fld_rpc_opc {
2610 	FLD_QUERY		       = 900,
2611 	FLD_LAST_OPC,
2612 	FLD_FIRST_OPC		   = FLD_QUERY
2613 };
2614 
2615 enum seq_rpc_opc {
2616 	SEQ_QUERY		       = 700,
2617 	SEQ_LAST_OPC,
2618 	SEQ_FIRST_OPC		   = SEQ_QUERY
2619 };
2620 
2621 enum seq_op {
2622 	SEQ_ALLOC_SUPER = 0,
2623 	SEQ_ALLOC_META = 1
2624 };
2625 
2626 /*
2627  *  LOV data structures
2628  */
2629 
2630 #define LOV_MAX_UUID_BUFFER_SIZE  8192
2631 /* The size of the buffer the lov/mdc reserves for the
2632  * array of UUIDs returned by the MDS.  With the current
2633  * protocol, this will limit the max number of OSTs per LOV */
2634 
2635 #define LOV_DESC_MAGIC 0xB0CCDE5C
2636 #define LOV_DESC_QOS_MAXAGE_DEFAULT 5  /* Seconds */
2637 #define LOV_DESC_STRIPE_SIZE_DEFAULT (1 << LNET_MTU_BITS)
2638 
2639 /* LOV settings descriptor (should only contain static info) */
2640 struct lov_desc {
2641 	__u32 ld_tgt_count;		/* how many OBD's */
2642 	__u32 ld_active_tgt_count;	 /* how many active */
2643 	__u32 ld_default_stripe_count;     /* how many objects are used */
2644 	__u32 ld_pattern;		  /* default PATTERN_RAID0 */
2645 	__u64 ld_default_stripe_size;      /* in bytes */
2646 	__u64 ld_default_stripe_offset;    /* in bytes */
2647 	__u32 ld_padding_0;		/* unused */
2648 	__u32 ld_qos_maxage;	       /* in seconds */
2649 	__u32 ld_padding_1;		/* also fix lustre_swab_lov_desc */
2650 	__u32 ld_padding_2;		/* also fix lustre_swab_lov_desc */
2651 	struct obd_uuid ld_uuid;
2652 };
2653 
2654 #define ld_magic ld_active_tgt_count       /* for swabbing from llogs */
2655 
2656 void lustre_swab_lov_desc(struct lov_desc *ld);
2657 
2658 /*
2659  *   LDLM requests:
2660  */
2661 /* opcodes -- MUST be distinct from OST/MDS opcodes */
2662 typedef enum {
2663 	LDLM_ENQUEUE     = 101,
2664 	LDLM_CONVERT     = 102,
2665 	LDLM_CANCEL      = 103,
2666 	LDLM_BL_CALLBACK = 104,
2667 	LDLM_CP_CALLBACK = 105,
2668 	LDLM_GL_CALLBACK = 106,
2669 	LDLM_SET_INFO    = 107,
2670 	LDLM_LAST_OPC
2671 } ldlm_cmd_t;
2672 #define LDLM_FIRST_OPC LDLM_ENQUEUE
2673 
2674 #define RES_NAME_SIZE 4
2675 struct ldlm_res_id {
2676 	__u64 name[RES_NAME_SIZE];
2677 };
2678 
2679 #define DLDLMRES	"[%#llx:%#llx:%#llx].%llx"
2680 #define PLDLMRES(res)	(res)->lr_name.name[0], (res)->lr_name.name[1], \
2681 			(res)->lr_name.name[2], (res)->lr_name.name[3]
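
/* Note (not part of the original header): DLDLMRES supplies the format
 * string and PLDLMRES the four name[] words for printf-style logging,
 * assuming a struct with an lr_name member (e.g. struct ldlm_resource_desc
 * defined later in this file):
 *
 *	CDEBUG(D_DLMTRACE, "resource: "DLDLMRES"\n", PLDLMRES(&desc));
 */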
2682 
2683 static inline int ldlm_res_eq(const struct ldlm_res_id *res0,
2684 			      const struct ldlm_res_id *res1)
2685 {
2686 	return !memcmp(res0, res1, sizeof(*res0));
2687 }
2688 
2689 /* lock types */
2690 typedef enum {
2691 	LCK_MINMODE = 0,
2692 	LCK_EX      = 1,
2693 	LCK_PW      = 2,
2694 	LCK_PR      = 4,
2695 	LCK_CW      = 8,
2696 	LCK_CR      = 16,
2697 	LCK_NL      = 32,
2698 	LCK_GROUP   = 64,
2699 	LCK_COS     = 128,
2700 	LCK_MAXMODE
2701 } ldlm_mode_t;
2702 
2703 #define LCK_MODE_NUM    8
2704 
2705 typedef enum {
2706 	LDLM_PLAIN     = 10,
2707 	LDLM_EXTENT    = 11,
2708 	LDLM_FLOCK     = 12,
2709 	LDLM_IBITS     = 13,
2710 	LDLM_MAX_TYPE
2711 } ldlm_type_t;
2712 
2713 #define LDLM_MIN_TYPE LDLM_PLAIN
2714 
2715 struct ldlm_extent {
2716 	__u64 start;
2717 	__u64 end;
2718 	__u64 gid;
2719 };
2720 
2721 static inline int ldlm_extent_overlap(struct ldlm_extent *ex1,
2722 				      struct ldlm_extent *ex2)
2723 {
2724 	return (ex1->start <= ex2->end) && (ex2->start <= ex1->end);
2725 }
2726 
2727 /* check if @ex1 contains @ex2 */
2728 static inline int ldlm_extent_contain(struct ldlm_extent *ex1,
2729 				      struct ldlm_extent *ex2)
2730 {
2731 	return (ex1->start <= ex2->start) && (ex1->end >= ex2->end);
2732 }
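
/* Illustrative sketch (not part of the original header): a whole-object
 * extent [0, OBD_OBJECT_EOF] both overlaps and contains any sub-extent.
 * ldlm_extent_demo() is hypothetical: */
static inline int ldlm_extent_demo(void)
{
	struct ldlm_extent whole = { .start = 0, .end = OBD_OBJECT_EOF };
	struct ldlm_extent page  = { .start = 4096, .end = 8191 };

	return ldlm_extent_overlap(&whole, &page) &&	/* true */
	       ldlm_extent_contain(&whole, &page);	/* true */
}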
2733 
2734 struct ldlm_inodebits {
2735 	__u64 bits;
2736 };
2737 
2738 struct ldlm_flock_wire {
2739 	__u64 lfw_start;
2740 	__u64 lfw_end;
2741 	__u64 lfw_owner;
2742 	__u32 lfw_padding;
2743 	__u32 lfw_pid;
2744 };
2745 
2746 /* it's important that the fields of the ldlm_extent structure match
2747  * the first fields of the ldlm_flock structure because there is only
2748  * one ldlm_swab routine to process the ldlm_policy_data_t union. if
2749  * this ever changes we will need to swab the union differently based
2750  * on the resource type. */
2751 
2752 typedef union {
2753 	struct ldlm_extent l_extent;
2754 	struct ldlm_flock_wire l_flock;
2755 	struct ldlm_inodebits l_inodebits;
2756 } ldlm_wire_policy_data_t;
2757 
2758 union ldlm_gl_desc {
2759 	struct ldlm_gl_lquota_desc	lquota_desc;
2760 };
2761 
2762 void lustre_swab_gl_desc(union ldlm_gl_desc *);
2763 
2764 struct ldlm_intent {
2765 	__u64 opc;
2766 };
2767 
2768 void lustre_swab_ldlm_intent(struct ldlm_intent *i);
2769 
2770 struct ldlm_resource_desc {
2771 	ldlm_type_t lr_type;
2772 	__u32 lr_padding;       /* also fix lustre_swab_ldlm_resource_desc */
2773 	struct ldlm_res_id lr_name;
2774 };
2775 
2776 struct ldlm_lock_desc {
2777 	struct ldlm_resource_desc l_resource;
2778 	ldlm_mode_t l_req_mode;
2779 	ldlm_mode_t l_granted_mode;
2780 	ldlm_wire_policy_data_t l_policy_data;
2781 };
2782 
2783 #define LDLM_LOCKREQ_HANDLES 2
2784 #define LDLM_ENQUEUE_CANCEL_OFF 1
2785 
2786 struct ldlm_request {
2787 	__u32 lock_flags;
2788 	__u32 lock_count;
2789 	struct ldlm_lock_desc lock_desc;
2790 	struct lustre_handle lock_handle[LDLM_LOCKREQ_HANDLES];
2791 };
2792 
2793 void lustre_swab_ldlm_request(struct ldlm_request *rq);
2794 
2795 /* If LDLM_ENQUEUE, 1 slot is already occupied, 1 is available.
2796  * Otherwise, 2 are available. */
2797 #define ldlm_request_bufsize(count, type)				\
2798 ({								      \
2799 	int _avail = LDLM_LOCKREQ_HANDLES;			      \
2800 	_avail -= (type == LDLM_ENQUEUE ? LDLM_ENQUEUE_CANCEL_OFF : 0); \
2801 	sizeof(struct ldlm_request) +				   \
2802 	(count > _avail ? count - _avail : 0) *			 \
2803 	sizeof(struct lustre_handle);				   \
2804 })
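
/* Worked out (not part of the original header): for LDLM_ENQUEUE the
 * enqueue itself consumes one of the two embedded lock_handle slots, so
 * with count == 3 cancel handles:
 *	_avail = 2 - 1 = 1
 *	size   = sizeof(struct ldlm_request)
 *	       + (3 - 1) * sizeof(struct lustre_handle)
 * whereas any other opcode with the same count appends only one handle. */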
2805 
2806 struct ldlm_reply {
2807 	__u32 lock_flags;
2808 	__u32 lock_padding;     /* also fix lustre_swab_ldlm_reply */
2809 	struct ldlm_lock_desc lock_desc;
2810 	struct lustre_handle lock_handle;
2811 	__u64  lock_policy_res1;
2812 	__u64  lock_policy_res2;
2813 };
2814 
2815 void lustre_swab_ldlm_reply(struct ldlm_reply *r);
2816 
2817 #define ldlm_flags_to_wire(flags)    ((__u32)(flags))
2818 #define ldlm_flags_from_wire(flags)  ((__u64)(flags))
2819 
2820 /*
2821  * Opcodes for mountconf (mgs and mgc)
2822  */
2823 typedef enum {
2824 	MGS_CONNECT = 250,
2825 	MGS_DISCONNECT,
2826 	MGS_EXCEPTION,	 /* node died, etc. */
2827 	MGS_TARGET_REG,	/* whenever target starts up */
2828 	MGS_TARGET_DEL,
2829 	MGS_SET_INFO,
2830 	MGS_CONFIG_READ,
2831 	MGS_LAST_OPC
2832 } mgs_cmd_t;
2833 #define MGS_FIRST_OPC MGS_CONNECT
2834 
2835 #define MGS_PARAM_MAXLEN 1024
2836 #define KEY_SET_INFO "set_info"
2837 
2838 struct mgs_send_param {
2839 	char	     mgs_param[MGS_PARAM_MAXLEN];
2840 };
2841 
2842 /* We pass this info to the MGS so it can write config logs */
2843 #define MTI_NAME_MAXLEN  64
2844 #define MTI_PARAM_MAXLEN 4096
2845 #define MTI_NIDS_MAX     32
2846 struct mgs_target_info {
2847 	__u32	    mti_lustre_ver;
2848 	__u32	    mti_stripe_index;
2849 	__u32	    mti_config_ver;
2850 	__u32	    mti_flags;
2851 	__u32	    mti_nid_count;
2852 	__u32	    mti_instance; /* Running instance of target */
2853 	char	     mti_fsname[MTI_NAME_MAXLEN];
2854 	char	     mti_svname[MTI_NAME_MAXLEN];
2855 	char	     mti_uuid[sizeof(struct obd_uuid)];
2856 	__u64	    mti_nids[MTI_NIDS_MAX];     /* host nids (lnet_nid_t)*/
2857 	char	     mti_params[MTI_PARAM_MAXLEN];
2858 };
2859 
2860 void lustre_swab_mgs_target_info(struct mgs_target_info *oinfo);
2861 
2862 struct mgs_nidtbl_entry {
2863 	__u64	   mne_version;    /* table version of this entry */
2864 	__u32	   mne_instance;   /* target instance # */
2865 	__u32	   mne_index;      /* target index */
2866 	__u32	   mne_length;     /* length of this entry, in bytes */
2867 	__u8	    mne_type;       /* target type LDD_F_SV_TYPE_OST/MDT */
2868 	__u8	    mne_nid_type;   /* type of NID (mbz), for IPv6 */
2869 	__u8	    mne_nid_size;   /* size of each NID, in bytes */
2870 	__u8	    mne_nid_count;  /* # of NIDs in buffer */
2871 	union {
2872 		lnet_nid_t nids[0];     /* variable size buffer for NIDs. */
2873 	} u;
2874 };
2875 
2876 void lustre_swab_mgs_nidtbl_entry(struct mgs_nidtbl_entry *oinfo);
2877 
2878 struct mgs_config_body {
2879 	char     mcb_name[MTI_NAME_MAXLEN]; /* logname */
2880 	__u64    mcb_offset;    /* next index of config log to request */
2881 	__u16    mcb_type;      /* type of log: CONFIG_T_[CONFIG|RECOVER] */
2882 	__u8     mcb_reserved;
2883 	__u8     mcb_bits;      /* bits unit size of config log */
2884 	__u32    mcb_units;     /* # of units for bulk transfer */
2885 };
2886 
2887 void lustre_swab_mgs_config_body(struct mgs_config_body *body);
2888 
2889 struct mgs_config_res {
2890 	__u64    mcr_offset;    /* index of last config log */
2891 	__u64    mcr_size;      /* size of the log */
2892 };
2893 
2894 void lustre_swab_mgs_config_res(struct mgs_config_res *body);
2895 
2896 /* Config marker flags (in config log) */
2897 #define CM_START       0x01
2898 #define CM_END	 0x02
2899 #define CM_SKIP	0x04
2900 #define CM_UPGRADE146  0x08
2901 #define CM_EXCLUDE     0x10
2902 #define CM_START_SKIP (CM_START | CM_SKIP)
2903 
2904 struct cfg_marker {
2905 	__u32	     cm_step;       /* aka config version */
2906 	__u32	     cm_flags;
2907 	__u32	     cm_vers;       /* lustre release version number */
2908 	__u32	     cm_padding;    /* 64 bit align */
2909 	__s64	     cm_createtime; /* when this record was first created */
2910 	__s64	     cm_canceltime; /* when this record is no longer valid */
2911 	char	      cm_tgtname[MTI_NAME_MAXLEN];
2912 	char	      cm_comment[MTI_NAME_MAXLEN];
2913 };
2914 
2915 void lustre_swab_cfg_marker(struct cfg_marker *marker, int swab, int size);
2916 
2917 /*
2918  * Opcodes for multiple servers.
2919  */
2920 
2921 typedef enum {
2922 	OBD_PING = 400,
2923 	OBD_LOG_CANCEL,
2924 	OBD_QC_CALLBACK,
2925 	OBD_IDX_READ,
2926 	OBD_LAST_OPC
2927 } obd_cmd_t;
2928 #define OBD_FIRST_OPC OBD_PING
2929 
2930 /* catalog of log objects */
2931 
2932 /** Identifier for a single log object */
2933 struct llog_logid {
2934 	struct ost_id		lgl_oi;
2935 	__u32		   lgl_ogen;
2936 } __attribute__((packed));
2937 
2938 /** Records written to the CATALOGS list */
2939 #define CATLIST "CATALOGS"
2940 struct llog_catid {
2941 	struct llog_logid       lci_logid;
2942 	__u32		   lci_padding1;
2943 	__u32		   lci_padding2;
2944 	__u32		   lci_padding3;
2945 } __attribute__((packed));
2946 
2947 /* Log data record types - there is no specific reason that these need to
2948  * be related to the RPC opcodes, but no reason not to (may be handy later?)
2949  */
2950 #define LLOG_OP_MAGIC 0x10600000
2951 #define LLOG_OP_MASK  0xfff00000
2952 
2953 typedef enum {
2954 	LLOG_PAD_MAGIC		= LLOG_OP_MAGIC | 0x00000,
2955 	OST_SZ_REC		= LLOG_OP_MAGIC | 0x00f00,
2956 	/* OST_RAID1_REC	= LLOG_OP_MAGIC | 0x01000, never used */
2957 	MDS_UNLINK_REC		= LLOG_OP_MAGIC | 0x10000 | (MDS_REINT << 8) |
2958 				  REINT_UNLINK, /* obsolete after 2.5.0 */
2959 	MDS_UNLINK64_REC	= LLOG_OP_MAGIC | 0x90000 | (MDS_REINT << 8) |
2960 				  REINT_UNLINK,
2961 	/* MDS_SETATTR_REC	= LLOG_OP_MAGIC | 0x12401, obsolete 1.8.0 */
2962 	MDS_SETATTR64_REC	= LLOG_OP_MAGIC | 0x90000 | (MDS_REINT << 8) |
2963 				  REINT_SETATTR,
2964 	OBD_CFG_REC		= LLOG_OP_MAGIC | 0x20000,
2965 	/* PTL_CFG_REC		= LLOG_OP_MAGIC | 0x30000, obsolete 1.4.0 */
2966 	LLOG_GEN_REC		= LLOG_OP_MAGIC | 0x40000,
2967 	/* LLOG_JOIN_REC	= LLOG_OP_MAGIC | 0x50000, obsolete  1.8.0 */
2968 	CHANGELOG_REC		= LLOG_OP_MAGIC | 0x60000,
2969 	CHANGELOG_USER_REC	= LLOG_OP_MAGIC | 0x70000,
2970 	HSM_AGENT_REC		= LLOG_OP_MAGIC | 0x80000,
2971 	LLOG_HDR_MAGIC		= LLOG_OP_MAGIC | 0x45539,
2972 	LLOG_LOGID_MAGIC	= LLOG_OP_MAGIC | 0x4553b,
2973 } llog_op_type;
2974 
2975 #define LLOG_REC_HDR_NEEDS_SWABBING(r) \
2976 	(((r)->lrh_type & __swab32(LLOG_OP_MASK)) == __swab32(LLOG_OP_MAGIC))
2977 
2978 /** Log record header - stored in little endian order.
2979  * Each record must start with this struct, end with a llog_rec_tail,
2980  * and be a multiple of 256 bits in size.
2981  */
2982 struct llog_rec_hdr {
2983 	__u32	lrh_len;
2984 	__u32	lrh_index;
2985 	__u32	lrh_type;
2986 	__u32	lrh_id;
2987 };
2988 
2989 struct llog_rec_tail {
2990 	__u32	lrt_len;
2991 	__u32	lrt_index;
2992 };
2993 
2994 /* Where data follow just after header */
2995 #define REC_DATA(ptr)						\
2996 	((void *)((char *)ptr + sizeof(struct llog_rec_hdr)))
2997 
2998 #define REC_DATA_LEN(rec)					\
2999 	(rec->lrh_len - sizeof(struct llog_rec_hdr) -		\
3000 	 sizeof(struct llog_rec_tail))
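
/* Worked out (not part of the original header): with the structs above,
 * sizeof(struct llog_rec_hdr) == 16 and sizeof(struct llog_rec_tail) == 8,
 * so a record with lrh_len == 64 carries
 *	REC_DATA_LEN(rec) == 64 - 16 - 8 == 40
 * payload bytes, starting at REC_DATA(rec). */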

struct llog_logid_rec {
	struct llog_rec_hdr	lid_hdr;
	struct llog_logid	lid_id;
	__u32			lid_padding1;
	__u64			lid_padding2;
	__u64			lid_padding3;
	struct llog_rec_tail	lid_tail;
} __attribute__((packed));

struct llog_unlink_rec {
	struct llog_rec_hdr	lur_hdr;
	__u64			lur_oid;
	__u32			lur_oseq;
	__u32			lur_count;
	struct llog_rec_tail	lur_tail;
} __attribute__((packed));

struct llog_unlink64_rec {
	struct llog_rec_hdr	lur_hdr;
	struct lu_fid		lur_fid;
	__u32			lur_count; /* to destroy the lost precreated */
	__u32			lur_padding1;
	__u64			lur_padding2;
	__u64			lur_padding3;
	struct llog_rec_tail	lur_tail;
} __attribute__((packed));

struct llog_setattr64_rec {
	struct llog_rec_hdr	lsr_hdr;
	struct ost_id		lsr_oi;
	__u32			lsr_uid;
	__u32			lsr_uid_h;
	__u32			lsr_gid;
	__u32			lsr_gid_h;
	__u64			lsr_padding;
	struct llog_rec_tail	lsr_tail;
} __attribute__((packed));

struct llog_size_change_rec {
	struct llog_rec_hdr	lsc_hdr;
	struct ll_fid		lsc_fid;
	__u32			lsc_ioepoch;
	__u32			lsc_padding1;
	__u64			lsc_padding2;
	__u64			lsc_padding3;
	struct llog_rec_tail	lsc_tail;
} __attribute__((packed));

#define CHANGELOG_MAGIC 0xca103000

/** \a changelog_rec_type's that can't be masked */
#define CHANGELOG_MINMASK (1 << CL_MARK)
/** bits covering all \a changelog_rec_type's */
#define CHANGELOG_ALLMASK 0xFFFFFFFF
/** default \a changelog_rec_type mask */
#define CHANGELOG_DEFMASK \
	(CHANGELOG_ALLMASK & ~((1 << CL_ATIME) | (1 << CL_CLOSE)))
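
/*
 * Illustrative sketch only (the helper name is hypothetical): a record
 * type is enabled when its bit is set in the mask, so under
 * CHANGELOG_DEFMASK the CL_ATIME and CL_CLOSE record types are
 * suppressed while everything else is logged.
 */
static inline int changelog_type_enabled(__u32 mask, int type)
{
	return (mask & (1 << type)) != 0;
}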

/* changelog llog name, needed by client replicators */
#define CHANGELOG_CATALOG "changelog_catalog"

struct changelog_setinfo {
	__u64 cs_recno;
	__u32 cs_id;
} __attribute__((packed));

/** changelog record */
struct llog_changelog_rec {
	struct llog_rec_hdr  cr_hdr;
	struct changelog_rec cr;
	struct llog_rec_tail cr_tail; /**< for_sizeof_only */
} __attribute__((packed));

struct llog_changelog_ext_rec {
	struct llog_rec_hdr      cr_hdr;
	struct changelog_ext_rec cr;
	struct llog_rec_tail     cr_tail; /**< for_sizeof_only */
} __attribute__((packed));

#define CHANGELOG_USER_PREFIX "cl"

struct llog_changelog_user_rec {
	struct llog_rec_hdr	cur_hdr;
	__u32			cur_id;
	__u32			cur_padding;
	__u64			cur_endrec;
	struct llog_rec_tail	cur_tail;
} __attribute__((packed));

enum agent_req_status {
	ARS_WAITING,
	ARS_STARTED,
	ARS_FAILED,
	ARS_CANCELED,
	ARS_SUCCEED,
};

static inline char *agent_req_status2name(enum agent_req_status ars)
{
	switch (ars) {
	case ARS_WAITING:
		return "WAITING";
	case ARS_STARTED:
		return "STARTED";
	case ARS_FAILED:
		return "FAILED";
	case ARS_CANCELED:
		return "CANCELED";
	case ARS_SUCCEED:
		return "SUCCEED";
	default:
		return "UNKNOWN";
	}
}

static inline bool agent_req_in_final_state(enum agent_req_status ars)
{
	return ((ars == ARS_SUCCEED) || (ars == ARS_FAILED) ||
		(ars == ARS_CANCELED));
}

struct llog_agent_req_rec {
	struct llog_rec_hdr	arr_hdr;	/**< record header */
	__u32			arr_status;	/**< status of the request */
						/* must match enum
						 * agent_req_status */
	__u32			arr_archive_id;	/**< backend archive number */
	__u64			arr_flags;	/**< req flags */
	__u64			arr_compound_id;	/**< compound cookie */
	__u64			arr_req_create;	/**< req. creation time */
	__u64			arr_req_change;	/**< req. status change time */
	struct hsm_action_item	arr_hai;	/**< req. to the agent */
	struct llog_rec_tail	arr_tail; /**< record tail, for_sizeof_only */
} __attribute__((packed));

/* Old llog gen for compatibility */
struct llog_gen {
	__u64 mnt_cnt;
	__u64 conn_cnt;
} __attribute__((packed));

struct llog_gen_rec {
	struct llog_rec_hdr	lgr_hdr;
	struct llog_gen		lgr_gen;
	__u64			padding1;
	__u64			padding2;
	__u64			padding3;
	struct llog_rec_tail	lgr_tail;
};

/* On-disk header structure of each log object, stored in little endian order */
#define LLOG_CHUNK_SIZE		8192
#define LLOG_HEADER_SIZE	(96)
#define LLOG_BITMAP_BYTES	(LLOG_CHUNK_SIZE - LLOG_HEADER_SIZE)

#define LLOG_MIN_REC_SIZE	(24) /* round(llog_rec_hdr + llog_rec_tail) */

/* flags for the logs */
enum llog_flag {
	LLOG_F_ZAP_WHEN_EMPTY	= 0x1,
	LLOG_F_IS_CAT		= 0x2,
	LLOG_F_IS_PLAIN		= 0x4,
};

struct llog_log_hdr {
	struct llog_rec_hdr	llh_hdr;
	__s64			llh_timestamp;
	__u32			llh_count;
	__u32			llh_bitmap_offset;
	__u32			llh_size;
	__u32			llh_flags;
	__u32			llh_cat_idx;
	/* for a catalog the first plain slot is next to it */
	struct obd_uuid		llh_tgtuuid;
	__u32			llh_reserved[LLOG_HEADER_SIZE/sizeof(__u32) - 23];
	__u32			llh_bitmap[LLOG_BITMAP_BYTES/sizeof(__u32)];
	struct llog_rec_tail	llh_tail;
} __attribute__((packed));

#define LLOG_BITMAP_SIZE(llh)	(__u32)(((llh)->llh_hdr.lrh_len -	\
					 (llh)->llh_bitmap_offset -	\
					 sizeof((llh)->llh_tail)) * 8)
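
/*
 * Worked example: with LLOG_CHUNK_SIZE 8192 and LLOG_HEADER_SIZE 96,
 * LLOG_BITMAP_BYTES is 8096 bytes, i.e. 64768 bitmap bits, which bounds
 * the number of records a single plain log object can index.
 */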

/** log cookies are used to reference a specific log file and a record therein */
struct llog_cookie {
	struct llog_logid	lgc_lgl;
	__u32			lgc_subsys;
	__u32			lgc_index;
	__u32			lgc_padding;
} __attribute__((packed));

/** llog protocol */
enum llogd_rpc_ops {
	LLOG_ORIGIN_HANDLE_CREATE	= 501,
	LLOG_ORIGIN_HANDLE_NEXT_BLOCK	= 502,
	LLOG_ORIGIN_HANDLE_READ_HEADER	= 503,
	LLOG_ORIGIN_HANDLE_WRITE_REC	= 504,
	LLOG_ORIGIN_HANDLE_CLOSE	= 505,
	LLOG_ORIGIN_CONNECT		= 506,
	LLOG_CATINFO			= 507,	/* deprecated */
	LLOG_ORIGIN_HANDLE_PREV_BLOCK	= 508,
	LLOG_ORIGIN_HANDLE_DESTROY	= 509,	/* for destroying llog objects */
	LLOG_LAST_OPC,
	LLOG_FIRST_OPC			= LLOG_ORIGIN_HANDLE_CREATE
};

struct llogd_body {
	struct llog_logid	lgd_logid;
	__u32			lgd_ctxt_idx;
	__u32			lgd_llh_flags;
	__u32			lgd_index;
	__u32			lgd_saved_index;
	__u32			lgd_len;
	__u64			lgd_cur_offset;
} __attribute__((packed));

struct llogd_conn_body {
	struct llog_gen		lgdc_gen;
	struct llog_logid	lgdc_logid;
	__u32			lgdc_ctxt_idx;
} __attribute__((packed));

/* Note: 64-bit types are 64-bit aligned in this structure */
struct obdo {
	__u64		o_valid;	/* hot fields in this obdo */
	struct ost_id	o_oi;
	__u64		o_parent_seq;
	__u64		o_size;		/* o_size-o_blocks == ost_lvb */
	__s64		o_mtime;
	__s64		o_atime;
	__s64		o_ctime;
	__u64		o_blocks;	/* brw: cli sent cached bytes */
	__u64		o_grant;

	/* 32-bit fields start here: keep an even number of them via padding */
	__u32		o_blksize;	/* optimal IO blocksize */
	__u32		o_mode;		/* brw: cli sent cache remain */
	__u32		o_uid;
	__u32		o_gid;
	__u32		o_flags;
	__u32		o_nlink;	/* brw: checksum */
	__u32		o_parent_oid;
	__u32		o_misc;		/* brw: o_dropped */

	__u64		o_ioepoch;	/* epoch in ost writes */
	__u32		o_stripe_idx;	/* holds stripe idx */
	__u32		o_parent_ver;
	struct lustre_handle	o_handle;	/* brw: lock handle to prolong
						 * locks */
	struct llog_cookie	o_lcookie;	/* destroy: unlink cookie from
						 * MDS */
	__u32			o_uid_h;
	__u32			o_gid_h;

	__u64			o_data_version;	/* getattr: sum of iversion for
						 * each stripe.
						 * brw: grant space consumed on
						 * the client for the write */
	__u64			o_padding_4;
	__u64			o_padding_5;
	__u64			o_padding_6;
};

#define o_dirty		o_blocks
#define o_undirty	o_mode
#define o_dropped	o_misc
#define o_cksum		o_nlink
#define o_grant_used	o_data_version

static inline void lustre_set_wire_obdo(struct obd_connect_data *ocd,
					struct obdo *wobdo,
					const struct obdo *lobdo)
{
	*wobdo = *lobdo;
	wobdo->o_flags &= ~OBD_FL_LOCAL_MASK;
	if (ocd == NULL)
		return;

	if (unlikely(!(ocd->ocd_connect_flags & OBD_CONNECT_FID)) &&
	    fid_seq_is_echo(ostid_seq(&lobdo->o_oi))) {
		/* Currently OBD_FL_OSTID is only used when a 2.4 echo
		 * client communicates with a pre-2.4 server */
		wobdo->o_oi.oi.oi_id = fid_oid(&lobdo->o_oi.oi_fid);
		wobdo->o_oi.oi.oi_seq = fid_seq(&lobdo->o_oi.oi_fid);
	}
}

static inline void lustre_get_wire_obdo(struct obd_connect_data *ocd,
					struct obdo *lobdo,
					const struct obdo *wobdo)
{
	__u32 local_flags = 0;

	if (lobdo->o_valid & OBD_MD_FLFLAGS)
		local_flags = lobdo->o_flags & OBD_FL_LOCAL_MASK;

	*lobdo = *wobdo;
	if (local_flags != 0) {
		lobdo->o_valid |= OBD_MD_FLFLAGS;
		lobdo->o_flags &= ~OBD_FL_LOCAL_MASK;
		lobdo->o_flags |= local_flags;
	}
	if (ocd == NULL)
		return;

	if (unlikely(!(ocd->ocd_connect_flags & OBD_CONNECT_FID)) &&
	    fid_seq_is_echo(wobdo->o_oi.oi.oi_seq)) {
		/* see above */
		lobdo->o_oi.oi_fid.f_seq = wobdo->o_oi.oi.oi_seq;
		lobdo->o_oi.oi_fid.f_oid = wobdo->o_oi.oi.oi_id;
		lobdo->o_oi.oi_fid.f_ver = 0;
	}
}
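
/*
 * Illustrative round trip (the helper name is hypothetical, and ocd may
 * be NULL before connect data has been negotiated): converting an obdo
 * to wire form strips OBD_FL_LOCAL_MASK, and converting back restores
 * the caller's local-only flags.
 */
static inline void obdo_wire_round_trip(struct obd_connect_data *ocd,
					struct obdo *lobdo)
{
	struct obdo wobdo;

	lustre_set_wire_obdo(ocd, &wobdo, lobdo);
	lustre_get_wire_obdo(ocd, lobdo, &wobdo);
}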

/* request structure for OSTs */
struct ost_body {
	struct obdo oa;
};

/* Key for FIEMAP to be used in get_info calls */
struct ll_fiemap_info_key {
	char	name[8];
	struct	obdo oa;
	struct	ll_user_fiemap fiemap;
};

void lustre_swab_ost_body(struct ost_body *b);
void lustre_swab_ost_last_id(__u64 *id);
void lustre_swab_fiemap(struct ll_user_fiemap *fiemap);

void lustre_swab_lov_user_md_v1(struct lov_user_md_v1 *lum);
void lustre_swab_lov_user_md_v3(struct lov_user_md_v3 *lum);
void lustre_swab_lov_user_md_objects(struct lov_user_ost_data *lod,
				     int stripe_count);
void lustre_swab_lov_mds_md(struct lov_mds_md *lmm);

/* llog_swab.c */
void lustre_swab_llogd_body(struct llogd_body *d);
void lustre_swab_llog_hdr(struct llog_log_hdr *h);
void lustre_swab_llogd_conn_body(struct llogd_conn_body *d);
void lustre_swab_llog_rec(struct llog_rec_hdr *rec);

struct lustre_cfg;
void lustre_swab_lustre_cfg(struct lustre_cfg *lcfg);

/* Functions for dumping PTLRPC fields */
void dump_rniobuf(struct niobuf_remote *rnb);
void dump_ioo(struct obd_ioobj *nb);
void dump_ost_body(struct ost_body *ob);
void dump_rcs(__u32 *rc);

#define IDX_INFO_MAGIC 0x3D37CC37

/* Index file transfer over the network.  The server serializes the index
 * into a byte stream, which is sent to the client via a bulk transfer */
struct idx_info {
	__u32		ii_magic;

	/* reply: see idx_info_flags below */
	__u32		ii_flags;

	/* request & reply: number of lu_idxpages (to be) transferred */
	__u16		ii_count;
	__u16		ii_pad0;

	/* request: requested attributes passed down to the iterator API */
	__u32		ii_attrs;

	/* request & reply: index file identifier (FID) */
	struct lu_fid	ii_fid;

	/* reply: version of the index file before starting to walk the index.
	 * Please note that the version can be modified at any time during the
	 * transfer */
	__u64		ii_version;

	/* request: hash to start with;
	 * reply: hash of the first entry of the first lu_idxpage and hash
	 *	  of the entry to read next, if any */
	__u64		ii_hash_start;
	__u64		ii_hash_end;

	/* reply: size of keys in lu_idxpages, or the minimal key size if
	 * II_FL_VARKEY is set */
	__u16		ii_keysize;

	/* reply: size of records in lu_idxpages, or the minimal record size
	 * if II_FL_VARREC is set */
	__u16		ii_recsize;

	__u32		ii_pad1;
	__u64		ii_pad2;
	__u64		ii_pad3;
};

void lustre_swab_idx_info(struct idx_info *ii);

#define II_END_OFF	MDS_DIR_END_OFF /* all entries have been read */

/* List of flags used in idx_info::ii_flags */
enum idx_info_flags {
	II_FL_NOHASH	= 1 << 0, /* client doesn't care about hash value */
	II_FL_VARKEY	= 1 << 1, /* keys can be of variable size */
	II_FL_VARREC	= 1 << 2, /* records can be of variable size */
	II_FL_NONUNQ	= 1 << 3, /* index supports non-unique keys */
};

#define LIP_MAGIC 0x8A6D6B6C

/* 4KB (= LU_PAGE_SIZE) container gathering key/record pairs */
struct lu_idxpage {
	/* 16-byte header */
	__u32	lip_magic;
	__u16	lip_flags;
	__u16	lip_nr;   /* number of entries in the container */
	__u64	lip_pad0; /* additional padding for future use */

	/* key/record pairs are stored in the remaining 4080 bytes.
	 * depending upon the flags in idx_info::ii_flags, each key/record
	 * pair might be preceded by:
	 * - a hash value
	 * - the key size (II_FL_VARKEY is set)
	 * - the record size (II_FL_VARREC is set)
	 *
	 * For the time being, we only support fixed-size keys & records. */
	char	lip_entries[0];
};

#define LIP_HDR_SIZE (offsetof(struct lu_idxpage, lip_entries))
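
/*
 * Illustrative sketch only (the helper name is hypothetical), assuming
 * II_FL_NOHASH is set and keys/records are fixed-size, so each entry
 * occupies exactly ii_keysize + ii_recsize bytes:
 */
static inline char *lip_entry(struct lu_idxpage *lip,
			      const struct idx_info *ii, int nr)
{
	return lip->lip_entries + nr * (ii->ii_keysize + ii->ii_recsize);
}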

/* Gather all possible types associated with a 4KB container */
union lu_page {
	struct lu_dirpage	lp_dir; /* for MDS_READPAGE */
	struct lu_idxpage	lp_idx; /* for OBD_IDX_READ */
	char			lp_array[LU_PAGE_SIZE];
};

/* security opcodes */
typedef enum {
	SEC_CTX_INIT		= 801,
	SEC_CTX_INIT_CONT	= 802,
	SEC_CTX_FINI		= 803,
	SEC_LAST_OPC,
	SEC_FIRST_OPC		= SEC_CTX_INIT
} sec_cmd_t;

/*
 * capa related definitions
 */
#define CAPA_HMAC_MAX_LEN	64
#define CAPA_HMAC_KEY_MAX_LEN	56

/* NB take care when changing the sequence of elements in this struct,
 * because the offset info is used in find_capa() */
struct lustre_capa {
	struct lu_fid	lc_fid;		/** fid */
	__u64		lc_opc;		/** operations allowed */
	__u64		lc_uid;		/** file owner */
	__u64		lc_gid;		/** file group */
	__u32		lc_flags;	/** HMAC algorithm & flags */
	__u32		lc_keyid;	/** key# used for the capability */
	__u32		lc_timeout;	/** capa timeout value (sec) */
/* FIXME: y2038 time_t overflow: */
	__u32		lc_expiry;	/** expiry time (sec) */
	__u8		lc_hmac[CAPA_HMAC_MAX_LEN];	/** HMAC */
} __attribute__((packed));

void lustre_swab_lustre_capa(struct lustre_capa *c);

/** lustre_capa::lc_opc */
enum {
	CAPA_OPC_BODY_WRITE   = 1<<0,  /**< write object data */
	CAPA_OPC_BODY_READ    = 1<<1,  /**< read object data */
	CAPA_OPC_INDEX_LOOKUP = 1<<2,  /**< lookup object fid */
	CAPA_OPC_INDEX_INSERT = 1<<3,  /**< insert object fid */
	CAPA_OPC_INDEX_DELETE = 1<<4,  /**< delete object fid */
	CAPA_OPC_OSS_WRITE    = 1<<5,  /**< write oss object data */
	CAPA_OPC_OSS_READ     = 1<<6,  /**< read oss object data */
	CAPA_OPC_OSS_TRUNC    = 1<<7,  /**< truncate oss object */
	CAPA_OPC_OSS_DESTROY  = 1<<8,  /**< destroy oss object */
	CAPA_OPC_META_WRITE   = 1<<9,  /**< write object meta data */
	CAPA_OPC_META_READ    = 1<<10, /**< read object meta data */
};

#define CAPA_OPC_OSS_RW (CAPA_OPC_OSS_READ | CAPA_OPC_OSS_WRITE)
#define CAPA_OPC_MDS_ONLY						     \
	(CAPA_OPC_BODY_WRITE | CAPA_OPC_BODY_READ | CAPA_OPC_INDEX_LOOKUP | \
	 CAPA_OPC_INDEX_INSERT | CAPA_OPC_INDEX_DELETE)
#define CAPA_OPC_OSS_ONLY						     \
	(CAPA_OPC_OSS_WRITE | CAPA_OPC_OSS_READ | CAPA_OPC_OSS_TRUNC |	     \
	 CAPA_OPC_OSS_DESTROY)
#define CAPA_OPC_MDS_DEFAULT ~CAPA_OPC_OSS_ONLY
#define CAPA_OPC_OSS_DEFAULT ~(CAPA_OPC_MDS_ONLY | CAPA_OPC_OSS_ONLY)
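
/*
 * Illustrative sketch only (the helper name is hypothetical): a
 * capability permits an operation when all of the requested opcode bits
 * are present in lc_opc.
 */
static inline int lustre_capa_permits(const struct lustre_capa *capa,
				      __u64 opc)
{
	return (capa->lc_opc & opc) == opc;
}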

struct lustre_capa_key {
	__u64	lk_seq;		/**< mds# */
	__u32	lk_keyid;	/**< key# */
	__u32	lk_padding;
	__u8	lk_key[CAPA_HMAC_KEY_MAX_LEN];	/**< key */
} __attribute__((packed));

/** The link ea holds 1 \a link_ea_entry for each hardlink */
#define LINK_EA_MAGIC 0x11EAF1DFUL
struct link_ea_header {
	__u32 leh_magic;
	__u32 leh_reccount;
	__u64 leh_len;      /* total size */
	/* future use */
	__u32 padding1;
	__u32 padding2;
};

/** Hardlink data is name and parent fid.
 * Stored in this crazy struct for maximum packing and endian-neutrality
 */
struct link_ea_entry {
	/** __u16 stored big-endian, unaligned */
	unsigned char	lee_reclen[2];
	unsigned char	lee_parent_fid[sizeof(struct lu_fid)];
	char		lee_name[0];
} __attribute__((packed));
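
/*
 * Sketch of the unaligned big-endian encoding used for lee_reclen, per
 * the field comment above (the helper names are illustrative only):
 */
static inline int lee_reclen_get(const struct link_ea_entry *lee)
{
	return (lee->lee_reclen[0] << 8) | lee->lee_reclen[1];
}

static inline void lee_reclen_set(struct link_ea_entry *lee, int reclen)
{
	lee->lee_reclen[0] = (reclen >> 8) & 0xff;
	lee->lee_reclen[1] = reclen & 0xff;
}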

/** fid2path request/reply structure */
struct getinfo_fid2path {
	struct lu_fid	gf_fid;
	__u64		gf_recno;
	__u32		gf_linkno;
	__u32		gf_pathlen;
	char		gf_path[0];
} __attribute__((packed));

void lustre_swab_fid2path(struct getinfo_fid2path *gf);

enum {
	LAYOUT_INTENT_ACCESS	= 0,
	LAYOUT_INTENT_READ	= 1,
	LAYOUT_INTENT_WRITE	= 2,
	LAYOUT_INTENT_GLIMPSE	= 3,
	LAYOUT_INTENT_TRUNC	= 4,
	LAYOUT_INTENT_RELEASE	= 5,
	LAYOUT_INTENT_RESTORE	= 6
};

/* enqueue layout lock with intent */
struct layout_intent {
	__u32 li_opc; /* intent operation for enqueue: read, write, etc. */
	__u32 li_flags;
	__u64 li_start;
	__u64 li_end;
};

void lustre_swab_layout_intent(struct layout_intent *li);

/**
 * On-the-wire version of the hsm_progress structure.
 *
 * Contains the userspace hsm_progress and some internal fields.
 */
struct hsm_progress_kernel {
	/* Fields taken from struct hsm_progress */
	lustre_fid		hpk_fid;
	__u64			hpk_cookie;
	struct hsm_extent	hpk_extent;
	__u16			hpk_flags;
	__u16			hpk_errval; /* positive val */
	__u32			hpk_padding1;
	/* Additional fields */
	__u64			hpk_data_version;
	__u64			hpk_padding2;
} __attribute__((packed));

void lustre_swab_hsm_user_state(struct hsm_user_state *hus);
void lustre_swab_hsm_current_action(struct hsm_current_action *action);
void lustre_swab_hsm_progress_kernel(struct hsm_progress_kernel *hpk);
void lustre_swab_hsm_user_item(struct hsm_user_item *hui);
void lustre_swab_hsm_request(struct hsm_request *hr);

/**
 * These are the object update opcodes used under UPDATE_OBJ, which is
 * currently used by cross-ref operations between MDTs.
 *
 * During a cross-ref operation, the master MDT, which the client sends
 * the request to, will disassemble the operation into object updates,
 * and OSP will send these updates to the remote MDT to be executed.
 *
 *   Update request format
 *   magic:  UPDATE_BUFFER_MAGIC_V1
 *   count:  how many updates are in the request
 *   bufs[0]: the packed updates follow.
 *   update[0]:
 *		type: object_update_op, the op code of the update
 *		fid: the object fid of the update
 *		lens/bufs: other parameters of the update
 *   update[1]:
 *		type: object_update_op, the op code of the update
 *		fid: the object fid of the update
 *		lens/bufs: other parameters of the update
 *   ..........
 *   update[7]:	type: object_update_op, the op code of the update
 *		fid: the object fid of the update
 *		lens/bufs: other parameters of the update
 *   Currently at most 8 updates per object update request.
 *
 *******************************************************************
 *   update reply format:
 *
 *   ur_version: UPDATE_REPLY_V1
 *   ur_count:   the count of the replies, which is usually equal
 *		 to the number of updates in the request.
 *   ur_lens:    the reply lengths of each object update.
 *
 *   replies:    1st update reply  [4bytes_ret: other body]
 *		 2nd update reply  [4bytes_ret: other body]
 *		 .....
 *		 nth update reply  [4bytes_ret: other body]
 *
 *   For each update reply, the format is
 *	 result (4 bytes) : other stuff
 */

#define UPDATE_MAX_OPS		10
#define UPDATE_BUFFER_MAGIC_V1	0xBDDE0001
#define UPDATE_BUFFER_MAGIC	UPDATE_BUFFER_MAGIC_V1
#define UPDATE_BUF_COUNT	8
enum object_update_op {
	OBJ_CREATE		= 1,
	OBJ_DESTROY		= 2,
	OBJ_REF_ADD		= 3,
	OBJ_REF_DEL		= 4,
	OBJ_ATTR_SET		= 5,
	OBJ_ATTR_GET		= 6,
	OBJ_XATTR_SET		= 7,
	OBJ_XATTR_GET		= 8,
	OBJ_INDEX_LOOKUP	= 9,
	OBJ_INDEX_INSERT	= 10,
	OBJ_INDEX_DELETE	= 11,
	OBJ_LAST
};

struct update {
	__u32		u_type;
	__u32		u_batchid;
	struct lu_fid	u_fid;
	__u32		u_lens[UPDATE_BUF_COUNT];
	__u32		u_bufs[0];
};
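
/*
 * Illustrative sketch only (the helper name is hypothetical, and any
 * rounding of the individual buffer lengths is assumed to have been
 * applied by the sender): the packed size of one update is its fixed
 * header plus the sum of its parameter buffer lengths.
 */
static inline __u32 update_packed_size(const struct update *u)
{
	__u32 size = offsetof(struct update, u_bufs[0]);
	int i;

	for (i = 0; i < UPDATE_BUF_COUNT; i++)
		size += u->u_lens[i];
	return size;
}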

struct update_buf {
	__u32	ub_magic;
	__u32	ub_count;
	__u32	ub_bufs[0];
};

#define UPDATE_REPLY_V1		0x00BD0001
struct update_reply {
	__u32	ur_version;
	__u32	ur_count;
	__u32	ur_lens[0];
};

void lustre_swab_update_buf(struct update_buf *ub);
void lustre_swab_update_reply_buf(struct update_reply *ur);

/** layout swap request structure
 * fid1 and fid2 are in mdt_body
 */
struct mdc_swap_layouts {
	__u64	msl_flags;
} __packed;

void lustre_swab_swap_layouts(struct mdc_swap_layouts *msl);

struct close_data {
	struct lustre_handle	cd_handle;
	struct lu_fid		cd_fid;
	__u64			cd_data_version;
	__u64			cd_reserved[8];
};

void lustre_swab_close_data(struct close_data *data);

#endif
/** @} lustreidl */