/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2014, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/include/lustre/lustre_idl.h
 *
 * Lustre wire protocol definitions.
 */
/** \defgroup lustreidl lustreidl
 *
 * Lustre wire protocol definitions.
 *
 * ALL structs passing over the wire should be declared here. Structs
 * that are used in interfaces with userspace should go in lustre_user.h.
 *
 * All structs being declared here should be built from simple fixed-size
 * types (__u8, __u16, __u32, __u64) or be built from other types or
 * structs also declared in this file. Similarly, all flags and magic
 * values in those structs should also be declared here. This ensures
 * that the Lustre wire protocol is not influenced by external dependencies.
 *
 * The only other acceptable items in this file are VERY SIMPLE accessor
 * functions to avoid callers grubbing inside the structures, and the
 * prototypes of the swabber functions for each struct. Nothing that
 * depends on external functions or definitions should be in here.
 *
 * Structs must be properly aligned to put 64-bit values on an 8-byte
 * boundary. Any structs being added here must also be added to
 * utils/wirecheck.c and "make newwiretest" run to regenerate the
 * utils/wiretest.c sources. This allows us to verify that wire structs
 * have the proper alignment/size on all architectures.
 *
 * DO NOT CHANGE any of the structs, flags, values declared here and used
 * in released Lustre versions. Some structs may have padding fields that
 * can be used. Some structs might allow addition at the end (verify this
 * in the code to ensure that new/old clients that see this larger struct
 * do not fail, otherwise you need to implement protocol compatibility).
 *
 * We assume all nodes are either little-endian or big-endian, and we
 * always send messages in the sender's native format. The receiver
 * detects the message format by checking the 'magic' field of the message
 * (see lustre_msg_swabbed() below).
 *
 * Each wire type has corresponding 'lustre_swab_xxxtypexxx()' routines,
 * implemented either here, inline (trivial implementations) or in
 * ptlrpc/pack_generic.c. These 'swabbers' convert the type from "other"
 * endian, in-place in the message buffer.
 *
 * A swabber takes a single pointer argument. The caller must already have
 * verified that the length of the message buffer >= sizeof (type).
 *
 * For variable length types, a second 'lustre_swab_v_xxxtypexxx()' routine
 * may be defined that swabs just the variable part, after the caller has
 * verified that the message buffer is large enough.
 */
#ifndef _LUSTRE_IDL_H_
#define _LUSTRE_IDL_H_

#include <libcfs/libcfs.h>		/* for LPU64, LPX64, etc. */
#include <lnet/types.h>
#include <lustre/lustre_user.h>	/* Defn's shared with user-space. */
#include <lustre/lustre_errno.h>
#include <lustre_ver.h>
/*
 *  FOO_REQUEST_PORTAL is for incoming requests on the FOO
 *  FOO_REPLY_PORTAL   is for incoming replies on the FOO
 *  FOO_BULK_PORTAL    is for incoming bulk on the FOO
 */

#define CONNMGR_REQUEST_PORTAL		1
#define CONNMGR_REPLY_PORTAL		2
//#define OSC_REQUEST_PORTAL		3
#define OSC_REPLY_PORTAL		4
//#define OSC_BULK_PORTAL		5
#define OST_IO_PORTAL			6
#define OST_CREATE_PORTAL		7
#define OST_BULK_PORTAL			8
//#define MDC_REQUEST_PORTAL		9
#define MDC_REPLY_PORTAL		10
//#define MDC_BULK_PORTAL		11
#define MDS_REQUEST_PORTAL		12
//#define MDS_REPLY_PORTAL		13
#define MDS_BULK_PORTAL			14
#define LDLM_CB_REQUEST_PORTAL		15
#define LDLM_CB_REPLY_PORTAL		16
#define LDLM_CANCEL_REQUEST_PORTAL	17
#define LDLM_CANCEL_REPLY_PORTAL	18
//#define PTLBD_REQUEST_PORTAL		19
//#define PTLBD_REPLY_PORTAL		20
//#define PTLBD_BULK_PORTAL		21
#define MDS_SETATTR_PORTAL		22
#define MDS_READPAGE_PORTAL		23
#define OUT_PORTAL			24
#define MGC_REPLY_PORTAL		25
#define MGS_REQUEST_PORTAL		26
#define MGS_REPLY_PORTAL		27
#define OST_REQUEST_PORTAL		28
#define FLD_REQUEST_PORTAL		29
#define SEQ_METADATA_PORTAL		30
#define SEQ_DATA_PORTAL			31
#define SEQ_CONTROLLER_PORTAL		32
#define MGS_BULK_PORTAL			33

/* Portal 63 is reserved for the Cray Inc DVS - nic@cray.com, roe@cray.com,
 * n8851@cray.com */
#define PTL_RPC_MSG_REQUEST	4711
#define PTL_RPC_MSG_ERR		4712
#define PTL_RPC_MSG_REPLY	4713

/* DON'T use swabbed values of MAGIC as magic! */
#define LUSTRE_MSG_MAGIC_V2		0x0BD00BD3
#define LUSTRE_MSG_MAGIC_V2_SWABBED	0xD30BD00B

#define LUSTRE_MSG_MAGIC LUSTRE_MSG_MAGIC_V2

#define PTLRPC_MSG_VERSION	0x00000003
#define LUSTRE_VERSION_MASK	0xffff0000
#define LUSTRE_OBD_VERSION	0x00010000
#define LUSTRE_MDS_VERSION	0x00020000
#define LUSTRE_OST_VERSION	0x00030000
#define LUSTRE_DLM_VERSION	0x00040000
#define LUSTRE_LOG_VERSION	0x00050000
#define LUSTRE_MGS_VERSION	0x00060000
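/*
 * Usage sketch (illustrative, not part of the original header): a sender
 * stamps its subsystem version into the message, and the receiver splits
 * it out with LUSTRE_VERSION_MASK, e.g.:
 *
 *	// reject anything that is not an MDS-class message
 *	if ((lustre_msg_get_version(msg) & LUSTRE_VERSION_MASK) !=
 *	    LUSTRE_MDS_VERSION)
 *		rc = -EINVAL;
 */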
/**
 * Describes a range of sequence, lsr_start is included but lsr_end is
 * not in the range.
 * Same structure is used in fld module where lsr_index field holds mdt id
 * of the home mdt.
 */
struct lu_seq_range {
	__u64 lsr_start;
	__u64 lsr_end;
	__u32 lsr_index;
	__u32 lsr_flags;
};

struct lu_seq_range_array {
	__u32 lsra_count;
	__u32 lsra_padding;
	struct lu_seq_range lsra_lsr[0];
};
#define LU_SEQ_RANGE_MDT	0x0
#define LU_SEQ_RANGE_OST	0x1
#define LU_SEQ_RANGE_ANY	0x3

#define LU_SEQ_RANGE_MASK	0x3
static inline unsigned fld_range_type(const struct lu_seq_range *range)
{
	return range->lsr_flags & LU_SEQ_RANGE_MASK;
}

static inline bool fld_range_is_ost(const struct lu_seq_range *range)
{
	return fld_range_type(range) == LU_SEQ_RANGE_OST;
}

static inline bool fld_range_is_mdt(const struct lu_seq_range *range)
{
	return fld_range_type(range) == LU_SEQ_RANGE_MDT;
}
/**
 * The ANY range type is only used when the fld client sends an fld query
 * request but does not know whether the seq is MDT or OST; it sends the
 * request with the ANY type, so a seq of either type returned by the
 * lookup is acceptable.
 */
static inline unsigned fld_range_is_any(const struct lu_seq_range *range)
{
	return fld_range_type(range) == LU_SEQ_RANGE_ANY;
}
static inline void fld_range_set_type(struct lu_seq_range *range,
				      unsigned flags)
{
	range->lsr_flags |= flags;
}

static inline void fld_range_set_mdt(struct lu_seq_range *range)
{
	fld_range_set_type(range, LU_SEQ_RANGE_MDT);
}

static inline void fld_range_set_ost(struct lu_seq_range *range)
{
	fld_range_set_type(range, LU_SEQ_RANGE_OST);
}

static inline void fld_range_set_any(struct lu_seq_range *range)
{
	fld_range_set_type(range, LU_SEQ_RANGE_ANY);
}
/**
 * Returns width of given range \a range.
 */
static inline __u64 range_space(const struct lu_seq_range *range)
{
	return range->lsr_end - range->lsr_start;
}

/**
 * Initialize range to zero.
 */
static inline void range_init(struct lu_seq_range *range)
{
	memset(range, 0, sizeof(*range));
}

/**
 * Check if given seq id \a s is within given range \a range.
 */
static inline bool range_within(const struct lu_seq_range *range,
				__u64 s)
{
	return s >= range->lsr_start && s < range->lsr_end;
}

static inline bool range_is_sane(const struct lu_seq_range *range)
{
	return range->lsr_end >= range->lsr_start;
}

static inline bool range_is_zero(const struct lu_seq_range *range)
{
	return range->lsr_start == 0 && range->lsr_end == 0;
}

static inline bool range_is_exhausted(const struct lu_seq_range *range)
{
	return range_space(range) == 0;
}
/* return 0 if two ranges have the same location */
static inline int range_compare_loc(const struct lu_seq_range *r1,
				    const struct lu_seq_range *r2)
{
	return r1->lsr_index != r2->lsr_index ||
	       r1->lsr_flags != r2->lsr_flags;
}

#define DRANGE "[%#16.16"LPF64"x-%#16.16"LPF64"x):%x:%s"

#define PRANGE(range)		\
	(range)->lsr_start,	\
	(range)->lsr_end,	\
	(range)->lsr_index,	\
	fld_range_is_mdt(range) ? "mdt" : "ost"
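/*
 * Usage sketch: DRANGE supplies the format string and PRANGE expands to
 * the matching argument list, e.g.
 *
 *	CDEBUG(D_INFO, "range "DRANGE"\n", PRANGE(range));
 */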
/** \defgroup lu_fid lu_fid */

/**
 * Flags for lustre_mdt_attrs::lma_compat and lustre_mdt_attrs::lma_incompat.
 * Deprecated since HSM and SOM attributes are now stored in separate on-disk
 * xattr.
 */
enum lma_compat {
	LMAC_HSM	= 0x00000001,
	LMAC_SOM	= 0x00000002,
	LMAC_NOT_IN_OI	= 0x00000004, /* the object does NOT need OI mapping */
	LMAC_FID_ON_OST	= 0x00000008, /* For OST-object, its OI mapping is
				       * under /O/<seq>/d<x>. */
};

/**
 * Masks for all features that should be supported by a Lustre version to
 * access a specific file.
 * This information is stored in lustre_mdt_attrs::lma_incompat.
 */
enum lma_incompat {
	LMAI_RELEASED		= 0x00000001, /* file is released */
	LMAI_AGENT		= 0x00000002, /* agent inode */
	LMAI_REMOTE_PARENT	= 0x00000004, /* the parent of the object
					       * is on the remote MDT */
	LMAI_STRIPED		= 0x00000008, /* striped directory inode */
};

#define LMA_INCOMPAT_SUPP (LMAI_AGENT | LMAI_REMOTE_PARENT | LMAI_STRIPED)
extern void lustre_lma_swab(struct lustre_mdt_attrs *lma);
extern void lustre_lma_init(struct lustre_mdt_attrs *lma,
			    const struct lu_fid *fid,
			    __u32 compat, __u32 incompat);
/**
 * SOM on-disk attributes stored in a separate xattr.
 */
struct som_attrs {
	/** Bitfield for supported data in this structure. For future use. */
	__u32	som_compat;

	/** Incompat feature list. The supported feature mask is available in
	 * SOM_INCOMPAT_SUPP */
	__u32	som_incompat;

	/** IO Epoch these SOM attributes belong to */
	__u64	som_ioepoch;
	/** total file size in objects */
	__u64	som_size;
	/** total fs blocks in objects */
	__u64	som_blocks;
	/** mds mount id the size is valid for */
	__u64	som_mountid;
};
extern void lustre_som_swab(struct som_attrs *attrs);

#define SOM_INCOMPAT_SUPP 0x0
/* copytool uses a 32b bitmask field to encode archive-Ids during register
 * with the MDT:
 * archive num = 0 => all
 * archive num from 1 to 32
 */
#define LL_HSM_MAX_ARCHIVE (sizeof(__u32) * 8)
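/*
 * Worked example of the encoding above (a sketch, not in the original
 * source): archive number N (1 <= N <= LL_HSM_MAX_ARCHIVE) occupies bit
 * N - 1 of the registration mask, while 0 requests all archives:
 *
 *	__u32 mask = (archive_id == 0) ? 0xffffffff : 1U << (archive_id - 1);
 */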
/**
 * HSM on-disk attributes stored in a separate xattr.
 */
struct hsm_attrs {
	/** Bitfield for supported data in this structure. For future use. */
	__u32	hsm_compat;

	/** HSM flags, see hsm_flags enum below */
	__u32	hsm_flags;
	/** backend archive id associated with the file */
	__u64	hsm_arch_id;
	/** version associated with the last archiving, if any */
	__u64	hsm_arch_ver;
};
extern void lustre_hsm_swab(struct hsm_attrs *attrs);
enum {
	/** LASTID file has zero OID */
	LUSTRE_FID_LASTID_OID = 0UL,
	/** initial fid id value */
	LUSTRE_FID_INIT_OID  = 1UL
};
/** returns fid object sequence */
static inline __u64 fid_seq(const struct lu_fid *fid)
{
	return fid->f_seq;
}

/** returns fid object id */
static inline __u32 fid_oid(const struct lu_fid *fid)
{
	return fid->f_oid;
}

/** returns fid object version */
static inline __u32 fid_ver(const struct lu_fid *fid)
{
	return fid->f_ver;
}

static inline void fid_zero(struct lu_fid *fid)
{
	memset(fid, 0, sizeof(*fid));
}

static inline __u64 fid_ver_oid(const struct lu_fid *fid)
{
	return ((__u64)fid_ver(fid) << 32 | fid_oid(fid));
}
/**
 * Note that reserved SEQ numbers below 12 will conflict with ldiskfs
 * inodes in the IGIF namespace, so these reserved SEQ numbers can be
 * used for other purposes and not risk collisions with existing inodes.
 *
 * Different FID Format
 * http://arch.lustre.org/index.php?title=Interoperability_fids_zfs#NEW.0
 */
enum fid_seq {
	FID_SEQ_OST_MDT0	= 0,
	FID_SEQ_LLOG		= 1, /* unnamed llogs */
	FID_SEQ_ECHO		= 2,
	FID_SEQ_OST_MDT1	= 3,
	FID_SEQ_OST_MAX		= 9, /* Max MDT count before OST_on_FID */
	FID_SEQ_LLOG_NAME	= 10, /* named llogs */
	FID_SEQ_RSVD		= 11,
	FID_SEQ_IGIF		= 12,
	FID_SEQ_IGIF_MAX	= 0x0ffffffffULL,
	FID_SEQ_IDIF		= 0x100000000ULL,
	FID_SEQ_IDIF_MAX	= 0x1ffffffffULL,
	/* Normal FID sequence starts from this value, i.e. 1<<33 */
	FID_SEQ_START		= 0x200000000ULL,
	/* sequence for local pre-defined FIDs listed in local_oid */
	FID_SEQ_LOCAL_FILE	= 0x200000001ULL,
	FID_SEQ_DOT_LUSTRE	= 0x200000002ULL,
	/* sequence is used for local named objects FIDs generated
	 * by local_object_storage library */
	FID_SEQ_LOCAL_NAME	= 0x200000003ULL,
	/* Because the current FLD only caches the fid sequence (not the
	 * oid) on the client side, if a FID needs to be exposed to
	 * clients, all fids under that sequence must be located on one
	 * MDT. */
	FID_SEQ_SPECIAL		= 0x200000004ULL,
	FID_SEQ_QUOTA		= 0x200000005ULL,
	FID_SEQ_QUOTA_GLB	= 0x200000006ULL,
	FID_SEQ_ROOT		= 0x200000007ULL, /* Located on MDT0 */
	FID_SEQ_LAYOUT_RBTREE	= 0x200000008ULL,
	FID_SEQ_NORMAL		= 0x200000400ULL,
	FID_SEQ_LOV_DEFAULT	= 0xffffffffffffffffULL
};
#define OBIF_OID_MAX_BITS	32
#define OBIF_MAX_OID		(1ULL << OBIF_OID_MAX_BITS)
#define OBIF_OID_MASK		((1ULL << OBIF_OID_MAX_BITS) - 1)
#define IDIF_OID_MAX_BITS	48
#define IDIF_MAX_OID		(1ULL << IDIF_OID_MAX_BITS)
#define IDIF_OID_MASK		((1ULL << IDIF_OID_MAX_BITS) - 1)
/** OID for FID_SEQ_SPECIAL */
enum special_oid {
	/* Big Filesystem Lock to serialize rename operations */
	FID_OID_SPECIAL_BFL	= 1UL,
};

/** OID for FID_SEQ_DOT_LUSTRE */
enum dot_lustre_oid {
	FID_OID_DOT_LUSTRE	= 1UL,
	FID_OID_DOT_LUSTRE_OBF	= 2UL,
	FID_OID_DOT_LUSTRE_LPF	= 3UL,
};

/** OID for FID_SEQ_ROOT */
enum root_oid {
	FID_OID_ROOT		= 1UL,
	FID_OID_ECHO_ROOT	= 2UL,
};
static inline bool fid_seq_is_mdt0(__u64 seq)
{
	return seq == FID_SEQ_OST_MDT0;
}

static inline bool fid_seq_is_mdt(__u64 seq)
{
	return seq == FID_SEQ_OST_MDT0 || seq >= FID_SEQ_NORMAL;
}

static inline bool fid_seq_is_echo(__u64 seq)
{
	return seq == FID_SEQ_ECHO;
}

static inline bool fid_is_echo(const struct lu_fid *fid)
{
	return fid_seq_is_echo(fid_seq(fid));
}

static inline bool fid_seq_is_llog(__u64 seq)
{
	return seq == FID_SEQ_LLOG;
}

static inline bool fid_is_llog(const struct lu_fid *fid)
{
	/* file with OID == 0 is not llog but contains last oid */
	return fid_seq_is_llog(fid_seq(fid)) && fid_oid(fid) > 0;
}

static inline bool fid_seq_is_rsvd(__u64 seq)
{
	return seq > FID_SEQ_OST_MDT0 && seq <= FID_SEQ_RSVD;
}

static inline bool fid_seq_is_special(__u64 seq)
{
	return seq == FID_SEQ_SPECIAL;
}

static inline bool fid_seq_is_local_file(__u64 seq)
{
	return seq == FID_SEQ_LOCAL_FILE ||
	       seq == FID_SEQ_LOCAL_NAME;
}

static inline bool fid_seq_is_root(__u64 seq)
{
	return seq == FID_SEQ_ROOT;
}

static inline bool fid_seq_is_dot(__u64 seq)
{
	return seq == FID_SEQ_DOT_LUSTRE;
}

static inline bool fid_seq_is_default(__u64 seq)
{
	return seq == FID_SEQ_LOV_DEFAULT;
}

static inline bool fid_is_mdt0(const struct lu_fid *fid)
{
	return fid_seq_is_mdt0(fid_seq(fid));
}
static inline void lu_root_fid(struct lu_fid *fid)
{
	fid->f_seq = FID_SEQ_ROOT;
	fid->f_oid = FID_OID_ROOT;
	fid->f_ver = 0;
}

static inline void lu_echo_root_fid(struct lu_fid *fid)
{
	fid->f_seq = FID_SEQ_ROOT;
	fid->f_oid = FID_OID_ECHO_ROOT;
	fid->f_ver = 0;
}
/**
 * Check if a fid is igif or not.
 * \param fid the fid to be tested.
 * \return true if the fid is an igif; otherwise false.
 */
static inline bool fid_seq_is_igif(__u64 seq)
{
	return seq >= FID_SEQ_IGIF && seq <= FID_SEQ_IGIF_MAX;
}

static inline bool fid_is_igif(const struct lu_fid *fid)
{
	return fid_seq_is_igif(fid_seq(fid));
}

/**
 * Check if a fid is idif or not.
 * \param fid the fid to be tested.
 * \return true if the fid is an idif; otherwise false.
 */
static inline bool fid_seq_is_idif(__u64 seq)
{
	return seq >= FID_SEQ_IDIF && seq <= FID_SEQ_IDIF_MAX;
}

static inline bool fid_is_idif(const struct lu_fid *fid)
{
	return fid_seq_is_idif(fid_seq(fid));
}
static inline bool fid_is_local_file(const struct lu_fid *fid)
{
	return fid_seq_is_local_file(fid_seq(fid));
}

static inline bool fid_seq_is_norm(__u64 seq)
{
	return (seq >= FID_SEQ_NORMAL);
}

static inline bool fid_is_norm(const struct lu_fid *fid)
{
	return fid_seq_is_norm(fid_seq(fid));
}

static inline int fid_is_layout_rbtree(const struct lu_fid *fid)
{
	return fid_seq(fid) == FID_SEQ_LAYOUT_RBTREE;
}
/* convert an OST objid into an IDIF FID SEQ number */
static inline __u64 fid_idif_seq(__u64 id, __u32 ost_idx)
{
	return FID_SEQ_IDIF | (ost_idx << 16) | ((id >> 32) & 0xffff);
}

/* convert a packed IDIF FID into an OST objid */
static inline __u64 fid_idif_id(__u64 seq, __u32 oid, __u32 ver)
{
	return ((__u64)ver << 48) | ((seq & 0xffff) << 32) | oid;
}

static inline __u32 idif_ost_idx(__u64 seq)
{
	return (seq >> 16) & 0xffff;
}

/* extract ost index from IDIF FID */
static inline __u32 fid_idif_ost_idx(const struct lu_fid *fid)
{
	return idif_ost_idx(fid_seq(fid));
}
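/*
 * Worked example (illustrative): OST object id 0x123456789a on OST
 * index 2 packs as
 *
 *	f_seq = fid_idif_seq(0x123456789aULL, 2)
 *	      = FID_SEQ_IDIF | (2 << 16) | 0x12 = 0x100020012
 *	f_oid = 0x3456789a	(the low 32 bits of the id)
 *
 * and fid_idif_id(0x100020012, 0x3456789a, 0) reassembles 0x123456789a.
 */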
/* extract OST sequence (group) from a wire ost_id (id/seq) pair */
static inline __u64 ostid_seq(const struct ost_id *ostid)
{
	if (fid_seq_is_mdt0(ostid->oi.oi_seq))
		return FID_SEQ_OST_MDT0;

	if (unlikely(fid_seq_is_default(ostid->oi.oi_seq)))
		return FID_SEQ_LOV_DEFAULT;

	if (fid_is_idif(&ostid->oi_fid))
		return FID_SEQ_OST_MDT0;

	return fid_seq(&ostid->oi_fid);
}

/* extract OST objid from a wire ost_id (id/seq) pair */
static inline __u64 ostid_id(const struct ost_id *ostid)
{
	if (fid_seq_is_mdt0(ostid->oi.oi_seq))
		return ostid->oi.oi_id & IDIF_OID_MASK;

	if (unlikely(fid_seq_is_default(ostid->oi.oi_seq)))
		return ostid->oi.oi_id;

	if (fid_is_idif(&ostid->oi_fid))
		return fid_idif_id(fid_seq(&ostid->oi_fid),
				   fid_oid(&ostid->oi_fid), 0);

	return fid_oid(&ostid->oi_fid);
}
static inline void ostid_set_seq(struct ost_id *oi, __u64 seq)
{
	if (fid_seq_is_mdt0(seq) || fid_seq_is_default(seq)) {
		oi->oi.oi_seq = seq;
	} else {
		oi->oi_fid.f_seq = seq;
		/* Note: if f_oid + f_ver is zero, we need to initialize
		 * it to 1; otherwise ostid_seq will treat this as an
		 * old ostid (oi_seq == 0) */
		if (oi->oi_fid.f_oid == 0 && oi->oi_fid.f_ver == 0)
			oi->oi_fid.f_oid = LUSTRE_FID_INIT_OID;
	}
}

static inline void ostid_set_seq_mdt0(struct ost_id *oi)
{
	ostid_set_seq(oi, FID_SEQ_OST_MDT0);
}

static inline void ostid_set_seq_echo(struct ost_id *oi)
{
	ostid_set_seq(oi, FID_SEQ_ECHO);
}

static inline void ostid_set_seq_llog(struct ost_id *oi)
{
	ostid_set_seq(oi, FID_SEQ_LLOG);
}
/**
 * Note: we need to check oi_seq to decide where to set oi_id,
 * so oi_seq should always be set ahead of oi_id.
 */
static inline void ostid_set_id(struct ost_id *oi, __u64 oid)
{
	if (fid_seq_is_mdt0(oi->oi.oi_seq)) {
		if (oid >= IDIF_MAX_OID) {
			CERROR("Bad "LPU64" to set "DOSTID"\n",
			       oid, POSTID(oi));
			return;
		}
		oi->oi.oi_id = oid;
	} else if (fid_is_idif(&oi->oi_fid)) {
		if (oid >= IDIF_MAX_OID) {
			CERROR("Bad "LPU64" to set "DOSTID"\n",
			       oid, POSTID(oi));
			return;
		}
		oi->oi_fid.f_seq = fid_idif_seq(oid,
						fid_idif_ost_idx(&oi->oi_fid));
		oi->oi_fid.f_oid = oid;
		oi->oi_fid.f_ver = oid >> 48;
	} else {
		if (oid > OBIF_MAX_OID) {
			CERROR("Bad "LPU64" to set "DOSTID"\n",
			       oid, POSTID(oi));
			return;
		}
		oi->oi_fid.f_oid = oid;
	}
}
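/*
 * Minimal usage sketch (illustrative): the sequence must be set before
 * the id, because ostid_set_id() inspects oi_seq to pick the union member:
 *
 *	struct ost_id oi = { { 0 } };
 *
 *	ostid_set_seq_mdt0(&oi);
 *	ostid_set_id(&oi, 42ULL);	// lands in oi.oi.oi_id
 */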
static inline int fid_set_id(struct lu_fid *fid, __u64 oid)
{
	if (unlikely(fid_seq_is_igif(fid->f_seq))) {
		CERROR("bad IGIF, "DFID"\n", PFID(fid));
		return -EBADF;
	}

	if (fid_is_idif(fid)) {
		if (oid >= IDIF_MAX_OID) {
			CERROR("Bad "LPU64" to set "DFID"\n",
			       oid, PFID(fid));
			return -EBADF;
		}
		fid->f_seq = fid_idif_seq(oid, fid_idif_ost_idx(fid));
		fid->f_oid = oid;
		fid->f_ver = oid >> 48;
	} else {
		if (oid > OBIF_MAX_OID) {
			CERROR("Bad "LPU64" to set "DFID"\n",
			       oid, PFID(fid));
			return -EBADF;
		}
		fid->f_oid = oid;
	}

	return 0;
}
/**
 * Unpack an OST object id/seq (group) into a FID. This is needed for
 * converting all obdo, lmm, lsm, etc. 64-bit id/seq pairs into proper
 * FIDs. Note that if an id/seq is already in FID/IDIF format it will
 * be passed through unchanged. Only legacy OST objects in "group 0"
 * will be mapped into the IDIF namespace so that they can fit into the
 * struct lu_fid fields without loss. For reference see:
 * http://arch.lustre.org/index.php?title=Interoperability_fids_zfs
 */
static inline int ostid_to_fid(struct lu_fid *fid, const struct ost_id *ostid,
			       __u32 ost_idx)
{
	__u64 seq = ostid_seq(ostid);

	if (ost_idx > 0xffff) {
		CERROR("bad ost_idx, "DOSTID" ost_idx:%u\n", POSTID(ostid),
		       ost_idx);
		return -EBADF;
	}

	if (fid_seq_is_mdt0(seq)) {
		__u64 oid = ostid_id(ostid);

		/* This is a "legacy" (old 1.x/2.early) OST object in "group 0"
		 * that we map into the IDIF namespace. It allows up to 2^48
		 * objects per OST, as this is the object namespace that has
		 * been in production for years. This can handle create rates
		 * of 1M objects/s/OST for 9 years, or combinations thereof. */
		if (oid >= IDIF_MAX_OID) {
			CERROR("bad MDT0 id(1), "DOSTID" ost_idx:%u\n",
			       POSTID(ostid), ost_idx);
			return -EBADF;
		}
		fid->f_seq = fid_idif_seq(oid, ost_idx);
		/* truncate to 32 bits by assignment */
		fid->f_oid = oid;
		/* in theory, not currently used */
		fid->f_ver = oid >> 48;
	} else if (likely(!fid_seq_is_default(seq)))
		/* if (fid_seq_is_idif(seq) || fid_seq_is_norm(seq)) */ {
		/* This is either an IDIF object, which identifies objects
		 * across all OSTs, or a regular FID. The IDIF namespace
		 * maps legacy OST objects into the FID namespace. In both
		 * cases, we just pass the FID through, no conversion
		 * needed. */
		if (ostid->oi_fid.f_ver != 0) {
			CERROR("bad MDT0 id(2), "DOSTID" ost_idx:%u\n",
			       POSTID(ostid), ost_idx);
			return -EBADF;
		}
		*fid = ostid->oi_fid;
	}

	return 0;
}
/* pack any OST FID into an ostid (id/seq) for the wire/disk */
static inline int fid_to_ostid(const struct lu_fid *fid, struct ost_id *ostid)
{
	if (unlikely(fid_seq_is_igif(fid->f_seq))) {
		CERROR("bad IGIF, "DFID"\n", PFID(fid));
		return -EBADF;
	}

	if (fid_is_idif(fid)) {
		ostid_set_seq_mdt0(ostid);
		ostid_set_id(ostid, fid_idif_id(fid_seq(fid), fid_oid(fid),
						fid_ver(fid)));
	} else {
		ostid->oi_fid = *fid;
	}

	return 0;
}
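/*
 * Round-trip sketch (illustrative): a legacy "group 0" object id is
 * widened into an IDIF FID and packed back without loss:
 *
 *	struct ost_id oi = { { 0 } };
 *	struct lu_fid fid;
 *
 *	ostid_set_seq_mdt0(&oi);
 *	ostid_set_id(&oi, 0x123456789aULL);
 *	ostid_to_fid(&fid, &oi, 2);	// IDIF fid [0x100020012:0x3456789a:0x0]
 *	fid_to_ostid(&fid, &oi);	// recovers id 0x123456789a in group 0
 */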
/* Check whether the fid is for LAST_ID */
static inline bool fid_is_last_id(const struct lu_fid *fid)
{
	return fid_oid(fid) == 0;
}
/**
 * Get inode number from an igif.
 * \param fid an igif to get inode number from.
 * \return inode number for the igif.
 */
static inline ino_t lu_igif_ino(const struct lu_fid *fid)
{
	return fid_seq(fid);
}

extern void lustre_swab_ost_id(struct ost_id *oid);

/**
 * Get inode generation from an igif.
 * \param fid an igif to get inode generation from.
 * \return inode generation for the igif.
 */
static inline __u32 lu_igif_gen(const struct lu_fid *fid)
{
	return fid_oid(fid);
}

/**
 * Build igif from the inode number/generation.
 */
static inline void lu_igif_build(struct lu_fid *fid, __u32 ino, __u32 gen)
{
	fid->f_seq = ino;
	fid->f_oid = gen;
	fid->f_ver = 0;
}
/*
 * Fids are transmitted across network (in the sender byte-ordering),
 * and stored on disk in big-endian order.
 */
static inline void fid_cpu_to_le(struct lu_fid *dst, const struct lu_fid *src)
{
	dst->f_seq = cpu_to_le64(fid_seq(src));
	dst->f_oid = cpu_to_le32(fid_oid(src));
	dst->f_ver = cpu_to_le32(fid_ver(src));
}

static inline void fid_le_to_cpu(struct lu_fid *dst, const struct lu_fid *src)
{
	dst->f_seq = le64_to_cpu(fid_seq(src));
	dst->f_oid = le32_to_cpu(fid_oid(src));
	dst->f_ver = le32_to_cpu(fid_ver(src));
}

static inline void fid_cpu_to_be(struct lu_fid *dst, const struct lu_fid *src)
{
	dst->f_seq = cpu_to_be64(fid_seq(src));
	dst->f_oid = cpu_to_be32(fid_oid(src));
	dst->f_ver = cpu_to_be32(fid_ver(src));
}

static inline void fid_be_to_cpu(struct lu_fid *dst, const struct lu_fid *src)
{
	dst->f_seq = be64_to_cpu(fid_seq(src));
	dst->f_oid = be32_to_cpu(fid_oid(src));
	dst->f_ver = be32_to_cpu(fid_ver(src));
}

static inline bool fid_is_sane(const struct lu_fid *fid)
{
	return fid != NULL &&
	       ((fid_seq(fid) >= FID_SEQ_START && fid_ver(fid) == 0) ||
		fid_is_igif(fid) || fid_is_idif(fid) ||
		fid_seq_is_rsvd(fid_seq(fid)));
}
extern void lustre_swab_lu_fid(struct lu_fid *fid);
extern void lustre_swab_lu_seq_range(struct lu_seq_range *range);

static inline bool lu_fid_eq(const struct lu_fid *f0, const struct lu_fid *f1)
{
	return memcmp(f0, f1, sizeof(*f0)) == 0;
}

#define __diff_normalize(val0, val1)				\
({								\
	typeof(val0) __val0 = (val0);				\
	typeof(val1) __val1 = (val1);				\
								\
	(__val0 == __val1 ? 0 : __val0 > __val1 ? +1 : -1);	\
})
static inline int lu_fid_cmp(const struct lu_fid *f0,
			     const struct lu_fid *f1)
{
	return
		__diff_normalize(fid_seq(f0), fid_seq(f1)) ?:
		__diff_normalize(fid_oid(f0), fid_oid(f1)) ?:
		__diff_normalize(fid_ver(f0), fid_ver(f1));
}
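/*
 * Illustrative note: lu_fid_cmp() orders FIDs lexicographically by
 * (seq, oid, ver); the ?: chain returns the first non-zero comparison,
 * so e.g. [0x200000400:0x1:0x0] sorts before [0x200000400:0x2:0x0].
 */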
static inline void ostid_cpu_to_le(const struct ost_id *src_oi,
				   struct ost_id *dst_oi)
{
	if (fid_seq_is_mdt0(src_oi->oi.oi_seq)) {
		dst_oi->oi.oi_id = cpu_to_le64(src_oi->oi.oi_id);
		dst_oi->oi.oi_seq = cpu_to_le64(src_oi->oi.oi_seq);
	} else {
		fid_cpu_to_le(&dst_oi->oi_fid, &src_oi->oi_fid);
	}
}

static inline void ostid_le_to_cpu(const struct ost_id *src_oi,
				   struct ost_id *dst_oi)
{
	if (fid_seq_is_mdt0(src_oi->oi.oi_seq)) {
		dst_oi->oi.oi_id = le64_to_cpu(src_oi->oi.oi_id);
		dst_oi->oi.oi_seq = le64_to_cpu(src_oi->oi.oi_seq);
	} else {
		fid_le_to_cpu(&dst_oi->oi_fid, &src_oi->oi_fid);
	}
}
struct lu_orphan_rec {
	/* The MDT-object's FID referenced by the orphan OST-object */
	struct lu_fid	lor_fid;
	__u32		lor_uid;
	__u32		lor_gid;
};

struct lu_orphan_ent {
	/* The orphan OST-object's FID */
	struct lu_fid		loe_key;
	struct lu_orphan_rec	loe_rec;
};
void lustre_swab_orphan_ent(struct lu_orphan_ent *ent);
/** \defgroup lu_dir lu_dir */

/**
 * Enumeration of possible directory entry attributes.
 *
 * Attributes follow directory entry header in the order they appear in this
 * enumeration.
 */
enum lu_dirent_attrs {
	LUDA_FID		= 0x0001,
	LUDA_TYPE		= 0x0002,
	LUDA_64BITHASH		= 0x0004,

	/* The following attrs are used for MDT internal only,
	 * not visible to client */

	/* Verify the dirent consistency */
	LUDA_VERIFY		= 0x8000,
	/* Only check but not repair the dirent inconsistency */
	LUDA_VERIFY_DRYRUN	= 0x4000,
	/* The dirent has been repaired, or is to be repaired (dryrun). */
	LUDA_REPAIR		= 0x2000,
	/* The system is upgraded, has been or is to be repaired (dryrun). */
	LUDA_UPGRADE		= 0x1000,
	/* Ignore this record, go to next directly. */
	LUDA_IGNORE		= 0x0800,
};

#define LU_DIRENT_ATTRS_MASK	0xf800
/**
 * Layout of readdir pages, as transmitted on wire.
 */
struct lu_dirent {
	/** valid if LUDA_FID is set. */
	struct lu_fid lde_fid;
	/** a unique entry identifier: a hash or an offset. */
	__u64 lde_hash;
	/** total record length, including all attributes. */
	__u16 lde_reclen;
	/** name length */
	__u16 lde_namelen;
	/** optional variable size attributes following this entry.
	 * taken from enum lu_dirent_attrs.
	 */
	__u32 lde_attrs;
	/** name is followed by the attributes indicated in ->ldp_attrs, in
	 * their natural order. After the last attribute, padding bytes are
	 * added to make ->lde_reclen a multiple of 8.
	 */
	char lde_name[0];
};
/*
 * Definitions of optional directory entry attributes formats.
 *
 * Individual attributes do not have their length encoded in a generic way. It
 * is assumed that consumer of an attribute knows its format. This means that
 * it is impossible to skip over an unknown attribute, except by skipping over
 * all remaining attributes (by using ->lde_reclen), which is not too
 * constraining, because new server versions will append new attributes at
 * the end of an entry.
 */

/**
 * Fid directory attribute: a fid of an object referenced by the entry. This
 * will be almost always requested by the client and supplied by the server.
 *
 * Aligned to 8 bytes.
 */
/* To have compatibility with 1.8, let's have fid in lu_dirent struct. */

/**
 * File type.
 *
 * Aligned to 2 bytes.
 */
struct luda_type {
	__u16 lt_type;
};
struct lu_dirpage {
	__u64			ldp_hash_start;
	__u64			ldp_hash_end;
	__u32			ldp_flags;
	__u32			ldp_pad0;
	struct lu_dirent	ldp_entries[0];
};

enum lu_dirpage_flags {
	/**
	 * dirpage contains no entry.
	 */
	LDF_EMPTY	= 1 << 0,
	/**
	 * last entry's lde_hash equals ldp_hash_end.
	 */
	LDF_COLLIDE	= 1 << 1
};
static inline struct lu_dirent *lu_dirent_start(struct lu_dirpage *dp)
{
	if (le32_to_cpu(dp->ldp_flags) & LDF_EMPTY)
		return NULL;
	else
		return dp->ldp_entries;
}

static inline struct lu_dirent *lu_dirent_next(struct lu_dirent *ent)
{
	struct lu_dirent *next;

	if (le16_to_cpu(ent->lde_reclen) != 0)
		next = ((void *)ent) + le16_to_cpu(ent->lde_reclen);
	else
		next = NULL;

	return next;
}
static inline size_t lu_dirent_calc_size(size_t namelen, __u16 attr)
{
	size_t size;

	if (attr & LUDA_TYPE) {
		const size_t align = sizeof(struct luda_type) - 1;

		size = (sizeof(struct lu_dirent) + namelen + align) & ~align;
		size += sizeof(struct luda_type);
	} else {
		size = sizeof(struct lu_dirent) + namelen;
	}

	return (size + 7) & ~7;
}
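/*
 * Worked example (assuming a 32-byte struct lu_dirent and the 2-byte
 * struct luda_type above): namelen = 5 with LUDA_TYPE set gives
 * (32 + 5 + 1) & ~1 = 38, plus 2 for the type attribute = 40, which the
 * final 8-byte rounding leaves at lde_reclen = 40.
 */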
#define MDS_DIR_END_OFF 0xfffffffffffffffeULL

/**
 * MDS_READPAGE page size
 *
 * This is the directory page size packed in MDS_READPAGE RPC.
 * It's different from PAGE_CACHE_SIZE because the client needs to
 * access the struct lu_dirpage header packed at the beginning of
 * the "page"; without this there isn't any way to know where the
 * lu_dirpage header is if client and server PAGE_CACHE_SIZE differ.
 */
#define LU_PAGE_SHIFT 12
#define LU_PAGE_SIZE  (1UL << LU_PAGE_SHIFT)
#define LU_PAGE_MASK  (~(LU_PAGE_SIZE - 1))

#define LU_PAGE_COUNT (1 << (PAGE_CACHE_SHIFT - LU_PAGE_SHIFT))
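/*
 * E.g. (illustrative): with 64KiB server pages (PAGE_CACHE_SHIFT == 16),
 * LU_PAGE_COUNT == 1 << (16 - 12) == 16, i.e. one cache page carries 16
 * independent 4KiB lu_dirpage units.
 */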
struct lustre_handle {
	__u64 cookie;
};

#define DEAD_HANDLE_MAGIC 0xdeadbeefcafebabeULL

static inline bool lustre_handle_is_used(const struct lustre_handle *lh)
{
	return lh->cookie != 0;
}

static inline bool lustre_handle_equal(const struct lustre_handle *lh1,
				       const struct lustre_handle *lh2)
{
	return lh1->cookie == lh2->cookie;
}

static inline void lustre_handle_copy(struct lustre_handle *tgt,
				      const struct lustre_handle *src)
{
	tgt->cookie = src->cookie;
}
/* flags for lm_flags */
#define MSGHDR_AT_SUPPORT	0x1
#define MSGHDR_CKSUM_INCOMPAT18	0x2

#define lustre_msg lustre_msg_v2
/* we depend on this structure to be 8-byte aligned */
/* this type is only endian-adjusted in lustre_unpack_msg() */
struct lustre_msg_v2 {
	__u32 lm_bufcount;
	__u32 lm_secflvr;
	__u32 lm_magic;
	__u32 lm_repsize;
	__u32 lm_cksum;
	__u32 lm_flags;
	__u32 lm_padding_2;
	__u32 lm_padding_3;
	__u32 lm_buflens[0];
};
/* without gss, ptlrpc_body is put at the first buffer. */
#define PTLRPC_NUM_VERSIONS	4
struct ptlrpc_body_v3 {
	struct lustre_handle pb_handle;
	__u32 pb_type;
	__u32 pb_version;
	__u32 pb_opc;
	__u32 pb_status;
	__u64 pb_last_xid; /* highest replied XID without lower unreplied XID */
	__u16 pb_tag;      /* virtual slot idx for multiple modifying RPCs */
	__u16 pb_padding0;
	__u32 pb_padding1;
	__u64 pb_last_committed;
	__u64 pb_transno;
	__u32 pb_flags;
	__u32 pb_op_flags;
	__u32 pb_conn_cnt;
	__u32 pb_timeout;  /* for req, the deadline, for rep, the service est */
	__u32 pb_service_time; /* for rep, actual service time */
	__u32 pb_limit;
	__u64 pb_slv;
	/* VBR: pre-versions */
	__u64 pb_pre_versions[PTLRPC_NUM_VERSIONS];
	/* padding for future needs */
	__u64 pb_padding[4];
	char  pb_jobid[LUSTRE_JOBID_SIZE];
};
#define ptlrpc_body ptlrpc_body_v3
struct ptlrpc_body_v2 {
	struct lustre_handle pb_handle;
	__u32 pb_type;
	__u32 pb_version;
	__u32 pb_opc;
	__u32 pb_status;
	__u64 pb_last_xid; /* highest replied XID without lower unreplied XID */
	__u16 pb_tag;      /* virtual slot idx for multiple modifying RPCs */
	__u16 pb_padding0;
	__u32 pb_padding1;
	__u64 pb_last_committed;
	__u64 pb_transno;
	__u32 pb_flags;
	__u32 pb_op_flags;
	__u32 pb_conn_cnt;
	__u32 pb_timeout;  /* for req, the deadline, for rep, the service est */
	__u32 pb_service_time; /* for rep, actual service time, also used for
				* net_latency of req */
	__u32 pb_limit;
	__u64 pb_slv;
	/* VBR: pre-versions */
	__u64 pb_pre_versions[PTLRPC_NUM_VERSIONS];
	/* padding for future needs */
	__u64 pb_padding[4];
};

extern void lustre_swab_ptlrpc_body(struct ptlrpc_body *pb);
/* message body offset for lustre_msg_v2 */
/* ptlrpc body offset in all request/reply messages */
#define MSG_PTLRPC_BODY_OFF	0

/* normal request/reply message record offset */
#define REQ_REC_OFF		1
#define REPLY_REC_OFF		1

/* ldlm request message body offset */
#define DLM_LOCKREQ_OFF		1 /* lockreq offset */
#define DLM_REQ_REC_OFF		2 /* normal dlm request record offset */

/* ldlm intent lock message body offset */
#define DLM_INTENT_IT_OFF	2 /* intent lock it offset */
#define DLM_INTENT_REC_OFF	3 /* intent lock record offset */

/* ldlm reply message body offset */
#define DLM_LOCKREPLY_OFF	1 /* lockrep offset */
#define DLM_REPLY_REC_OFF	2 /* reply record offset */

/** only use in req->rq_{req,rep}_swab_mask */
#define MSG_PTLRPC_HEADER_OFF	31
/* Flags that are operation-specific go in the top 16 bits. */
#define MSG_OP_FLAG_MASK	0xffff0000
#define MSG_OP_FLAG_SHIFT	16

/* Flags that apply to all requests are in the bottom 16 bits */
#define MSG_GEN_FLAG_MASK	0x0000ffff
#define MSG_LAST_REPLAY		0x0001
#define MSG_RESENT		0x0002
#define MSG_REPLAY		0x0004
/* #define MSG_AT_SUPPORT	0x0008
 * This was used in early prototypes of adaptive timeouts, and while there
 * shouldn't be any users of that code there also isn't a need for using
 * this bit. Defer usage until at least 1.10 to avoid potential conflict. */
#define MSG_DELAY_REPLAY	0x0010
#define MSG_VERSION_REPLAY	0x0020
#define MSG_REQ_REPLAY_DONE	0x0040
#define MSG_LOCK_REPLAY_DONE	0x0080
/*
 * Flags for all connect opcodes (MDS_CONNECT, OST_CONNECT)
 */
#define MSG_CONNECT_RECOVERING	0x00000001
#define MSG_CONNECT_RECONNECT	0x00000002
#define MSG_CONNECT_REPLAYABLE	0x00000004
//#define MSG_CONNECT_PEER	0x8
#define MSG_CONNECT_LIBCLIENT	0x00000010
#define MSG_CONNECT_INITIAL	0x00000020
#define MSG_CONNECT_ASYNC	0x00000040
#define MSG_CONNECT_NEXT_VER	0x00000080 /* use next version of lustre_msg */
#define MSG_CONNECT_TRANSNO	0x00000100 /* report transno */
/* Connect flags */
#define OBD_CONNECT_RDONLY		0x1ULL /*client has read-only access*/
#define OBD_CONNECT_INDEX		0x2ULL /*connect specific LOV idx */
#define OBD_CONNECT_MDS			0x4ULL /*connect from MDT to OST */
#define OBD_CONNECT_GRANT		0x8ULL /*OSC gets grant at connect */
#define OBD_CONNECT_SRVLOCK		0x10ULL /*server takes locks for cli */
#define OBD_CONNECT_VERSION		0x20ULL /*Lustre versions in ocd */
#define OBD_CONNECT_REQPORTAL		0x40ULL /*Separate non-IO req portal */
#define OBD_CONNECT_ACL			0x80ULL /*access control lists */
#define OBD_CONNECT_XATTR		0x100ULL /*client use extended attr */
#define OBD_CONNECT_CROW		0x200ULL /*MDS+OST create obj on write*/
#define OBD_CONNECT_TRUNCLOCK		0x400ULL /*locks on server for punch */
#define OBD_CONNECT_TRANSNO		0x800ULL /*replay sends init transno */
#define OBD_CONNECT_IBITS		0x1000ULL /*support for inodebits locks*/
#define OBD_CONNECT_JOIN		0x2000ULL /*files can be concatenated.
						   *We do not support JOIN FILE
						   *anymore; reserve this flag
						   *just to prevent such a bit
						   *from being reused. */
#define OBD_CONNECT_ATTRFID		0x4000ULL /*Server can GetAttr By Fid*/
#define OBD_CONNECT_NODEVOH		0x8000ULL /*No open hndl on specl nodes*/
#define OBD_CONNECT_RMT_CLIENT		0x10000ULL /*Remote client */
#define OBD_CONNECT_RMT_CLIENT_FORCE	0x20000ULL /*Remote client by force */
#define OBD_CONNECT_BRW_SIZE		0x40000ULL /*Max bytes per rpc */
#define OBD_CONNECT_QUOTA64		0x80000ULL /*Not used since 2.4 */
#define OBD_CONNECT_MDS_CAPA		0x100000ULL /*MDS capability */
#define OBD_CONNECT_OSS_CAPA		0x200000ULL /*OSS capability */
#define OBD_CONNECT_CANCELSET		0x400000ULL /*Early batched cancels. */
#define OBD_CONNECT_SOM			0x800000ULL /*Size on MDS */
#define OBD_CONNECT_AT			0x1000000ULL /*client uses AT */
#define OBD_CONNECT_LRU_RESIZE		0x2000000ULL /*LRU resize feature. */
#define OBD_CONNECT_MDS_MDS		0x4000000ULL /*MDS-MDS connection */
#define OBD_CONNECT_REAL		0x8000000ULL /*real connection */
#define OBD_CONNECT_CHANGE_QS		0x10000000ULL /*Not used since 2.4 */
#define OBD_CONNECT_CKSUM		0x20000000ULL /*support several cksum algos*/
#define OBD_CONNECT_FID			0x40000000ULL /*FID is supported by server */
#define OBD_CONNECT_VBR			0x80000000ULL /*version based recovery */
#define OBD_CONNECT_LOV_V3		0x100000000ULL /*client supports LOV v3 EA */
#define OBD_CONNECT_GRANT_SHRINK	0x200000000ULL /* support grant shrink */
#define OBD_CONNECT_SKIP_ORPHAN		0x400000000ULL /* don't reuse orphan objids */
#define OBD_CONNECT_MAX_EASIZE		0x800000000ULL /* preserved for large EA */
#define OBD_CONNECT_FULL20		0x1000000000ULL /* it is 2.0 client */
#define OBD_CONNECT_LAYOUTLOCK		0x2000000000ULL /* client uses layout lock */
#define OBD_CONNECT_64BITHASH		0x4000000000ULL /* client supports 64-bit
							 * dir hash */
#define OBD_CONNECT_MAXBYTES		0x8000000000ULL /* max stripe size */
#define OBD_CONNECT_IMP_RECOV		0x10000000000ULL /* imp recovery support */
#define OBD_CONNECT_JOBSTATS		0x20000000000ULL /* jobid in ptlrpc_body */
#define OBD_CONNECT_UMASK		0x40000000000ULL /* create uses client umask */
#define OBD_CONNECT_EINPROGRESS		0x80000000000ULL /* client handles -EINPROGRESS
							  * RPC error properly */
#define OBD_CONNECT_GRANT_PARAM		0x100000000000ULL /* extra grant params used for
							   * finer space reservation */
#define OBD_CONNECT_FLOCK_OWNER		0x200000000000ULL /* for the fixed 1.8
							   * policy and 2.x server */
#define OBD_CONNECT_LVB_TYPE		0x400000000000ULL /* variable type of LVB */
#define OBD_CONNECT_NANOSEC_TIME	0x800000000000ULL /* nanosecond timestamps */
#define OBD_CONNECT_LIGHTWEIGHT		0x1000000000000ULL /* lightweight connection */
#define OBD_CONNECT_SHORTIO		0x2000000000000ULL /* short io */
#define OBD_CONNECT_PINGLESS		0x4000000000000ULL /* pings not required */
#define OBD_CONNECT_FLOCK_DEAD		0x8000000000000ULL /* improved flock deadlock
							    * detection */
#define OBD_CONNECT_DISP_STRIPE		0x10000000000000ULL /* create stripe disposition*/
#define OBD_CONNECT_OPEN_BY_FID		0x20000000000000ULL /* open by fid won't pack
							     * name in request */
#define OBD_CONNECT_LFSCK		0x40000000000000ULL /* support online LFSCK */
#define OBD_CONNECT_UNLINK_CLOSE	0x100000000000000ULL /* close file in unlink */
#define OBD_CONNECT_MULTIMODRPCS	0x200000000000000ULL /* support multiple modify
							      * RPCs in parallel */
#define OBD_CONNECT_DIR_STRIPE		0x400000000000000ULL /* striped DNE dir */
/*
 * Please DO NOT add flag values here before first ensuring that this same
 * flag value is not in use on some other branch. Please clear any such
 * changes with senior engineers before starting to use a new flag. Then,
 * submit a small patch against EVERY branch that ONLY adds the new flag,
 * updates obd_connect_names[] for lprocfs_rd_connect_flags(), adds the
 * flag to check_obd_connect_data(), and updates wiretests accordingly, so it
 * can be approved and landed easily to reserve the flag for future use. */

/* The MNE_SWAB flag is overloading the MDS_MDS bit only for the MGS
 * connection. It is a temporary bug fix for Imperative Recovery interop
 * between 2.2 and 2.3 x86/ppc nodes, and can be removed when interop for
 * 2.2 clients/servers is no longer needed. LU-1252/LU-1644. */
#define OBD_CONNECT_MNE_SWAB	OBD_CONNECT_MDS_MDS
#define OCD_HAS_FLAG(ocd, flg) \
	(!!((ocd)->ocd_connect_flags & OBD_CONNECT_##flg))
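/*
 * Usage sketch (the flag token is pasted onto OBD_CONNECT_, so it is
 * written bare; "ocd" is any struct obd_connect_data):
 *
 *	if (OCD_HAS_FLAG(ocd, GRANT_SHRINK))
 *		... the peer supports grant shrinking ...
 */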
#ifdef HAVE_LRU_RESIZE_SUPPORT
#define LRU_RESIZE_CONNECT_FLAG OBD_CONNECT_LRU_RESIZE
#else
#define LRU_RESIZE_CONNECT_FLAG 0
#endif
#define MDT_CONNECT_SUPPORTED (OBD_CONNECT_RDONLY | OBD_CONNECT_VERSION | \
				OBD_CONNECT_ACL | OBD_CONNECT_XATTR | \
				OBD_CONNECT_IBITS | \
				OBD_CONNECT_NODEVOH | OBD_CONNECT_ATTRFID | \
				OBD_CONNECT_CANCELSET | OBD_CONNECT_AT | \
				OBD_CONNECT_RMT_CLIENT | \
				OBD_CONNECT_RMT_CLIENT_FORCE | \
				OBD_CONNECT_BRW_SIZE | OBD_CONNECT_MDS_CAPA | \
				OBD_CONNECT_OSS_CAPA | OBD_CONNECT_MDS_MDS | \
				OBD_CONNECT_FID | LRU_RESIZE_CONNECT_FLAG | \
				OBD_CONNECT_VBR | OBD_CONNECT_LOV_V3 | \
				OBD_CONNECT_SOM | OBD_CONNECT_FULL20 | \
				OBD_CONNECT_64BITHASH | OBD_CONNECT_JOBSTATS | \
				OBD_CONNECT_EINPROGRESS | \
				OBD_CONNECT_LIGHTWEIGHT | OBD_CONNECT_UMASK | \
				OBD_CONNECT_LVB_TYPE | OBD_CONNECT_LAYOUTLOCK |\
				OBD_CONNECT_PINGLESS | OBD_CONNECT_MAX_EASIZE |\
				OBD_CONNECT_FLOCK_DEAD | \
				OBD_CONNECT_DISP_STRIPE | OBD_CONNECT_LFSCK | \
				OBD_CONNECT_OPEN_BY_FID | \
				OBD_CONNECT_DIR_STRIPE)

#define OST_CONNECT_SUPPORTED (OBD_CONNECT_SRVLOCK | OBD_CONNECT_GRANT | \
				OBD_CONNECT_REQPORTAL | OBD_CONNECT_VERSION | \
				OBD_CONNECT_TRUNCLOCK | OBD_CONNECT_INDEX | \
				OBD_CONNECT_BRW_SIZE | OBD_CONNECT_OSS_CAPA | \
				OBD_CONNECT_CANCELSET | OBD_CONNECT_AT | \
				LRU_RESIZE_CONNECT_FLAG | OBD_CONNECT_CKSUM | \
				OBD_CONNECT_RMT_CLIENT | \
				OBD_CONNECT_RMT_CLIENT_FORCE | OBD_CONNECT_VBR | \
				OBD_CONNECT_MDS | OBD_CONNECT_SKIP_ORPHAN | \
				OBD_CONNECT_GRANT_SHRINK | OBD_CONNECT_FULL20 | \
				OBD_CONNECT_64BITHASH | OBD_CONNECT_MAXBYTES | \
				OBD_CONNECT_MAX_EASIZE | \
				OBD_CONNECT_EINPROGRESS | \
				OBD_CONNECT_JOBSTATS | \
				OBD_CONNECT_LIGHTWEIGHT | OBD_CONNECT_LVB_TYPE |\
				OBD_CONNECT_LAYOUTLOCK | OBD_CONNECT_FID | \
				OBD_CONNECT_PINGLESS | OBD_CONNECT_LFSCK)
#define ECHO_CONNECT_SUPPORTED (0)
#define MGS_CONNECT_SUPPORTED  (OBD_CONNECT_VERSION | OBD_CONNECT_AT | \
				OBD_CONNECT_FULL20 | OBD_CONNECT_IMP_RECOV | \
				OBD_CONNECT_MNE_SWAB | OBD_CONNECT_PINGLESS)

/* Features required for this version of the client to work with server */
#define CLIENT_CONNECT_MDT_REQD (OBD_CONNECT_IBITS | OBD_CONNECT_FID | \
				 OBD_CONNECT_FULL20)
/* This structure is used for both request and reply.
 *
 * If we eventually have separate connect data for different types, which we
 * almost certainly will, then perhaps we stick a union in here. */
struct obd_connect_data {
	__u64 ocd_connect_flags; /* OBD_CONNECT_* per above */
	__u32 ocd_version;	 /* lustre release version number */
	__u32 ocd_grant;	 /* initial cache grant amount (bytes) */
	__u32 ocd_index;	 /* LOV index to connect to */
	__u32 ocd_brw_size;	 /* Maximum BRW size in bytes */
	__u64 ocd_ibits_known;	 /* inode bits this client understands */
	__u8  ocd_blocksize;	 /* log2 of the backend filesystem blocksize */
	__u8  ocd_inodespace;	 /* log2 of the per-inode space consumption */
	__u16 ocd_grant_extent;	 /* per-extent grant overhead, in 1K blocks */
	__u32 ocd_unused;	 /* also fix lustre_swab_connect */
	__u64 ocd_transno;	 /* first transno from client to be replayed */
	__u32 ocd_group;	 /* MDS group on OST */
	__u32 ocd_cksum_types;	 /* supported checksum algorithms */
	__u32 ocd_max_easize;	 /* How big LOV EA can be on MDS */
	__u32 ocd_instance;	 /* instance # of this target */
	__u64 ocd_maxbytes;	 /* Maximum stripe size in bytes */
	/* Fields after ocd_maxbytes are only accessible by the receiver
	 * if the corresponding flag in ocd_connect_flags is set. Accessing
	 * any field after ocd_maxbytes on the receiver without a valid flag
	 * may result in out-of-bound memory access and kernel oops. */
	__u16 ocd_maxmodrpcs;	 /* Maximum modify RPCs in parallel */
	__u16 padding0;		 /* added 2.1.0. also fix lustre_swab_connect */
	__u32 padding1;		 /* added 2.1.0. also fix lustre_swab_connect */
	__u64 padding2;		 /* added 2.1.0. also fix lustre_swab_connect */
	__u64 padding3;		 /* added 2.1.0. also fix lustre_swab_connect */
	__u64 padding4;		 /* added 2.1.0. also fix lustre_swab_connect */
	__u64 padding5;		 /* added 2.1.0. also fix lustre_swab_connect */
	__u64 padding6;		 /* added 2.1.0. also fix lustre_swab_connect */
	__u64 padding7;		 /* added 2.1.0. also fix lustre_swab_connect */
	__u64 padding8;		 /* added 2.1.0. also fix lustre_swab_connect */
	__u64 padding9;		 /* added 2.1.0. also fix lustre_swab_connect */
	__u64 paddingA;		 /* added 2.1.0. also fix lustre_swab_connect */
	__u64 paddingB;		 /* added 2.1.0. also fix lustre_swab_connect */
	__u64 paddingC;		 /* added 2.1.0. also fix lustre_swab_connect */
	__u64 paddingD;		 /* added 2.1.0. also fix lustre_swab_connect */
	__u64 paddingE;		 /* added 2.1.0. also fix lustre_swab_connect */
	__u64 paddingF;		 /* added 2.1.0. also fix lustre_swab_connect */
};
/*
 * Please DO NOT use any fields here before first ensuring that this same
 * field is not in use on some other branch. Please clear any such changes
 * with senior engineers before starting to use a new field. Then, submit
 * a small patch against EVERY branch that ONLY adds the new field along with
 * the matching OBD_CONNECT flag, so that can be approved and landed easily to
 * reserve the flag for future use. */
extern void lustre_swab_connect(struct obd_connect_data *ocd);

/*
 * Supported checksum algorithms. Up to 32 checksum types are supported.
 * (32-bit mask stored in obd_connect_data::ocd_cksum_types)
 * Please update DECLARE_CKSUM_NAME/OBD_CKSUM_ALL in obd.h when adding a new
 * algorithm and also the OBD_FL_CKSUM* flags.
 */
typedef enum {
	OBD_CKSUM_CRC32	 = 0x00000001,
	OBD_CKSUM_ADLER	 = 0x00000002,
	OBD_CKSUM_CRC32C = 0x00000004,
} cksum_type_t;
/*
 *   OST requests: OBDO & OBD request records
 */

/* opcodes */
typedef enum {
	OST_REPLY	=  0,	/* reply ? */
	OST_GETATTR	=  1,
	OST_SETATTR	=  2,
	OST_READ	=  3,
	OST_WRITE	=  4,
	OST_CREATE	=  5,
	OST_DESTROY	=  6,
	OST_GET_INFO	=  7,
	OST_CONNECT	=  8,
	OST_DISCONNECT	=  9,
	OST_PUNCH	= 10,
	OST_OPEN	= 11,
	OST_CLOSE	= 12,
	OST_STATFS	= 13,
	OST_SYNC	= 16,
	OST_SET_INFO	= 17,
	OST_QUOTACHECK	= 18,
	OST_QUOTACTL	= 19,
	OST_QUOTA_ADJUST_QUNIT = 20, /* not used since 2.4 */
	OST_LAST_OPC
} ost_cmd_t;
#define OST_FIRST_OPC  OST_REPLY
enum obdo_flags {
	OBD_FL_INLINEDATA	= 0x00000001,
	OBD_FL_OBDMDEXISTS	= 0x00000002,
	OBD_FL_DELORPHAN	= 0x00000004, /* if set in o_flags delete orphans */
	OBD_FL_NORPC		= 0x00000008, /* set in o_flags do in OSC not OST */
	OBD_FL_IDONLY		= 0x00000010, /* set in o_flags only adjust obj id*/
	OBD_FL_RECREATE_OBJS	= 0x00000020, /* recreate missing obj */
	OBD_FL_DEBUG_CHECK	= 0x00000040, /* echo client/server debug check */
	OBD_FL_NO_USRQUOTA	= 0x00000100, /* the object's owner is over quota */
	OBD_FL_NO_GRPQUOTA	= 0x00000200, /* the object's group is over quota */
	OBD_FL_CREATE_CROW	= 0x00000400, /* object should be created on write */
	OBD_FL_SRVLOCK		= 0x00000800, /* delegate DLM locking to server */
	OBD_FL_CKSUM_CRC32	= 0x00001000, /* CRC32 checksum type */
	OBD_FL_CKSUM_ADLER	= 0x00002000, /* ADLER checksum type */
	OBD_FL_CKSUM_CRC32C	= 0x00004000, /* CRC32C checksum type */
	OBD_FL_CKSUM_RSVD2	= 0x00008000, /* for future cksum types */
	OBD_FL_CKSUM_RSVD3	= 0x00010000, /* for future cksum types */
	OBD_FL_SHRINK_GRANT	= 0x00020000, /* object shrink the grant */
	OBD_FL_MMAP		= 0x00040000, /* object is mmapped on the client.
					       * XXX: obsoleted - reserved for old
					       * clients prior to 2.2 */
	OBD_FL_RECOV_RESEND	= 0x00080000, /* recoverable resent */
	OBD_FL_NOSPC_BLK	= 0x00100000, /* no more block space on OST */
	OBD_FL_FLUSH		= 0x00200000, /* flush pages on the OST */
	OBD_FL_SHORT_IO		= 0x00400000, /* short io request */

	/* Note that while these checksum values are currently separate bits,
	 * in 2.x we can actually allow all values from 1-31 if we wanted. */
	OBD_FL_CKSUM_ALL	= OBD_FL_CKSUM_CRC32 | OBD_FL_CKSUM_ADLER |
				  OBD_FL_CKSUM_CRC32C,

	/* mask for local-only flag, which won't be sent over network */
	OBD_FL_LOCAL_MASK	= 0xF0000000,
};
/*
 * All LOV EA magics should have the same postfix. If some new version of
 * Lustre introduces a new LOV EA magic, then on a downgrade to an older
 * Lustre, even though the old version does not recognize such a new magic,
 * it can still distinguish the corrupted cases by checking the magic's
 * postfix.
 */
#define LOV_MAGIC_MAGIC 0x0BD0
#define LOV_MAGIC_MASK  0xFFFF

#define LOV_MAGIC_V1		(0x0BD10000 | LOV_MAGIC_MAGIC)
#define LOV_MAGIC_JOIN_V1	(0x0BD20000 | LOV_MAGIC_MAGIC)
#define LOV_MAGIC_V3		(0x0BD30000 | LOV_MAGIC_MAGIC)
#define LOV_MAGIC_MIGRATE	(0x0BD40000 | LOV_MAGIC_MAGIC)
/* reserved for specifying OSTs */
#define LOV_MAGIC_SPECIFIC	(0x0BD50000 | LOV_MAGIC_MAGIC)
#define LOV_MAGIC		LOV_MAGIC_V1
/*
 * magic for fully defined striping
 * the idea is that we should have different magics for striping "hints"
 * (struct lov_user_md_v[13]) and defined ready-to-use striping (struct
 * lov_mds_md_v[13]). at the moment the magics are used in wire protocol,
 * we can't just change it without lengthy preparation, but we still need a
 * mechanism to allow LOD to differentiate hint versus ready striping.
 * so, at the moment we do a trick: MDT knows what to expect from request
 * depending on the case (replay uses ready striping, non-replay req uses
 * hints), so MDT replaces magic with appropriate one and now LOD can
 * easily understand what's inside -bzzz
 */
#define LOV_MAGIC_V1_DEF	0x0CD10BD0
#define LOV_MAGIC_V3_DEF	0x0CD30BD0

#define lov_pattern(pattern)		(pattern & ~LOV_PATTERN_F_MASK)
#define lov_pattern_flags(pattern)	(pattern & LOV_PATTERN_F_MASK)
#define lov_ost_data lov_ost_data_v1
struct lov_ost_data_v1 {	  /* per-stripe data structure (little-endian)*/
	struct ost_id l_ost_oi;	  /* OST object ID */
	__u32 l_ost_gen;	  /* generation of this l_ost_idx */
	__u32 l_ost_idx;	  /* OST index in LOV (lov_tgt_desc->tgts) */
};

#define lov_mds_md lov_mds_md_v1
struct lov_mds_md_v1 {		  /* LOV EA mds/wire data (little-endian) */
	__u32 lmm_magic;	  /* magic number = LOV_MAGIC_V1 */
	__u32 lmm_pattern;	  /* LOV_PATTERN_RAID0, LOV_PATTERN_RAID1 */
	struct ost_id lmm_oi;	  /* LOV object ID */
	__u32 lmm_stripe_size;	  /* size of stripe in bytes */
	/* lmm_stripe_count used to be __u32 */
	__u16 lmm_stripe_count;	  /* num stripes in use for this object */
	__u16 lmm_layout_gen;	  /* layout generation number */
	struct lov_ost_data_v1 lmm_objects[0]; /* per-stripe data */
};
/**
 * Sigh, because pre-2.4 uses
 * struct lov_mds_md_v1 {
 *	........
 *	__u64 lmm_object_id;
 *	__u64 lmm_object_seq;
 *	......
 * }
 * to identify the LOV(MDT) object, and lmm_object_seq will
 * be normal_fid, which makes it hard to combine these conversions
 * into ostid_to_fid(), so we do the lmm_oi/fid conversion separately.
 *
 * We can tell the lmm_oi by this way,
 * 1.8: lmm_object_id = {inode}, lmm_object_gr = 0
 * 2.1: lmm_object_id = {oid < 128k}, lmm_object_seq = FID_SEQ_NORMAL
 * 2.4: lmm_oi.f_seq = FID_SEQ_NORMAL, lmm_oi.f_oid = {oid < 128k},
 *	lmm_oi.f_ver = 0
 *
 * But currently lmm_oi/lsm_oi does not have any "real" usages,
 * except for printing some information, and the user can always
 * get the real FID from LMA; besides, this multiple case check might
 * make the swab more complicated. So we keep using id/seq for lmm_oi.
 */
static inline void fid_to_lmm_oi(const struct lu_fid *fid,
				 struct ost_id *oi)
{
	oi->oi.oi_id = fid_oid(fid);
	oi->oi.oi_seq = fid_seq(fid);
}

static inline void lmm_oi_set_seq(struct ost_id *oi, __u64 seq)
{
	oi->oi.oi_seq = seq;
}

static inline void lmm_oi_set_id(struct ost_id *oi, __u64 oid)
{
	oi->oi.oi_id = oid;
}

static inline __u64 lmm_oi_id(const struct ost_id *oi)
{
	return oi->oi.oi_id;
}

static inline __u64 lmm_oi_seq(const struct ost_id *oi)
{
	return oi->oi.oi_seq;
}

static inline void lmm_oi_le_to_cpu(struct ost_id *dst_oi,
				    const struct ost_id *src_oi)
{
	dst_oi->oi.oi_id = le64_to_cpu(src_oi->oi.oi_id);
	dst_oi->oi.oi_seq = le64_to_cpu(src_oi->oi.oi_seq);
}

static inline void lmm_oi_cpu_to_le(struct ost_id *dst_oi,
				    const struct ost_id *src_oi)
{
	dst_oi->oi.oi_id = cpu_to_le64(src_oi->oi.oi_id);
	dst_oi->oi.oi_seq = cpu_to_le64(src_oi->oi.oi_seq);
}
/* extern void lustre_swab_lov_mds_md(struct lov_mds_md *llm); */

#define MAX_MD_SIZE (sizeof(struct lov_mds_md) + 4 * sizeof(struct lov_ost_data))
#define MIN_MD_SIZE (sizeof(struct lov_mds_md) + 1 * sizeof(struct lov_ost_data))

/* This is the default MDT reply size allocated; should the striping be bigger,
 * it will be reallocated in mdt_fix_reply.
 * 100 stripes is a bit less than 2.5KB of data. */
#define DEF_REP_MD_SIZE (sizeof(struct lov_mds_md) + \
			 100 * sizeof(struct lov_ost_data))
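/*
 * Worked arithmetic (assuming a 16-byte struct ost_id): struct
 * lov_mds_md_v1 is 32 bytes and each lov_ost_data is 24 bytes, so
 * MAX_MD_SIZE = 32 + 4 * 24 = 128 bytes and DEF_REP_MD_SIZE =
 * 32 + 100 * 24 = 2432 bytes, i.e. "a bit less than 2.5KB".
 */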
#define XATTR_NAME_ACL_ACCESS	"system.posix_acl_access"
#define XATTR_NAME_ACL_DEFAULT	"system.posix_acl_default"
#define XATTR_USER_PREFIX	"user."
#define XATTR_TRUSTED_PREFIX	"trusted."
#define XATTR_SECURITY_PREFIX	"security."

#define XATTR_NAME_LOV		"trusted.lov"
#define XATTR_NAME_LMA		"trusted.lma"
#define XATTR_NAME_LMV		"trusted.lmv"
#define XATTR_NAME_DEFAULT_LMV	"trusted.dmv"
#define XATTR_NAME_LINK		"trusted.link"
#define XATTR_NAME_FID		"trusted.fid"
#define XATTR_NAME_VERSION	"trusted.version"
#define XATTR_NAME_SOM		"trusted.som"
#define XATTR_NAME_HSM		"trusted.hsm"
#define XATTR_NAME_LFSCK_BITMAP	"trusted.lfsck_bitmap"
#define XATTR_NAME_DUMMY	"trusted.dummy"

#if LUSTRE_VERSION_CODE < OBD_OCD_VERSION(2, 8, 53, 0)
# define XATTR_NAME_LFSCK_NAMESPACE_OLD "trusted.lfsck_namespace"
#endif

#define XATTR_NAME_LFSCK_NAMESPACE "trusted.lfsck_ns"
#define XATTR_NAME_MAX_LEN	32 /* increase this if a longer name is added */
struct lov_mds_md_v3 {		  /* LOV EA mds/wire data (little-endian) */
	__u32 lmm_magic;	  /* magic number = LOV_MAGIC_V3 */
	__u32 lmm_pattern;	  /* LOV_PATTERN_RAID0, LOV_PATTERN_RAID1 */
	struct ost_id lmm_oi;	  /* LOV object ID */
	__u32 lmm_stripe_size;	  /* size of stripe in bytes */
	/* lmm_stripe_count used to be __u32 */
	__u16 lmm_stripe_count;	  /* num stripes in use for this object */
	__u16 lmm_layout_gen;	  /* layout generation number */
	char  lmm_pool_name[LOV_MAXPOOLNAME + 1]; /* must be 32bit aligned */
	struct lov_ost_data_v1 lmm_objects[0]; /* per-stripe data */
};
static inline __u32 lov_mds_md_size(__u16 stripes, __u32 lmm_magic)
{
	if (lmm_magic == LOV_MAGIC_V3)
		return sizeof(struct lov_mds_md_v3) +
		       stripes * sizeof(struct lov_ost_data_v1);
	else
		return sizeof(struct lov_mds_md_v1) +
		       stripes * sizeof(struct lov_ost_data_v1);
}
static inline __u32
lov_mds_md_max_stripe_count(size_t buf_size, __u32 lmm_magic)
{
	switch (lmm_magic) {
	case LOV_MAGIC_V1: {
		struct lov_mds_md_v1 lmm;

		if (buf_size < sizeof(lmm))
			return 0;

		return (buf_size - sizeof(lmm)) / sizeof(lmm.lmm_objects[0]);
	}
	case LOV_MAGIC_V3: {
		struct lov_mds_md_v3 lmm;

		if (buf_size < sizeof(lmm))
			return 0;

		return (buf_size - sizeof(lmm)) / sizeof(lmm.lmm_objects[0]);
	}
	default:
		return 0;
	}
}
#define OBD_MD_FLID	   (0x00000001ULL) /* object ID */
#define OBD_MD_FLATIME	   (0x00000002ULL) /* access time */
#define OBD_MD_FLMTIME	   (0x00000004ULL) /* data modification time */
#define OBD_MD_FLCTIME	   (0x00000008ULL) /* change time */
#define OBD_MD_FLSIZE	   (0x00000010ULL) /* size */
#define OBD_MD_FLBLOCKS	   (0x00000020ULL) /* allocated blocks count */
#define OBD_MD_FLBLKSZ	   (0x00000040ULL) /* block size */
#define OBD_MD_FLMODE	   (0x00000080ULL) /* access bits (mode & ~S_IFMT) */
#define OBD_MD_FLTYPE	   (0x00000100ULL) /* object type (mode & S_IFMT) */
#define OBD_MD_FLUID	   (0x00000200ULL) /* user ID */
#define OBD_MD_FLGID	   (0x00000400ULL) /* group ID */
#define OBD_MD_FLFLAGS	   (0x00000800ULL) /* flags word */
#define OBD_MD_FLNLINK	   (0x00002000ULL) /* link count */
#define OBD_MD_FLGENER	   (0x00004000ULL) /* generation number */
/*#define OBD_MD_FLINLINE    (0x00008000ULL)  inline data. used until 1.6.5 */
#define OBD_MD_FLRDEV	   (0x00010000ULL) /* device number */
#define OBD_MD_FLEASIZE	   (0x00020000ULL) /* extended attribute data */
#define OBD_MD_LINKNAME	   (0x00040000ULL) /* symbolic link target */
#define OBD_MD_FLHANDLE	   (0x00080000ULL) /* file/lock handle */
#define OBD_MD_FLCKSUM	   (0x00100000ULL) /* bulk data checksum */
#define OBD_MD_FLQOS	   (0x00200000ULL) /* quality of service stats */
/*#define OBD_MD_FLOSCOPQ    (0x00400000ULL)  osc opaque data, never used */
#define OBD_MD_FLCOOKIE	   (0x00800000ULL) /* log cancellation cookie */
#define OBD_MD_FLGROUP	   (0x01000000ULL) /* group */
#define OBD_MD_FLFID	   (0x02000000ULL) /* ->ost write inline fid */
#define OBD_MD_FLEPOCH	   (0x04000000ULL) /* ->ost write with ioepoch
					    * ->mds if epoch opens or closes */
#define OBD_MD_FLGRANT	   (0x08000000ULL) /* ost preallocation space grant */
#define OBD_MD_FLDIREA	   (0x10000000ULL) /* dir's extended attribute data */
#define OBD_MD_FLUSRQUOTA  (0x20000000ULL) /* over quota flags sent from ost */
#define OBD_MD_FLGRPQUOTA  (0x40000000ULL) /* over quota flags sent from ost */
#define OBD_MD_FLMODEASIZE (0x80000000ULL) /* EA size will be changed */
#define OBD_MD_MDS	   (0x0000000100000000ULL) /* where an inode lives on */
#define OBD_MD_REINT	   (0x0000000200000000ULL) /* reintegrate oa */
#define OBD_MD_MEA	   (0x0000000400000000ULL) /* CMD split EA */
#define OBD_MD_TSTATE	   (0x0000000800000000ULL) /* transient state field */

#define OBD_MD_FLXATTR	   (0x0000001000000000ULL) /* xattr */
#define OBD_MD_FLXATTRLS   (0x0000002000000000ULL) /* xattr list */
#define OBD_MD_FLXATTRRM   (0x0000004000000000ULL) /* xattr remove */
#define OBD_MD_FLACL	   (0x0000008000000000ULL) /* ACL */
#define OBD_MD_FLRMTPERM   (0x0000010000000000ULL) /* remote permission */
#define OBD_MD_FLMDSCAPA   (0x0000020000000000ULL) /* MDS capability */
#define OBD_MD_FLOSSCAPA   (0x0000040000000000ULL) /* OSS capability */
#define OBD_MD_FLCKSPLIT   (0x0000080000000000ULL) /* Check split on server */
#define OBD_MD_FLCROSSREF  (0x0000100000000000ULL) /* Cross-ref case */
#define OBD_MD_FLGETATTRLOCK (0x0000200000000000ULL) /* Get IOEpoch attributes
						      * under lock; for xattr
						      * requests means the
						      * client holds the lock */
#define OBD_MD_FLOBJCOUNT  (0x0000400000000000ULL) /* for multiple destroy */

#define OBD_MD_FLRMTLSETFACL (0x0001000000000000ULL) /* lfs lsetfacl case */
#define OBD_MD_FLRMTLGETFACL (0x0002000000000000ULL) /* lfs lgetfacl case */
#define OBD_MD_FLRMTRSETFACL (0x0004000000000000ULL) /* lfs rsetfacl case */
#define OBD_MD_FLRMTRGETFACL (0x0008000000000000ULL) /* lfs rgetfacl case */

#define OBD_MD_FLDATAVERSION (0x0010000000000000ULL) /* iversion sum */
#define OBD_MD_FLRELEASED    (0x0020000000000000ULL) /* file released */

#define OBD_MD_DEFAULT_MEA   (0x0040000000000000ULL) /* default MEA */
1817 #define OBD_MD_FLGETATTR (OBD_MD_FLID | OBD_MD_FLATIME | OBD_MD_FLMTIME | \
1818 OBD_MD_FLCTIME | OBD_MD_FLSIZE | OBD_MD_FLBLKSZ | \
1819 OBD_MD_FLMODE | OBD_MD_FLTYPE | OBD_MD_FLUID | \
1820 OBD_MD_FLGID | OBD_MD_FLFLAGS | OBD_MD_FLNLINK | \
1821 OBD_MD_FLGENER | OBD_MD_FLRDEV | OBD_MD_FLGROUP)
1823 #define OBD_MD_FLXATTRALL (OBD_MD_FLXATTR | OBD_MD_FLXATTRLS)
1825 /* don't forget obdo_fid which is way down at the bottom so it can
1826 * come after the definition of llog_cookie */
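
/* Usage sketch (hypothetical helper): o_valid in struct obdo (defined
 * later in this file) is a bitmask of the OBD_MD_* flags above; a full
 * getattr is expected to carry the whole OBD_MD_FLGETATTR set. */
static inline int obd_md_has_getattr_flags(__u64 o_valid)
{
	return (o_valid & OBD_MD_FLGETATTR) == OBD_MD_FLGETATTR;
}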
1830 HSS_CLEARMASK = 0x02,
1831 HSS_ARCHIVE_ID = 0x04,
1834 struct hsm_state_set {
1836 __u32 hss_archive_id;
1838 __u64 hss_clearmask;
1841 extern void lustre_swab_hsm_user_state(struct hsm_user_state *hus);
1842 extern void lustre_swab_hsm_state_set(struct hsm_state_set *hss);
1844 extern void lustre_swab_obd_statfs (struct obd_statfs *os);
1846 /* ost_body.data values for OST_BRW */
1848 #define OBD_BRW_READ 0x01
1849 #define OBD_BRW_WRITE 0x02
1850 #define OBD_BRW_RWMASK (OBD_BRW_READ | OBD_BRW_WRITE)
1851 #define OBD_BRW_SYNC 0x08 /* this page is a part of synchronous
1852 * transfer and is not accounted in
1853 * grant space */
1854 #define OBD_BRW_CHECK 0x10
1855 #define OBD_BRW_FROM_GRANT 0x20 /* the osc manages this under llite */
1856 #define OBD_BRW_GRANTED 0x40 /* the ost manages this */
1857 #define OBD_BRW_NOCACHE 0x80 /* this page is a part of non-cached IO */
1858 #define OBD_BRW_NOQUOTA 0x100
1859 #define OBD_BRW_SRVLOCK 0x200 /* Client holds no lock over this page */
1860 #define OBD_BRW_ASYNC 0x400 /* Server may delay commit to disk */
1861 #define OBD_BRW_MEMALLOC 0x800 /* Client runs in the "kswapd" context */
1862 #define OBD_BRW_OVER_USRQUOTA 0x1000 /* Running out of user quota */
1863 #define OBD_BRW_OVER_GRPQUOTA 0x2000 /* Running out of group quota */
1864 #define OBD_BRW_SOFT_SYNC 0x4000 /* This flag notifies the server
1865 * that the client is running low on
1866 * space for unstable pages; asking
1867 * it to sync quickly */
1869 #define OBD_OBJECT_EOF LUSTRE_EOF
1871 #define OST_MIN_PRECREATE 32
1872 #define OST_MAX_PRECREATE 20000
1874 struct obd_ioobj {
1875 struct ost_id ioo_oid; /* object ID, if multi-obj BRW */
1876 __u32 ioo_max_brw; /* low 16 bits were o_mode before 2.4,
1877 * now (PTLRPC_BULK_OPS_COUNT - 1) in
1878 * high 16 bits in 2.4 and later */
1879 __u32 ioo_bufcnt; /* number of niobufs for this object */
1880 };
1882 #define IOOBJ_MAX_BRW_BITS 16
1883 #define IOOBJ_TYPE_MASK ((1U << IOOBJ_MAX_BRW_BITS) - 1)
1884 #define ioobj_max_brw_get(ioo) (((ioo)->ioo_max_brw >> IOOBJ_MAX_BRW_BITS) + 1)
1885 #define ioobj_max_brw_set(ioo, num) \
1886 do { (ioo)->ioo_max_brw = ((num) - 1) << IOOBJ_MAX_BRW_BITS; } while (0)
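
/* Round-trip sketch (hypothetical selftest): ioobj_max_brw_set() packs
 * (num - 1) into the high 16 bits, so ioobj_max_brw_get() recovers the
 * original value. */
static inline int ioobj_max_brw_selftest(void)
{
	struct obd_ioobj ioo;

	ioobj_max_brw_set(&ioo, 16); /* e.g. 16 bulk RPCs per object */
	return ioobj_max_brw_get(&ioo) == 16;
}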
1888 extern void lustre_swab_obd_ioobj (struct obd_ioobj *ioo);
1890 /* a multiple of 8 bytes in size => safe to use in arrays */
1891 struct niobuf_remote {
1897 void lustre_swab_niobuf_remote(struct niobuf_remote *nbr);
1899 /* lock value block communicated between the filter and llite */
1901 /* OST_LVB_ERR_INIT is needed because the return code in rc is
1902 * negative, i.e. because ((MASK + rc) & MASK) != MASK. */
1903 #define OST_LVB_ERR_INIT 0xffbadbad80000000ULL
1904 #define OST_LVB_ERR_MASK 0xffbadbad00000000ULL
1905 #define OST_LVB_IS_ERR(blocks) \
1906 ((blocks & OST_LVB_ERR_MASK) == OST_LVB_ERR_MASK)
1907 #define OST_LVB_SET_ERR(blocks, rc) \
1908 do { blocks = OST_LVB_ERR_INIT + rc; } while (0)
1909 #define OST_LVB_GET_ERR(blocks) (int)(blocks - OST_LVB_ERR_INIT)
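
/* Error-encoding sketch (hypothetical selftest): a negative rc folded
 * into the blocks field still matches OST_LVB_ERR_MASK and is recovered
 * exactly, while real block counts never match the mask. */
static inline int ost_lvb_err_selftest(void)
{
	__u64 blocks;

	OST_LVB_SET_ERR(blocks, -5);
	return OST_LVB_IS_ERR(blocks) && OST_LVB_GET_ERR(blocks) == -5;
}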
1919 extern void lustre_swab_ost_lvb_v1(struct ost_lvb_v1 *lvb);
1933 extern void lustre_swab_ost_lvb(struct ost_lvb *lvb);
1936 * lquota data structures
1939 #ifndef QUOTABLOCK_BITS
1940 # define QUOTABLOCK_BITS LUSTRE_QUOTABLOCK_BITS
1943 #ifndef QUOTABLOCK_SIZE
1944 # define QUOTABLOCK_SIZE LUSTRE_QUOTABLOCK_SIZE
1948 # define toqb lustre_stoqb
1951 /* The lquota_id structure is a union of all the possible identifier types that
1952 * can be used with quota; this includes:
1953 * - 64-bit user ID
1954 * - 64-bit group ID
1955 * - a FID which can be used for per-directory quota in the future */
1956 union lquota_id {
1957 struct lu_fid qid_fid; /* FID for per-directory quota */
1958 __u64 qid_uid; /* user identifier */
1959 __u64 qid_gid; /* group identifier */
1962 /* quotactl management */
1963 struct obd_quotactl {
1965 __u32 qc_type; /* see Q_* flag below */
1968 struct obd_dqinfo qc_dqinfo;
1969 struct obd_dqblk qc_dqblk;
1972 extern void lustre_swab_obd_quotactl(struct obd_quotactl *q);
1974 #define Q_COPY(out, in, member) (out)->member = (in)->member
1976 #define QCTL_COPY(out, in) \
1978 Q_COPY(out, in, qc_cmd); \
1979 Q_COPY(out, in, qc_type); \
1980 Q_COPY(out, in, qc_id); \
1981 Q_COPY(out, in, qc_stat); \
1982 Q_COPY(out, in, qc_dqinfo); \
1983 Q_COPY(out, in, qc_dqblk); \
1986 /* Body of quota request used for quota acquire/release RPCs between quota
1987 * master (aka QMT) and slaves (aka QSD). */
1989 struct lu_fid qb_fid; /* FID of global index packing the pool ID
1990 * and type (data or metadata) as well as
1991 * the quota type (user or group). */
1992 union lquota_id qb_id; /* uid or gid or directory FID */
1993 __u32 qb_flags; /* see below */
1995 __u64 qb_count; /* acquire/release count (kbytes/inodes) */
1996 __u64 qb_usage; /* current slave usage (kbytes/inodes) */
1997 __u64 qb_slv_ver; /* slave index file version */
1998 struct lustre_handle qb_lockh; /* per-ID lock handle */
1999 struct lustre_handle qb_glb_lockh; /* global lock handle */
2000 __u64 qb_padding1[4];
2003 /* When the quota_body is used in the reply to the quota global intent
2004 * lock (IT_QUOTA_CONN), qb_fid contains the slave index file FID. */
2005 #define qb_slv_fid qb_fid
2006 /* qb_usage is the current qunit (in kbytes/inodes) when quota_body is used in
2007 * a quota reply */
2008 #define qb_qunit qb_usage
2010 #define QUOTA_DQACQ_FL_ACQ 0x1 /* acquire quota */
2011 #define QUOTA_DQACQ_FL_PREACQ 0x2 /* pre-acquire */
2012 #define QUOTA_DQACQ_FL_REL 0x4 /* release quota */
2013 #define QUOTA_DQACQ_FL_REPORT 0x8 /* report usage */
2015 extern void lustre_swab_quota_body(struct quota_body *b);
2017 /* Quota types currently supported */
2019 LQUOTA_TYPE_USR = 0x00, /* maps to USRQUOTA */
2020 LQUOTA_TYPE_GRP = 0x01, /* maps to GRPQUOTA */
2024 /* There are 2 different resource types on which a quota limit can be enforced:
2025 * - inodes on the MDTs
2026 * - blocks on the OSTs */
2028 LQUOTA_RES_MD = 0x01, /* skip 0 to avoid null oid in FID */
2029 LQUOTA_RES_DT = 0x02,
2031 LQUOTA_FIRST_RES = LQUOTA_RES_MD
2033 #define LQUOTA_NR_RES (LQUOTA_LAST_RES - LQUOTA_FIRST_RES + 1)
2035 /*
2036 * Space accounting support
2037 * Format of an accounting record, providing disk usage information for a given
2038 * user or group
2039 */
2040 struct lquota_acct_rec { /* 16 bytes */
2041 __u64 bspace; /* current space in use */
2042 __u64 ispace; /* current # inodes in use */
2045 /*
2046 * Global quota index support
2047 * Format of a global record, providing global quota settings for a given quota
2048 * ID
2049 */
2050 struct lquota_glb_rec { /* 32 bytes */
2051 __u64 qbr_hardlimit; /* quota hard limit, in #inodes or kbytes */
2052 __u64 qbr_softlimit; /* quota soft limit, in #inodes or kbytes */
2053 __u64 qbr_time; /* grace time, in seconds */
2054 __u64 qbr_granted; /* how much is granted to slaves, in #inodes or
2055 * kbytes */
2058 /*
2059 * Slave index support
2060 * Format of a slave record, recording how much space is granted to a given
2061 * ID
2062 */
2063 struct lquota_slv_rec { /* 8 bytes */
2064 __u64 qsr_granted; /* space granted to the slave for the key=ID,
2065 * in #inodes or kbytes */
2068 /* Data structures associated with the quota locks */
2070 /* Glimpse descriptor used for the index & per-ID quota locks */
2071 struct ldlm_gl_lquota_desc {
2072 union lquota_id gl_id; /* quota ID subject to the glimpse */
2073 __u64 gl_flags; /* see LQUOTA_FL* below */
2074 __u64 gl_ver; /* new index version */
2075 __u64 gl_hardlimit; /* new hardlimit or qunit value */
2076 __u64 gl_softlimit; /* new softlimit */
2080 #define gl_qunit gl_hardlimit /* current qunit value used when
2081 * glimpsing per-ID quota locks */
2083 /* quota glimpse flags */
2084 #define LQUOTA_FL_EDQUOT 0x1 /* user/group out of quota space on QMT */
2086 /* LVB used with quota (global and per-ID) locks */
2088 __u64 lvb_flags; /* see LQUOTA_FL* above */
2089 __u64 lvb_id_may_rel; /* space that might be released later */
2090 __u64 lvb_id_rel; /* space released by the slave for this ID */
2091 __u64 lvb_id_qunit; /* current qunit value */
2095 extern void lustre_swab_lquota_lvb(struct lquota_lvb *lvb);
2097 /* LVB used with global quota lock */
2098 #define lvb_glb_ver lvb_id_may_rel /* current version of the global index */
2106 #define QUOTA_FIRST_OPC QUOTA_DQACQ
2115 MDS_GETATTR_NAME = 34,
2120 MDS_DISCONNECT = 39,
2123 MDS_PIN = 42, /* obsolete, never used in a release */
2124 MDS_UNPIN = 43, /* obsolete, never used in a release */
2126 MDS_DONE_WRITING = 45,
2128 MDS_QUOTACHECK = 47,
2131 MDS_SETXATTR = 50, /* obsolete, now it's MDS_REINT op */
2133 MDS_IS_SUBDIR = 52, /* obsolete, never used in a release */
2135 MDS_HSM_STATE_GET = 54,
2136 MDS_HSM_STATE_SET = 55,
2137 MDS_HSM_ACTION = 56,
2138 MDS_HSM_PROGRESS = 57,
2139 MDS_HSM_REQUEST = 58,
2140 MDS_HSM_CT_REGISTER = 59,
2141 MDS_HSM_CT_UNREGISTER = 60,
2142 MDS_SWAP_LAYOUTS = 61,
2146 #define MDS_FIRST_OPC MDS_GETATTR
2149 /* opcodes for object update */
2155 #define OUT_UPDATE_FIRST_OPC OUT_UPDATE
2172 } mds_reint_t, mdt_reint_t;
2174 extern void lustre_swab_generic_32s (__u32 *val);
2176 /* the disposition of the intent outlines what was executed */
2177 #define DISP_IT_EXECD 0x00000001
2178 #define DISP_LOOKUP_EXECD 0x00000002
2179 #define DISP_LOOKUP_NEG 0x00000004
2180 #define DISP_LOOKUP_POS 0x00000008
2181 #define DISP_OPEN_CREATE 0x00000010
2182 #define DISP_OPEN_OPEN 0x00000020
2183 #define DISP_ENQ_COMPLETE 0x00400000 /* obsolete and unused */
2184 #define DISP_ENQ_OPEN_REF 0x00800000
2185 #define DISP_ENQ_CREATE_REF 0x01000000
2186 #define DISP_OPEN_LOCK 0x02000000
2187 #define DISP_OPEN_LEASE 0x04000000
2188 #define DISP_OPEN_STRIPE 0x08000000
2189 #define DISP_OPEN_DENY 0x10000000
2191 /* INODE LOCK PARTS */
2192 #define MDS_INODELOCK_LOOKUP 0x000001 /* For namespace, dentry etc, and also
2193 * was used to protect permission (mode,
2194 * owner, group etc) before 2.4. */
2195 #define MDS_INODELOCK_UPDATE 0x000002 /* size, links, timestamps */
2196 #define MDS_INODELOCK_OPEN 0x000004 /* For opened files */
2197 #define MDS_INODELOCK_LAYOUT 0x000008 /* for layout */
2199 /* The PERM bit is added in 2.4, and it is used to protect permission (mode,
2200 * owner, group, ACL, etc.), i.e. to separate the permission from the LOOKUP
2201 * lock, because for remote directories (in DNE) these locks will be granted
2202 * by different MDTs (different ldlm namespaces).
2203 *
2204 * For a local directory, the MDT will always grant UPDATE_LOCK|PERM_LOCK
2205 * together. For a remote directory, the master MDT, where the remote
2206 * directory is, will grant UPDATE_LOCK|PERM_LOCK, and the remote MDT, where
2207 * the name entry is, will grant LOOKUP_LOCK. */
2208 #define MDS_INODELOCK_PERM 0x000010
2209 #define MDS_INODELOCK_XATTR 0x000020 /* extended attributes */
2211 #define MDS_INODELOCK_MAXSHIFT 5
2212 /* This FULL lock is useful to take on unlink sort of operations */
2213 #define MDS_INODELOCK_FULL ((1<<(MDS_INODELOCK_MAXSHIFT+1))-1)
2215 extern void lustre_swab_ll_fid (struct ll_fid *fid);
2217 /* NOTE: until Lustre 1.8.7/2.1.1 the fid_ver() was packed into name[2],
2218 * but was moved into name[1] along with the OID to avoid consuming the
2219 * name[2,3] fields that need to be used for the quota id (also a FID). */
2221 LUSTRE_RES_ID_SEQ_OFF = 0,
2222 LUSTRE_RES_ID_VER_OID_OFF = 1,
2223 LUSTRE_RES_ID_WAS_VER_OFF = 2, /* see note above */
2224 LUSTRE_RES_ID_QUOTA_SEQ_OFF = 2,
2225 LUSTRE_RES_ID_QUOTA_VER_OID_OFF = 3,
2226 LUSTRE_RES_ID_HSH_OFF = 3
2229 #define MDS_STATUS_CONN 1
2230 #define MDS_STATUS_LOV 2
2232 /* mdt_thread_info.mti_flags. */
2234 /* The flag indicates Size-on-MDS attributes are changed. */
2235 MF_SOM_CHANGE = (1 << 0),
2236 /* Flags indicating that an epoch opens or closes. */
2237 MF_EPOCH_OPEN = (1 << 1),
2238 MF_EPOCH_CLOSE = (1 << 2),
2239 MF_MDC_CANCEL_FID1 = (1 << 3),
2240 MF_MDC_CANCEL_FID2 = (1 << 4),
2241 MF_MDC_CANCEL_FID3 = (1 << 5),
2242 MF_MDC_CANCEL_FID4 = (1 << 6),
2243 /* There is a pending attribute update. */
2244 MF_SOM_AU = (1 << 7),
2245 /* Cancel OST locks while doing getattr on OST attributes. */
2246 MF_GETATTR_LOCK = (1 << 8),
2247 MF_GET_MDT_IDX = (1 << 9),
2250 #define LUSTRE_BFLAG_UNCOMMITTED_WRITES 0x1
2252 /* these should be identical to their EXT4_*_FL counterparts; they are
2253 * redefined here only to avoid dragging in fs/ext4/ext4.h */
2254 #define LUSTRE_SYNC_FL 0x00000008 /* Synchronous updates */
2255 #define LUSTRE_IMMUTABLE_FL 0x00000010 /* Immutable file */
2256 #define LUSTRE_APPEND_FL 0x00000020 /* writes to file may only append */
2257 #define LUSTRE_NOATIME_FL 0x00000080 /* do not update atime */
2258 #define LUSTRE_DIRSYNC_FL 0x00010000 /* dirsync behaviour (dir only) */
2261 /* Convert wire LUSTRE_*_FL to corresponding client local VFS S_* values
2262 * for the client inode i_flags. The LUSTRE_*_FL are the Lustre wire
2263 * protocol equivalents of LDISKFS_*_FL values stored on disk, while
2264 * the S_* flags are kernel-internal values that change between kernel
2265 * versions. These flags are set/cleared via FSFILT_IOC_{GET,SET}_FLAGS.
2266 * See b=16526 for a full history. */
2267 static inline int ll_ext_to_inode_flags(int flags)
2268 {
2269 return (((flags & LUSTRE_SYNC_FL) ? S_SYNC : 0) |
2270 ((flags & LUSTRE_NOATIME_FL) ? S_NOATIME : 0) |
2271 ((flags & LUSTRE_APPEND_FL) ? S_APPEND : 0) |
2272 #if defined(S_DIRSYNC)
2273 ((flags & LUSTRE_DIRSYNC_FL) ? S_DIRSYNC : 0) |
2274 #endif
2275 ((flags & LUSTRE_IMMUTABLE_FL) ? S_IMMUTABLE : 0));
2276 }
2278 static inline int ll_inode_to_ext_flags(int iflags)
2279 {
2280 return (((iflags & S_SYNC) ? LUSTRE_SYNC_FL : 0) |
2281 ((iflags & S_NOATIME) ? LUSTRE_NOATIME_FL : 0) |
2282 ((iflags & S_APPEND) ? LUSTRE_APPEND_FL : 0) |
2283 #if defined(S_DIRSYNC)
2284 ((iflags & S_DIRSYNC) ? LUSTRE_DIRSYNC_FL : 0) |
2285 #endif
2286 ((iflags & S_IMMUTABLE) ? LUSTRE_IMMUTABLE_FL : 0));
2287 }
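
/* Round-trip sketch (hypothetical selftest): for the bits handled above,
 * wire flags survive conversion to kernel i_flags and back. */
static inline int ll_flags_roundtrip_selftest(void)
{
	int wire = LUSTRE_SYNC_FL | LUSTRE_APPEND_FL;

	return ll_inode_to_ext_flags(ll_ext_to_inode_flags(wire)) == wire;
}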
2290 /* 64 possible states */
2291 enum md_transient_state {
2292 MS_RESTORE = (1 << 0), /* restore is running */
2296 struct lu_fid mbo_fid1;
2297 struct lu_fid mbo_fid2;
2298 struct lustre_handle mbo_handle;
2300 __u64 mbo_size; /* Offset, in the case of MDS_READPAGE */
2304 __u64 mbo_blocks; /* XID, in the case of MDS_READPAGE */
2306 __u64 mbo_t_state; /* transient file state defined in
2307 * enum md_transient_state
2308 * was "ino" until 2.4.0 */
2311 __u32 mbo_capability;
2317 __u32 mbo_nlink; /* #bytes to read in the case of MDS_READPAGE */
2318 __u32 mbo_unused2; /* was "generation" until 2.4.0 */
2320 __u32 mbo_eadatasize;
2322 __u32 mbo_max_mdsize;
2323 __u32 mbo_max_cookiesize;
2324 __u32 mbo_uid_h; /* high 32-bits of uid, for FUID */
2325 __u32 mbo_gid_h; /* high 32-bits of gid, for FUID */
2326 __u32 mbo_padding_5; /* also fix lustre_swab_mdt_body */
2327 __u64 mbo_padding_6;
2328 __u64 mbo_padding_7;
2329 __u64 mbo_padding_8;
2330 __u64 mbo_padding_9;
2331 __u64 mbo_padding_10;
2334 extern void lustre_swab_mdt_body (struct mdt_body *b);
2336 struct mdt_ioepoch {
2337 struct lustre_handle handle;
2343 extern void lustre_swab_mdt_ioepoch (struct mdt_ioepoch *b);
2345 /* permissions for md_perm.mp_perm */
2347 CFS_SETUID_PERM = 0x01,
2348 CFS_SETGID_PERM = 0x02,
2349 CFS_SETGRP_PERM = 0x04,
2350 CFS_RMTACL_PERM = 0x08,
2351 CFS_RMTOWN_PERM = 0x10
2354 /* inode access permission for a remote user; the inode info is omitted
2355 * because the client already knows it. */
2356 struct mdt_remote_perm {
2363 __u32 rp_access_perm; /* MAY_READ/WRITE/EXEC */
2367 extern void lustre_swab_mdt_remote_perm(struct mdt_remote_perm *p);
2369 struct mdt_rec_setattr {
2379 __u32 sa_padding_1_h;
2380 struct lu_fid sa_fid;
2389 __u32 sa_attr_flags;
2391 __u32 sa_bias; /* some operation flags */
2397 extern void lustre_swab_mdt_rec_setattr (struct mdt_rec_setattr *sa);
2400 * Attribute flags used in mdt_rec_setattr::sa_valid.
2401 * The kernel's #defines for ATTR_* should not be used over the network
2402 * since the client and MDS may run different kernels (see bug 13828)
2403 * Therefore, we should only use MDS_ATTR_* attributes for sa_valid.
2405 #define MDS_ATTR_MODE 0x1ULL /* = 1 */
2406 #define MDS_ATTR_UID 0x2ULL /* = 2 */
2407 #define MDS_ATTR_GID 0x4ULL /* = 4 */
2408 #define MDS_ATTR_SIZE 0x8ULL /* = 8 */
2409 #define MDS_ATTR_ATIME 0x10ULL /* = 16 */
2410 #define MDS_ATTR_MTIME 0x20ULL /* = 32 */
2411 #define MDS_ATTR_CTIME 0x40ULL /* = 64 */
2412 #define MDS_ATTR_ATIME_SET 0x80ULL /* = 128 */
2413 #define MDS_ATTR_MTIME_SET 0x100ULL /* = 256 */
2414 #define MDS_ATTR_FORCE 0x200ULL /* = 512, not a change, but force the change */
2415 #define MDS_ATTR_ATTR_FLAG 0x400ULL /* = 1024 */
2416 #define MDS_ATTR_KILL_SUID 0x800ULL /* = 2048 */
2417 #define MDS_ATTR_KILL_SGID 0x1000ULL /* = 4096 */
2418 #define MDS_ATTR_CTIME_SET 0x2000ULL /* = 8192 */
2419 #define MDS_ATTR_FROM_OPEN 0x4000ULL /* = 16384, called from open path, i.e. O_TRUNC */
2420 #define MDS_ATTR_BLOCKS 0x8000ULL /* = 32768 */
2423 #define FMODE_READ 00000001
2424 #define FMODE_WRITE 00000002
2427 #define MDS_FMODE_CLOSED 00000000
2428 #define MDS_FMODE_EXEC 00000004
2429 /* IO Epoch is opened on a closed file. */
2430 #define MDS_FMODE_EPOCH 01000000
2431 /* IO Epoch is opened on a file truncate. */
2432 #define MDS_FMODE_TRUNC 02000000
2433 /* Size-on-MDS Attribute Update is pending. */
2434 #define MDS_FMODE_SOM 04000000
2436 #define MDS_OPEN_CREATED 00000010
2437 #define MDS_OPEN_CROSS 00000020
2439 #define MDS_OPEN_CREAT 00000100
2440 #define MDS_OPEN_EXCL 00000200
2441 #define MDS_OPEN_TRUNC 00001000
2442 #define MDS_OPEN_APPEND 00002000
2443 #define MDS_OPEN_SYNC 00010000
2444 #define MDS_OPEN_DIRECTORY 00200000
2446 #define MDS_OPEN_BY_FID 040000000 /* open_by_fid for known object */
2447 #define MDS_OPEN_DELAY_CREATE 0100000000 /* delay initial object create */
2448 #define MDS_OPEN_OWNEROVERRIDE 0200000000 /* NFSD rw-reopen ro file for owner */
2449 #define MDS_OPEN_JOIN_FILE 0400000000 /* open for join file.
2450 * We do not support JOIN FILE
2451 * anymore; reserve this flag
2452 * just to prevent such a bit
2453 * from being reused. */
2455 #define MDS_OPEN_LOCK 04000000000 /* This open requires open lock */
2456 #define MDS_OPEN_HAS_EA 010000000000 /* specify object create pattern */
2457 #define MDS_OPEN_HAS_OBJS 020000000000 /* Just set the EA the obj exist */
2458 #define MDS_OPEN_NORESTORE 0100000000000ULL /* Do not restore file at open */
2459 #define MDS_OPEN_NEWSTRIPE 0200000000000ULL /* New stripe needed (restripe or
2460 * hsm restore) */
2461 #define MDS_OPEN_VOLATILE 0400000000000ULL /* File is volatile = created
2462 * unlinked */
2463 #define MDS_OPEN_LEASE 01000000000000ULL /* Open the file and grant lease
2464 * delegation; succeed if it is not
2465 * being opened with a conflicting
2466 * mode. */
2467 #define MDS_OPEN_RELEASE 02000000000000ULL /* Open the file for HSM release */
2469 /* lustre internal open flags, which should not be set from user space */
2470 #define MDS_OPEN_FL_INTERNAL (MDS_OPEN_HAS_EA | MDS_OPEN_HAS_OBJS | \
2471 MDS_OPEN_OWNEROVERRIDE | MDS_OPEN_LOCK | \
2472 MDS_OPEN_BY_FID | MDS_OPEN_LEASE | \
2475 /* permission to create a non-directory file */
2476 #define MAY_CREATE (1 << 7)
2477 /* permission to create a directory */
2478 #define MAY_LINK (1 << 8)
2479 /* permission to delete from the directory */
2480 #define MAY_UNLINK (1 << 9)
2481 /* source's permission for rename */
2482 #define MAY_RENAME_SRC (1 << 10)
2483 /* target's permission for rename */
2484 #define MAY_RENAME_TAR (1 << 11)
2485 /* partial (parent's) VTX permission check */
2486 #define MAY_VTX_PART (1 << 12)
2487 /* full VTX permission check */
2488 #define MAY_VTX_FULL (1 << 13)
2489 /* lfs rgetfacl permission check */
2490 #define MAY_RGETFACL (1 << 14)
2493 MDS_CHECK_SPLIT = 1 << 0,
2494 MDS_CROSS_REF = 1 << 1,
2495 MDS_VTX_BYPASS = 1 << 2,
2496 MDS_PERM_BYPASS = 1 << 3,
2498 MDS_QUOTA_IGNORE = 1 << 5,
2499 /* Was MDS_CLOSE_CLEANUP (1 << 6), no longer used */
2500 MDS_KEEP_ORPHAN = 1 << 7,
2501 MDS_RECOV_OPEN = 1 << 8,
2502 MDS_DATA_MODIFIED = 1 << 9,
2503 MDS_CREATE_VOLATILE = 1 << 10,
2504 MDS_OWNEROVERRIDE = 1 << 11,
2505 MDS_HSM_RELEASE = 1 << 12,
2506 MDS_RENAME_MIGRATE = 1 << 13,
2509 /* instance of mdt_reint_rec */
2510 struct mdt_rec_create {
2518 __u32 cr_suppgid1_h;
2520 __u32 cr_suppgid2_h;
2521 struct lu_fid cr_fid1;
2522 struct lu_fid cr_fid2;
2523 struct lustre_handle cr_old_handle; /* handle in case of open replay */
2527 __u64 cr_padding_1; /* rr_blocks */
2530 /* the set/get_mrc_cr_flags() helpers must be used to access the 64-bit
2531 * cr_flags [cr_flags_l, cr_flags_h]; the split is done to allow extending
2532 * the cr_flags size without breaking 1.8 compat */
2533 __u32 cr_flags_l; /* for use with open, low 32 bits */
2534 __u32 cr_flags_h; /* for use with open, high 32 bits */
2535 __u32 cr_umask; /* umask for create */
2536 __u32 cr_padding_4; /* rr_padding_4 */
2539 static inline void set_mrc_cr_flags(struct mdt_rec_create *mrc, __u64 flags)
2540 {
2541 mrc->cr_flags_l = (__u32)(flags & 0xFFFFFFFFULL);
2542 mrc->cr_flags_h = (__u32)(flags >> 32);
2543 }
2545 static inline __u64 get_mrc_cr_flags(struct mdt_rec_create *mrc)
2546 {
2547 return ((__u64)(mrc->cr_flags_l) | ((__u64)mrc->cr_flags_h << 32));
2548 }
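
/* Round-trip sketch (hypothetical selftest): 64-bit open flags survive
 * the cr_flags_l/cr_flags_h split. */
static inline int mrc_cr_flags_selftest(struct mdt_rec_create *mrc)
{
	__u64 flags = MDS_OPEN_CREAT | MDS_OPEN_LOCK;

	set_mrc_cr_flags(mrc, flags);
	return get_mrc_cr_flags(mrc) == flags;
}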
2550 /* instance of mdt_reint_rec */
2551 struct mdt_rec_link {
2559 __u32 lk_suppgid1_h;
2561 __u32 lk_suppgid2_h;
2562 struct lu_fid lk_fid1;
2563 struct lu_fid lk_fid2;
2565 __u64 lk_padding_1; /* rr_atime */
2566 __u64 lk_padding_2; /* rr_ctime */
2567 __u64 lk_padding_3; /* rr_size */
2568 __u64 lk_padding_4; /* rr_blocks */
2570 __u32 lk_padding_5; /* rr_mode */
2571 __u32 lk_padding_6; /* rr_flags */
2572 __u32 lk_padding_7; /* rr_padding_2 */
2573 __u32 lk_padding_8; /* rr_padding_3 */
2574 __u32 lk_padding_9; /* rr_padding_4 */
2577 /* instance of mdt_reint_rec */
2578 struct mdt_rec_unlink {
2586 __u32 ul_suppgid1_h;
2588 __u32 ul_suppgid2_h;
2589 struct lu_fid ul_fid1;
2590 struct lu_fid ul_fid2;
2592 __u64 ul_padding_2; /* rr_atime */
2593 __u64 ul_padding_3; /* rr_ctime */
2594 __u64 ul_padding_4; /* rr_size */
2595 __u64 ul_padding_5; /* rr_blocks */
2598 __u32 ul_padding_6; /* rr_flags */
2599 __u32 ul_padding_7; /* rr_padding_2 */
2600 __u32 ul_padding_8; /* rr_padding_3 */
2601 __u32 ul_padding_9; /* rr_padding_4 */
2604 /* instance of mdt_reint_rec */
2605 struct mdt_rec_rename {
2613 __u32 rn_suppgid1_h;
2615 __u32 rn_suppgid2_h;
2616 struct lu_fid rn_fid1;
2617 struct lu_fid rn_fid2;
2619 __u64 rn_padding_1; /* rr_atime */
2620 __u64 rn_padding_2; /* rr_ctime */
2621 __u64 rn_padding_3; /* rr_size */
2622 __u64 rn_padding_4; /* rr_blocks */
2623 __u32 rn_bias; /* some operation flags */
2624 __u32 rn_mode; /* cross-ref rename has mode */
2625 __u32 rn_padding_5; /* rr_flags */
2626 __u32 rn_padding_6; /* rr_padding_2 */
2627 __u32 rn_padding_7; /* rr_padding_3 */
2628 __u32 rn_padding_8; /* rr_padding_4 */
2631 /* instance of mdt_reint_rec */
2632 struct mdt_rec_setxattr {
2640 __u32 sx_suppgid1_h;
2642 __u32 sx_suppgid2_h;
2643 struct lu_fid sx_fid;
2644 __u64 sx_padding_1; /* These three are rr_fid2 */
2649 __u64 sx_padding_5; /* rr_ctime */
2650 __u64 sx_padding_6; /* rr_size */
2651 __u64 sx_padding_7; /* rr_blocks */
2654 __u32 sx_padding_8; /* rr_flags */
2655 __u32 sx_padding_9; /* rr_padding_2 */
2656 __u32 sx_padding_10; /* rr_padding_3 */
2657 __u32 sx_padding_11; /* rr_padding_4 */
2661 * mdt_rec_reint is the template for all mdt_reint_xxx structures.
2662 * Do NOT change the size of the various members, otherwise swabbing
2663 * will be broken in lustre_swab_mdt_rec_reint().
2665 * If you add new members in other mdt_reint_xxx structures and need to use
2666 * the rr_padding_x fields, then update lustre_swab_mdt_rec_reint() also.
2668 struct mdt_rec_reint {
2676 __u32 rr_suppgid1_h;
2678 __u32 rr_suppgid2_h;
2679 struct lu_fid rr_fid1;
2680 struct lu_fid rr_fid2;
2691 __u32 rr_padding_4; /* also fix lustre_swab_mdt_rec_reint */
2694 extern void lustre_swab_mdt_rec_reint(struct mdt_rec_reint *rr);
2696 /* lmv structures */
2698 __u32 ld_tgt_count; /* how many MDS's */
2699 __u32 ld_active_tgt_count; /* how many active */
2700 __u32 ld_default_stripe_count; /* how many objects are used */
2701 __u32 ld_pattern; /* default hash pattern */
2702 __u64 ld_default_hash_size;
2703 __u64 ld_padding_1; /* also fix lustre_swab_lmv_desc */
2704 __u32 ld_padding_2; /* also fix lustre_swab_lmv_desc */
2705 __u32 ld_qos_maxage; /* in seconds */
2706 __u32 ld_padding_3; /* also fix lustre_swab_lmv_desc */
2707 __u32 ld_padding_4; /* also fix lustre_swab_lmv_desc */
2708 struct obd_uuid ld_uuid;
2711 extern void lustre_swab_lmv_desc (struct lmv_desc *ld);
2713 /* LMV layout EA, stored in both the master and the slave objects */
2714 struct lmv_mds_md_v1 {
2716 __u32 lmv_stripe_count;
2717 __u32 lmv_master_mdt_index; /* On master object, it is master
2718 * MDT index, on slave object, it
2719 * is stripe index of the slave obj */
2720 __u32 lmv_hash_type; /* dir stripe policy, i.e. indicates
2721 * which hash function to use.
2722 * Note: only the lower 16 bits are
2723 * used for now; the higher 16 bits
2724 * will mark the object status,
2725 * for example migrating or dead. */
2726 __u32 lmv_layout_version; /* Used for directory restriping */
2730 char lmv_pool_name[LOV_MAXPOOLNAME + 1]; /* pool name */
2731 struct lu_fid lmv_stripe_fids[0]; /* FIDs for each stripe */
2734 #define LMV_MAGIC_V1 0x0CD20CD0 /* normal stripe lmv magic */
2735 #define LMV_MAGIC LMV_MAGIC_V1
2737 /* #define LMV_USER_MAGIC 0x0CD30CD0 */
2738 #define LMV_MAGIC_STRIPE 0x0CD40CD0 /* magic for dir sub_stripe */
2740 /* Right now only the lower part (bits 0-15) of lmv_hash_type is being used,
2741 * and the higher part will be the flag to indicate the status of the object,
2742 * for example whether it is being migrated. The hash function might be
2743 * interpreted differently depending on these flags. */
2744 #define LMV_HASH_TYPE_MASK 0x0000ffff
2746 #define LMV_HASH_FLAG_MIGRATION 0x80000000
2747 #define LMV_HASH_FLAG_DEAD 0x40000000
2748 #define LMV_HASH_FLAG_BAD_TYPE 0x20000000
2750 /* The striped directory once lost its master LMV EA, and then LFSCK
2751 * re-generated it. This flag is used to indicate that case. It is an
2752 * on-disk flag. */
2753 #define LMV_HASH_FLAG_LOST_LMV 0x10000000
2756 * The FNV-1a hash algorithm is as follows:
2757 * hash = FNV_offset_basis
2758 * for each octet_of_data to be hashed
2759 * hash = hash XOR octet_of_data
2760 * hash = hash × FNV_prime
2762 * http://en.wikipedia.org/wiki/Fowler–Noll–Vo_hash_function#FNV-1a_hash
2764 * http://www.isthe.com/chongo/tech/comp/fnv/index.html#FNV-reference-source
2765 * FNV_prime is 2^40 + 2^8 + 0xb3 = 0x100000001b3ULL
2767 #define LUSTRE_FNV_1A_64_PRIME 0x100000001b3ULL
2768 #define LUSTRE_FNV_1A_64_OFFSET_BIAS 0xcbf29ce484222325ULL
2769 static inline __u64 lustre_hash_fnv_1a_64(const void *buf, size_t size)
2770 {
2771 __u64 hash = LUSTRE_FNV_1A_64_OFFSET_BIAS;
2772 const unsigned char *p = buf;
2773 size_t i;
2775 for (i = 0; i < size; i++) {
2776 hash ^= p[i];
2777 hash *= LUSTRE_FNV_1A_64_PRIME;
2778 }
2780 return hash;
2781 }
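
/* Usage sketch (hypothetical helper): this hash is what a striped
 * directory may use to map a name to a stripe when lmv_hash_type selects
 * the FNV-1a function; stripe_count must be non-zero. */
static inline unsigned int lmv_name_to_stripe_example(const char *name,
						      size_t namelen,
						      unsigned int stripe_count)
{
	return (unsigned int)(lustre_hash_fnv_1a_64(name, namelen) %
			      stripe_count);
}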
2785 struct lmv_mds_md_v1 lmv_md_v1;
2786 struct lmv_user_md lmv_user_md;
2789 extern void lustre_swab_lmv_mds_md(union lmv_mds_md *lmm);
2791 static inline int lmv_mds_md_size(int stripe_count, unsigned int lmm_magic)
2792 {
2793 switch (lmm_magic) {
2794 case LMV_MAGIC_V1: {
2795 struct lmv_mds_md_v1 *lmm1;
2797 return sizeof(*lmm1) + stripe_count *
2798 sizeof(lmm1->lmv_stripe_fids[0]);
2799 }
2800 default:
2801 return -EINVAL;
2802 }
2803 }
2805 static inline int lmv_mds_md_stripe_count_get(const union lmv_mds_md *lmm)
2806 {
2807 switch (le32_to_cpu(lmm->lmv_magic)) {
2808 case LMV_MAGIC_V1:
2809 return le32_to_cpu(lmm->lmv_md_v1.lmv_stripe_count);
2810 case LMV_USER_MAGIC:
2811 return le32_to_cpu(lmm->lmv_user_md.lum_stripe_count);
2812 default:
2813 return -EINVAL;
2814 }
2815 }
2817 static inline int lmv_mds_md_stripe_count_set(union lmv_mds_md *lmm,
2818 unsigned int stripe_count)
2819 {
2820 switch (le32_to_cpu(lmm->lmv_magic)) {
2821 case LMV_MAGIC_V1:
2822 lmm->lmv_md_v1.lmv_stripe_count = cpu_to_le32(stripe_count);
2823 break;
2824 case LMV_USER_MAGIC:
2825 lmm->lmv_user_md.lum_stripe_count = cpu_to_le32(stripe_count);
2826 break;
2827 default:
2828 return -EINVAL;
2829 }
2830 return 0;
2831 }
2837 FLD_FIRST_OPC = FLD_QUERY
2843 SEQ_FIRST_OPC = SEQ_QUERY
2847 SEQ_ALLOC_SUPER = 0,
2859 LFSCK_NOTIFY = 1101,
2862 LFSCK_FIRST_OPC = LFSCK_NOTIFY
2866 * LOV data structures
2869 #define LOV_MAX_UUID_BUFFER_SIZE 8192
2870 /* The size of the buffer the lov/mdc reserves for the
2871 * array of UUIDs returned by the MDS. With the current
2872 * protocol, this will limit the max number of OSTs per LOV */
2874 #define LOV_DESC_MAGIC 0xB0CCDE5C
2875 #define LOV_DESC_QOS_MAXAGE_DEFAULT 5 /* Seconds */
2876 #define LOV_DESC_STRIPE_SIZE_DEFAULT (1 << LNET_MTU_BITS)
2878 /* LOV settings descriptor (should only contain static info) */
2880 __u32 ld_tgt_count; /* how many OBD's */
2881 __u32 ld_active_tgt_count; /* how many active */
2882 __u32 ld_default_stripe_count; /* how many objects are used */
2883 __u32 ld_pattern; /* default PATTERN_RAID0 */
2884 __u64 ld_default_stripe_size; /* in bytes */
2885 __u64 ld_default_stripe_offset; /* in bytes */
2886 __u32 ld_padding_0; /* unused */
2887 __u32 ld_qos_maxage; /* in seconds */
2888 __u32 ld_padding_1; /* also fix lustre_swab_lov_desc */
2889 __u32 ld_padding_2; /* also fix lustre_swab_lov_desc */
2890 struct obd_uuid ld_uuid;
2893 #define ld_magic ld_active_tgt_count /* for swabbing from llogs */
2895 extern void lustre_swab_lov_desc (struct lov_desc *ld);
2900 /* opcodes -- MUST be distinct from OST/MDS opcodes */
2905 LDLM_BL_CALLBACK = 104,
2906 LDLM_CP_CALLBACK = 105,
2907 LDLM_GL_CALLBACK = 106,
2908 LDLM_SET_INFO = 107,
2911 #define LDLM_FIRST_OPC LDLM_ENQUEUE
2913 #define RES_NAME_SIZE 4
2914 struct ldlm_res_id {
2915 __u64 name[RES_NAME_SIZE];
2918 #define DLDLMRES "["LPX64":"LPX64":"LPX64"]."LPX64i
2919 #define PLDLMRES(res) (res)->lr_name.name[0], (res)->lr_name.name[1], \
2920 (res)->lr_name.name[2], (res)->lr_name.name[3]
2922 extern void lustre_swab_ldlm_res_id (struct ldlm_res_id *id);
2924 static inline bool ldlm_res_eq(const struct ldlm_res_id *res0,
2925 const struct ldlm_res_id *res1)
2926 {
2927 return memcmp(res0, res1, sizeof(*res0)) == 0;
2928 }
2944 #define LCK_MODE_NUM 8
2954 #define LDLM_MIN_TYPE LDLM_PLAIN
2956 struct ldlm_extent {
2962 static inline int ldlm_extent_overlap(const struct ldlm_extent *ex1,
2963 const struct ldlm_extent *ex2)
2964 {
2965 return ex1->start <= ex2->end && ex2->start <= ex1->end;
2966 }
2968 /* check if @ex1 contains @ex2 */
2969 static inline int ldlm_extent_contain(const struct ldlm_extent *ex1,
2970 const struct ldlm_extent *ex2)
2971 {
2972 return ex1->start <= ex2->start && ex1->end >= ex2->end;
2973 }
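
/* Sanity sketch (hypothetical selftest): [0, 4095] and [4095, 8191]
 * share exactly one byte, so they overlap but neither contains the
 * other. */
static inline int ldlm_extent_selftest(void)
{
	struct ldlm_extent a = { .start = 0, .end = 4095 };
	struct ldlm_extent b = { .start = 4095, .end = 8191 };

	return ldlm_extent_overlap(&a, &b) && !ldlm_extent_contain(&a, &b);
}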
2975 struct ldlm_inodebits {
2979 struct ldlm_flock_wire {
2987 /* it's important that the fields of the ldlm_extent structure match
2988 * the first fields of the ldlm_flock structure because there is only
2989 * one ldlm_swab routine to process the ldlm_policy_data_t union. if
2990 * this ever changes we will need to swab the union differently based
2991 * on the resource type. */
2994 struct ldlm_extent l_extent;
2995 struct ldlm_flock_wire l_flock;
2996 struct ldlm_inodebits l_inodebits;
2997 } ldlm_wire_policy_data_t;
2999 extern void lustre_swab_ldlm_policy_data (ldlm_wire_policy_data_t *d);
3001 union ldlm_gl_desc {
3002 struct ldlm_gl_lquota_desc lquota_desc;
3005 extern void lustre_swab_gl_desc(union ldlm_gl_desc *);
3007 struct ldlm_intent {
3011 extern void lustre_swab_ldlm_intent (struct ldlm_intent *i);
3013 struct ldlm_resource_desc {
3014 ldlm_type_t lr_type;
3015 __u32 lr_padding; /* also fix lustre_swab_ldlm_resource_desc */
3016 struct ldlm_res_id lr_name;
3019 extern void lustre_swab_ldlm_resource_desc (struct ldlm_resource_desc *r);
3021 struct ldlm_lock_desc {
3022 struct ldlm_resource_desc l_resource;
3023 ldlm_mode_t l_req_mode;
3024 ldlm_mode_t l_granted_mode;
3025 ldlm_wire_policy_data_t l_policy_data;
3028 extern void lustre_swab_ldlm_lock_desc (struct ldlm_lock_desc *l);
3030 #define LDLM_LOCKREQ_HANDLES 2
3031 #define LDLM_ENQUEUE_CANCEL_OFF 1
3033 struct ldlm_request {
3036 struct ldlm_lock_desc lock_desc;
3037 struct lustre_handle lock_handle[LDLM_LOCKREQ_HANDLES];
3040 extern void lustre_swab_ldlm_request (struct ldlm_request *rq);
3042 /* If LDLM_ENQUEUE, 1 slot is already occupied, 1 is available.
3043 * Otherwise, 2 are available. */
3044 #define ldlm_request_bufsize(count,type) \
3045 ({ \
3046 int _avail = LDLM_LOCKREQ_HANDLES; \
3047 _avail -= (type == LDLM_ENQUEUE ? LDLM_ENQUEUE_CANCEL_OFF : 0); \
3048 sizeof(struct ldlm_request) + \
3049 (count > _avail ? count - _avail : 0) * \
3050 sizeof(struct lustre_handle); \
3051 })
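
/* Sizing sketch (hypothetical selftest): an LDLM_ENQUEUE carrying 5
 * cancel handles needs room for only 4 extra ones, since 1 of the 2
 * embedded lock_handle[] slots is free (the other holds the enqueued
 * lock). */
static inline int ldlm_request_bufsize_selftest(void)
{
	return ldlm_request_bufsize(5, LDLM_ENQUEUE) ==
	       sizeof(struct ldlm_request) + 4 * sizeof(struct lustre_handle);
}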
3055 __u32 lock_padding; /* also fix lustre_swab_ldlm_reply */
3056 struct ldlm_lock_desc lock_desc;
3057 struct lustre_handle lock_handle;
3058 __u64 lock_policy_res1;
3059 __u64 lock_policy_res2;
3062 extern void lustre_swab_ldlm_reply (struct ldlm_reply *r);
3064 #define ldlm_flags_to_wire(flags) ((__u32)(flags))
3065 #define ldlm_flags_from_wire(flags) ((__u64)(flags))
3068 * Opcodes for mountconf (mgs and mgc)
3073 MGS_EXCEPTION, /* node died, etc. */
3074 MGS_TARGET_REG, /* whenever target starts up */
3080 #define MGS_FIRST_OPC MGS_CONNECT
3082 #define MGS_PARAM_MAXLEN 1024
3083 #define KEY_SET_INFO "set_info"
3085 struct mgs_send_param {
3086 char mgs_param[MGS_PARAM_MAXLEN];
3089 /* We pass this info to the MGS so it can write config logs */
3090 #define MTI_NAME_MAXLEN 64
3091 #define MTI_PARAM_MAXLEN 4096
3092 #define MTI_NIDS_MAX 32
3093 struct mgs_target_info {
3094 __u32 mti_lustre_ver;
3095 __u32 mti_stripe_index;
3096 __u32 mti_config_ver;
3098 __u32 mti_nid_count;
3099 __u32 mti_instance; /* Running instance of target */
3100 char mti_fsname[MTI_NAME_MAXLEN];
3101 char mti_svname[MTI_NAME_MAXLEN];
3102 char mti_uuid[sizeof(struct obd_uuid)];
3103 __u64 mti_nids[MTI_NIDS_MAX]; /* host nids (lnet_nid_t)*/
3104 char mti_params[MTI_PARAM_MAXLEN];
3106 extern void lustre_swab_mgs_target_info(struct mgs_target_info *oinfo);
3108 struct mgs_nidtbl_entry {
3109 __u64 mne_version; /* table version of this entry */
3110 __u32 mne_instance; /* target instance # */
3111 __u32 mne_index; /* target index */
3112 __u32 mne_length; /* length of this entry, in bytes */
3113 __u8 mne_type; /* target type LDD_F_SV_TYPE_OST/MDT */
3114 __u8 mne_nid_type; /* type of NID (must be zero); reserved for IPv6 */
3115 __u8 mne_nid_size; /* size of each NID, in bytes */
3116 __u8 mne_nid_count; /* # of NIDs in buffer */
3118 lnet_nid_t nids[0]; /* variable size buffer for NIDs. */
3121 extern void lustre_swab_mgs_nidtbl_entry(struct mgs_nidtbl_entry *oinfo);
3123 struct mgs_config_body {
3124 char mcb_name[MTI_NAME_MAXLEN]; /* logname */
3125 __u64 mcb_offset; /* next index of config log to request */
3126 __u16 mcb_type; /* type of log: CONFIG_T_[CONFIG|RECOVER] */
3128 __u8 mcb_bits; /* bits unit size of config log */
3129 __u32 mcb_units; /* # of units for bulk transfer */
3131 extern void lustre_swab_mgs_config_body(struct mgs_config_body *body);
3133 struct mgs_config_res {
3134 __u64 mcr_offset; /* index of last config log */
3135 __u64 mcr_size; /* size of the log */
3137 extern void lustre_swab_mgs_config_res(struct mgs_config_res *body);
3139 /* Config marker flags (in config log) */
3140 #define CM_START 0x01
3142 #define CM_SKIP 0x04
3143 #define CM_UPGRADE146 0x08
3144 #define CM_EXCLUDE 0x10
3145 #define CM_START_SKIP (CM_START | CM_SKIP)
3148 __u32 cm_step; /* aka config version */
3150 __u32 cm_vers; /* lustre release version number */
3151 __u32 cm_padding; /* 64 bit align */
3152 __s64 cm_createtime; /* when this record was first created */
3153 __s64 cm_canceltime; /* when this record is no longer valid */
3154 char cm_tgtname[MTI_NAME_MAXLEN];
3155 char cm_comment[MTI_NAME_MAXLEN];
3158 extern void lustre_swab_cfg_marker(struct cfg_marker *marker,
3159 int swab, int size);
3162 * Opcodes for multiple servers.
3172 #define OBD_FIRST_OPC OBD_PING
3175 * llog contexts indices.
3177 * There is a compatibility problem with the indices below: they are not
3178 * contiguous and must keep their numbers for compatibility reasons.
3179 * See LU-5218 for details.
3182 LLOG_CONFIG_ORIG_CTXT = 0,
3183 LLOG_CONFIG_REPL_CTXT = 1,
3184 LLOG_MDS_OST_ORIG_CTXT = 2,
3185 LLOG_MDS_OST_REPL_CTXT = 3, /* kept just to avoid re-assignment */
3186 LLOG_SIZE_ORIG_CTXT = 4,
3187 LLOG_SIZE_REPL_CTXT = 5,
3188 LLOG_TEST_ORIG_CTXT = 8,
3189 LLOG_TEST_REPL_CTXT = 9, /* kept just to avoid re-assignment */
3190 LLOG_CHANGELOG_ORIG_CTXT = 12, /**< changelog generation on mdd */
3191 LLOG_CHANGELOG_REPL_CTXT = 13, /**< changelog access on clients */
3192 /* for multiple changelog consumers */
3193 LLOG_CHANGELOG_USER_ORIG_CTXT = 14,
3194 LLOG_AGENT_ORIG_CTXT = 15, /**< agent requests generation on cdt */
3198 /** Identifier for a single log object */
3200 struct ost_id lgl_oi;
3202 } __attribute__((packed));
3204 /** Records written to the CATALOGS list */
3205 #define CATLIST "CATALOGS"
3207 struct llog_logid lci_logid;
3211 } __attribute__((packed));
3213 /* Log data record types - there is no specific reason that these need to
3214 * be related to the RPC opcodes, but no reason not to (may be handy later?)
3216 #define LLOG_OP_MAGIC 0x10600000
3217 #define LLOG_OP_MASK 0xfff00000
3220 LLOG_PAD_MAGIC = LLOG_OP_MAGIC | 0x00000,
3221 OST_SZ_REC = LLOG_OP_MAGIC | 0x00f00,
3222 /* OST_RAID1_REC = LLOG_OP_MAGIC | 0x01000, never used */
3223 MDS_UNLINK_REC = LLOG_OP_MAGIC | 0x10000 | (MDS_REINT << 8) |
3224 REINT_UNLINK, /* obsolete after 2.5.0 */
3225 MDS_UNLINK64_REC = LLOG_OP_MAGIC | 0x90000 | (MDS_REINT << 8) |
3227 /* MDS_SETATTR_REC = LLOG_OP_MAGIC | 0x12401, obsolete 1.8.0 */
3228 MDS_SETATTR64_REC = LLOG_OP_MAGIC | 0x90000 | (MDS_REINT << 8) |
3230 OBD_CFG_REC = LLOG_OP_MAGIC | 0x20000,
3231 /* PTL_CFG_REC = LLOG_OP_MAGIC | 0x30000, obsolete 1.4.0 */
3232 LLOG_GEN_REC = LLOG_OP_MAGIC | 0x40000,
3233 /* LLOG_JOIN_REC = LLOG_OP_MAGIC | 0x50000, obsolete 1.8.0 */
3234 CHANGELOG_REC = LLOG_OP_MAGIC | 0x60000,
3235 CHANGELOG_USER_REC = LLOG_OP_MAGIC | 0x70000,
3236 HSM_AGENT_REC = LLOG_OP_MAGIC | 0x80000,
3237 LLOG_HDR_MAGIC = LLOG_OP_MAGIC | 0x45539,
3238 LLOG_LOGID_MAGIC = LLOG_OP_MAGIC | 0x4553b,
3241 #define LLOG_REC_HDR_NEEDS_SWABBING(r) \
3242 (((r)->lrh_type & __swab32(LLOG_OP_MASK)) == __swab32(LLOG_OP_MAGIC))
3244 /** Log record header - stored in little endian order.
3245 * Each record must start with this struct, end with a llog_rec_tail,
3246 * and be a multiple of 256 bits in size.
3248 struct llog_rec_hdr {
3255 struct llog_rec_tail {
3260 /* Where data follow just after header */
3261 #define REC_DATA(ptr) \
3262 ((void *)((char *)(ptr) + sizeof(struct llog_rec_hdr)))
3264 #define REC_DATA_LEN(rec) \
3265 ((rec)->lrh_len - sizeof(struct llog_rec_hdr) - \
3266 sizeof(struct llog_rec_tail))
3268 static inline void *rec_tail(struct llog_rec_hdr *rec)
3269 {
3270 return (void *)((char *)rec + rec->lrh_len -
3271 sizeof(struct llog_rec_tail));
3272 }
3274 struct llog_logid_rec {
3275 struct llog_rec_hdr lid_hdr;
3276 struct llog_logid lid_id;
3280 struct llog_rec_tail lid_tail;
3281 } __attribute__((packed));
3283 struct llog_unlink_rec {
3284 struct llog_rec_hdr lur_hdr;
3288 struct llog_rec_tail lur_tail;
3289 } __attribute__((packed));
3291 struct llog_unlink64_rec {
3292 struct llog_rec_hdr lur_hdr;
3293 struct lu_fid lur_fid;
3294 __u32 lur_count; /* to destroy lost precreated objects */
3298 struct llog_rec_tail lur_tail;
3299 } __attribute__((packed));
3301 struct llog_setattr64_rec {
3302 struct llog_rec_hdr lsr_hdr;
3303 struct ost_id lsr_oi;
3309 struct llog_rec_tail lsr_tail;
3310 } __attribute__((packed));
3312 struct llog_size_change_rec {
3313 struct llog_rec_hdr lsc_hdr;
3314 struct ll_fid lsc_fid;
3319 struct llog_rec_tail lsc_tail;
3320 } __attribute__((packed));
3322 #define CHANGELOG_MAGIC 0xca103000
3324 /** \a changelog_rec_type's that can't be masked */
3325 #define CHANGELOG_MINMASK (1 << CL_MARK)
3326 /** bits covering all \a changelog_rec_type's */
3327 #define CHANGELOG_ALLMASK 0xFFFFFFFF
3328 /** default \a changelog_rec_type mask. Allow all of them, except
3329 * CL_ATIME since it can really be time consuming, and not necessary
3330 * under normal use. */
3331 #define CHANGELOG_DEFMASK (CHANGELOG_ALLMASK & ~(1 << CL_ATIME))
3333 /* changelog llog name, needed by client replicators */
3334 #define CHANGELOG_CATALOG "changelog_catalog"
3336 struct changelog_setinfo {
3339 } __attribute__((packed));
3341 /** changelog record */
3342 struct llog_changelog_rec {
3343 struct llog_rec_hdr cr_hdr;
3344 struct changelog_rec cr; /**< Variable length field */
3345 struct llog_rec_tail cr_do_not_use; /**< for_sizeof_only */
3346 } __attribute__((packed));
3348 #define CHANGELOG_USER_PREFIX "cl"
3350 struct llog_changelog_user_rec {
3351 struct llog_rec_hdr cur_hdr;
3355 struct llog_rec_tail cur_tail;
3356 } __attribute__((packed));
3358 enum agent_req_status {
3359 ARS_WAITING,
3360 ARS_STARTED,
3361 ARS_FAILED,
3362 ARS_CANCELED,
3363 ARS_SUCCEED,
3364 };
3366 static inline const char *agent_req_status2name(enum agent_req_status ars)
3367 {
3368 switch (ars) {
3369 case ARS_WAITING:
3370 return "WAITING";
3371 case ARS_STARTED:
3372 return "STARTED";
3373 case ARS_FAILED:
3374 return "FAILED";
3375 case ARS_CANCELED:
3376 return "CANCELED";
3377 case ARS_SUCCEED:
3378 return "SUCCEED";
3379 default:
3380 return "UNKNOWN";
3381 }
3382 }
3384 static inline bool agent_req_in_final_state(enum agent_req_status ars)
3385 {
3386 return ((ars == ARS_SUCCEED) || (ars == ARS_FAILED) ||
3387 (ars == ARS_CANCELED));
3388 }
3390 struct llog_agent_req_rec {
3391 struct llog_rec_hdr arr_hdr; /**< record header */
3392 __u32 arr_status; /**< status of the request */
3393 /* must match enum
3394 * agent_req_status */
3395 __u32 arr_archive_id; /**< backend archive number */
3396 __u64 arr_flags; /**< req flags */
3397 __u64 arr_compound_id; /**< compound cookie */
3398 __u64 arr_req_create; /**< req. creation time */
3399 __u64 arr_req_change; /**< req. status change time */
3400 struct hsm_action_item arr_hai; /**< req. to the agent */
3401 struct llog_rec_tail arr_tail; /**< record tail for_sizeof_only */
3402 } __attribute__((packed));
3404 /* Old llog gen for compatibility */
3408 } __attribute__((packed));
3410 struct llog_gen_rec {
3411 struct llog_rec_hdr lgr_hdr;
3412 struct llog_gen lgr_gen;
3416 struct llog_rec_tail lgr_tail;
3419 /* On-disk header structure of each log object, stored in little endian order */
3420 #define LLOG_CHUNK_SIZE 8192
3421 #define LLOG_HEADER_SIZE (96)
3422 #define LLOG_BITMAP_BYTES (LLOG_CHUNK_SIZE - LLOG_HEADER_SIZE)
3424 #define LLOG_MIN_REC_SIZE (24) /* round(llog_rec_hdr + llog_rec_tail) */
3426 /* flags for the logs */
3428 LLOG_F_ZAP_WHEN_EMPTY = 0x1,
3429 LLOG_F_IS_CAT = 0x2,
3430 LLOG_F_IS_PLAIN = 0x4,
3431 LLOG_F_EXT_JOBID = 0x8,
3433 LLOG_F_EXT_MASK = LLOG_F_EXT_JOBID,
3436 struct llog_log_hdr {
3437 struct llog_rec_hdr llh_hdr;
3438 __s64 llh_timestamp;
3440 __u32 llh_bitmap_offset;
3444 /* for a catalog the first plain slot is next to it */
3445 struct obd_uuid llh_tgtuuid;
3446 __u32 llh_reserved[LLOG_HEADER_SIZE/sizeof(__u32) - 23];
3447 __u32 llh_bitmap[LLOG_BITMAP_BYTES/sizeof(__u32)];
3448 struct llog_rec_tail llh_tail;
3449 } __attribute__((packed));
3451 #define LLOG_BITMAP_SIZE(llh) (__u32)((llh->llh_hdr.lrh_len - \
3452 llh->llh_bitmap_offset - \
3453 sizeof(llh->llh_tail)) * 8)
3455 /** log cookies are used to reference a specific log file and a record therein */
3456 struct llog_cookie {
3457 struct llog_logid lgc_lgl;
3461 } __attribute__((packed));
3463 /** llog protocol */
3464 enum llogd_rpc_ops {
3465 LLOG_ORIGIN_HANDLE_CREATE = 501,
3466 LLOG_ORIGIN_HANDLE_NEXT_BLOCK = 502,
3467 LLOG_ORIGIN_HANDLE_READ_HEADER = 503,
3468 LLOG_ORIGIN_HANDLE_WRITE_REC = 504,
3469 LLOG_ORIGIN_HANDLE_CLOSE = 505,
3470 LLOG_ORIGIN_CONNECT = 506,
3471 LLOG_CATINFO = 507, /* deprecated */
3472 LLOG_ORIGIN_HANDLE_PREV_BLOCK = 508,
3473 LLOG_ORIGIN_HANDLE_DESTROY = 509, /* for destroy llog object*/
3475 LLOG_FIRST_OPC = LLOG_ORIGIN_HANDLE_CREATE
3479 struct llog_logid lgd_logid;
3481 __u32 lgd_llh_flags;
3483 __u32 lgd_saved_index;
3485 __u64 lgd_cur_offset;
3486 } __attribute__((packed));
3488 struct llogd_conn_body {
3489 struct llog_gen lgdc_gen;
3490 struct llog_logid lgdc_logid;
3491 __u32 lgdc_ctxt_idx;
3492 } __attribute__((packed));
3494 /* Note: 64-bit types are 64-bit aligned in structure */
3496 __u64 o_valid; /* hot fields in this obdo */
3499 __u64 o_size; /* o_size-o_blocks == ost_lvb */
3503 __u64 o_blocks; /* brw: cli sent cached bytes */
3506 /* 32-bit fields start here: keep an even number of them via padding */
3507 __u32 o_blksize; /* optimal IO blocksize */
3508 __u32 o_mode; /* brw: cli sent cache remain */
3512 __u32 o_nlink; /* brw: checksum */
3514 __u32 o_misc; /* brw: o_dropped */
3516 __u64 o_ioepoch; /* epoch in ost writes */
3517 __u32 o_stripe_idx; /* holds stripe idx */
3519 struct lustre_handle o_handle; /* brw: lock handle to prolong
3521 struct llog_cookie o_lcookie; /* destroy: unlink cookie from
3526 __u64 o_data_version; /* getattr: sum of iversion for
3528 * brw: grant space consumed on
3529 * the client for the write */
3535 #define o_dirty o_blocks
3536 #define o_undirty o_mode
3537 #define o_dropped o_misc
3538 #define o_cksum o_nlink
3539 #define o_grant_used o_data_version
3541 struct lfsck_request {
3554 __u16 lr_async_windows;
3556 struct lu_fid lr_fid;
3557 struct lu_fid lr_fid2;
3558 struct lu_fid lr_fid3;
3563 void lustre_swab_lfsck_request(struct lfsck_request *lr);
3565 struct lfsck_reply {
3571 void lustre_swab_lfsck_reply(struct lfsck_reply *lr);
3574 LE_LASTID_REBUILDING = 1,
3575 LE_LASTID_REBUILT = 2,
3581 LE_FID_ACCESSED = 8,
3583 LE_CONDITIONAL_DESTROY = 10,
3584 LE_PAIRS_VERIFY = 11,
3585 LE_SKIP_NLINK_DECLARE = 13,
3587 LE_SET_LMV_MASTER = 15,
3588 LE_SET_LMV_SLAVE = 16,
3591 enum lfsck_event_flags {
3592 LEF_TO_OST = 0x00000001,
3593 LEF_FROM_OST = 0x00000002,
3594 LEF_SET_LMV_HASH = 0x00000004,
3595 LEF_SET_LMV_ALL = 0x00000008,
3596 LEF_RECHECK_NAME_HASH = 0x00000010,
3599 static inline void lustre_set_wire_obdo(const struct obd_connect_data *ocd,
3600 struct obdo *wobdo,
3601 const struct obdo *lobdo)
3602 {
3603 *wobdo = *lobdo;
3604 wobdo->o_flags &= ~OBD_FL_LOCAL_MASK;
3605 if (ocd == NULL)
3606 return;
3608 if (unlikely(!(ocd->ocd_connect_flags & OBD_CONNECT_FID)) &&
3609 fid_seq_is_echo(ostid_seq(&lobdo->o_oi))) {
3610 /* Currently OBD_FL_OSTID will only be used when a 2.4 echo
3611 * client communicates with a pre-2.4 server */
3612 wobdo->o_oi.oi.oi_id = fid_oid(&lobdo->o_oi.oi_fid);
3613 wobdo->o_oi.oi.oi_seq = fid_seq(&lobdo->o_oi.oi_fid);
3614 }
3615 }
3617 static inline void lustre_get_wire_obdo(const struct obd_connect_data *ocd,
3618 struct obdo *lobdo,
3619 const struct obdo *wobdo)
3620 {
3621 __u32 local_flags = 0;
3623 if (lobdo->o_valid & OBD_MD_FLFLAGS)
3624 local_flags = lobdo->o_flags & OBD_FL_LOCAL_MASK;
3626 *lobdo = *wobdo;
3627 if (local_flags != 0) {
3628 lobdo->o_valid |= OBD_MD_FLFLAGS;
3629 lobdo->o_flags &= ~OBD_FL_LOCAL_MASK;
3630 lobdo->o_flags |= local_flags;
3631 }
3633 if (ocd == NULL)
3634 return;
3635 if (unlikely(!(ocd->ocd_connect_flags & OBD_CONNECT_FID)) &&
3636 fid_seq_is_echo(wobdo->o_oi.oi.oi_seq)) {
3637 /* see the comment in lustre_set_wire_obdo() */
3638 lobdo->o_oi.oi_fid.f_seq = wobdo->o_oi.oi.oi_seq;
3639 lobdo->o_oi.oi_fid.f_oid = wobdo->o_oi.oi.oi_id;
3640 lobdo->o_oi.oi_fid.f_ver = 0;
3641 }
3642 }
3644 extern void lustre_swab_obdo (struct obdo *o);
3646 /* request structure for OST's */
3651 /* Key for FIEMAP to be used in get_info calls */
3652 struct ll_fiemap_info_key {
3655 struct ll_user_fiemap fiemap;
3658 extern void lustre_swab_ost_body (struct ost_body *b);
3659 extern void lustre_swab_ost_last_id(__u64 *id);
3660 extern void lustre_swab_fiemap(struct ll_user_fiemap *fiemap);
3662 extern void lustre_swab_lov_user_md_v1(struct lov_user_md_v1 *lum);
3663 extern void lustre_swab_lov_user_md_v3(struct lov_user_md_v3 *lum);
3664 extern void lustre_swab_lov_user_md_objects(struct lov_user_ost_data *lod,
3666 extern void lustre_swab_lov_mds_md(struct lov_mds_md *lmm);
3667 void lustre_print_user_md(unsigned int level, struct lov_user_md *lum,
3671 extern void lustre_swab_llogd_body (struct llogd_body *d);
3672 extern void lustre_swab_llog_hdr (struct llog_log_hdr *h);
3673 extern void lustre_swab_llogd_conn_body (struct llogd_conn_body *d);
3674 extern void lustre_swab_llog_rec(struct llog_rec_hdr *rec);
3675 extern void lustre_swab_llog_id(struct llog_logid *lid);
3678 extern void lustre_swab_lustre_cfg(struct lustre_cfg *lcfg);
3680 /* Functions for dumping PTLRPC fields */
3681 void dump_rniobuf(struct niobuf_remote *rnb);
3682 void dump_ioo(struct obd_ioobj *nb);
3683 void dump_obdo(struct obdo *oa);
3684 void dump_ost_body(struct ost_body *ob);
3685 void dump_rcs(__u32 *rc);
3687 #define IDX_INFO_MAGIC 0x3D37CC37
3689 /* Index file transfer over the network. The server serializes the index into
3690 * a byte stream, which is sent to the client via a bulk transfer */
3694 /* reply: see idx_info_flags below */
3697 /* request & reply: number of lu_idxpage (to be) transferred */
3701 /* request: requested attributes passed down to the iterator API */
3704 /* request & reply: index file identifier (FID) */
3705 struct lu_fid ii_fid;
3707 /* reply: version of the index file before starting to walk the index.
3708 * Please note that the version can be modified at any time during the
3712 /* request: hash to start with:
3713 * reply: hash of the first entry of the first lu_idxpage and hash
3714 * of the entry to read next if any */
3715 __u64 ii_hash_start;
3718 /* reply: size of keys in lu_idxpages, minimal one if II_FL_VARKEY is
3722 /* reply: size of records in lu_idxpages, minimal one if II_FL_VARREC
3730 extern void lustre_swab_idx_info(struct idx_info *ii);
3732 #define II_END_OFF MDS_DIR_END_OFF /* all entries have been read */
3734 /* List of flags used in idx_info::ii_flags */
3735 enum idx_info_flags {
3736 II_FL_NOHASH = 1 << 0, /* client doesn't care about hash value */
3737 II_FL_VARKEY = 1 << 1, /* keys can be of variable size */
3738 II_FL_VARREC = 1 << 2, /* records can be of variable size */
3739 II_FL_NONUNQ = 1 << 3, /* index supports non-unique keys */
3740 II_FL_NOKEY = 1 << 4, /* client doesn't care about key */
3743 #define LIP_MAGIC 0x8A6D6B6C
3745 /* 4KB (= LU_PAGE_SIZE) container gathering key/record pairs */
3747 /* 16-byte header */
3750 __u16 lip_nr; /* number of entries in the container */
3751 __u64 lip_pad0; /* additional padding for future use */
3753 /* key/record pairs are stored in the remaining 4080 bytes.
3754 * depending upon the flags in idx_info::ii_flags, each key/record
3755 * pair might be preceded by:
3757 * - the key size (II_FL_VARKEY is set)
3758 * - the record size (II_FL_VARREC is set)
3760 * For the time being, we only support fixed-size key & record. */
3761 char lip_entries[0];
3763 extern void lustre_swab_lip_header(struct lu_idxpage *lip);
3765 #define LIP_HDR_SIZE (offsetof(struct lu_idxpage, lip_entries))
3767 /* Gather all possible types associated with a 4KB container */
3769 struct lu_dirpage lp_dir; /* for MDS_READPAGE */
3770 struct lu_idxpage lp_idx; /* for OBD_IDX_READ */
3771 char lp_array[LU_PAGE_SIZE];
3774 /* security opcodes */
3777 SEC_CTX_INIT_CONT = 802,
3780 SEC_FIRST_OPC = SEC_CTX_INIT
3784 * capa related definitions
3786 #define CAPA_HMAC_MAX_LEN 64
3787 #define CAPA_HMAC_KEY_MAX_LEN 56
3789 /* NB take care when changing the sequence of elements in this struct,
3790 * because the offset info is used in find_capa() */
3791 struct lustre_capa {
3792 struct lu_fid lc_fid; /** fid */
3793 __u64 lc_opc; /** operations allowed */
3794 __u64 lc_uid; /** file owner */
3795 __u64 lc_gid; /** file group */
3796 __u32 lc_flags; /** HMAC algorithm & flags */
3797 __u32 lc_keyid; /** key# used for the capability */
3798 __u32 lc_timeout; /** capa timeout value (sec) */
3799 __u32 lc_expiry; /** expiry time (sec) */
3800 __u8 lc_hmac[CAPA_HMAC_MAX_LEN]; /** HMAC */
3801 } __attribute__((packed));
3803 extern void lustre_swab_lustre_capa(struct lustre_capa *c);
3805 /** lustre_capa::lc_opc */
3807 CAPA_OPC_BODY_WRITE = 1<<0, /**< write object data */
3808 CAPA_OPC_BODY_READ = 1<<1, /**< read object data */
3809 CAPA_OPC_INDEX_LOOKUP = 1<<2, /**< lookup object fid */
3810 CAPA_OPC_INDEX_INSERT = 1<<3, /**< insert object fid */
3811 CAPA_OPC_INDEX_DELETE = 1<<4, /**< delete object fid */
3812 CAPA_OPC_OSS_WRITE = 1<<5, /**< write oss object data */
3813 CAPA_OPC_OSS_READ = 1<<6, /**< read oss object data */
3814 CAPA_OPC_OSS_TRUNC = 1<<7, /**< truncate oss object */
3815 CAPA_OPC_OSS_DESTROY = 1<<8, /**< destroy oss object */
3816 CAPA_OPC_META_WRITE = 1<<9, /**< write object meta data */
3817 CAPA_OPC_META_READ = 1<<10, /**< read object meta data */
3820 #define CAPA_OPC_OSS_RW (CAPA_OPC_OSS_READ | CAPA_OPC_OSS_WRITE)
3821 #define CAPA_OPC_MDS_ONLY \
3822 (CAPA_OPC_BODY_WRITE | CAPA_OPC_BODY_READ | CAPA_OPC_INDEX_LOOKUP | \
3823 CAPA_OPC_INDEX_INSERT | CAPA_OPC_INDEX_DELETE)
3824 #define CAPA_OPC_OSS_ONLY \
3825 (CAPA_OPC_OSS_WRITE | CAPA_OPC_OSS_READ | CAPA_OPC_OSS_TRUNC | \
3826 CAPA_OPC_OSS_DESTROY)
3827 #define CAPA_OPC_MDS_DEFAULT ~CAPA_OPC_OSS_ONLY
3828 #define CAPA_OPC_OSS_DEFAULT ~(CAPA_OPC_MDS_ONLY | CAPA_OPC_OSS_ONLY)
3830 /* MDS capability covers object capability for operations of body r/w
3831 * (dir readpage/sendpage), index lookup/insert/delete and meta data r/w,
3832 * while OSS capability only covers object capability for operations of
3833 * oss data (file content) r/w/truncate.
3834 */
3835 static inline int capa_for_mds(struct lustre_capa *c)
3836 {
3837 return (c->lc_opc & CAPA_OPC_INDEX_LOOKUP) != 0;
3838 }
3840 static inline int capa_for_oss(struct lustre_capa *c)
3841 {
3842 return (c->lc_opc & CAPA_OPC_INDEX_LOOKUP) == 0;
3843 }
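
/* Classification sketch (hypothetical helper): a capability is typed by
 * the index-lookup bit alone, so the MDS and OSS classes are mutually
 * exclusive by construction. */
static inline int capa_class_selftest(struct lustre_capa *c)
{
	return capa_for_mds(c) != capa_for_oss(c);
}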
3845 static inline bool lovea_slot_is_dummy(const struct lov_ost_data_v1 *obj)
3846 {
3847 /* an all-zero area does not depend on byte order. */
3848 if (obj->l_ost_oi.oi.oi_id == 0 && obj->l_ost_oi.oi.oi_seq == 0 &&
3849 obj->l_ost_idx == 0 && obj->l_ost_gen == 0)
3850 return true;
3852 return false;
3853 }
3855 /* lustre_capa::lc_hmac_alg */
3857 CAPA_HMAC_ALG_SHA1 = 1, /**< sha1 algorithm */
3861 #define CAPA_FL_MASK 0x00ffffff
3862 #define CAPA_HMAC_ALG_MASK 0xff000000
3864 struct lustre_capa_key {
3865 __u64 lk_seq; /**< mds# */
3866 __u32 lk_keyid; /**< key# */
3868 __u8 lk_key[CAPA_HMAC_KEY_MAX_LEN]; /**< key */
3869 } __attribute__((packed));
3871 extern void lustre_swab_lustre_capa_key(struct lustre_capa_key *k);
3873 /** The link ea holds 1 \a link_ea_entry for each hardlink */
3874 #define LINK_EA_MAGIC 0x11EAF1DFUL
3875 struct link_ea_header {
3878 __u64 leh_len; /* total size */
3884 /** Hardlink data is name and parent fid.
3885 * Stored in this crazy struct for maximum packing and endian-neutrality
3887 struct link_ea_entry {
3888 /** __u16 stored big-endian, unaligned */
3889 unsigned char lee_reclen[2];
3890 unsigned char lee_parent_fid[sizeof(struct lu_fid)];
3892 }__attribute__((packed));
3894 /** fid2path request/reply structure */
3895 struct getinfo_fid2path {
3896 struct lu_fid gf_fid;
3901 } __attribute__((packed));
3903 void lustre_swab_fid2path (struct getinfo_fid2path *gf);
3905 /** path2parent request/reply structures */
3907 struct lu_fid gp_fid; /**< parent FID */
3908 __u32 gp_linkno; /**< hardlink number */
3909 __u32 gp_name_size; /**< size of the name field */
3910 char gp_name[0]; /**< zero-terminated link name */
3911 } __attribute__((packed));
3914 LAYOUT_INTENT_ACCESS = 0,
3915 LAYOUT_INTENT_READ = 1,
3916 LAYOUT_INTENT_WRITE = 2,
3917 LAYOUT_INTENT_GLIMPSE = 3,
3918 LAYOUT_INTENT_TRUNC = 4,
3919 LAYOUT_INTENT_RELEASE = 5,
3920 LAYOUT_INTENT_RESTORE = 6
3923 /* enqueue layout lock with intent */
3924 struct layout_intent {
3925 __u32 li_opc; /* intent operation for enqueue, read, write etc */
3931 void lustre_swab_layout_intent(struct layout_intent *li);
3934 * On the wire version of hsm_progress structure.
3936 * Contains the userspace hsm_progress and some internal fields.
3938 struct hsm_progress_kernel {
3939 /* Field taken from struct hsm_progress */
3942 struct hsm_extent hpk_extent;
3944 __u16 hpk_errval; /* positive val */
3946 /* Additional fields */
3947 __u64 hpk_data_version;
3949 } __attribute__((packed));
3952 extern void lustre_swab_hsm_current_action(struct hsm_current_action *action);
3953 extern void lustre_swab_hsm_progress_kernel(struct hsm_progress_kernel *hpk);
extern void lustre_swab_hsm_user_item(struct hsm_user_item *hui);
extern void lustre_swab_hsm_request(struct hsm_request *hr);

/**
 * OUT_UPDATE RPC Format
 *
 * During a cross-ref operation, the master MDT (the MDT the client sends
 * the request to) disassembles the operation into object updates, which
 * OSP then sends to the remote MDT to be executed.
 *
 * An UPDATE_OBJ RPC does a list of updates. Each update belongs to an
 * operation and does one type of modification to an object.
 *
 * Request Format
 *
 *   update_buf
 *   update (1st)
 *   update (2nd)
 *   ...
 *   update (ub_count-th)
 *
 * ub_count must be less than or equal to UPDATE_PER_RPC_MAX.
 *
 * Reply Format
 *
 *   update_reply
 *   rc [+ buffers] (1st)
 *   rc [+ buffers] (2nd)
 *   ...
 *   rc [+ buffers] (ur_count-th)
 *
 * ur_count must be less than or equal to UPDATE_PER_RPC_MAX and should
 * usually be equal to ub_count.
 */

/**
 * Type of each update
 */
enum update_type {
        OUT_START               = 0,
        OUT_CREATE              = 1,
        OUT_DESTROY             = 2,
        OUT_REF_ADD             = 3,
        OUT_REF_DEL             = 4,
        OUT_ATTR_SET            = 5,
        OUT_ATTR_GET            = 6,
        OUT_XATTR_SET           = 7,
        OUT_XATTR_GET           = 8,
        OUT_INDEX_LOOKUP        = 9,
        OUT_INDEX_INSERT        = 10,
        OUT_INDEX_DELETE        = 11,
        OUT_WRITE               = 12,
        OUT_XATTR_DEL           = 13,
        OUT_LAST
};

enum update_flag {
        UPDATE_FL_OST           = 0x00000001,   /* op from OST (not MDT) */
        UPDATE_FL_SYNC          = 0x00000002,   /* commit before replying */
        UPDATE_FL_COMMITTED     = 0x00000004,   /* op committed globally */
        UPDATE_FL_NOLOG         = 0x00000008    /* for idempotent updates */
};

struct object_update_param {
        __u16   oup_len;        /* length of this parameter */
        __u16   oup_padding;
        __u32   oup_padding2;
        char    oup_buf[0];
};

static inline size_t
object_update_param_size(const struct object_update_param *param)
{
        return cfs_size_round(sizeof(*param) + param->oup_len);
}
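
/*
 * Illustrative only: with an 8-byte fixed header and cfs_size_round()
 * rounding to an 8-byte multiple, a parameter carrying oup_len = 5 bytes
 * of payload occupies 8 + 5 = 13 bytes, rounded up to 16 bytes on the wire.
 */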

struct object_update {
        __u16           ou_type;                /* enum update_type */
        __u16           ou_params_count;        /* update parameters count */
        __u32           ou_master_index;        /* master MDT/OST index */
        __u32           ou_flags;               /* enum update_flag */
        __u32           ou_padding1;            /* padding 1 */
        __u64           ou_batchid;             /* op transno on master */
        struct lu_fid   ou_fid;                 /* object to be updated */
        struct object_update_param ou_params[0]; /* update params */
};

#define UPDATE_REQUEST_MAGIC_V1 0xBDDE0001
#define UPDATE_REQUEST_MAGIC_V2 0xBDDE0002
#define UPDATE_REQUEST_MAGIC    UPDATE_REQUEST_MAGIC_V2

/* Holds the object updates being sent to the remote OUT in a single RPC */
struct object_update_request {
        __u32                   ourq_magic;
        __u16                   ourq_count;     /* number of ourq_updates[] */
        __u16                   ourq_padding;
        struct object_update    ourq_updates[0];
};

void lustre_swab_object_update(struct object_update *ou);
void lustre_swab_object_update_request(struct object_update_request *our);

static inline size_t
object_update_size(const struct object_update *update)
{
        const struct object_update_param *param;
        size_t          size;
        unsigned int    i;

        size = offsetof(struct object_update, ou_params[0]);
        for (i = 0; i < update->ou_params_count; i++) {
                param = (struct object_update_param *)((char *)update + size);
                size += object_update_param_size(param);
        }

        return size;
}

static inline struct object_update *
object_update_request_get(const struct object_update_request *our,
                          unsigned int index, size_t *size)
{
        void            *ptr;
        unsigned int     i;

        if (index >= our->ourq_count)
                return NULL;

        ptr = (void *)&our->ourq_updates[0];
        for (i = 0; i < index; i++)
                ptr += object_update_size(ptr);

        if (size != NULL)
                *size = object_update_size(ptr);

        return ptr;
}
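
/*
 * Illustrative only: a server-side handler could walk every update in a
 * request with the accessor above (handler name hypothetical); the caller
 * must already have verified that the buffer is large enough.
 *
 *      struct object_update *update;
 *      size_t size;
 *      unsigned int i;
 *
 *      for (i = 0; i < our->ourq_count; i++) {
 *              update = object_update_request_get(our, i, &size);
 *              if (update == NULL)
 *                      break;
 *              rc = out_handle_update(env, update);
 *      }
 */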

/* the result of an object update */
struct object_update_result {
        __u32   our_rc;
        __u16   our_datalen;
        __u16   our_padding;
        __u32   our_data[0];
};

#define UPDATE_REPLY_MAGIC_V1   0x00BD0001
#define UPDATE_REPLY_MAGIC_V2   0x00BD0002
#define UPDATE_REPLY_MAGIC      UPDATE_REPLY_MAGIC_V2

/* Holds the object_update_results replied from the remote OUT. */
struct object_update_reply {
        __u32   ourp_magic;
        __u16   ourp_count;
        __u16   ourp_padding;
        __u16   ourp_lens[0];
};

void lustre_swab_object_update_result(struct object_update_result *our);
void lustre_swab_object_update_reply(struct object_update_reply *our);

static inline struct object_update_result *
object_update_result_get(const struct object_update_reply *reply,
                         unsigned int index, size_t *size)
{
        __u16            count = reply->ourp_count;
        unsigned int     i;
        void            *ptr;

        if (index >= count)
                return NULL;

        ptr = (char *)reply +
              cfs_size_round(offsetof(struct object_update_reply,
                                      ourp_lens[count]));
        for (i = 0; i < index; i++) {
                if (reply->ourp_lens[i] == 0)
                        return NULL;

                ptr += cfs_size_round(reply->ourp_lens[i]);
        }

        if (size != NULL)
                *size = reply->ourp_lens[index];

        return ptr;
}
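
/*
 * Illustrative only: after swabbing the reply, a client could fetch the
 * return code of one sub-update like this (variable names hypothetical):
 *
 *      struct object_update_result *res;
 *      size_t size;
 *
 *      res = object_update_result_get(reply, index, &size);
 *      if (res == NULL)
 *              return -EPROTO;
 *      rc = res->our_rc;
 */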

/** layout swap request structure
 * fid1 and fid2 are in mdt_body
 */
struct mdc_swap_layouts {
        __u64   msl_flags;
} __attribute__((packed));

void lustre_swab_swap_layouts(struct mdc_swap_layouts *msl);

struct close_data {
        struct lustre_handle    cd_handle;
        struct lu_fid           cd_fid;
        __u64                   cd_data_version;
        __u64                   cd_reserved[8];
};

void lustre_swab_close_data(struct close_data *data);