4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
20 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21 * CA 95054 USA or visit www.sun.com if you need additional information or
27 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
28 * Use is subject to license terms.
30 * Copyright (c) 2011, 2014, Intel Corporation.
33 * This file is part of Lustre, http://www.lustre.org/
34 * Lustre is a trademark of Sun Microsystems, Inc.
36 * lustre/include/lustre/lustre_idl.h
38 * Lustre wire protocol definitions.
41 /** \defgroup lustreidl lustreidl
43 * Lustre wire protocol definitions.
45 * ALL structs passing over the wire should be declared here. Structs
46 * that are used in interfaces with userspace should go in lustre_user.h.
48 * All structs being declared here should be built from simple fixed-size
49 * types (__u8, __u16, __u32, __u64) or be built from other types or
50 * structs also declared in this file. Similarly, all flags and magic
51 * values in those structs should also be declared here. This ensures
52 * that the Lustre wire protocol is not influenced by external dependencies.
54 * The only other acceptable items in this file are VERY SIMPLE accessor
55 * functions to avoid callers grubbing inside the structures, and the
56 * prototypes of the swabber functions for each struct. Nothing that
57 * depends on external functions or definitions should be in here.
59 * Structs must be properly aligned to put 64-bit values on an 8-byte
60 * boundary. Any structs being added here must also be added to
61 * utils/wirecheck.c and "make newwiretest" run to regenerate the
62 * utils/wiretest.c sources. This allows us to verify that wire structs
63 * have the proper alignment/size on all architectures.
65 * DO NOT CHANGE any of the structs, flags, values declared here and used
66 * in released Lustre versions. Some structs may have padding fields that
67 * can be used. Some structs might allow addition at the end (verify this
68 * in the code to ensure that new/old clients that see this larger struct
69 * do not fail, otherwise you need to implement protocol compatibility).
71 * We assume all nodes are either little-endian or big-endian, and we
72 * always send messages in the sender's native format. The receiver
73 * detects the message format by checking the 'magic' field of the message
74 * (see lustre_msg_swabbed() below).
76 * Each wire type has corresponding 'lustre_swab_xxxtypexxx()' routines,
77 * implemented either here, inline (trivial implementations) or in
78 * ptlrpc/pack_generic.c. These 'swabbers' convert the type from "other"
79 * endian, in-place in the message buffer.
81 * A swabber takes a single pointer argument. The caller must already have
82 * verified that the length of the message buffer >= sizeof (type).
84 * For variable length types, a second 'lustre_swab_v_xxxtypexxx()' routine
85 * may be defined that swabs just the variable part, after the caller has
86 * verified that the message buffer is large enough.
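 *
 * Illustrative sketch only (not part of the wire protocol): a trivial
 * fixed-size swabber follows the pattern described above. The __swab64s()/
 * __swab32s() helpers and the struct shown here are assumptions for the
 * example, not definitions from this file:
 *
 *	struct foo_body {
 *		__u64 fb_id;
 *		__u32 fb_flags;
 *		__u32 fb_padding;
 *	};
 *
 *	void lustre_swab_foo_body(struct foo_body *b)
 *	{
 *		__swab64s(&b->fb_id);
 *		__swab32s(&b->fb_flags);
 *		// fb_padding carries no data and needs no swabbing
 *	}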
91 #ifndef _LUSTRE_IDL_H_
92 #define _LUSTRE_IDL_H_
94 #include <libcfs/libcfs.h> /* for LPUX64, etc */
95 #include <lnet/types.h>
96 #include <lustre/lustre_user.h> /* Defn's shared with user-space. */
97 #include <lustre/lustre_errno.h>
98 #include <lustre_ver.h>
103 /* FOO_REQUEST_PORTAL is for incoming requests on the FOO
104 * FOO_REPLY_PORTAL is for incoming replies on the FOO
105 * FOO_BULK_PORTAL is for incoming bulk on the FOO */
108 #define CONNMGR_REQUEST_PORTAL 1
109 #define CONNMGR_REPLY_PORTAL 2
110 //#define OSC_REQUEST_PORTAL 3
111 #define OSC_REPLY_PORTAL 4
112 //#define OSC_BULK_PORTAL 5
113 #define OST_IO_PORTAL 6
114 #define OST_CREATE_PORTAL 7
115 #define OST_BULK_PORTAL 8
116 //#define MDC_REQUEST_PORTAL 9
117 #define MDC_REPLY_PORTAL 10
118 //#define MDC_BULK_PORTAL 11
119 #define MDS_REQUEST_PORTAL 12
120 //#define MDS_REPLY_PORTAL 13
121 #define MDS_BULK_PORTAL 14
122 #define LDLM_CB_REQUEST_PORTAL 15
123 #define LDLM_CB_REPLY_PORTAL 16
124 #define LDLM_CANCEL_REQUEST_PORTAL 17
125 #define LDLM_CANCEL_REPLY_PORTAL 18
126 //#define PTLBD_REQUEST_PORTAL 19
127 //#define PTLBD_REPLY_PORTAL 20
128 //#define PTLBD_BULK_PORTAL 21
129 #define MDS_SETATTR_PORTAL 22
130 #define MDS_READPAGE_PORTAL 23
131 #define OUT_PORTAL 24
132 #define MGC_REPLY_PORTAL 25
133 #define MGS_REQUEST_PORTAL 26
134 #define MGS_REPLY_PORTAL 27
135 #define OST_REQUEST_PORTAL 28
136 #define FLD_REQUEST_PORTAL 29
137 #define SEQ_METADATA_PORTAL 30
138 #define SEQ_DATA_PORTAL 31
139 #define SEQ_CONTROLLER_PORTAL 32
140 #define MGS_BULK_PORTAL 33
142 /* Portal 63 is reserved for the Cray Inc DVS - nic@cray.com, roe@cray.com, n8851@cray.com */
145 #define PTL_RPC_MSG_REQUEST 4711
146 #define PTL_RPC_MSG_ERR 4712
147 #define PTL_RPC_MSG_REPLY 4713
149 /* DON'T use swabbed values of MAGIC as magic! */
150 #define LUSTRE_MSG_MAGIC_V2 0x0BD00BD3
151 #define LUSTRE_MSG_MAGIC_V2_SWABBED 0xD30BD00B
153 #define LUSTRE_MSG_MAGIC LUSTRE_MSG_MAGIC_V2
155 #define PTLRPC_MSG_VERSION 0x00000003
156 #define LUSTRE_VERSION_MASK 0xffff0000
157 #define LUSTRE_OBD_VERSION 0x00010000
158 #define LUSTRE_MDS_VERSION 0x00020000
159 #define LUSTRE_OST_VERSION 0x00030000
160 #define LUSTRE_DLM_VERSION 0x00040000
161 #define LUSTRE_LOG_VERSION 0x00050000
162 #define LUSTRE_MGS_VERSION 0x00060000
165 * Describes a range of sequences: lsr_start is included in the range but lsr_end is not.
167 * The same structure is used in the fld module, where the lsr_index field holds the mdt id of the home mdt.
170 struct lu_seq_range {
177 struct lu_seq_range_array {
180 struct lu_seq_range lsra_lsr[0];
183 #define LU_SEQ_RANGE_MDT 0x0
184 #define LU_SEQ_RANGE_OST 0x1
185 #define LU_SEQ_RANGE_ANY 0x3
187 #define LU_SEQ_RANGE_MASK 0x3
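/*
 * Illustrative sketch (it assumes the lsr_start/lsr_end fields of struct
 * lu_seq_range, which are elided above): since lsr_start is included and
 * lsr_end is excluded, a membership test looks like:
 *
 *	static inline bool lu_seq_range_within(const struct lu_seq_range *range,
 *					       __u64 seq)
 *	{
 *		return seq >= range->lsr_start && seq < range->lsr_end;
 *	}
 */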
189 /** \defgroup lu_fid lu_fid
193 * Flags for lustre_mdt_attrs::lma_compat and lustre_mdt_attrs::lma_incompat.
194 * Deprecated since HSM and SOM attributes are now stored in a separate on-disk xattr.
198 LMAC_HSM = 0x00000001,
199 /* LMAC_SOM = 0x00000002, obsolete since 2.8.0 */
200 LMAC_NOT_IN_OI = 0x00000004, /* the object does NOT need OI mapping */
201 LMAC_FID_ON_OST = 0x00000008, /* For OST-object, its OI mapping is
202 * under /O/<seq>/d<x>. */
206 * Masks for all features that should be supported by a Lustre version to
207 * access a specific file.
208 * This information is stored in lustre_mdt_attrs::lma_incompat.
211 LMAI_RELEASED = 0x00000001, /* file is released */
212 LMAI_AGENT = 0x00000002, /* agent inode */
213 LMAI_REMOTE_PARENT = 0x00000004, /* the parent of the object
214 is on the remote MDT */
215 LMAI_STRIPED = 0x00000008, /* striped directory inode */
217 #define LMA_INCOMPAT_SUPP (LMAI_AGENT | LMAI_REMOTE_PARENT | LMAI_STRIPED)
219 extern void lustre_lma_swab(struct lustre_mdt_attrs *lma);
220 extern void lustre_lma_init(struct lustre_mdt_attrs *lma,
221 const struct lu_fid *fid,
222 __u32 compat, __u32 incompat);
224 /* copytool uses a 32b bitmask field to encode archive-Ids during register
226 * (so at most LL_HSM_MAX_ARCHIVE = 32 archives are supported):
227 * archive num = 0 => all; archive num from 1 to 32 => that archive */
229 #define LL_HSM_MAX_ARCHIVE (sizeof(__u32) * 8)
232 * HSM on-disk attributes stored in a separate xattr.
235 /** Bitfield for supported data in this structure. For future use. */
238 /** HSM flags, see hsm_flags enum below */
240 /** backend archive id associated with the file */
242 /** version associated with the last archiving, if any */
245 extern void lustre_hsm_swab(struct hsm_attrs *attrs);
251 /** LASTID file has zero OID */
252 LUSTRE_FID_LASTID_OID = 0UL,
253 /** initial fid id value */
254 LUSTRE_FID_INIT_OID = 1UL
257 /** returns fid object sequence */
258 static inline __u64 fid_seq(const struct lu_fid *fid)
263 /** returns fid object id */
264 static inline __u32 fid_oid(const struct lu_fid *fid)
269 /** returns fid object version */
270 static inline __u32 fid_ver(const struct lu_fid *fid)
275 static inline void fid_zero(struct lu_fid *fid)
277 memset(fid, 0, sizeof(*fid));
280 static inline __u64 fid_ver_oid(const struct lu_fid *fid)
282 return ((__u64)fid_ver(fid) << 32 | fid_oid(fid));
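/*
 * Illustrative sketch: filling in a FID by hand and reading it back with the
 * accessors above (the values are arbitrary examples, not reserved constants):
 *
 *	struct lu_fid fid;
 *
 *	fid_zero(&fid);
 *	fid.f_seq = FID_SEQ_NORMAL;	// 64-bit sequence
 *	fid.f_oid = 42;			// 32-bit object id
 *	// fid_seq(&fid) == FID_SEQ_NORMAL, fid_oid(&fid) == 42,
 *	// fid_ver(&fid) == 0, fid_ver_oid(&fid) == 42
 */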
286 * Note that reserved SEQ numbers below 12 will conflict with ldiskfs
287 * inodes in the IGIF namespace, so these reserved SEQ numbers can be
288 * used for other purposes and not risk collisions with existing inodes.
290 * Different FID Format
291 * http://arch.lustre.org/index.php?title=Interoperability_fids_zfs#NEW.0
294 FID_SEQ_OST_MDT0 = 0,
295 FID_SEQ_LLOG = 1, /* unnamed llogs */
297 FID_SEQ_UNUSED_START = 3,
298 FID_SEQ_UNUSED_END = 9,
299 FID_SEQ_LLOG_NAME = 10, /* named llogs */
302 FID_SEQ_IGIF_MAX = 0x0ffffffffULL,
303 FID_SEQ_IDIF = 0x100000000ULL,
304 FID_SEQ_IDIF_MAX = 0x1ffffffffULL,
305 /* Normal FID sequence starts from this value, i.e. 1<<33 */
306 FID_SEQ_START = 0x200000000ULL,
307 /* sequence for local pre-defined FIDs listed in local_oid */
308 FID_SEQ_LOCAL_FILE = 0x200000001ULL,
309 FID_SEQ_DOT_LUSTRE = 0x200000002ULL,
310 /* sequence is used for local named objects FIDs generated
311 * by local_object_storage library */
312 FID_SEQ_LOCAL_NAME = 0x200000003ULL,
313 /* Because the current FLD only caches the fid sequence (not the oid)
314 * on the client side, any FID that needs to be exposed to clients
315 * must ensure that all FIDs under one sequence are located on a
316 * single MDT. */
317 FID_SEQ_SPECIAL = 0x200000004ULL,
318 FID_SEQ_QUOTA = 0x200000005ULL,
319 FID_SEQ_QUOTA_GLB = 0x200000006ULL,
320 FID_SEQ_ROOT = 0x200000007ULL, /* Located on MDT0 */
321 FID_SEQ_LAYOUT_RBTREE = 0x200000008ULL,
322 /* sequence is used for update logs of cross-MDT operation */
323 FID_SEQ_UPDATE_LOG = 0x200000009ULL,
324 /* Sequence used for the directory under which update logs are stored. */
326 FID_SEQ_UPDATE_LOG_DIR = 0x20000000aULL,
327 FID_SEQ_NORMAL = 0x200000400ULL,
328 FID_SEQ_LOV_DEFAULT = 0xffffffffffffffffULL
331 #define OBIF_OID_MAX_BITS 32
332 #define OBIF_MAX_OID (1ULL << OBIF_OID_MAX_BITS)
333 #define OBIF_OID_MASK ((1ULL << OBIF_OID_MAX_BITS) - 1)
334 #define IDIF_OID_MAX_BITS 48
335 #define IDIF_MAX_OID (1ULL << IDIF_OID_MAX_BITS)
336 #define IDIF_OID_MASK ((1ULL << IDIF_OID_MAX_BITS) - 1)
338 /** OID for FID_SEQ_SPECIAL */
340 /* Big Filesystem Lock to serialize rename operations */
341 FID_OID_SPECIAL_BFL = 1UL,
344 /** OID for FID_SEQ_DOT_LUSTRE */
345 enum dot_lustre_oid {
346 FID_OID_DOT_LUSTRE = 1UL,
347 FID_OID_DOT_LUSTRE_OBF = 2UL,
348 FID_OID_DOT_LUSTRE_LPF = 3UL,
351 /** OID for FID_SEQ_ROOT */
354 FID_OID_ECHO_ROOT = 2UL,
357 static inline bool fid_seq_is_mdt0(__u64 seq)
359 return seq == FID_SEQ_OST_MDT0;
362 static inline bool fid_seq_is_mdt(__u64 seq)
364 return seq == FID_SEQ_OST_MDT0 || seq >= FID_SEQ_NORMAL;
367 static inline bool fid_seq_is_echo(__u64 seq)
369 return seq == FID_SEQ_ECHO;
372 static inline bool fid_is_echo(const struct lu_fid *fid)
374 return fid_seq_is_echo(fid_seq(fid));
377 static inline bool fid_seq_is_llog(__u64 seq)
379 return seq == FID_SEQ_LLOG;
382 static inline bool fid_is_llog(const struct lu_fid *fid)
384 /* file with OID == 0 is not llog but contains last oid */
385 return fid_seq_is_llog(fid_seq(fid)) && fid_oid(fid) > 0;
388 static inline bool fid_seq_is_rsvd(__u64 seq)
390 return seq > FID_SEQ_OST_MDT0 && seq <= FID_SEQ_RSVD;
393 static inline bool fid_seq_is_special(__u64 seq)
395 return seq == FID_SEQ_SPECIAL;
398 static inline bool fid_seq_is_local_file(__u64 seq)
400 return seq == FID_SEQ_LOCAL_FILE ||
401 seq == FID_SEQ_LOCAL_NAME;
404 static inline bool fid_seq_is_root(__u64 seq)
406 return seq == FID_SEQ_ROOT;
409 static inline bool fid_seq_is_dot(__u64 seq)
411 return seq == FID_SEQ_DOT_LUSTRE;
414 static inline bool fid_seq_is_default(__u64 seq)
416 return seq == FID_SEQ_LOV_DEFAULT;
419 static inline bool fid_is_mdt0(const struct lu_fid *fid)
421 return fid_seq_is_mdt0(fid_seq(fid));
424 static inline void lu_root_fid(struct lu_fid *fid)
426 fid->f_seq = FID_SEQ_ROOT;
427 fid->f_oid = FID_OID_ROOT;
431 static inline void lu_echo_root_fid(struct lu_fid *fid)
433 fid->f_seq = FID_SEQ_ROOT;
434 fid->f_oid = FID_OID_ECHO_ROOT;
438 static inline void lu_update_log_fid(struct lu_fid *fid, __u32 index)
440 fid->f_seq = FID_SEQ_UPDATE_LOG;
445 static inline void lu_update_log_dir_fid(struct lu_fid *fid, __u32 index)
447 fid->f_seq = FID_SEQ_UPDATE_LOG_DIR;
453 * Check if a fid is igif or not.
454 * \param fid the fid to be tested.
455 * \return true if the fid is an igif; otherwise false.
457 static inline bool fid_seq_is_igif(__u64 seq)
459 return seq >= FID_SEQ_IGIF && seq <= FID_SEQ_IGIF_MAX;
462 static inline bool fid_is_igif(const struct lu_fid *fid)
464 return fid_seq_is_igif(fid_seq(fid));
468 * Check if a fid is idif or not.
469 * \param fid the fid to be tested.
470 * \return true if the fid is an idif; otherwise false.
472 static inline bool fid_seq_is_idif(__u64 seq)
474 return seq >= FID_SEQ_IDIF && seq <= FID_SEQ_IDIF_MAX;
477 static inline bool fid_is_idif(const struct lu_fid *fid)
479 return fid_seq_is_idif(fid_seq(fid));
482 static inline bool fid_is_local_file(const struct lu_fid *fid)
484 return fid_seq_is_local_file(fid_seq(fid));
487 static inline bool fid_seq_is_norm(__u64 seq)
489 return (seq >= FID_SEQ_NORMAL);
492 static inline bool fid_is_norm(const struct lu_fid *fid)
494 return fid_seq_is_norm(fid_seq(fid));
497 static inline int fid_is_layout_rbtree(const struct lu_fid *fid)
499 return fid_seq(fid) == FID_SEQ_LAYOUT_RBTREE;
502 static inline bool fid_seq_is_update_log(__u64 seq)
504 return seq == FID_SEQ_UPDATE_LOG;
507 static inline bool fid_is_update_log(const struct lu_fid *fid)
509 return fid_seq_is_update_log(fid_seq(fid));
512 static inline bool fid_seq_is_update_log_dir(__u64 seq)
514 return seq == FID_SEQ_UPDATE_LOG_DIR;
517 static inline bool fid_is_update_log_dir(const struct lu_fid *fid)
519 return fid_seq_is_update_log_dir(fid_seq(fid));
522 /* convert an OST objid into an IDIF FID SEQ number */
523 static inline __u64 fid_idif_seq(__u64 id, __u32 ost_idx)
525 return FID_SEQ_IDIF | (ost_idx << 16) | ((id >> 32) & 0xffff);
528 /* convert a packed IDIF FID into an OST objid */
529 static inline __u64 fid_idif_id(__u64 seq, __u32 oid, __u32 ver)
531 return ((__u64)ver << 48) | ((seq & 0xffff) << 32) | oid;
534 static inline __u32 idif_ost_idx(__u64 seq)
536 return (seq >> 16) & 0xffff;
539 /* extract ost index from IDIF FID */
540 static inline __u32 fid_idif_ost_idx(const struct lu_fid *fid)
542 return idif_ost_idx(fid_seq(fid));
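/*
 * Illustrative sketch of the IDIF round trip implemented by the helpers
 * above (example values only): a legacy OST object id is split between the
 * low 32 bits of f_oid and the low 16 bits of the IDIF sequence, with the
 * OST index in bits 16..31 of the sequence:
 *
 *	__u64 objid = 0x123456789ULL;	// legacy objid, up to 48 bits
 *	__u32 ost_idx = 7;
 *	__u64 seq = fid_idif_seq(objid, ost_idx);
 *	__u32 oid = (__u32)objid;	// low 32 bits go to f_oid
 *
 *	// idif_ost_idx(seq) == 7
 *	// fid_idif_id(seq, oid, 0) == objid
 */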
545 /* extract OST sequence (group) from a wire ost_id (id/seq) pair */
546 static inline __u64 ostid_seq(const struct ost_id *ostid)
548 if (fid_seq_is_mdt0(ostid->oi.oi_seq))
549 return FID_SEQ_OST_MDT0;
551 if (unlikely(fid_seq_is_default(ostid->oi.oi_seq)))
552 return FID_SEQ_LOV_DEFAULT;
554 if (fid_is_idif(&ostid->oi_fid))
555 return FID_SEQ_OST_MDT0;
557 return fid_seq(&ostid->oi_fid);
560 /* extract OST objid from a wire ost_id (id/seq) pair */
561 static inline __u64 ostid_id(const struct ost_id *ostid)
563 if (fid_seq_is_mdt0(ostid->oi.oi_seq))
564 return ostid->oi.oi_id & IDIF_OID_MASK;
566 if (unlikely(fid_seq_is_default(ostid->oi.oi_seq)))
567 return ostid->oi.oi_id;
569 if (fid_is_idif(&ostid->oi_fid))
570 return fid_idif_id(fid_seq(&ostid->oi_fid),
571 fid_oid(&ostid->oi_fid), 0);
573 return fid_oid(&ostid->oi_fid);
576 static inline void ostid_set_seq(struct ost_id *oi, __u64 seq)
578 if (fid_seq_is_mdt0(seq) || fid_seq_is_default(seq)) {
581 oi->oi_fid.f_seq = seq;
582 /* Note: if f_oid and f_ver are both zero, initialize f_oid
583 * to 1, otherwise ostid_seq() will treat this as an old
584 * ostid (oi_seq == 0). */
585 if (oi->oi_fid.f_oid == 0 && oi->oi_fid.f_ver == 0)
586 oi->oi_fid.f_oid = LUSTRE_FID_INIT_OID;
590 static inline void ostid_set_seq_mdt0(struct ost_id *oi)
592 ostid_set_seq(oi, FID_SEQ_OST_MDT0);
595 static inline void ostid_set_seq_echo(struct ost_id *oi)
597 ostid_set_seq(oi, FID_SEQ_ECHO);
600 static inline void ostid_set_seq_llog(struct ost_id *oi)
602 ostid_set_seq(oi, FID_SEQ_LLOG);
606 * Note: we need to check oi_seq to decide where to set oi_id,
607 * so oi_seq should always be set ahead of oi_id.
609 static inline void ostid_set_id(struct ost_id *oi, __u64 oid)
611 if (fid_seq_is_mdt0(oi->oi.oi_seq)) {
612 if (oid >= IDIF_MAX_OID) {
613 CERROR("Bad %llu to set "DOSTID"\n",
614 (unsigned long long)oid, POSTID(oi));
618 } else if (fid_is_idif(&oi->oi_fid)) {
619 if (oid >= IDIF_MAX_OID) {
620 CERROR("Bad %llu to set "DOSTID"\n",
621 (unsigned long long)oid, POSTID(oi));
624 oi->oi_fid.f_seq = fid_idif_seq(oid,
625 fid_idif_ost_idx(&oi->oi_fid));
626 oi->oi_fid.f_oid = oid;
627 oi->oi_fid.f_ver = oid >> 48;
629 if (oid > OBIF_MAX_OID) {
630 CERROR("Bad %llu to set "DOSTID"\n",
631 (unsigned long long)oid, POSTID(oi));
634 oi->oi_fid.f_oid = oid;
638 static inline int fid_set_id(struct lu_fid *fid, __u64 oid)
640 if (unlikely(fid_seq_is_igif(fid->f_seq))) {
641 CERROR("bad IGIF, "DFID"\n", PFID(fid));
645 if (fid_is_idif(fid)) {
646 if (oid >= IDIF_MAX_OID) {
647 CERROR("Bad %llu to set "DFID"\n",
648 (unsigned long long)oid, PFID(fid));
651 fid->f_seq = fid_idif_seq(oid, fid_idif_ost_idx(fid));
653 fid->f_ver = oid >> 48;
655 if (oid > OBIF_MAX_OID) {
656 CERROR("Bad %llu to set "DFID"\n",
657 (unsigned long long)oid, PFID(fid));
666 * Unpack an OST object id/seq (group) into a FID. This is needed for
667 * converting all obdo, lmm, lsm, etc. 64-bit id/seq pairs into proper
668 * FIDs. Note that if an id/seq is already in FID/IDIF format it will
669 * be passed through unchanged. Only legacy OST objects in "group 0"
670 * will be mapped into the IDIF namespace so that they can fit into the
671 * struct lu_fid fields without loss. For reference see:
672 * http://arch.lustre.org/index.php?title=Interoperability_fids_zfs
674 static inline int ostid_to_fid(struct lu_fid *fid, const struct ost_id *ostid,
677 __u64 seq = ostid_seq(ostid);
679 if (ost_idx > 0xffff) {
680 CERROR("bad ost_idx, "DOSTID" ost_idx:%u\n", POSTID(ostid),
685 if (fid_seq_is_mdt0(seq)) {
686 __u64 oid = ostid_id(ostid);
688 /* This is a "legacy" (old 1.x/2.early) OST object in "group 0"
689 * that we map into the IDIF namespace. It allows up to 2^48
690 * objects per OST, as this is the object namespace that has
691 * been in production for years. This can handle create rates
692 * of 1M objects/s/OST for 9 years, or combinations thereof. */
693 if (oid >= IDIF_MAX_OID) {
694 CERROR("bad MDT0 id(1), "DOSTID" ost_idx:%u\n",
695 POSTID(ostid), ost_idx);
698 fid->f_seq = fid_idif_seq(oid, ost_idx);
699 /* truncate to 32 bits by assignment */
701 /* in theory, not currently used */
702 fid->f_ver = oid >> 48;
703 } else if (likely(!fid_seq_is_default(seq)))
704 /* if (fid_seq_is_idif(seq) || fid_seq_is_norm(seq)) */ {
705 /* This is either an IDIF object, which identifies objects across
706 * all OSTs, or a regular FID. The IDIF namespace maps legacy
707 * OST objects into the FID namespace. In both cases, we just
708 * pass the FID through, no conversion needed. */
709 if (ostid->oi_fid.f_ver != 0) {
710 CERROR("bad MDT0 id(2), "DOSTID" ost_idx:%u\n",
711 POSTID(ostid), ost_idx);
714 *fid = ostid->oi_fid;
720 /* pack any OST FID into an ostid (id/seq) for the wire/disk */
721 static inline int fid_to_ostid(const struct lu_fid *fid, struct ost_id *ostid)
723 if (unlikely(fid_seq_is_igif(fid->f_seq))) {
724 CERROR("bad IGIF, "DFID"\n", PFID(fid));
728 if (fid_is_idif(fid)) {
729 ostid_set_seq_mdt0(ostid);
730 ostid_set_id(ostid, fid_idif_id(fid_seq(fid), fid_oid(fid),
733 ostid->oi_fid = *fid;
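/*
 * Illustrative sketch (example values only) of the ostid <-> FID round trip
 * using the helpers above; error handling is omitted:
 *
 *	struct ost_id oi;
 *	struct lu_fid fid;
 *
 *	ostid_set_seq_mdt0(&oi);	// legacy "group 0" object
 *	ostid_set_id(&oi, 1234);
 *	ostid_to_fid(&fid, &oi, 0);	// maps it into the IDIF namespace
 *	// fid_is_idif(&fid) is true and ostid_id(&oi) == 1234
 *
 *	fid_to_ostid(&fid, &oi);	// packs it back for the wire/disk
 */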
739 /* Check whether the fid is for LAST_ID */
740 static inline bool fid_is_last_id(const struct lu_fid *fid)
742 return fid_oid(fid) == 0 && fid_seq(fid) != FID_SEQ_UPDATE_LOG &&
743 fid_seq(fid) != FID_SEQ_UPDATE_LOG_DIR;
747 * Get inode number from an igif.
748 * \param fid an igif to get inode number from.
749 * \return inode number for the igif.
751 static inline ino_t lu_igif_ino(const struct lu_fid *fid)
756 extern void lustre_swab_ost_id(struct ost_id *oid);
759 * Get inode generation from an igif.
760 * \param fid an igif to get inode generation from.
761 * \return inode generation for the igif.
763 static inline __u32 lu_igif_gen(const struct lu_fid *fid)
769 * Build igif from the inode number/generation.
771 static inline void lu_igif_build(struct lu_fid *fid, __u32 ino, __u32 gen)
779 * Fids are transmitted across network (in the sender byte-ordering),
780 * and stored on disk in big-endian order.
782 static inline void fid_cpu_to_le(struct lu_fid *dst, const struct lu_fid *src)
784 dst->f_seq = cpu_to_le64(fid_seq(src));
785 dst->f_oid = cpu_to_le32(fid_oid(src));
786 dst->f_ver = cpu_to_le32(fid_ver(src));
789 static inline void fid_le_to_cpu(struct lu_fid *dst, const struct lu_fid *src)
791 dst->f_seq = le64_to_cpu(fid_seq(src));
792 dst->f_oid = le32_to_cpu(fid_oid(src));
793 dst->f_ver = le32_to_cpu(fid_ver(src));
796 static inline void fid_cpu_to_be(struct lu_fid *dst, const struct lu_fid *src)
798 dst->f_seq = cpu_to_be64(fid_seq(src));
799 dst->f_oid = cpu_to_be32(fid_oid(src));
800 dst->f_ver = cpu_to_be32(fid_ver(src));
803 static inline void fid_be_to_cpu(struct lu_fid *dst, const struct lu_fid *src)
805 dst->f_seq = be64_to_cpu(fid_seq(src));
806 dst->f_oid = be32_to_cpu(fid_oid(src));
807 dst->f_ver = be32_to_cpu(fid_ver(src));
810 static inline bool fid_is_sane(const struct lu_fid *fid)
812 return fid != NULL &&
813 ((fid_seq(fid) >= FID_SEQ_START && fid_ver(fid) == 0) ||
814 fid_is_igif(fid) || fid_is_idif(fid) ||
815 fid_seq_is_rsvd(fid_seq(fid)));
818 extern void lustre_swab_lu_fid(struct lu_fid *fid);
820 static inline bool lu_fid_eq(const struct lu_fid *f0, const struct lu_fid *f1)
822 return memcmp(f0, f1, sizeof *f0) == 0;
825 #define __diff_normalize(val0, val1) \
827 typeof(val0) __val0 = (val0); \
828 typeof(val1) __val1 = (val1); \
830 (__val0 == __val1 ? 0 : __val0 > __val1 ? +1 : -1); \
833 static inline int lu_fid_cmp(const struct lu_fid *f0,
834 const struct lu_fid *f1)
837 __diff_normalize(fid_seq(f0), fid_seq(f1)) ?:
838 __diff_normalize(fid_oid(f0), fid_oid(f1)) ?:
839 __diff_normalize(fid_ver(f0), fid_ver(f1));
842 static inline void ostid_cpu_to_le(const struct ost_id *src_oi,
843 struct ost_id *dst_oi)
845 if (fid_seq_is_mdt0(src_oi->oi.oi_seq)) {
846 dst_oi->oi.oi_id = cpu_to_le64(src_oi->oi.oi_id);
847 dst_oi->oi.oi_seq = cpu_to_le64(src_oi->oi.oi_seq);
849 fid_cpu_to_le(&dst_oi->oi_fid, &src_oi->oi_fid);
853 static inline void ostid_le_to_cpu(const struct ost_id *src_oi,
854 struct ost_id *dst_oi)
856 if (fid_seq_is_mdt0(src_oi->oi.oi_seq)) {
857 dst_oi->oi.oi_id = le64_to_cpu(src_oi->oi.oi_id);
858 dst_oi->oi.oi_seq = le64_to_cpu(src_oi->oi.oi_seq);
860 fid_le_to_cpu(&dst_oi->oi_fid, &src_oi->oi_fid);
864 struct lu_orphan_rec {
865 /* The MDT-object's FID referenced by the orphan OST-object */
866 struct lu_fid lor_fid;
871 struct lu_orphan_ent {
872 /* The orphan OST-object's FID */
873 struct lu_fid loe_key;
874 struct lu_orphan_rec loe_rec;
876 void lustre_swab_orphan_ent(struct lu_orphan_ent *ent);
880 /** \defgroup lu_dir lu_dir
884 * Enumeration of possible directory entry attributes.
886 * Attributes follow the directory entry header in the order they appear in this enumeration.
889 enum lu_dirent_attrs {
892 LUDA_64BITHASH = 0x0004,
894 /* The following attrs are used by the MDT internally only,
895 * and are not visible to the client */
897 /* Verify the dirent consistency */
898 LUDA_VERIFY = 0x8000,
899 /* Only check but not repair the dirent inconsistency */
900 LUDA_VERIFY_DRYRUN = 0x4000,
901 /* The dirent has been repaired, or to be repaired (dryrun). */
902 LUDA_REPAIR = 0x2000,
903 /* The system is upgraded; the entry has been or is to be repaired (dryrun). */
904 LUDA_UPGRADE = 0x1000,
905 /* Ignore this record, go directly to the next one. */
906 LUDA_IGNORE = 0x0800,
907 /* Something in the record is unknown, to be verified further. */
908 LUDA_UNKNOWN = 0x1000,
911 #define LU_DIRENT_ATTRS_MASK 0xf800
914 * Layout of readdir pages, as transmitted on wire.
917 /** valid if LUDA_FID is set. */
918 struct lu_fid lde_fid;
919 /** a unique entry identifier: a hash or an offset. */
921 /** total record length, including all attributes. */
925 /** optional variable size attributes following this entry.
926 * taken from enum lu_dirent_attrs.
929 /** name is followed by the attributes indicated in ->lde_attrs, in
930 * their natural order. After the last attribute, padding bytes are
931 * added to make ->lde_reclen a multiple of 8.
937 * Definitions of optional directory entry attribute formats.
939 * Individual attributes do not have their length encoded in a generic way. It
940 * is assumed that the consumer of an attribute knows its format. This means that
941 * it is impossible to skip over an unknown attribute, except by skipping over all
942 * remaining attributes (by using ->lde_reclen), which is not too
943 * constraining, because new server versions will append new attributes at
944 * the end of an entry.
948 * Fid directory attribute: a fid of an object referenced by the entry. This
949 * will almost always be requested by the client and supplied by the server.
951 * Aligned to 8 bytes.
953 /* To keep compatibility with 1.8, let's keep the fid in the lu_dirent struct. */
958 * Aligned to 2 bytes.
965 __u64 ldp_hash_start;
969 struct lu_dirent ldp_entries[0];
972 enum lu_dirpage_flags {
974 * dirpage contains no entry.
978 * last entry's lde_hash equals ldp_hash_end.
983 static inline struct lu_dirent *lu_dirent_start(struct lu_dirpage *dp)
985 if (le32_to_cpu(dp->ldp_flags) & LDF_EMPTY)
988 return dp->ldp_entries;
991 static inline struct lu_dirent *lu_dirent_next(struct lu_dirent *ent)
993 struct lu_dirent *next;
995 if (le16_to_cpu(ent->lde_reclen) != 0)
996 next = ((void *)ent) + le16_to_cpu(ent->lde_reclen);
1003 static inline size_t lu_dirent_calc_size(size_t namelen, __u16 attr)
1007 if (attr & LUDA_TYPE) {
1008 const size_t align = sizeof(struct luda_type) - 1;
1009 size = (sizeof(struct lu_dirent) + namelen + align) & ~align;
1010 size += sizeof(struct luda_type);
1012 size = sizeof(struct lu_dirent) + namelen;
1014 return (size + 7) & ~7;
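/*
 * Illustrative worked example for lu_dirent_calc_size() (it assumes only the
 * field layout shown above; the result is not a wire constant): for a
 * 5-character name with LUDA_TYPE requested, the entry header plus name is
 * padded to the luda_type alignment, a struct luda_type is appended, and the
 * total is then rounded up to a multiple of 8, matching the padding rule
 * described for the entry name above:
 *
 *	size_t len = lu_dirent_calc_size(5, LUDA_FID | LUDA_TYPE);
 *	// len is a multiple of 8 and >= sizeof(struct lu_dirent) + 5
 */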
1017 #define MDS_DIR_END_OFF 0xfffffffffffffffeULL
1020 * MDS_READPAGE page size
1022 * This is the directory page size packed in MDS_READPAGE RPC.
1023 * It's different from PAGE_CACHE_SIZE because the client needs to
1024 * access the struct lu_dirpage header packed at the beginning of
1025 * the "page"; without a fixed size there would be no way to find the
1026 * lu_dirpage header if the client and server PAGE_CACHE_SIZE differ.
1028 #define LU_PAGE_SHIFT 12
1029 #define LU_PAGE_SIZE (1UL << LU_PAGE_SHIFT)
1030 #define LU_PAGE_MASK (~(LU_PAGE_SIZE - 1))
1032 #define LU_PAGE_COUNT (1 << (PAGE_CACHE_SHIFT - LU_PAGE_SHIFT))
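/*
 * Illustrative example: with the usual 4KB client page size
 * (PAGE_CACHE_SHIFT == 12), LU_PAGE_COUNT is 1 and a cache page holds exactly
 * one lu_dirpage; on a 64KB-page client it would hold 16. A sketch of walking
 * them (bounds checks omitted, handle_dirpage() is a hypothetical consumer):
 *
 *	int i;
 *	struct lu_dirpage *dp = page_address(page);
 *
 *	for (i = 0; i < LU_PAGE_COUNT; i++, dp = (void *)dp + LU_PAGE_SIZE)
 *		handle_dirpage(dp);
 */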
1036 struct lustre_handle {
1039 #define DEAD_HANDLE_MAGIC 0xdeadbeefcafebabeULL
1041 static inline bool lustre_handle_is_used(const struct lustre_handle *lh)
1043 return lh->cookie != 0;
1046 static inline bool lustre_handle_equal(const struct lustre_handle *lh1,
1047 const struct lustre_handle *lh2)
1049 return lh1->cookie == lh2->cookie;
1052 static inline void lustre_handle_copy(struct lustre_handle *tgt,
1053 const struct lustre_handle *src)
1055 tgt->cookie = src->cookie;
1058 /* flags for lm_flags */
1059 #define MSGHDR_AT_SUPPORT 0x1
1060 #define MSGHDR_CKSUM_INCOMPAT18 0x2
1062 #define lustre_msg lustre_msg_v2
1063 /* we depend on this structure to be 8-byte aligned */
1064 /* this type is only endian-adjusted in lustre_unpack_msg() */
1065 struct lustre_msg_v2 {
1074 __u32 lm_buflens[0];
1077 /* without gss, ptlrpc_body is put in the first buffer. */
1078 #define PTLRPC_NUM_VERSIONS 4
1079 struct ptlrpc_body_v3 {
1080 struct lustre_handle pb_handle;
1085 __u64 pb_last_xid; /* highest replied XID without lower unreplied XID */
1086 __u16 pb_tag; /* virtual slot idx for multiple modifying RPCs */
1089 __u64 pb_last_committed;
1094 __u32 pb_timeout; /* for req, the deadline, for rep, the service est */
1095 __u32 pb_service_time; /* for rep, actual service time */
1098 /* VBR: pre-versions */
1099 __u64 pb_pre_versions[PTLRPC_NUM_VERSIONS];
1100 __u64 pb_mbits; /**< match bits for bulk request */
1101 /* padding for future needs */
1102 __u64 pb_padding64_0;
1103 __u64 pb_padding64_1;
1104 __u64 pb_padding64_2;
1105 char pb_jobid[LUSTRE_JOBID_SIZE];
1107 #define ptlrpc_body ptlrpc_body_v3
1109 struct ptlrpc_body_v2 {
1110 struct lustre_handle pb_handle;
1115 __u64 pb_last_xid; /* highest replied XID without lower unreplied XID */
1116 __u16 pb_tag; /* virtual slot idx for multiple modifying RPCs */
1119 __u64 pb_last_committed;
1124 __u32 pb_timeout; /* for req, the deadline, for rep, the service est */
1125 __u32 pb_service_time; /* for rep, actual service time, also used for
1126 net_latency of req */
1129 /* VBR: pre-versions */
1130 __u64 pb_pre_versions[PTLRPC_NUM_VERSIONS];
1131 __u64 pb_mbits; /**< unused in V2 */
1132 /* padding for future needs */
1133 __u64 pb_padding64_0;
1134 __u64 pb_padding64_1;
1135 __u64 pb_padding64_2;
1138 extern void lustre_swab_ptlrpc_body(struct ptlrpc_body *pb);
1140 /* message body offset for lustre_msg_v2 */
1141 /* ptlrpc body offset in all request/reply messages */
1142 #define MSG_PTLRPC_BODY_OFF 0
1144 /* normal request/reply message record offset */
1145 #define REQ_REC_OFF 1
1146 #define REPLY_REC_OFF 1
1148 /* ldlm request message body offset */
1149 #define DLM_LOCKREQ_OFF 1 /* lockreq offset */
1150 #define DLM_REQ_REC_OFF 2 /* normal dlm request record offset */
1152 /* ldlm intent lock message body offset */
1153 #define DLM_INTENT_IT_OFF 2 /* intent lock it offset */
1154 #define DLM_INTENT_REC_OFF 3 /* intent lock record offset */
1156 /* ldlm reply message body offset */
1157 #define DLM_LOCKREPLY_OFF 1 /* lockrep offset */
1158 #define DLM_REPLY_REC_OFF 2 /* reply record offset */
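/*
 * Illustrative sketch of how these offsets are used (lustre_msg_buf() is
 * declared in lustre_net.h, not here; its use is an assumption for this
 * example): every request and reply carries the ptlrpc_body in buffer 0,
 * and the opcode-specific record(s) follow.
 *
 *	struct ptlrpc_body *pb;
 *	struct ldlm_request *dlm_req;
 *
 *	pb = lustre_msg_buf(msg, MSG_PTLRPC_BODY_OFF, sizeof(*pb));
 *	dlm_req = lustre_msg_buf(msg, DLM_LOCKREQ_OFF, sizeof(*dlm_req));
 *	// an intent lock request additionally carries its ldlm_intent at
 *	// DLM_INTENT_IT_OFF and the intent record at DLM_INTENT_REC_OFF
 */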
1160 /** only use in req->rq_{req,rep}_swab_mask */
1161 #define MSG_PTLRPC_HEADER_OFF 31
1163 /* Flags that are operation-specific go in the top 16 bits. */
1164 #define MSG_OP_FLAG_MASK 0xffff0000
1165 #define MSG_OP_FLAG_SHIFT 16
1167 /* Flags that apply to all requests are in the bottom 16 bits */
1168 #define MSG_GEN_FLAG_MASK 0x0000ffff
1169 #define MSG_LAST_REPLAY 0x0001
1170 #define MSG_RESENT 0x0002
1171 #define MSG_REPLAY 0x0004
1172 /* #define MSG_AT_SUPPORT 0x0008
1173 * This was used in early prototypes of adaptive timeouts, and while there
1174 * shouldn't be any users of that code there also isn't a need for using this
1175 * bit. Defer usage until at least 1.10 to avoid potential conflict. */
1176 #define MSG_DELAY_REPLAY 0x0010
1177 #define MSG_VERSION_REPLAY 0x0020
1178 #define MSG_REQ_REPLAY_DONE 0x0040
1179 #define MSG_LOCK_REPLAY_DONE 0x0080
1182 * Flags for all connect opcodes (MDS_CONNECT, OST_CONNECT)
1185 #define MSG_CONNECT_RECOVERING 0x00000001
1186 #define MSG_CONNECT_RECONNECT 0x00000002
1187 #define MSG_CONNECT_REPLAYABLE 0x00000004
1188 //#define MSG_CONNECT_PEER 0x8
1189 #define MSG_CONNECT_LIBCLIENT 0x00000010
1190 #define MSG_CONNECT_INITIAL 0x00000020
1191 #define MSG_CONNECT_ASYNC 0x00000040
1192 #define MSG_CONNECT_NEXT_VER 0x00000080 /* use next version of lustre_msg */
1193 #define MSG_CONNECT_TRANSNO 0x00000100 /* report transno */
1196 #define OBD_CONNECT_RDONLY 0x1ULL /*client has read-only access*/
1197 #define OBD_CONNECT_INDEX 0x2ULL /*connect specific LOV idx */
1198 #define OBD_CONNECT_MDS 0x4ULL /*connect from MDT to OST */
1199 #define OBD_CONNECT_GRANT 0x8ULL /*OSC gets grant at connect */
1200 #define OBD_CONNECT_SRVLOCK 0x10ULL /*server takes locks for cli */
1201 #define OBD_CONNECT_VERSION 0x20ULL /*Lustre versions in ocd */
1202 #define OBD_CONNECT_REQPORTAL 0x40ULL /*Separate non-IO req portal */
1203 #define OBD_CONNECT_ACL 0x80ULL /*access control lists */
1204 #define OBD_CONNECT_XATTR 0x100ULL /*client use extended attr */
1205 #define OBD_CONNECT_CROW 0x200ULL /*MDS+OST create obj on write*/
1206 #define OBD_CONNECT_TRUNCLOCK 0x400ULL /*locks on server for punch */
1207 #define OBD_CONNECT_TRANSNO 0x800ULL /*replay sends init transno */
1208 #define OBD_CONNECT_IBITS 0x1000ULL /*support for inodebits locks*/
1209 #define OBD_CONNECT_JOIN 0x2000ULL /*files can be concatenated.
1210 *We do not support JOIN FILE
1211 *anymore; reserve this flag
1212 *just to prevent the bit from being reused */
1214 #define OBD_CONNECT_ATTRFID 0x4000ULL /*Server can GetAttr By Fid*/
1215 #define OBD_CONNECT_NODEVOH 0x8000ULL /*No open hndl on specl nodes*/
1216 #define OBD_CONNECT_RMT_CLIENT 0x10000ULL /*Remote client */
1217 #define OBD_CONNECT_RMT_CLIENT_FORCE 0x20000ULL /*Remote client by force */
1218 #define OBD_CONNECT_BRW_SIZE 0x40000ULL /*Max bytes per rpc */
1219 #define OBD_CONNECT_QUOTA64 0x80000ULL /*Not used since 2.4 */
1220 #define OBD_CONNECT_MDS_CAPA 0x100000ULL /*MDS capability */
1221 #define OBD_CONNECT_OSS_CAPA 0x200000ULL /*OSS capability */
1222 #define OBD_CONNECT_CANCELSET 0x400000ULL /*Early batched cancels. */
1223 #define OBD_CONNECT_SOM 0x800000ULL /*Size on MDS */
1224 #define OBD_CONNECT_AT 0x1000000ULL /*client uses AT */
1225 #define OBD_CONNECT_LRU_RESIZE 0x2000000ULL /*LRU resize feature. */
1226 #define OBD_CONNECT_MDS_MDS 0x4000000ULL /*MDS-MDS connection */
1227 #define OBD_CONNECT_REAL 0x8000000ULL /*real connection */
1228 #define OBD_CONNECT_CHANGE_QS 0x10000000ULL /*Not used since 2.4 */
1229 #define OBD_CONNECT_CKSUM 0x20000000ULL /*support several cksum algos*/
1230 #define OBD_CONNECT_FID 0x40000000ULL /*FID is supported by server */
1231 #define OBD_CONNECT_VBR 0x80000000ULL /*version based recovery */
1232 #define OBD_CONNECT_LOV_V3 0x100000000ULL /*client supports LOV v3 EA */
1233 #define OBD_CONNECT_GRANT_SHRINK 0x200000000ULL /* support grant shrink */
1234 #define OBD_CONNECT_SKIP_ORPHAN 0x400000000ULL /* don't reuse orphan objids */
1235 #define OBD_CONNECT_MAX_EASIZE 0x800000000ULL /* preserved for large EA */
1236 #define OBD_CONNECT_FULL20 0x1000000000ULL /* it is 2.0 client */
1237 #define OBD_CONNECT_LAYOUTLOCK 0x2000000000ULL /* client uses layout lock */
1238 #define OBD_CONNECT_64BITHASH 0x4000000000ULL /* client supports 64-bit dir hash */
1240 #define OBD_CONNECT_MAXBYTES 0x8000000000ULL /* max stripe size */
1241 #define OBD_CONNECT_IMP_RECOV 0x10000000000ULL /* imp recovery support */
1242 #define OBD_CONNECT_JOBSTATS 0x20000000000ULL /* jobid in ptlrpc_body */
1243 #define OBD_CONNECT_UMASK 0x40000000000ULL /* create uses client umask */
1244 #define OBD_CONNECT_EINPROGRESS 0x80000000000ULL /* client handles -EINPROGRESS
1245 * RPC error properly */
1246 #define OBD_CONNECT_GRANT_PARAM 0x100000000000ULL/* extra grant params used for
1247 * finer space reservation */
1248 #define OBD_CONNECT_FLOCK_OWNER 0x200000000000ULL /* for the fixed 1.8
1249 * policy and 2.x server */
1250 #define OBD_CONNECT_LVB_TYPE 0x400000000000ULL /* variable type of LVB */
1251 #define OBD_CONNECT_NANOSEC_TIME 0x800000000000ULL /* nanosecond timestamps */
1252 #define OBD_CONNECT_LIGHTWEIGHT 0x1000000000000ULL/* lightweight connection */
1253 #define OBD_CONNECT_SHORTIO 0x2000000000000ULL/* short io */
1254 #define OBD_CONNECT_PINGLESS 0x4000000000000ULL/* pings not required */
1255 #define OBD_CONNECT_FLOCK_DEAD 0x8000000000000ULL/* improved flock deadlock detection */
1256 #define OBD_CONNECT_DISP_STRIPE 0x10000000000000ULL/* create stripe disposition*/
1257 #define OBD_CONNECT_OPEN_BY_FID 0x20000000000000ULL /* open by fid won't pack name in request */
1259 #define OBD_CONNECT_LFSCK 0x40000000000000ULL/* support online LFSCK */
1260 #define OBD_CONNECT_UNLINK_CLOSE 0x100000000000000ULL/* close file in unlink */
1261 #define OBD_CONNECT_MULTIMODRPCS 0x200000000000000ULL /* support multiple modify RPCs in parallel */
1263 #define OBD_CONNECT_DIR_STRIPE 0x400000000000000ULL /* striped DNE dir */
1264 /** bulk matchbits is sent within ptlrpc_body */
1265 #define OBD_CONNECT_BULK_MBITS 0x2000000000000000ULL
1267 * Please DO NOT add flag values here before first ensuring that this same
1268 * flag value is not in use on some other branch. Please clear any such
1269 * changes with senior engineers before starting to use a new flag. Then,
1270 * submit a small patch against EVERY branch that ONLY adds the new flag,
1271 * updates obd_connect_names[] for lprocfs_rd_connect_flags(), adds the
1272 * flag to check_obd_connect_data(), and updates wiretests accordingly, so it
1273 * can be approved and landed easily to reserve the flag for future use. */
1275 /* The MNE_SWAB flag is overloading the MDS_MDS bit only for the MGS
1276 * connection. It is a temporary bug fix for Imperative Recovery interop
1277 * between 2.2 and 2.3 x86/ppc nodes, and can be removed when interop for
1278 * 2.2 clients/servers is no longer needed. LU-1252/LU-1644. */
1279 #define OBD_CONNECT_MNE_SWAB OBD_CONNECT_MDS_MDS
1281 #define OCD_HAS_FLAG(ocd, flg) \
1282 (!!((ocd)->ocd_connect_flags & OBD_CONNECT_##flg))
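/*
 * Illustrative usage sketch: a server checking what a client negotiated,
 * e.g. in connect handling (the ocd variable and the helper called here are
 * assumptions for the example):
 *
 *	if (OCD_HAS_FLAG(ocd, GRANT_SHRINK))
 *		enable_grant_shrinking();	// hypothetical helper
 */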
1285 #ifdef HAVE_LRU_RESIZE_SUPPORT
1286 #define LRU_RESIZE_CONNECT_FLAG OBD_CONNECT_LRU_RESIZE
1288 #define LRU_RESIZE_CONNECT_FLAG 0
1291 #define MDT_CONNECT_SUPPORTED (OBD_CONNECT_RDONLY | OBD_CONNECT_VERSION | \
1292 OBD_CONNECT_ACL | OBD_CONNECT_XATTR | \
1293 OBD_CONNECT_IBITS | \
1294 OBD_CONNECT_NODEVOH | OBD_CONNECT_ATTRFID | \
1295 OBD_CONNECT_CANCELSET | OBD_CONNECT_AT | \
1296 OBD_CONNECT_RMT_CLIENT | \
1297 OBD_CONNECT_RMT_CLIENT_FORCE | \
1298 OBD_CONNECT_BRW_SIZE | OBD_CONNECT_MDS_MDS | \
1299 OBD_CONNECT_FID | LRU_RESIZE_CONNECT_FLAG | \
1300 OBD_CONNECT_VBR | OBD_CONNECT_LOV_V3 | \
1301 OBD_CONNECT_FULL20 | \
1302 OBD_CONNECT_64BITHASH | OBD_CONNECT_JOBSTATS | \
1303 OBD_CONNECT_EINPROGRESS | \
1304 OBD_CONNECT_LIGHTWEIGHT | OBD_CONNECT_UMASK | \
1305 OBD_CONNECT_LVB_TYPE | OBD_CONNECT_LAYOUTLOCK |\
1306 OBD_CONNECT_PINGLESS | OBD_CONNECT_MAX_EASIZE |\
1307 OBD_CONNECT_FLOCK_DEAD | \
1308 OBD_CONNECT_DISP_STRIPE | OBD_CONNECT_LFSCK | \
1309 OBD_CONNECT_OPEN_BY_FID | \
1310 OBD_CONNECT_DIR_STRIPE | \
1311 OBD_CONNECT_BULK_MBITS | \
1312 OBD_CONNECT_MULTIMODRPCS)
1314 #define OST_CONNECT_SUPPORTED (OBD_CONNECT_SRVLOCK | OBD_CONNECT_GRANT | \
1315 OBD_CONNECT_REQPORTAL | OBD_CONNECT_VERSION | \
1316 OBD_CONNECT_TRUNCLOCK | OBD_CONNECT_INDEX | \
1317 OBD_CONNECT_BRW_SIZE | \
1318 OBD_CONNECT_CANCELSET | OBD_CONNECT_AT | \
1319 LRU_RESIZE_CONNECT_FLAG | OBD_CONNECT_CKSUM | \
1320 OBD_CONNECT_RMT_CLIENT | \
1321 OBD_CONNECT_RMT_CLIENT_FORCE | OBD_CONNECT_VBR | \
1322 OBD_CONNECT_MDS | OBD_CONNECT_SKIP_ORPHAN | \
1323 OBD_CONNECT_GRANT_SHRINK | OBD_CONNECT_FULL20 | \
1324 OBD_CONNECT_64BITHASH | OBD_CONNECT_MAXBYTES | \
1325 OBD_CONNECT_MAX_EASIZE | \
1326 OBD_CONNECT_EINPROGRESS | \
1327 OBD_CONNECT_JOBSTATS | \
1328 OBD_CONNECT_LIGHTWEIGHT | OBD_CONNECT_LVB_TYPE|\
1329 OBD_CONNECT_LAYOUTLOCK | OBD_CONNECT_FID | \
1330 OBD_CONNECT_PINGLESS | OBD_CONNECT_LFSCK | \
1331 OBD_CONNECT_BULK_MBITS)
1332 #define ECHO_CONNECT_SUPPORTED (0)
1333 #define MGS_CONNECT_SUPPORTED (OBD_CONNECT_VERSION | OBD_CONNECT_AT | \
1334 OBD_CONNECT_FULL20 | OBD_CONNECT_IMP_RECOV | \
1335 OBD_CONNECT_MNE_SWAB | OBD_CONNECT_PINGLESS |\
1336 OBD_CONNECT_BULK_MBITS)
1338 /* Features required for this version of the client to work with server */
1339 #define CLIENT_CONNECT_MDT_REQD (OBD_CONNECT_IBITS | OBD_CONNECT_FID | \
1342 /* This structure is used for both request and reply.
1344 * If we eventually have separate connect data for different types, which we
1345 * almost certainly will, then perhaps we stick a union in here. */
1346 struct obd_connect_data {
1347 __u64 ocd_connect_flags; /* OBD_CONNECT_* per above */
1348 __u32 ocd_version; /* lustre release version number */
1349 __u32 ocd_grant; /* initial cache grant amount (bytes) */
1350 __u32 ocd_index; /* LOV index to connect to */
1351 __u32 ocd_brw_size; /* Maximum BRW size in bytes */
1352 __u64 ocd_ibits_known; /* inode bits this client understands */
1353 __u8 ocd_blocksize; /* log2 of the backend filesystem blocksize */
1354 __u8 ocd_inodespace; /* log2 of the per-inode space consumption */
1355 __u16 ocd_grant_extent; /* per-extent grant overhead, in 1K blocks */
1356 __u32 ocd_unused; /* also fix lustre_swab_connect */
1357 __u64 ocd_transno; /* first transno from client to be replayed */
1358 __u32 ocd_group; /* MDS group on OST */
1359 __u32 ocd_cksum_types; /* supported checksum algorithms */
1360 __u32 ocd_max_easize; /* How big LOV EA can be on MDS */
1361 __u32 ocd_instance; /* instance # of this target */
1362 __u64 ocd_maxbytes; /* Maximum stripe size in bytes */
1363 /* Fields after ocd_maxbytes are only accessible by the receiver
1364 * if the corresponding flag in ocd_connect_flags is set. Accessing
1365 * any field after ocd_maxbytes on the receiver without a valid flag
1366 * may result in out-of-bound memory access and kernel oops. */
1367 __u16 ocd_maxmodrpcs; /* Maximum modify RPCs in parallel */
1368 __u16 padding0; /* added 2.1.0. also fix lustre_swab_connect */
1369 __u32 padding1; /* added 2.1.0. also fix lustre_swab_connect */
1370 __u64 padding2; /* added 2.1.0. also fix lustre_swab_connect */
1371 __u64 padding3; /* added 2.1.0. also fix lustre_swab_connect */
1372 __u64 padding4; /* added 2.1.0. also fix lustre_swab_connect */
1373 __u64 padding5; /* added 2.1.0. also fix lustre_swab_connect */
1374 __u64 padding6; /* added 2.1.0. also fix lustre_swab_connect */
1375 __u64 padding7; /* added 2.1.0. also fix lustre_swab_connect */
1376 __u64 padding8; /* added 2.1.0. also fix lustre_swab_connect */
1377 __u64 padding9; /* added 2.1.0. also fix lustre_swab_connect */
1378 __u64 paddingA; /* added 2.1.0. also fix lustre_swab_connect */
1379 __u64 paddingB; /* added 2.1.0. also fix lustre_swab_connect */
1380 __u64 paddingC; /* added 2.1.0. also fix lustre_swab_connect */
1381 __u64 paddingD; /* added 2.1.0. also fix lustre_swab_connect */
1382 __u64 paddingE; /* added 2.1.0. also fix lustre_swab_connect */
1383 __u64 paddingF; /* added 2.1.0. also fix lustre_swab_connect */
1386 * Please DO NOT use any fields here before first ensuring that this same
1387 * field is not in use on some other branch. Please clear any such changes
1388 * with senior engineers before starting to use a new field. Then, submit
1389 * a small patch against EVERY branch that ONLY adds the new field along with
1390 * the matching OBD_CONNECT flag, so that can be approved and landed easily to
1391 * reserve the flag for future use. */
1394 extern void lustre_swab_connect(struct obd_connect_data *ocd);
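/*
 * Illustrative sketch of the rule stated above for obd_connect_data: fields
 * past ocd_maxbytes may only be read when the matching flag was negotiated
 * (the variable names here are assumptions for the example):
 *
 *	__u16 max_mod_rpcs = 1;
 *
 *	if (OCD_HAS_FLAG(ocd, MULTIMODRPCS))
 *		max_mod_rpcs = ocd->ocd_maxmodrpcs;
 */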
1397 * Supported checksum algorithms. Up to 32 checksum types are supported.
1398 * (32-bit mask stored in obd_connect_data::ocd_cksum_types)
1399 * Please update DECLARE_CKSUM_NAME/OBD_CKSUM_ALL in obd.h when adding a new
1400 * algorithm and also the OBD_FL_CKSUM* flags.
1403 OBD_CKSUM_CRC32 = 0x00000001,
1404 OBD_CKSUM_ADLER = 0x00000002,
1405 OBD_CKSUM_CRC32C= 0x00000004,
1409 * OST requests: OBDO & OBD request records
1414 OST_REPLY = 0, /* reply ? */
1430 OST_QUOTACHECK = 18, /* not used since 2.4 */
1432 OST_QUOTA_ADJUST_QUNIT = 20, /* not used since 2.4 */
1435 #define OST_FIRST_OPC OST_REPLY
1438 OBD_FL_INLINEDATA = 0x00000001,
1439 OBD_FL_OBDMDEXISTS = 0x00000002,
1440 OBD_FL_DELORPHAN = 0x00000004, /* if set in o_flags delete orphans */
1441 OBD_FL_NORPC = 0x00000008, /* set in o_flags do in OSC not OST */
1442 OBD_FL_IDONLY = 0x00000010, /* set in o_flags only adjust obj id*/
1443 OBD_FL_RECREATE_OBJS= 0x00000020, /* recreate missing obj */
1444 OBD_FL_DEBUG_CHECK = 0x00000040, /* echo client/server debug check */
1445 OBD_FL_NO_USRQUOTA = 0x00000100, /* the object's owner is over quota */
1446 OBD_FL_NO_GRPQUOTA = 0x00000200, /* the object's group is over quota */
1447 OBD_FL_CREATE_CROW = 0x00000400, /* object should be created on write */
1448 OBD_FL_SRVLOCK = 0x00000800, /* delegate DLM locking to server */
1449 OBD_FL_CKSUM_CRC32 = 0x00001000, /* CRC32 checksum type */
1450 OBD_FL_CKSUM_ADLER = 0x00002000, /* ADLER checksum type */
1451 OBD_FL_CKSUM_CRC32C = 0x00004000, /* CRC32C checksum type */
1452 OBD_FL_CKSUM_RSVD2 = 0x00008000, /* for future cksum types */
1453 OBD_FL_CKSUM_RSVD3 = 0x00010000, /* for future cksum types */
1454 OBD_FL_SHRINK_GRANT = 0x00020000, /* object shrink the grant */
1455 OBD_FL_MMAP = 0x00040000, /* object is mmapped on the client.
1456 * XXX: obsoleted - reserved for old
1457 * clients prior to 2.2 */
1458 OBD_FL_RECOV_RESEND = 0x00080000, /* recoverable resent */
1459 OBD_FL_NOSPC_BLK = 0x00100000, /* no more block space on OST */
1460 OBD_FL_FLUSH = 0x00200000, /* flush pages on the OST */
1461 OBD_FL_SHORT_IO = 0x00400000, /* short io request */
1463 /* Note that while these checksum values are currently separate bits,
1464 * in 2.x we can actually allow all values from 1-31 if we wanted. */
1465 OBD_FL_CKSUM_ALL = OBD_FL_CKSUM_CRC32 | OBD_FL_CKSUM_ADLER |
1466 OBD_FL_CKSUM_CRC32C,
1468 /* mask for local-only flag, which won't be sent over network */
1469 OBD_FL_LOCAL_MASK = 0xF0000000,
1473 * All LOV EA magics should have the same postfix: if some new Lustre
1474 * version introduces a new LOV EA magic, then after a downgrade to an old
1475 * Lustre, even though the old version does not recognize such a
1476 * new magic, it can still distinguish the corrupted cases by checking
1477 * the magic's postfix.
1479 #define LOV_MAGIC_MAGIC 0x0BD0
1480 #define LOV_MAGIC_MASK 0xFFFF
1482 #define LOV_MAGIC_V1 (0x0BD10000 | LOV_MAGIC_MAGIC)
1483 #define LOV_MAGIC_JOIN_V1 (0x0BD20000 | LOV_MAGIC_MAGIC)
1484 #define LOV_MAGIC_V3 (0x0BD30000 | LOV_MAGIC_MAGIC)
1485 #define LOV_MAGIC_MIGRATE (0x0BD40000 | LOV_MAGIC_MAGIC)
1486 /* reserved for specifying OSTs */
1487 #define LOV_MAGIC_SPECIFIC (0x0BD50000 | LOV_MAGIC_MAGIC)
1488 #define LOV_MAGIC LOV_MAGIC_V1
1491 * magic for fully defined striping
1492 * the idea is that we should have different magics for striping "hints"
1493 * (struct lov_user_md_v[13]) and defined ready-to-use striping (struct
1494 * lov_mds_md_v[13]). at the moment the magics are used in wire protocol,
1495 * we can't just change it w/o long way preparation, but we still need a
1496 * mechanism to allow LOD to differentiate hint versus ready striping.
1497 * so, at the moment we do a trick: MDT knows what to expect from request
1498 * depending on the case (replay uses ready striping, non-replay req uses
1499 * hints), so MDT replaces magic with appropriate one and now LOD can
1500 * easily understand what's inside -bzzz
1502 #define LOV_MAGIC_V1_DEF 0x0CD10BD0
1503 #define LOV_MAGIC_V3_DEF 0x0CD30BD0
1505 #define lov_pattern(pattern) (pattern & ~LOV_PATTERN_F_MASK)
1506 #define lov_pattern_flags(pattern) (pattern & LOV_PATTERN_F_MASK)
1508 #define lov_ost_data lov_ost_data_v1
1509 struct lov_ost_data_v1 { /* per-stripe data structure (little-endian)*/
1510 struct ost_id l_ost_oi; /* OST object ID */
1511 __u32 l_ost_gen; /* generation of this l_ost_idx */
1512 __u32 l_ost_idx; /* OST index in LOV (lov_tgt_desc->tgts) */
1515 #define lov_mds_md lov_mds_md_v1
1516 struct lov_mds_md_v1 { /* LOV EA mds/wire data (little-endian) */
1517 __u32 lmm_magic; /* magic number = LOV_MAGIC_V1 */
1518 __u32 lmm_pattern; /* LOV_PATTERN_RAID0, LOV_PATTERN_RAID1 */
1519 struct ost_id lmm_oi; /* LOV object ID */
1520 __u32 lmm_stripe_size; /* size of stripe in bytes */
1521 /* lmm_stripe_count used to be __u32 */
1522 __u16 lmm_stripe_count; /* num stripes in use for this object */
1523 __u16 lmm_layout_gen; /* layout generation number */
1524 struct lov_ost_data_v1 lmm_objects[0]; /* per-stripe data */
1528 * Sigh, because pre-2.4 uses
1529 * struct lov_mds_md_v1 {
1531 * __u64 lmm_object_id;
1532 * __u64 lmm_object_seq;
1535 * to identify the LOV(MDT) object, and lmm_object_seq will
1536 * be a normal fid, which makes it hard to fold these conversions
1537 * into ostid_to_fid(), so we do the lmm_oi/fid conversion separately.
1539 * We can tell the lmm_oi apart this way:
1540 * 1.8: lmm_object_id = {inode}, lmm_object_gr = 0
1541 * 2.1: lmm_object_id = {oid < 128k}, lmm_object_seq = FID_SEQ_NORMAL
1542 * 2.4: lmm_oi.f_seq = FID_SEQ_NORMAL, lmm_oi.f_oid = {oid < 128k},
1545 * But currently lmm_oi/lsm_oi does not have any "real" usages,
1546 * except for printing some information, and the user can always
1547 * get the real FID from the LMA; besides, this multiple-case check might
1548 * make swabbing more complicated. So we will keep using id/seq for lmm_oi.
1551 static inline void fid_to_lmm_oi(const struct lu_fid *fid,
1554 oi->oi.oi_id = fid_oid(fid);
1555 oi->oi.oi_seq = fid_seq(fid);
1558 static inline void lmm_oi_set_seq(struct ost_id *oi, __u64 seq)
1560 oi->oi.oi_seq = seq;
1563 static inline void lmm_oi_set_id(struct ost_id *oi, __u64 oid)
1568 static inline __u64 lmm_oi_id(const struct ost_id *oi)
1570 return oi->oi.oi_id;
1573 static inline __u64 lmm_oi_seq(const struct ost_id *oi)
1575 return oi->oi.oi_seq;
1578 static inline void lmm_oi_le_to_cpu(struct ost_id *dst_oi,
1579 const struct ost_id *src_oi)
1581 dst_oi->oi.oi_id = le64_to_cpu(src_oi->oi.oi_id);
1582 dst_oi->oi.oi_seq = le64_to_cpu(src_oi->oi.oi_seq);
1585 static inline void lmm_oi_cpu_to_le(struct ost_id *dst_oi,
1586 const struct ost_id *src_oi)
1588 dst_oi->oi.oi_id = cpu_to_le64(src_oi->oi.oi_id);
1589 dst_oi->oi.oi_seq = cpu_to_le64(src_oi->oi.oi_seq);
1592 /* extern void lustre_swab_lov_mds_md(struct lov_mds_md *llm); */
1594 #define MAX_MD_SIZE (sizeof(struct lov_mds_md) + 4 * sizeof(struct lov_ost_data))
1595 #define MIN_MD_SIZE (sizeof(struct lov_mds_md) + 1 * sizeof(struct lov_ost_data))
1597 /* This is the default MDT reply size allocated; should the striping be bigger,
1598 * it will be reallocated in mdt_fix_reply().
1599 * 100 stripes is a bit less than 2.5KB of data */
1600 #define DEF_REP_MD_SIZE (sizeof(struct lov_mds_md) + \
1601 100 * sizeof(struct lov_ost_data))
1603 #define XATTR_NAME_ACL_ACCESS "system.posix_acl_access"
1604 #define XATTR_NAME_ACL_DEFAULT "system.posix_acl_default"
1605 #define XATTR_USER_PREFIX "user."
1606 #define XATTR_TRUSTED_PREFIX "trusted."
1607 #define XATTR_SECURITY_PREFIX "security."
1609 #define XATTR_NAME_LOV "trusted.lov"
1610 #define XATTR_NAME_LMA "trusted.lma"
1611 #define XATTR_NAME_LMV "trusted.lmv"
1612 #define XATTR_NAME_DEFAULT_LMV "trusted.dmv"
1613 #define XATTR_NAME_LINK "trusted.link"
1614 #define XATTR_NAME_FID "trusted.fid"
1615 #define XATTR_NAME_VERSION "trusted.version"
1616 #define XATTR_NAME_SOM "trusted.som"
1617 #define XATTR_NAME_HSM "trusted.hsm"
1618 #define XATTR_NAME_LFSCK_BITMAP "trusted.lfsck_bitmap"
1619 #define XATTR_NAME_DUMMY "trusted.dummy"
1621 #if LUSTRE_VERSION_CODE < OBD_OCD_VERSION(2, 8, 53, 0)
1622 # define XATTR_NAME_LFSCK_NAMESPACE_OLD "trusted.lfsck_namespace"
1625 #define XATTR_NAME_LFSCK_NAMESPACE "trusted.lfsck_ns"
1626 #define XATTR_NAME_MAX_LEN 32 /* increase this if a longer name is needed. */
1628 struct lov_mds_md_v3 { /* LOV EA mds/wire data (little-endian) */
1629 __u32 lmm_magic; /* magic number = LOV_MAGIC_V3 */
1630 __u32 lmm_pattern; /* LOV_PATTERN_RAID0, LOV_PATTERN_RAID1 */
1631 struct ost_id lmm_oi; /* LOV object ID */
1632 __u32 lmm_stripe_size; /* size of stripe in bytes */
1633 /* lmm_stripe_count used to be __u32 */
1634 __u16 lmm_stripe_count; /* num stripes in use for this object */
1635 __u16 lmm_layout_gen; /* layout generation number */
1636 char lmm_pool_name[LOV_MAXPOOLNAME + 1]; /* must be 32bit aligned */
1637 struct lov_ost_data_v1 lmm_objects[0]; /* per-stripe data */
1640 static inline __u32 lov_mds_md_size(__u16 stripes, __u32 lmm_magic)
1642 if (lmm_magic == LOV_MAGIC_V3)
1643 return sizeof(struct lov_mds_md_v3) +
1644 stripes * sizeof(struct lov_ost_data_v1);
1646 return sizeof(struct lov_mds_md_v1) +
1647 stripes * sizeof(struct lov_ost_data_v1);
1651 lov_mds_md_max_stripe_count(size_t buf_size, __u32 lmm_magic)
1653 switch (lmm_magic) {
1654 case LOV_MAGIC_V1: {
1655 struct lov_mds_md_v1 lmm;
1657 if (buf_size < sizeof(lmm))
1660 return (buf_size - sizeof(lmm)) / sizeof(lmm.lmm_objects[0]);
1662 case LOV_MAGIC_V3: {
1663 struct lov_mds_md_v3 lmm;
1665 if (buf_size < sizeof(lmm))
1668 return (buf_size - sizeof(lmm)) / sizeof(lmm.lmm_objects[0]);
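/*
 * Illustrative example of how these two helpers relate (example values only):
 *
 *	__u32 lmm_size = lov_mds_md_size(4, LOV_MAGIC_V1);
 *	// lmm_size == sizeof(struct lov_mds_md_v1) +
 *	//	       4 * sizeof(struct lov_ost_data_v1)
 *	// and lov_mds_md_max_stripe_count(lmm_size, LOV_MAGIC_V1) == 4
 */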
1675 #define OBD_MD_FLID (0x00000001ULL) /* object ID */
1676 #define OBD_MD_FLATIME (0x00000002ULL) /* access time */
1677 #define OBD_MD_FLMTIME (0x00000004ULL) /* data modification time */
1678 #define OBD_MD_FLCTIME (0x00000008ULL) /* change time */
1679 #define OBD_MD_FLSIZE (0x00000010ULL) /* size */
1680 #define OBD_MD_FLBLOCKS (0x00000020ULL) /* allocated blocks count */
1681 #define OBD_MD_FLBLKSZ (0x00000040ULL) /* block size */
1682 #define OBD_MD_FLMODE (0x00000080ULL) /* access bits (mode & ~S_IFMT) */
1683 #define OBD_MD_FLTYPE (0x00000100ULL) /* object type (mode & S_IFMT) */
1684 #define OBD_MD_FLUID (0x00000200ULL) /* user ID */
1685 #define OBD_MD_FLGID (0x00000400ULL) /* group ID */
1686 #define OBD_MD_FLFLAGS (0x00000800ULL) /* flags word */
1687 #define OBD_MD_FLNLINK (0x00002000ULL) /* link count */
1688 #define OBD_MD_FLGENER (0x00004000ULL) /* generation number */
1689 /*#define OBD_MD_FLINLINE (0x00008000ULL) inline data. used until 1.6.5 */
1690 #define OBD_MD_FLRDEV (0x00010000ULL) /* device number */
1691 #define OBD_MD_FLEASIZE (0x00020000ULL) /* extended attribute data */
1692 #define OBD_MD_LINKNAME (0x00040000ULL) /* symbolic link target */
1693 #define OBD_MD_FLHANDLE (0x00080000ULL) /* file/lock handle */
1694 #define OBD_MD_FLCKSUM (0x00100000ULL) /* bulk data checksum */
1695 #define OBD_MD_FLQOS (0x00200000ULL) /* quality of service stats */
1696 /*#define OBD_MD_FLOSCOPQ (0x00400000ULL) osc opaque data, never used */
1697 /* OBD_MD_FLCOOKIE (0x00800000ULL) obsolete in 2.8 */
1698 #define OBD_MD_FLGROUP (0x01000000ULL) /* group */
1699 #define OBD_MD_FLFID (0x02000000ULL) /* ->ost write inline fid */
1700 #define OBD_MD_FLEPOCH (0x04000000ULL) /* ->ost write with ioepoch */
1701 /* ->mds if epoch opens or closes */
1702 #define OBD_MD_FLGRANT (0x08000000ULL) /* ost preallocation space grant */
1703 #define OBD_MD_FLDIREA (0x10000000ULL) /* dir's extended attribute data */
1704 #define OBD_MD_FLUSRQUOTA (0x20000000ULL) /* over quota flags sent from ost */
1705 #define OBD_MD_FLGRPQUOTA (0x40000000ULL) /* over quota flags sent from ost */
1706 #define OBD_MD_FLMODEASIZE (0x80000000ULL) /* EA size will be changed */
1708 #define OBD_MD_MDS (0x0000000100000000ULL) /* where an inode lives on */
1709 #define OBD_MD_REINT (0x0000000200000000ULL) /* reintegrate oa */
1710 #define OBD_MD_MEA (0x0000000400000000ULL) /* CMD split EA */
1711 #define OBD_MD_TSTATE (0x0000000800000000ULL) /* transient state field */
1713 #define OBD_MD_FLXATTR (0x0000001000000000ULL) /* xattr */
1714 #define OBD_MD_FLXATTRLS (0x0000002000000000ULL) /* xattr list */
1715 #define OBD_MD_FLXATTRRM (0x0000004000000000ULL) /* xattr remove */
1716 #define OBD_MD_FLACL (0x0000008000000000ULL) /* ACL */
1717 #define OBD_MD_FLRMTPERM (0x0000010000000000ULL) /* remote permission */
1718 #define OBD_MD_FLMDSCAPA (0x0000020000000000ULL) /* MDS capability */
1719 #define OBD_MD_FLOSSCAPA (0x0000040000000000ULL) /* OSS capability */
1720 #define OBD_MD_FLCKSPLIT (0x0000080000000000ULL) /* Check split on server */
1721 #define OBD_MD_FLCROSSREF (0x0000100000000000ULL) /* Cross-ref case */
1722 #define OBD_MD_FLGETATTRLOCK (0x0000200000000000ULL) /* Get IOEpoch attributes
1723 * under lock; for xattr
1724 * requests means the
1725 * client holds the lock */
1726 #define OBD_MD_FLOBJCOUNT (0x0000400000000000ULL) /* for multiple destroy */
1728 #define OBD_MD_FLRMTLSETFACL (0x0001000000000000ULL) /* lfs lsetfacl case */
1729 #define OBD_MD_FLRMTLGETFACL (0x0002000000000000ULL) /* lfs lgetfacl case */
1730 #define OBD_MD_FLRMTRSETFACL (0x0004000000000000ULL) /* lfs rsetfacl case */
1731 #define OBD_MD_FLRMTRGETFACL (0x0008000000000000ULL) /* lfs rgetfacl case */
1733 #define OBD_MD_FLDATAVERSION (0x0010000000000000ULL) /* iversion sum */
1734 #define OBD_MD_CLOSE_INTENT_EXECED (0x0020000000000000ULL) /* close intent executed */
1737 #define OBD_MD_DEFAULT_MEA (0x0040000000000000ULL) /* default MEA */
1739 #define OBD_MD_FLGETATTR (OBD_MD_FLID | OBD_MD_FLATIME | OBD_MD_FLMTIME | \
1740 OBD_MD_FLCTIME | OBD_MD_FLSIZE | OBD_MD_FLBLKSZ | \
1741 OBD_MD_FLMODE | OBD_MD_FLTYPE | OBD_MD_FLUID | \
1742 OBD_MD_FLGID | OBD_MD_FLFLAGS | OBD_MD_FLNLINK | \
1743 OBD_MD_FLGENER | OBD_MD_FLRDEV | OBD_MD_FLGROUP)
1745 #define OBD_MD_FLXATTRALL (OBD_MD_FLXATTR | OBD_MD_FLXATTRLS)
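/*
 * Usage sketch (illustrative only; the helper below is hypothetical and not
 * part of this header): OBD_MD_* bits are OR-ed into a 64-bit "valid" mask,
 * so a receiver only trusts fields whose bit is set.
 *
 *	static inline bool example_basic_attrs_valid(__u64 valid)
 *	{
 *		return (valid & OBD_MD_FLGETATTR) == OBD_MD_FLGETATTR;
 *	}
 */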
1747 /* don't forget obdo_fid which is way down at the bottom so it can
1748 * come after the definition of llog_cookie */
1752 HSS_CLEARMASK = 0x02,
1753 HSS_ARCHIVE_ID = 0x04,
1756 struct hsm_state_set {
1758 __u32 hss_archive_id;
1760 __u64 hss_clearmask;
1763 extern void lustre_swab_hsm_user_state(struct hsm_user_state *hus);
1764 extern void lustre_swab_hsm_state_set(struct hsm_state_set *hss);
1766 extern void lustre_swab_obd_statfs (struct obd_statfs *os);
1768 /* ost_body.data values for OST_BRW */
1770 #define OBD_BRW_READ 0x01
1771 #define OBD_BRW_WRITE 0x02
1772 #define OBD_BRW_RWMASK (OBD_BRW_READ | OBD_BRW_WRITE)
#define OBD_BRW_SYNC		0x08 /* this page is a part of a synchronous
				      * transfer and is not accounted in
				      * grant. */
1776 #define OBD_BRW_CHECK 0x10
1777 #define OBD_BRW_FROM_GRANT 0x20 /* the osc manages this under llite */
1778 #define OBD_BRW_GRANTED 0x40 /* the ost manages this */
1779 #define OBD_BRW_NOCACHE 0x80 /* this page is a part of non-cached IO */
1780 #define OBD_BRW_NOQUOTA 0x100
1781 #define OBD_BRW_SRVLOCK 0x200 /* Client holds no lock over this page */
1782 #define OBD_BRW_ASYNC 0x400 /* Server may delay commit to disk */
1783 #define OBD_BRW_MEMALLOC 0x800 /* Client runs in the "kswapd" context */
1784 #define OBD_BRW_OVER_USRQUOTA 0x1000 /* Running out of user quota */
1785 #define OBD_BRW_OVER_GRPQUOTA 0x2000 /* Running out of group quota */
1786 #define OBD_BRW_SOFT_SYNC 0x4000 /* This flag notifies the server
1787 * that the client is running low on
1788 * space for unstable pages; asking
1789 * it to sync quickly */
1791 #define OBD_OBJECT_EOF LUSTRE_EOF
1793 #define OST_MIN_PRECREATE 32
1794 #define OST_MAX_PRECREATE 20000
struct obd_ioobj {
	struct ost_id ioo_oid;	/* object ID, if multi-obj BRW */
	__u32 ioo_max_brw;	/* low 16 bits were o_mode before 2.4,
				 * now (PTLRPC_BULK_OPS_COUNT - 1) in
				 * high 16 bits in 2.4 and later */
	__u32 ioo_bufcnt;	/* number of niobufs for this object */
};
1804 #define IOOBJ_MAX_BRW_BITS 16
1805 #define IOOBJ_TYPE_MASK ((1U << IOOBJ_MAX_BRW_BITS) - 1)
1806 #define ioobj_max_brw_get(ioo) (((ioo)->ioo_max_brw >> IOOBJ_MAX_BRW_BITS) + 1)
1807 #define ioobj_max_brw_set(ioo, num) \
1808 do { (ioo)->ioo_max_brw = ((num) - 1) << IOOBJ_MAX_BRW_BITS; } while (0)
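/*
 * Usage sketch (illustrative only; "ioo" is a hypothetical local): the BRW
 * count is stored biased by one in the high 16 bits of ioo_max_brw, so a
 * zeroed field still decodes to a single bulk RPC.
 *
 *	struct obd_ioobj ioo = { 0 };
 *
 *	ioobj_max_brw_set(&ioo, 16);
 *	// ioo.ioo_max_brw == (15 << IOOBJ_MAX_BRW_BITS)
 *	// ioobj_max_brw_get(&ioo) == 16
 */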
1810 extern void lustre_swab_obd_ioobj (struct obd_ioobj *ioo);
1812 /* multiple of 8 bytes => can array */
1813 struct niobuf_remote {
1819 void lustre_swab_niobuf_remote(struct niobuf_remote *nbr);
1821 /* lock value block communicated between the filter and llite */
1823 /* OST_LVB_ERR_INIT is needed because the return code in rc is
1824 * negative, i.e. because ((MASK + rc) & MASK) != MASK. */
1825 #define OST_LVB_ERR_INIT 0xffbadbad80000000ULL
1826 #define OST_LVB_ERR_MASK 0xffbadbad00000000ULL
1827 #define OST_LVB_IS_ERR(blocks) \
1828 ((blocks & OST_LVB_ERR_MASK) == OST_LVB_ERR_MASK)
1829 #define OST_LVB_SET_ERR(blocks, rc) \
1830 do { blocks = OST_LVB_ERR_INIT + rc; } while (0)
1831 #define OST_LVB_GET_ERR(blocks) (int)(blocks - OST_LVB_ERR_INIT)
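/*
 * Worked example (illustrative only): encoding the error -2 (-ENOENT) into
 * the "blocks" field and recovering it again.
 *
 *	__u64 blocks;
 *
 *	OST_LVB_SET_ERR(blocks, -2);
 *	// blocks == 0xffbadbad7ffffffeULL
 *	// OST_LVB_IS_ERR(blocks) is true
 *	// OST_LVB_GET_ERR(blocks) == -2
 */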
1841 extern void lustre_swab_ost_lvb_v1(struct ost_lvb_v1 *lvb);
1855 extern void lustre_swab_ost_lvb(struct ost_lvb *lvb);
1858 * lquota data structures
1861 #ifndef QUOTABLOCK_BITS
1862 # define QUOTABLOCK_BITS LUSTRE_QUOTABLOCK_BITS
1865 #ifndef QUOTABLOCK_SIZE
1866 # define QUOTABLOCK_SIZE LUSTRE_QUOTABLOCK_SIZE
1870 # define toqb lustre_stoqb
/* The lquota_id structure is a union of all the possible identifier types that
 * can be used with quota; this includes:
1877 * - a FID which can be used for per-directory quota in the future */
1879 struct lu_fid qid_fid; /* FID for per-directory quota */
1880 __u64 qid_uid; /* user identifier */
1881 __u64 qid_gid; /* group identifier */
1884 /* quotactl management */
1885 struct obd_quotactl {
1887 __u32 qc_type; /* see Q_* flag below */
1890 struct obd_dqinfo qc_dqinfo;
1891 struct obd_dqblk qc_dqblk;
1894 extern void lustre_swab_obd_quotactl(struct obd_quotactl *q);
1896 #define Q_COPY(out, in, member) (out)->member = (in)->member
#define QCTL_COPY(out, in)			\
do {						\
	Q_COPY(out, in, qc_cmd);		\
	Q_COPY(out, in, qc_type);		\
	Q_COPY(out, in, qc_id);			\
	Q_COPY(out, in, qc_stat);		\
	Q_COPY(out, in, qc_dqinfo);		\
	Q_COPY(out, in, qc_dqblk);		\
} while (0)
1908 /* Body of quota request used for quota acquire/release RPCs between quota
 * master (aka QMT) and slaves (aka QSD). */
1911 struct lu_fid qb_fid; /* FID of global index packing the pool ID
1912 * and type (data or metadata) as well as
1913 * the quota type (user or group). */
1914 union lquota_id qb_id; /* uid or gid or directory FID */
1915 __u32 qb_flags; /* see below */
1917 __u64 qb_count; /* acquire/release count (kbytes/inodes) */
1918 __u64 qb_usage; /* current slave usage (kbytes/inodes) */
1919 __u64 qb_slv_ver; /* slave index file version */
1920 struct lustre_handle qb_lockh; /* per-ID lock handle */
1921 struct lustre_handle qb_glb_lockh; /* global lock handle */
1922 __u64 qb_padding1[4];
/* When the quota_body is used in the reply to the quota global intent
 * lock (IT_QUOTA_CONN) request, qb_fid contains the slave index file FID. */
1927 #define qb_slv_fid qb_fid
/* qb_usage also carries the current qunit (in kbytes/inodes) when the
 * quota_body is used in a reply: */
1930 #define qb_qunit qb_usage
1932 #define QUOTA_DQACQ_FL_ACQ 0x1 /* acquire quota */
1933 #define QUOTA_DQACQ_FL_PREACQ 0x2 /* pre-acquire */
1934 #define QUOTA_DQACQ_FL_REL 0x4 /* release quota */
1935 #define QUOTA_DQACQ_FL_REPORT 0x8 /* report usage */
1937 extern void lustre_swab_quota_body(struct quota_body *b);
1939 /* Quota types currently supported */
1941 LQUOTA_TYPE_USR = 0x00, /* maps to USRQUOTA */
1942 LQUOTA_TYPE_GRP = 0x01, /* maps to GRPQUOTA */
1946 /* There are 2 different resource types on which a quota limit can be enforced:
1947 * - inodes on the MDTs
1948 * - blocks on the OSTs */
1950 LQUOTA_RES_MD = 0x01, /* skip 0 to avoid null oid in FID */
1951 LQUOTA_RES_DT = 0x02,
1953 LQUOTA_FIRST_RES = LQUOTA_RES_MD
1955 #define LQUOTA_NR_RES (LQUOTA_LAST_RES - LQUOTA_FIRST_RES + 1)
1958 * Space accounting support
 * Format of an accounting record, providing disk usage information for a given
 * user or group. */
1962 struct lquota_acct_rec { /* 16 bytes */
1963 __u64 bspace; /* current space in use */
1964 __u64 ispace; /* current # inodes in use */
1968 * Global quota index support
 * Format of a global record, providing global quota settings for a given quota
 * type. */
1972 struct lquota_glb_rec { /* 32 bytes */
1973 __u64 qbr_hardlimit; /* quota hard limit, in #inodes or kbytes */
1974 __u64 qbr_softlimit; /* quota soft limit, in #inodes or kbytes */
1975 __u64 qbr_time; /* grace time, in seconds */
	__u64 qbr_granted;	/* how much is granted to slaves, in #inodes
				 * or kbytes */
1981 * Slave index support
 * Format of a slave record, recording how much space is granted to a given
 * slave. */
1985 struct lquota_slv_rec { /* 8 bytes */
1986 __u64 qsr_granted; /* space granted to the slave for the key=ID,
1987 * in #inodes or kbytes */
1990 /* Data structures associated with the quota locks */
1992 /* Glimpse descriptor used for the index & per-ID quota locks */
1993 struct ldlm_gl_lquota_desc {
1994 union lquota_id gl_id; /* quota ID subject to the glimpse */
1995 __u64 gl_flags; /* see LQUOTA_FL* below */
1996 __u64 gl_ver; /* new index version */
1997 __u64 gl_hardlimit; /* new hardlimit or qunit value */
1998 __u64 gl_softlimit; /* new softlimit */
2002 #define gl_qunit gl_hardlimit /* current qunit value used when
2003 * glimpsing per-ID quota locks */
2005 /* quota glimpse flags */
2006 #define LQUOTA_FL_EDQUOT 0x1 /* user/group out of quota space on QMT */
2008 /* LVB used with quota (global and per-ID) locks */
2010 __u64 lvb_flags; /* see LQUOTA_FL* above */
2011 __u64 lvb_id_may_rel; /* space that might be released later */
2012 __u64 lvb_id_rel; /* space released by the slave for this ID */
2013 __u64 lvb_id_qunit; /* current qunit value */
2017 extern void lustre_swab_lquota_lvb(struct lquota_lvb *lvb);
2019 /* LVB used with global quota lock */
2020 #define lvb_glb_ver lvb_id_may_rel /* current version of the global index */
2028 #define QUOTA_FIRST_OPC QUOTA_DQACQ
2037 MDS_GETATTR_NAME = 34,
2042 MDS_DISCONNECT = 39,
2045 MDS_PIN = 42, /* obsolete, never used in a release */
2046 MDS_UNPIN = 43, /* obsolete, never used in a release */
2048 MDS_DONE_WRITING = 45, /* obsolete since 2.8.0 */
2050 MDS_QUOTACHECK = 47, /* not used since 2.4 */
2053 MDS_SETXATTR = 50, /* obsolete, now it's MDS_REINT op */
2055 MDS_IS_SUBDIR = 52, /* obsolete, never used in a release */
2057 MDS_HSM_STATE_GET = 54,
2058 MDS_HSM_STATE_SET = 55,
2059 MDS_HSM_ACTION = 56,
2060 MDS_HSM_PROGRESS = 57,
2061 MDS_HSM_REQUEST = 58,
2062 MDS_HSM_CT_REGISTER = 59,
2063 MDS_HSM_CT_UNREGISTER = 60,
2064 MDS_SWAP_LAYOUTS = 61,
2068 #define MDS_FIRST_OPC MDS_GETATTR
2071 /* opcodes for object update */
2077 #define OUT_UPDATE_FIRST_OPC OUT_UPDATE
2094 } mds_reint_t, mdt_reint_t;
2096 extern void lustre_swab_generic_32s (__u32 *val);
2098 /* the disposition of the intent outlines what was executed */
2099 #define DISP_IT_EXECD 0x00000001
2100 #define DISP_LOOKUP_EXECD 0x00000002
2101 #define DISP_LOOKUP_NEG 0x00000004
2102 #define DISP_LOOKUP_POS 0x00000008
2103 #define DISP_OPEN_CREATE 0x00000010
2104 #define DISP_OPEN_OPEN 0x00000020
2105 #define DISP_ENQ_COMPLETE 0x00400000 /* obsolete and unused */
2106 #define DISP_ENQ_OPEN_REF 0x00800000
2107 #define DISP_ENQ_CREATE_REF 0x01000000
2108 #define DISP_OPEN_LOCK 0x02000000
2109 #define DISP_OPEN_LEASE 0x04000000
2110 #define DISP_OPEN_STRIPE 0x08000000
2111 #define DISP_OPEN_DENY 0x10000000
2113 /* INODE LOCK PARTS */
2114 #define MDS_INODELOCK_LOOKUP 0x000001 /* For namespace, dentry etc, and also
2115 * was used to protect permission (mode,
2116 * owner, group etc) before 2.4. */
2117 #define MDS_INODELOCK_UPDATE 0x000002 /* size, links, timestamps */
2118 #define MDS_INODELOCK_OPEN 0x000004 /* For opened files */
2119 #define MDS_INODELOCK_LAYOUT 0x000008 /* for layout */
/* The PERM bit was added in 2.4 and is used to protect permissions (mode,
 * owner, group, ACL, etc.), i.e. to separate the permission bits from the
 * LOOKUP lock, because for remote directories (in DNE) these locks are
 * granted by different MDTs (different ldlm namespaces).
 *
 * For a local directory, the MDT always grants UPDATE_LOCK|PERM_LOCK together.
 * For a remote directory, the master MDT, where the remote directory is, will
 * grant UPDATE_LOCK|PERM_LOCK, and the remote MDT, where the name entry is,
 * will grant LOOKUP_LOCK. */
2130 #define MDS_INODELOCK_PERM 0x000010
2131 #define MDS_INODELOCK_XATTR 0x000020 /* extended attributes */
2133 #define MDS_INODELOCK_MAXSHIFT 5
2134 /* This FULL lock is useful to take on unlink sort of operations */
2135 #define MDS_INODELOCK_FULL ((1<<(MDS_INODELOCK_MAXSHIFT+1))-1)
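/*
 * Illustrative note: with MDS_INODELOCK_MAXSHIFT == 5 the FULL mask covers
 * all six bits defined above:
 *
 *	MDS_INODELOCK_FULL == (1 << 6) - 1 == 0x3f
 *		== LOOKUP | UPDATE | OPEN | LAYOUT | PERM | XATTR
 */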
2137 extern void lustre_swab_ll_fid (struct ll_fid *fid);
2139 /* NOTE: until Lustre 1.8.7/2.1.1 the fid_ver() was packed into name[2],
2140 * but was moved into name[1] along with the OID to avoid consuming the
2141 * name[2,3] fields that need to be used for the quota id (also a FID). */
2143 LUSTRE_RES_ID_SEQ_OFF = 0,
2144 LUSTRE_RES_ID_VER_OID_OFF = 1,
2145 LUSTRE_RES_ID_WAS_VER_OFF = 2, /* see note above */
2146 LUSTRE_RES_ID_QUOTA_SEQ_OFF = 2,
2147 LUSTRE_RES_ID_QUOTA_VER_OID_OFF = 3,
2148 LUSTRE_RES_ID_HSH_OFF = 3
2151 #define MDS_STATUS_CONN 1
2152 #define MDS_STATUS_LOV 2
2154 #define LUSTRE_BFLAG_UNCOMMITTED_WRITES 0x1
/* these should be identical to their EXT4_*_FL counterparts; they are
2157 * redefined here only to avoid dragging in fs/ext4/ext4.h */
2158 #define LUSTRE_SYNC_FL 0x00000008 /* Synchronous updates */
2159 #define LUSTRE_IMMUTABLE_FL 0x00000010 /* Immutable file */
2160 #define LUSTRE_APPEND_FL 0x00000020 /* writes to file may only append */
2161 #define LUSTRE_NOATIME_FL 0x00000080 /* do not update atime */
2162 #define LUSTRE_DIRSYNC_FL 0x00010000 /* dirsync behaviour (dir only) */
2165 /* Convert wire LUSTRE_*_FL to corresponding client local VFS S_* values
2166 * for the client inode i_flags. The LUSTRE_*_FL are the Lustre wire
2167 * protocol equivalents of LDISKFS_*_FL values stored on disk, while
2168 * the S_* flags are kernel-internal values that change between kernel
2169 * versions. These flags are set/cleared via FSFILT_IOC_{GET,SET}_FLAGS.
2170 * See b=16526 for a full history. */
static inline int ll_ext_to_inode_flags(int flags)
{
	return (((flags & LUSTRE_SYNC_FL) ? S_SYNC : 0) |
		((flags & LUSTRE_NOATIME_FL) ? S_NOATIME : 0) |
		((flags & LUSTRE_APPEND_FL) ? S_APPEND : 0) |
#if defined(S_DIRSYNC)
		((flags & LUSTRE_DIRSYNC_FL) ? S_DIRSYNC : 0) |
#endif
		((flags & LUSTRE_IMMUTABLE_FL) ? S_IMMUTABLE : 0));
}
static inline int ll_inode_to_ext_flags(int iflags)
{
	return (((iflags & S_SYNC) ? LUSTRE_SYNC_FL : 0) |
		((iflags & S_NOATIME) ? LUSTRE_NOATIME_FL : 0) |
		((iflags & S_APPEND) ? LUSTRE_APPEND_FL : 0) |
#if defined(S_DIRSYNC)
		((iflags & S_DIRSYNC) ? LUSTRE_DIRSYNC_FL : 0) |
#endif
		((iflags & S_IMMUTABLE) ? LUSTRE_IMMUTABLE_FL : 0));
}
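/*
 * Usage sketch (illustrative only): converting wire flags to local VFS flags
 * and back; on kernels without S_DIRSYNC that bit is simply dropped.
 *
 *	int vfs_flags = ll_ext_to_inode_flags(LUSTRE_APPEND_FL |
 *					      LUSTRE_NOATIME_FL);
 *	// vfs_flags == (S_APPEND | S_NOATIME)
 *	// ll_inode_to_ext_flags(vfs_flags) ==
 *	//	(LUSTRE_APPEND_FL | LUSTRE_NOATIME_FL)
 */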
2194 /* 64 possible states */
2195 enum md_transient_state {
2196 MS_RESTORE = (1 << 0), /* restore is running */
2200 struct lu_fid mbo_fid1;
2201 struct lu_fid mbo_fid2;
2202 struct lustre_handle mbo_handle;
2204 __u64 mbo_size; /* Offset, in the case of MDS_READPAGE */
2208 __u64 mbo_blocks; /* XID, in the case of MDS_READPAGE */
2210 __u64 mbo_t_state; /* transient file state defined in
2211 * enum md_transient_state
2212 * was "ino" until 2.4.0 */
2215 __u32 mbo_capability;
2221 __u32 mbo_nlink; /* #bytes to read in the case of MDS_READPAGE */
2222 __u32 mbo_unused2; /* was "generation" until 2.4.0 */
2224 __u32 mbo_eadatasize;
2226 __u32 mbo_max_mdsize;
2227 __u32 mbo_unused3; /* was max_cookiesize until 2.8 */
2228 __u32 mbo_uid_h; /* high 32-bits of uid, for FUID */
2229 __u32 mbo_gid_h; /* high 32-bits of gid, for FUID */
2230 __u32 mbo_padding_5; /* also fix lustre_swab_mdt_body */
2231 __u64 mbo_padding_6;
2232 __u64 mbo_padding_7;
2233 __u64 mbo_padding_8;
2234 __u64 mbo_padding_9;
2235 __u64 mbo_padding_10;
2238 extern void lustre_swab_mdt_body (struct mdt_body *b);
2240 struct mdt_ioepoch {
2241 struct lustre_handle mio_handle;
2242 __u64 mio_unused1; /* was ioepoch */
2243 __u32 mio_unused2; /* was flags */
2247 extern void lustre_swab_mdt_ioepoch (struct mdt_ioepoch *b);
2249 /* permissions for md_perm.mp_perm */
2251 CFS_SETUID_PERM = 0x01,
2252 CFS_SETGID_PERM = 0x02,
2253 CFS_SETGRP_PERM = 0x04,
2254 CFS_RMTACL_PERM = 0x08,
2255 CFS_RMTOWN_PERM = 0x10
/* inode access permission for a remote user; the inode info is omitted
 * because the client already knows it. */
2260 struct mdt_remote_perm {
2267 __u32 rp_access_perm; /* MAY_READ/WRITE/EXEC */
2271 extern void lustre_swab_mdt_remote_perm(struct mdt_remote_perm *p);
2273 struct mdt_rec_setattr {
2283 __u32 sa_padding_1_h;
2284 struct lu_fid sa_fid;
2293 __u32 sa_attr_flags;
2295 __u32 sa_bias; /* some operation flags */
2301 extern void lustre_swab_mdt_rec_setattr (struct mdt_rec_setattr *sa);
2304 * Attribute flags used in mdt_rec_setattr::sa_valid.
2305 * The kernel's #defines for ATTR_* should not be used over the network
2306 * since the client and MDS may run different kernels (see bug 13828)
2307 * Therefore, we should only use MDS_ATTR_* attributes for sa_valid.
2309 #define MDS_ATTR_MODE 0x1ULL /* = 1 */
2310 #define MDS_ATTR_UID 0x2ULL /* = 2 */
2311 #define MDS_ATTR_GID 0x4ULL /* = 4 */
2312 #define MDS_ATTR_SIZE 0x8ULL /* = 8 */
2313 #define MDS_ATTR_ATIME 0x10ULL /* = 16 */
2314 #define MDS_ATTR_MTIME 0x20ULL /* = 32 */
2315 #define MDS_ATTR_CTIME 0x40ULL /* = 64 */
2316 #define MDS_ATTR_ATIME_SET 0x80ULL /* = 128 */
2317 #define MDS_ATTR_MTIME_SET 0x100ULL /* = 256 */
#define MDS_ATTR_FORCE		0x200ULL /* = 512, not a change, but force the change to be applied */
2319 #define MDS_ATTR_ATTR_FLAG 0x400ULL /* = 1024 */
2320 #define MDS_ATTR_KILL_SUID 0x800ULL /* = 2048 */
2321 #define MDS_ATTR_KILL_SGID 0x1000ULL /* = 4096 */
2322 #define MDS_ATTR_CTIME_SET 0x2000ULL /* = 8192 */
2323 #define MDS_ATTR_FROM_OPEN 0x4000ULL /* = 16384, called from open path, ie O_TRUNC */
2324 #define MDS_ATTR_BLOCKS 0x8000ULL /* = 32768 */
2327 #define FMODE_READ 00000001
2328 #define FMODE_WRITE 00000002
2331 #define MDS_FMODE_CLOSED 00000000
2332 #define MDS_FMODE_EXEC 00000004
2333 /* MDS_FMODE_EPOCH 01000000 obsolete since 2.8.0 */
2334 /* MDS_FMODE_TRUNC 02000000 obsolete since 2.8.0 */
2335 /* MDS_FMODE_SOM 04000000 obsolete since 2.8.0 */
2337 #define MDS_OPEN_CREATED 00000010
2338 #define MDS_OPEN_CROSS 00000020
2340 #define MDS_OPEN_CREAT 00000100
2341 #define MDS_OPEN_EXCL 00000200
2342 #define MDS_OPEN_TRUNC 00001000
2343 #define MDS_OPEN_APPEND 00002000
2344 #define MDS_OPEN_SYNC 00010000
2345 #define MDS_OPEN_DIRECTORY 00200000
2347 #define MDS_OPEN_BY_FID 040000000 /* open_by_fid for known object */
2348 #define MDS_OPEN_DELAY_CREATE 0100000000 /* delay initial object create */
2349 #define MDS_OPEN_OWNEROVERRIDE 0200000000 /* NFSD rw-reopen ro file for owner */
#define MDS_OPEN_JOIN_FILE    0400000000 /* open for join file.
					  * We do not support JOIN FILE
					  * anymore; this flag is reserved
					  * only to keep the bit from being
					  * reused. */
2356 #define MDS_OPEN_LOCK 04000000000 /* This open requires open lock */
2357 #define MDS_OPEN_HAS_EA 010000000000 /* specify object create pattern */
#define MDS_OPEN_HAS_OBJS  020000000000 /* Just set the EA; the objects already exist */
2359 #define MDS_OPEN_NORESTORE 0100000000000ULL /* Do not restore file at open */
#define MDS_OPEN_NEWSTRIPE 0200000000000ULL /* New stripe needed (restripe or HSM restore) */
#define MDS_OPEN_VOLATILE  0400000000000ULL /* File is volatile = created unlinked */
#define MDS_OPEN_LEASE	   01000000000000ULL /* Open the file and grant lease
					      * delegation; succeed if it is
					      * not being opened with a
					      * conflicting mode. */
2368 #define MDS_OPEN_RELEASE 02000000000000ULL /* Open the file for HSM release */
2370 /* lustre internal open flags, which should not be set from user space */
2371 #define MDS_OPEN_FL_INTERNAL (MDS_OPEN_HAS_EA | MDS_OPEN_HAS_OBJS | \
2372 MDS_OPEN_OWNEROVERRIDE | MDS_OPEN_LOCK | \
2373 MDS_OPEN_BY_FID | MDS_OPEN_LEASE | \
2377 MDS_CHECK_SPLIT = 1 << 0,
2378 MDS_CROSS_REF = 1 << 1,
2379 MDS_VTX_BYPASS = 1 << 2,
2380 MDS_PERM_BYPASS = 1 << 3,
2381 /* MDS_SOM = 1 << 4, obsolete since 2.8.0 */
2382 MDS_QUOTA_IGNORE = 1 << 5,
	/* Was MDS_CLOSE_CLEANUP (1 << 6), no longer used */
2384 MDS_KEEP_ORPHAN = 1 << 7,
2385 MDS_RECOV_OPEN = 1 << 8,
2386 MDS_DATA_MODIFIED = 1 << 9,
2387 MDS_CREATE_VOLATILE = 1 << 10,
2388 MDS_OWNEROVERRIDE = 1 << 11,
2389 MDS_HSM_RELEASE = 1 << 12,
2390 MDS_RENAME_MIGRATE = 1 << 13,
2391 MDS_CLOSE_LAYOUT_SWAP = 1 << 14,
2394 /* instance of mdt_reint_rec */
2395 struct mdt_rec_create {
2403 __u32 cr_suppgid1_h;
2405 __u32 cr_suppgid2_h;
2406 struct lu_fid cr_fid1;
2407 struct lu_fid cr_fid2;
2408 struct lustre_handle cr_old_handle; /* handle in case of open replay */
2412 __u64 cr_padding_1; /* rr_blocks */
	/* The set/get_mrc_cr_flags() helpers must be used to access the
	 * 64-bit cr_flags value [cr_flags_l, cr_flags_h]; the split allows
	 * extending cr_flags without breaking 1.8 compatibility. */
2418 __u32 cr_flags_l; /* for use with open, low 32 bits */
2419 __u32 cr_flags_h; /* for use with open, high 32 bits */
2420 __u32 cr_umask; /* umask for create */
2421 __u32 cr_padding_4; /* rr_padding_4 */
static inline void set_mrc_cr_flags(struct mdt_rec_create *mrc, __u64 flags)
{
	mrc->cr_flags_l = (__u32)(flags & 0xFFFFFFFFUll);
	mrc->cr_flags_h = (__u32)(flags >> 32);
}
static inline __u64 get_mrc_cr_flags(struct mdt_rec_create *mrc)
{
	return ((__u64)(mrc->cr_flags_l) | ((__u64)mrc->cr_flags_h << 32));
}
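/*
 * Usage sketch (illustrative only; "mrc" is a hypothetical local): 64-bit
 * open flags survive the split across cr_flags_l/cr_flags_h unchanged.
 *
 *	struct mdt_rec_create mrc = { 0 };
 *
 *	set_mrc_cr_flags(&mrc, MDS_OPEN_LOCK | MDS_OPEN_VOLATILE);
 *	// mrc.cr_flags_h == 0x8, mrc.cr_flags_l == 0x20000000
 *	// get_mrc_cr_flags(&mrc) == (MDS_OPEN_LOCK | MDS_OPEN_VOLATILE)
 */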
2435 /* instance of mdt_reint_rec */
2436 struct mdt_rec_link {
2444 __u32 lk_suppgid1_h;
2446 __u32 lk_suppgid2_h;
2447 struct lu_fid lk_fid1;
2448 struct lu_fid lk_fid2;
2450 __u64 lk_padding_1; /* rr_atime */
2451 __u64 lk_padding_2; /* rr_ctime */
2452 __u64 lk_padding_3; /* rr_size */
2453 __u64 lk_padding_4; /* rr_blocks */
2455 __u32 lk_padding_5; /* rr_mode */
2456 __u32 lk_padding_6; /* rr_flags */
2457 __u32 lk_padding_7; /* rr_padding_2 */
2458 __u32 lk_padding_8; /* rr_padding_3 */
2459 __u32 lk_padding_9; /* rr_padding_4 */
2462 /* instance of mdt_reint_rec */
2463 struct mdt_rec_unlink {
2471 __u32 ul_suppgid1_h;
2473 __u32 ul_suppgid2_h;
2474 struct lu_fid ul_fid1;
2475 struct lu_fid ul_fid2;
2477 __u64 ul_padding_2; /* rr_atime */
2478 __u64 ul_padding_3; /* rr_ctime */
2479 __u64 ul_padding_4; /* rr_size */
2480 __u64 ul_padding_5; /* rr_blocks */
2483 __u32 ul_padding_6; /* rr_flags */
2484 __u32 ul_padding_7; /* rr_padding_2 */
2485 __u32 ul_padding_8; /* rr_padding_3 */
2486 __u32 ul_padding_9; /* rr_padding_4 */
2489 /* instance of mdt_reint_rec */
2490 struct mdt_rec_rename {
2498 __u32 rn_suppgid1_h;
2500 __u32 rn_suppgid2_h;
2501 struct lu_fid rn_fid1;
2502 struct lu_fid rn_fid2;
2504 __u64 rn_padding_1; /* rr_atime */
2505 __u64 rn_padding_2; /* rr_ctime */
2506 __u64 rn_padding_3; /* rr_size */
2507 __u64 rn_padding_4; /* rr_blocks */
2508 __u32 rn_bias; /* some operation flags */
2509 __u32 rn_mode; /* cross-ref rename has mode */
2510 __u32 rn_padding_5; /* rr_flags */
2511 __u32 rn_padding_6; /* rr_padding_2 */
2512 __u32 rn_padding_7; /* rr_padding_3 */
2513 __u32 rn_padding_8; /* rr_padding_4 */
2516 /* instance of mdt_reint_rec */
2517 struct mdt_rec_setxattr {
2525 __u32 sx_suppgid1_h;
2527 __u32 sx_suppgid2_h;
2528 struct lu_fid sx_fid;
2529 __u64 sx_padding_1; /* These three are rr_fid2 */
2534 __u64 sx_padding_5; /* rr_ctime */
2535 __u64 sx_padding_6; /* rr_size */
2536 __u64 sx_padding_7; /* rr_blocks */
2539 __u32 sx_padding_8; /* rr_flags */
2540 __u32 sx_padding_9; /* rr_padding_2 */
2541 __u32 sx_padding_10; /* rr_padding_3 */
2542 __u32 sx_padding_11; /* rr_padding_4 */
2546 * mdt_rec_reint is the template for all mdt_reint_xxx structures.
2547 * Do NOT change the size of various members, otherwise the value
2548 * will be broken in lustre_swab_mdt_rec_reint().
 * If you add new members in other mdt_reint_xxx structures and need to use the
2551 * rr_padding_x fields, then update lustre_swab_mdt_rec_reint() also.
2553 struct mdt_rec_reint {
2561 __u32 rr_suppgid1_h;
2563 __u32 rr_suppgid2_h;
2564 struct lu_fid rr_fid1;
2565 struct lu_fid rr_fid2;
2576 __u32 rr_padding_4; /* also fix lustre_swab_mdt_rec_reint */
2579 extern void lustre_swab_mdt_rec_reint(struct mdt_rec_reint *rr);
2581 /* lmv structures */
2583 __u32 ld_tgt_count; /* how many MDS's */
2584 __u32 ld_active_tgt_count; /* how many active */
2585 __u32 ld_default_stripe_count; /* how many objects are used */
2586 __u32 ld_pattern; /* default hash pattern */
2587 __u64 ld_default_hash_size;
2588 __u64 ld_padding_1; /* also fix lustre_swab_lmv_desc */
2589 __u32 ld_padding_2; /* also fix lustre_swab_lmv_desc */
	__u32 ld_qos_maxage;		/* in seconds */
2591 __u32 ld_padding_3; /* also fix lustre_swab_lmv_desc */
2592 __u32 ld_padding_4; /* also fix lustre_swab_lmv_desc */
2593 struct obd_uuid ld_uuid;
2596 extern void lustre_swab_lmv_desc (struct lmv_desc *ld);
/* LMV layout EA; it is stored in both the master and the slave objects */
2599 struct lmv_mds_md_v1 {
2601 __u32 lmv_stripe_count;
2602 __u32 lmv_master_mdt_index; /* On master object, it is master
2603 * MDT index, on slave object, it
2604 * is stripe index of the slave obj */
	__u32 lmv_hash_type;		/* dir stripe policy, i.e. which
					 * hash function is used.
					 * Note: only the lower 16 bits are
					 * used for now; the higher 16 bits
					 * will be used to mark the object
					 * status, e.g. migrating or dead. */
2611 __u32 lmv_layout_version; /* Used for directory restriping */
2615 char lmv_pool_name[LOV_MAXPOOLNAME + 1]; /* pool name */
2616 struct lu_fid lmv_stripe_fids[0]; /* FIDs for each stripe */
2619 #define LMV_MAGIC_V1 0x0CD20CD0 /* normal stripe lmv magic */
2620 #define LMV_MAGIC LMV_MAGIC_V1
2622 /* #define LMV_USER_MAGIC 0x0CD30CD0 */
2623 #define LMV_MAGIC_STRIPE 0x0CD40CD0 /* magic for dir sub_stripe */
/* Right now only the lower part (bits 0-15) of lmv_hash_type is being used,
 * and the higher part holds flags that indicate the status of the object,
 * for example that the object is being migrated. The hash function
 * might be interpreted differently depending on those flags. */
2629 #define LMV_HASH_TYPE_MASK 0x0000ffff
2631 #define LMV_HASH_FLAG_MIGRATION 0x80000000
2632 #define LMV_HASH_FLAG_DEAD 0x40000000
2633 #define LMV_HASH_FLAG_BAD_TYPE 0x20000000
/* The striped directory may have lost its master LMV EA and had it
 * re-generated by LFSCK; this flag is used to indicate such a case. */
#define LMV_HASH_FLAG_LOST_LMV	0x10000000
2641 * The FNV-1a hash algorithm is as follows:
2642 * hash = FNV_offset_basis
2643 * for each octet_of_data to be hashed
2644 * hash = hash XOR octet_of_data
2645 * hash = hash × FNV_prime
2647 * http://en.wikipedia.org/wiki/Fowler–Noll–Vo_hash_function#FNV-1a_hash
2649 * http://www.isthe.com/chongo/tech/comp/fnv/index.html#FNV-reference-source
2650 * FNV_prime is 2^40 + 2^8 + 0xb3 = 0x100000001b3ULL
2652 #define LUSTRE_FNV_1A_64_PRIME 0x100000001b3ULL
2653 #define LUSTRE_FNV_1A_64_OFFSET_BIAS 0xcbf29ce484222325ULL
static inline __u64 lustre_hash_fnv_1a_64(const void *buf, size_t size)
{
	__u64 hash = LUSTRE_FNV_1A_64_OFFSET_BIAS;
	const unsigned char *p = buf;
	size_t i;

	for (i = 0; i < size; i++) {
		hash ^= p[i];
		hash *= LUSTRE_FNV_1A_64_PRIME;
	}
	return hash;
}
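/*
 * Usage sketch (illustrative only): hashing a name and reducing it to a
 * stripe index. The modulo step below is just one plausible way a caller
 * could consume the hash; it is not a statement of the LMV striping policy,
 * and "stripe_count" is a hypothetical variable.
 *
 *	__u64 hash = lustre_hash_fnv_1a_64("README", 6);
 *	__u32 stripe = (__u32)(hash % stripe_count);
 */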
union lmv_mds_md {
	__u32			lmv_magic;
	struct lmv_mds_md_v1	lmv_md_v1;
	struct lmv_user_md	lmv_user_md;
};
2674 extern void lustre_swab_lmv_mds_md(union lmv_mds_md *lmm);
2676 static inline int lmv_mds_md_size(int stripe_count, unsigned int lmm_magic)
2678 switch (lmm_magic) {
2680 struct lmv_mds_md_v1 *lmm1;
2682 return sizeof(*lmm1) + stripe_count *
2683 sizeof(lmm1->lmv_stripe_fids[0]);
2690 static inline int lmv_mds_md_stripe_count_get(const union lmv_mds_md *lmm)
2692 switch (le32_to_cpu(lmm->lmv_magic)) {
2694 return le32_to_cpu(lmm->lmv_md_v1.lmv_stripe_count);
2695 case LMV_USER_MAGIC:
2696 return le32_to_cpu(lmm->lmv_user_md.lum_stripe_count);
2702 static inline int lmv_mds_md_stripe_count_set(union lmv_mds_md *lmm,
2703 unsigned int stripe_count)
2705 switch (le32_to_cpu(lmm->lmv_magic)) {
2707 lmm->lmv_md_v1.lmv_stripe_count = cpu_to_le32(stripe_count);
2709 case LMV_USER_MAGIC:
2710 lmm->lmv_user_md.lum_stripe_count = cpu_to_le32(stripe_count);
2722 FLD_FIRST_OPC = FLD_QUERY
2728 SEQ_FIRST_OPC = SEQ_QUERY
2732 SEQ_ALLOC_SUPER = 0,
2744 LFSCK_NOTIFY = 1101,
2747 LFSCK_FIRST_OPC = LFSCK_NOTIFY
2751 * LOV data structures
2754 #define LOV_MAX_UUID_BUFFER_SIZE 8192
2755 /* The size of the buffer the lov/mdc reserves for the
2756 * array of UUIDs returned by the MDS. With the current
2757 * protocol, this will limit the max number of OSTs per LOV */
2759 #define LOV_DESC_MAGIC 0xB0CCDE5C
2760 #define LOV_DESC_QOS_MAXAGE_DEFAULT 5 /* Seconds */
2761 #define LOV_DESC_STRIPE_SIZE_DEFAULT (1 << LNET_MTU_BITS)
2763 /* LOV settings descriptor (should only contain static info) */
2765 __u32 ld_tgt_count; /* how many OBD's */
2766 __u32 ld_active_tgt_count; /* how many active */
2767 __u32 ld_default_stripe_count; /* how many objects are used */
2768 __u32 ld_pattern; /* default PATTERN_RAID0 */
2769 __u64 ld_default_stripe_size; /* in bytes */
2770 __u64 ld_default_stripe_offset; /* in bytes */
2771 __u32 ld_padding_0; /* unused */
	__u32 ld_qos_maxage;		/* in seconds */
2773 __u32 ld_padding_1; /* also fix lustre_swab_lov_desc */
2774 __u32 ld_padding_2; /* also fix lustre_swab_lov_desc */
2775 struct obd_uuid ld_uuid;
2778 #define ld_magic ld_active_tgt_count /* for swabbing from llogs */
2780 extern void lustre_swab_lov_desc (struct lov_desc *ld);
2785 /* opcodes -- MUST be distinct from OST/MDS opcodes */
2790 LDLM_BL_CALLBACK = 104,
2791 LDLM_CP_CALLBACK = 105,
2792 LDLM_GL_CALLBACK = 106,
2793 LDLM_SET_INFO = 107,
2796 #define LDLM_FIRST_OPC LDLM_ENQUEUE
2798 #define RES_NAME_SIZE 4
2799 struct ldlm_res_id {
2800 __u64 name[RES_NAME_SIZE];
2803 #define DLDLMRES "[%#llx:%#llx:%#llx].%#llx"
2804 #define PLDLMRES(res) (unsigned long long)(res)->lr_name.name[0], \
2805 (unsigned long long)(res)->lr_name.name[1], \
2806 (unsigned long long)(res)->lr_name.name[2], \
2807 (unsigned long long)(res)->lr_name.name[3]
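/*
 * Usage sketch (illustrative only; "res" is a hypothetical pointer to an
 * object with an lr_name member, which is what PLDLMRES() expects, and
 * CDEBUG()/D_DLMTRACE come from libcfs and are assumed to be available):
 *
 *	CDEBUG(D_DLMTRACE, "resource " DLDLMRES "\n", PLDLMRES(res));
 */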
2809 extern void lustre_swab_ldlm_res_id (struct ldlm_res_id *id);
static inline bool ldlm_res_eq(const struct ldlm_res_id *res0,
			       const struct ldlm_res_id *res1)
{
	return memcmp(res0, res1, sizeof(*res0)) == 0;
}
2818 typedef enum ldlm_mode {
2831 #define LCK_MODE_NUM 8
2833 typedef enum ldlm_type {
2841 #define LDLM_MIN_TYPE LDLM_PLAIN
2843 struct ldlm_extent {
static inline int ldlm_extent_overlap(const struct ldlm_extent *ex1,
				      const struct ldlm_extent *ex2)
{
	return ex1->start <= ex2->end && ex2->start <= ex1->end;
}
2855 /* check if @ex1 contains @ex2 */
static inline int ldlm_extent_contain(const struct ldlm_extent *ex1,
				      const struct ldlm_extent *ex2)
{
	return ex1->start <= ex2->start && ex1->end >= ex2->end;
}
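/*
 * Worked example (illustrative only): for extents a = [0, 4095] and
 * b = [1024, 8191]:
 *
 *	ldlm_extent_overlap(&a, &b) == 1	(they share [1024, 4095])
 *	ldlm_extent_contain(&a, &b) == 0	(a does not reach b's end)
 */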
2862 struct ldlm_inodebits {
2866 struct ldlm_flock_wire {
2874 /* it's important that the fields of the ldlm_extent structure match
2875 * the first fields of the ldlm_flock structure because there is only
2876 * one ldlm_swab routine to process the ldlm_policy_data_t union. if
2877 * this ever changes we will need to swab the union differently based
2878 * on the resource type. */
2880 typedef union ldlm_wire_policy_data {
2881 struct ldlm_extent l_extent;
2882 struct ldlm_flock_wire l_flock;
2883 struct ldlm_inodebits l_inodebits;
2884 } ldlm_wire_policy_data_t;
2886 extern void lustre_swab_ldlm_policy_data(union ldlm_wire_policy_data *d);
2888 union ldlm_gl_desc {
2889 struct ldlm_gl_lquota_desc lquota_desc;
2892 extern void lustre_swab_gl_desc(union ldlm_gl_desc *);
2894 enum ldlm_intent_flags {
2895 IT_OPEN = 0x00000001,
2896 IT_CREAT = 0x00000002,
2897 IT_OPEN_CREAT = 0x00000003,
2898 IT_READDIR = 0x00000004,
2899 IT_GETATTR = 0x00000008,
2900 IT_LOOKUP = 0x00000010,
2901 IT_UNLINK = 0x00000020,
2902 IT_TRUNC = 0x00000040,
2903 IT_GETXATTR = 0x00000080,
2904 IT_EXEC = 0x00000100,
2905 IT_PIN = 0x00000200,
2906 IT_LAYOUT = 0x00000400,
2907 IT_QUOTA_DQACQ = 0x00000800,
2908 IT_QUOTA_CONN = 0x00001000,
2909 IT_SETXATTR = 0x00002000,
2912 struct ldlm_intent {
2916 extern void lustre_swab_ldlm_intent (struct ldlm_intent *i);
2918 struct ldlm_resource_desc {
2919 enum ldlm_type lr_type;
2920 __u32 lr_pad; /* also fix lustre_swab_ldlm_resource_desc */
2921 struct ldlm_res_id lr_name;
2924 extern void lustre_swab_ldlm_resource_desc (struct ldlm_resource_desc *r);
2926 struct ldlm_lock_desc {
2927 struct ldlm_resource_desc l_resource;
2928 enum ldlm_mode l_req_mode;
2929 enum ldlm_mode l_granted_mode;
2930 union ldlm_wire_policy_data l_policy_data;
2933 extern void lustre_swab_ldlm_lock_desc(struct ldlm_lock_desc *l);
2935 #define LDLM_LOCKREQ_HANDLES 2
2936 #define LDLM_ENQUEUE_CANCEL_OFF 1
2938 struct ldlm_request {
2941 struct ldlm_lock_desc lock_desc;
2942 struct lustre_handle lock_handle[LDLM_LOCKREQ_HANDLES];
2945 extern void lustre_swab_ldlm_request (struct ldlm_request *rq);
2947 /* If LDLM_ENQUEUE, 1 slot is already occupied, 1 is available.
2948 * Otherwise, 2 are available. */
#define ldlm_request_bufsize(count, type)				\
({									\
	int _avail = LDLM_LOCKREQ_HANDLES;				\
	_avail -= (type == LDLM_ENQUEUE ? LDLM_ENQUEUE_CANCEL_OFF : 0);	\
	sizeof(struct ldlm_request) +					\
	(count > _avail ? count - _avail : 0) *				\
	sizeof(struct lustre_handle);					\
})
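/*
 * Worked example (illustrative only): cancelling 5 locks inside an
 * LDLM_ENQUEUE request leaves only 1 of the 2 embedded handle slots free,
 * so 4 extra lustre_handle slots are appended:
 *
 *	ldlm_request_bufsize(5, LDLM_ENQUEUE)
 *		== sizeof(struct ldlm_request) +
 *		   4 * sizeof(struct lustre_handle)
 */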
2960 __u32 lock_padding; /* also fix lustre_swab_ldlm_reply */
2961 struct ldlm_lock_desc lock_desc;
2962 struct lustre_handle lock_handle;
2963 __u64 lock_policy_res1;
2964 __u64 lock_policy_res2;
2967 extern void lustre_swab_ldlm_reply (struct ldlm_reply *r);
2969 #define ldlm_flags_to_wire(flags) ((__u32)(flags))
2970 #define ldlm_flags_from_wire(flags) ((__u64)(flags))
2973 * Opcodes for mountconf (mgs and mgc)
2978 MGS_EXCEPTION, /* node died, etc. */
2979 MGS_TARGET_REG, /* whenever target starts up */
2985 #define MGS_FIRST_OPC MGS_CONNECT
2987 #define MGS_PARAM_MAXLEN 1024
2988 #define KEY_SET_INFO "set_info"
2990 struct mgs_send_param {
2991 char mgs_param[MGS_PARAM_MAXLEN];
2994 /* We pass this info to the MGS so it can write config logs */
2995 #define MTI_NAME_MAXLEN 64
2996 #define MTI_PARAM_MAXLEN 4096
2997 #define MTI_NIDS_MAX 32
2998 struct mgs_target_info {
2999 __u32 mti_lustre_ver;
3000 __u32 mti_stripe_index;
3001 __u32 mti_config_ver;
3003 __u32 mti_nid_count;
3004 __u32 mti_instance; /* Running instance of target */
3005 char mti_fsname[MTI_NAME_MAXLEN];
3006 char mti_svname[MTI_NAME_MAXLEN];
3007 char mti_uuid[sizeof(struct obd_uuid)];
3008 __u64 mti_nids[MTI_NIDS_MAX]; /* host nids (lnet_nid_t)*/
3009 char mti_params[MTI_PARAM_MAXLEN];
3011 extern void lustre_swab_mgs_target_info(struct mgs_target_info *oinfo);
3013 struct mgs_nidtbl_entry {
3014 __u64 mne_version; /* table version of this entry */
3015 __u32 mne_instance; /* target instance # */
3016 __u32 mne_index; /* target index */
	__u32	mne_length;	/* length of this entry, in bytes */
	__u8	mne_type;	/* target type LDD_F_SV_TYPE_OST/MDT */
	__u8	mne_nid_type;	/* type of NID (must be zero), reserved for IPv6 */
	__u8	mne_nid_size;	/* size of each NID, in bytes */
3021 __u8 mne_nid_count; /* # of NIDs in buffer */
3023 lnet_nid_t nids[0]; /* variable size buffer for NIDs. */
3026 extern void lustre_swab_mgs_nidtbl_entry(struct mgs_nidtbl_entry *oinfo);
3028 struct mgs_config_body {
3029 char mcb_name[MTI_NAME_MAXLEN]; /* logname */
3030 __u64 mcb_offset; /* next index of config log to request */
3031 __u16 mcb_type; /* type of log: CONFIG_T_[CONFIG|RECOVER] */
3033 __u8 mcb_bits; /* bits unit size of config log */
3034 __u32 mcb_units; /* # of units for bulk transfer */
3036 extern void lustre_swab_mgs_config_body(struct mgs_config_body *body);
3038 struct mgs_config_res {
3039 __u64 mcr_offset; /* index of last config log */
3040 __u64 mcr_size; /* size of the log */
3042 extern void lustre_swab_mgs_config_res(struct mgs_config_res *body);
3044 /* Config marker flags (in config log) */
3045 #define CM_START 0x01
3047 #define CM_SKIP 0x04
3048 #define CM_UPGRADE146 0x08
3049 #define CM_EXCLUDE 0x10
3050 #define CM_START_SKIP (CM_START | CM_SKIP)
3053 __u32 cm_step; /* aka config version */
3055 __u32 cm_vers; /* lustre release version number */
3056 __u32 cm_padding; /* 64 bit align */
	__s64	cm_createtime;	/* when this record was first created */
	__s64	cm_canceltime;	/* when this record is no longer valid */
3059 char cm_tgtname[MTI_NAME_MAXLEN];
3060 char cm_comment[MTI_NAME_MAXLEN];
3063 extern void lustre_swab_cfg_marker(struct cfg_marker *marker,
3064 int swab, int size);
3067 * Opcodes for multiple servers.
3073 OBD_QC_CALLBACK, /* not used since 2.4 */
3077 #define OBD_FIRST_OPC OBD_PING
3080 * llog contexts indices.
 * There is a compatibility problem with the indexes below: they are not
 * contiguous and must keep their numbers for compatibility reasons.
3084 * See LU-5218 for details.
3087 LLOG_CONFIG_ORIG_CTXT = 0,
3088 LLOG_CONFIG_REPL_CTXT = 1,
3089 LLOG_MDS_OST_ORIG_CTXT = 2,
3090 LLOG_MDS_OST_REPL_CTXT = 3, /* kept just to avoid re-assignment */
3091 LLOG_SIZE_ORIG_CTXT = 4,
3092 LLOG_SIZE_REPL_CTXT = 5,
3093 LLOG_TEST_ORIG_CTXT = 8,
3094 LLOG_TEST_REPL_CTXT = 9, /* kept just to avoid re-assignment */
3095 LLOG_CHANGELOG_ORIG_CTXT = 12, /**< changelog generation on mdd */
3096 LLOG_CHANGELOG_REPL_CTXT = 13, /**< changelog access on clients */
3097 /* for multiple changelog consumers */
3098 LLOG_CHANGELOG_USER_ORIG_CTXT = 14,
3099 LLOG_AGENT_ORIG_CTXT = 15, /**< agent requests generation on cdt */
3100 LLOG_UPDATELOG_ORIG_CTXT = 16, /* update log */
3101 LLOG_UPDATELOG_REPL_CTXT = 17, /* update log */
3105 /** Identifier for a single log object */
3107 struct ost_id lgl_oi;
3109 } __attribute__((packed));
3111 /** Records written to the CATALOGS list */
3112 #define CATLIST "CATALOGS"
3114 struct llog_logid lci_logid;
3118 } __attribute__((packed));
3120 /* Log data record types - there is no specific reason that these need to
3121 * be related to the RPC opcodes, but no reason not to (may be handy later?)
3123 #define LLOG_OP_MAGIC 0x10600000
3124 #define LLOG_OP_MASK 0xfff00000
3127 LLOG_PAD_MAGIC = LLOG_OP_MAGIC | 0x00000,
3128 OST_SZ_REC = LLOG_OP_MAGIC | 0x00f00,
3129 /* OST_RAID1_REC = LLOG_OP_MAGIC | 0x01000, never used */
3130 MDS_UNLINK_REC = LLOG_OP_MAGIC | 0x10000 | (MDS_REINT << 8) |
3131 REINT_UNLINK, /* obsolete after 2.5.0 */
3132 MDS_UNLINK64_REC = LLOG_OP_MAGIC | 0x90000 | (MDS_REINT << 8) |
3134 /* MDS_SETATTR_REC = LLOG_OP_MAGIC | 0x12401, obsolete 1.8.0 */
3135 MDS_SETATTR64_REC = LLOG_OP_MAGIC | 0x90000 | (MDS_REINT << 8) |
3137 OBD_CFG_REC = LLOG_OP_MAGIC | 0x20000,
3138 /* PTL_CFG_REC = LLOG_OP_MAGIC | 0x30000, obsolete 1.4.0 */
3139 LLOG_GEN_REC = LLOG_OP_MAGIC | 0x40000,
3140 /* LLOG_JOIN_REC = LLOG_OP_MAGIC | 0x50000, obsolete 1.8.0 */
3141 CHANGELOG_REC = LLOG_OP_MAGIC | 0x60000,
3142 CHANGELOG_USER_REC = LLOG_OP_MAGIC | 0x70000,
3143 HSM_AGENT_REC = LLOG_OP_MAGIC | 0x80000,
3144 UPDATE_REC = LLOG_OP_MAGIC | 0xa0000,
3145 LLOG_HDR_MAGIC = LLOG_OP_MAGIC | 0x45539,
3146 LLOG_LOGID_MAGIC = LLOG_OP_MAGIC | 0x4553b,
3149 #define LLOG_REC_HDR_NEEDS_SWABBING(r) \
3150 (((r)->lrh_type & __swab32(LLOG_OP_MASK)) == __swab32(LLOG_OP_MAGIC))
3152 /** Log record header - stored in little endian order.
3153 * Each record must start with this struct, end with a llog_rec_tail,
3154 * and be a multiple of 256 bits in size.
3156 struct llog_rec_hdr {
3163 struct llog_rec_tail {
3168 /* Where data follow just after header */
3169 #define REC_DATA(ptr) \
3170 ((void *)((char *)ptr + sizeof(struct llog_rec_hdr)))
3172 #define REC_DATA_LEN(rec) \
3173 (rec->lrh_len - sizeof(struct llog_rec_hdr) - \
3174 sizeof(struct llog_rec_tail))
static inline void *rec_tail(struct llog_rec_hdr *rec)
{
	return (void *)((char *)rec + rec->lrh_len -
			sizeof(struct llog_rec_tail));
}
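/*
 * Usage sketch (illustrative only; "rec" is a hypothetical pointer to a
 * well-formed record): the payload sits between the header and the
 * trailing llog_rec_tail.
 *
 *	void *payload = REC_DATA(rec);
 *	__u32 payload_len = REC_DATA_LEN(rec);
 *	struct llog_rec_tail *tail = rec_tail(rec);
 */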
3182 struct llog_logid_rec {
3183 struct llog_rec_hdr lid_hdr;
3184 struct llog_logid lid_id;
3188 struct llog_rec_tail lid_tail;
3189 } __attribute__((packed));
3191 struct llog_unlink_rec {
3192 struct llog_rec_hdr lur_hdr;
3196 struct llog_rec_tail lur_tail;
3197 } __attribute__((packed));
3199 struct llog_unlink64_rec {
3200 struct llog_rec_hdr lur_hdr;
3201 struct lu_fid lur_fid;
3202 __u32 lur_count; /* to destroy the lost precreated */
3206 struct llog_rec_tail lur_tail;
3207 } __attribute__((packed));
3209 struct llog_setattr64_rec {
3210 struct llog_rec_hdr lsr_hdr;
3211 struct ost_id lsr_oi;
3217 struct llog_rec_tail lsr_tail;
3218 } __attribute__((packed));
3220 struct llog_size_change_rec {
3221 struct llog_rec_hdr lsc_hdr;
3222 struct ll_fid lsc_fid;
3227 struct llog_rec_tail lsc_tail;
3228 } __attribute__((packed));
3230 #define CHANGELOG_MAGIC 0xca103000
3232 /** \a changelog_rec_type's that can't be masked */
3233 #define CHANGELOG_MINMASK (1 << CL_MARK)
3234 /** bits covering all \a changelog_rec_type's */
3235 #define CHANGELOG_ALLMASK 0XFFFFFFFF
3236 /** default \a changelog_rec_type mask. Allow all of them, except
3237 * CL_ATIME since it can really be time consuming, and not necessary
3238 * under normal use. */
3239 #define CHANGELOG_DEFMASK (CHANGELOG_ALLMASK & ~(1 << CL_ATIME))
3241 /* changelog llog name, needed by client replicators */
3242 #define CHANGELOG_CATALOG "changelog_catalog"
3244 struct changelog_setinfo {
3247 } __attribute__((packed));
3249 /** changelog record */
3250 struct llog_changelog_rec {
3251 struct llog_rec_hdr cr_hdr;
3252 struct changelog_rec cr; /**< Variable length field */
3253 struct llog_rec_tail cr_do_not_use; /**< for_sizeof_only */
3254 } __attribute__((packed));
3256 #define CHANGELOG_USER_PREFIX "cl"
3258 struct llog_changelog_user_rec {
3259 struct llog_rec_hdr cur_hdr;
3263 struct llog_rec_tail cur_tail;
3264 } __attribute__((packed));
3266 enum agent_req_status {
3274 static inline const char *agent_req_status2name(enum agent_req_status ars)
static inline bool agent_req_in_final_state(enum agent_req_status ars)
{
	return ((ars == ARS_SUCCEED) || (ars == ARS_FAILED) ||
		(ars == ARS_CANCELED));
}
3298 struct llog_agent_req_rec {
3299 struct llog_rec_hdr arr_hdr; /**< record header */
	__u32	arr_status;	/**< status of the request; one of
				  *  enum agent_req_status */
3303 __u32 arr_archive_id; /**< backend archive number */
3304 __u64 arr_flags; /**< req flags */
3305 __u64 arr_compound_id; /**< compound cookie */
3306 __u64 arr_req_create; /**< req. creation time */
3307 __u64 arr_req_change; /**< req. status change time */
3308 struct hsm_action_item arr_hai; /**< req. to the agent */
	struct llog_rec_tail	arr_tail;   /**< record tail, for_sizeof_only */
3310 } __attribute__((packed));
3312 /* Old llog gen for compatibility */
3316 } __attribute__((packed));
3318 struct llog_gen_rec {
3319 struct llog_rec_hdr lgr_hdr;
3320 struct llog_gen lgr_gen;
3324 struct llog_rec_tail lgr_tail;
3327 /* flags for the logs */
3329 LLOG_F_ZAP_WHEN_EMPTY = 0x1,
3330 LLOG_F_IS_CAT = 0x2,
3331 LLOG_F_IS_PLAIN = 0x4,
3332 LLOG_F_EXT_JOBID = 0x8,
3333 LLOG_F_IS_FIXSIZE = 0x10,
	/* Note: Flags covered by LLOG_F_EXT_MASK will be inherited from
	 * the catalog to the plain log, so do not add LLOG_F_IS_FIXSIZE here,
	 * because the catalog record is usually fixed size, but its plain
	 * log record can be variable-sized. */
3339 LLOG_F_EXT_MASK = LLOG_F_EXT_JOBID,
3342 /* On-disk header structure of each log object, stored in little endian order */
3343 #define LLOG_MIN_CHUNK_SIZE 8192
3344 #define LLOG_HEADER_SIZE (96) /* sizeof (llog_log_hdr) + sizeof(llh_tail)
3345 * - sizeof(llh_bitmap) */
3346 #define LLOG_BITMAP_BYTES (LLOG_MIN_CHUNK_SIZE - LLOG_HEADER_SIZE)
3347 #define LLOG_MIN_REC_SIZE (24) /* round(llog_rec_hdr + llog_rec_tail) */
3349 struct llog_log_hdr {
3350 struct llog_rec_hdr llh_hdr;
3351 __s64 llh_timestamp;
3353 __u32 llh_bitmap_offset;
	/* for a catalog the first/oldest and still in-use plain slot is just
	 * next to it. It will serve as the upper limit after the catalog has
	 * wrapped around. */
3360 struct obd_uuid llh_tgtuuid;
3361 __u32 llh_reserved[LLOG_HEADER_SIZE/sizeof(__u32)-23];
	/* These fields must always be at the end of the llog_log_hdr.
	 * Note: llh_bitmap size is variable because the llog chunk size could
	 * be bigger than LLOG_MIN_CHUNK_SIZE, i.e. sizeof(llog_log_hdr) > 8192
	 * bytes, and the real size is stored in llh_hdr.lrh_len, which means
	 * llh_tail should only be referred to via LLOG_HDR_TAIL().
	 * But this structure is also used by the client/server llog interface
	 * (see llog_client.c), so it is kept in its original form to avoid
	 * compatibility issues. */
3370 __u32 llh_bitmap[LLOG_BITMAP_BYTES / sizeof(__u32)];
3371 struct llog_rec_tail llh_tail;
3372 } __attribute__((packed));
3373 #undef LLOG_HEADER_SIZE
3374 #undef LLOG_BITMAP_BYTES
3376 #define LLOG_HDR_BITMAP_SIZE(llh) (__u32)((llh->llh_hdr.lrh_len - \
3377 llh->llh_bitmap_offset - \
3378 sizeof(llh->llh_tail)) * 8)
3379 #define LLOG_HDR_BITMAP(llh) (__u32 *)((char *)(llh) + \
3380 (llh)->llh_bitmap_offset)
3381 #define LLOG_HDR_TAIL(llh) ((struct llog_rec_tail *)((char *)llh + \
3382 llh->llh_hdr.lrh_len - \
3383 sizeof(llh->llh_tail)))
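/*
 * Usage sketch (illustrative only; "llh" and "idx" are hypothetical): testing
 * whether record index idx is still live. Plain little-endian bit arithmetic
 * is used here for illustration; the real code uses bitmap helpers.
 *
 *	__u32 *bitmap = LLOG_HDR_BITMAP(llh);
 *	int live = (bitmap[idx / 32] >> (idx % 32)) & 1;
 */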
3385 /** log cookies are used to reference a specific log file and a record therein */
3386 struct llog_cookie {
3387 struct llog_logid lgc_lgl;
3391 } __attribute__((packed));
3393 /** llog protocol */
3394 enum llogd_rpc_ops {
3395 LLOG_ORIGIN_HANDLE_CREATE = 501,
3396 LLOG_ORIGIN_HANDLE_NEXT_BLOCK = 502,
3397 LLOG_ORIGIN_HANDLE_READ_HEADER = 503,
3398 LLOG_ORIGIN_HANDLE_WRITE_REC = 504,
3399 LLOG_ORIGIN_HANDLE_CLOSE = 505,
3400 LLOG_ORIGIN_CONNECT = 506,
3401 LLOG_CATINFO = 507, /* deprecated */
3402 LLOG_ORIGIN_HANDLE_PREV_BLOCK = 508,
3403 LLOG_ORIGIN_HANDLE_DESTROY = 509, /* for destroy llog object*/
3405 LLOG_FIRST_OPC = LLOG_ORIGIN_HANDLE_CREATE
3409 struct llog_logid lgd_logid;
3411 __u32 lgd_llh_flags;
3413 __u32 lgd_saved_index;
3415 __u64 lgd_cur_offset;
3416 } __attribute__((packed));
3418 struct llogd_conn_body {
3419 struct llog_gen lgdc_gen;
3420 struct llog_logid lgdc_logid;
3421 __u32 lgdc_ctxt_idx;
3422 } __attribute__((packed));
3424 /* Note: 64-bit types are 64-bit aligned in structure */
3426 __u64 o_valid; /* hot fields in this obdo */
3429 __u64 o_size; /* o_size-o_blocks == ost_lvb */
3433 __u64 o_blocks; /* brw: cli sent cached bytes */
3436 /* 32-bit fields start here: keep an even number of them via padding */
3437 __u32 o_blksize; /* optimal IO blocksize */
3438 __u32 o_mode; /* brw: cli sent cache remain */
3442 __u32 o_nlink; /* brw: checksum */
3444 __u32 o_misc; /* brw: o_dropped */
3446 __u64 o_ioepoch; /* epoch in ost writes */
3447 __u32 o_stripe_idx; /* holds stripe idx */
3449 struct lustre_handle o_handle; /* brw: lock handle to prolong
3451 struct llog_cookie o_lcookie; /* destroy: unlink cookie from
3452 * MDS, obsolete in 2.8, reused
3457 __u64 o_data_version; /* getattr: sum of iversion for
3459 * brw: grant space consumed on
3460 * the client for the write */
3466 #define o_dirty o_blocks
3467 #define o_undirty o_mode
3468 #define o_dropped o_misc
3469 #define o_cksum o_nlink
3470 #define o_grant_used o_data_version
3472 struct lfsck_request {
3485 __u16 lr_async_windows;
3487 struct lu_fid lr_fid;
3488 struct lu_fid lr_fid2;
3489 struct lu_fid lr_fid3;
3494 void lustre_swab_lfsck_request(struct lfsck_request *lr);
3496 struct lfsck_reply {
3502 void lustre_swab_lfsck_reply(struct lfsck_reply *lr);
3505 LE_LASTID_REBUILDING = 1,
3506 LE_LASTID_REBUILT = 2,
3512 LE_FID_ACCESSED = 8,
3514 LE_CONDITIONAL_DESTROY = 10,
3515 LE_PAIRS_VERIFY = 11,
3516 LE_SKIP_NLINK_DECLARE = 13,
3518 LE_SET_LMV_MASTER = 15,
3519 LE_SET_LMV_SLAVE = 16,
3522 enum lfsck_event_flags {
3523 LEF_TO_OST = 0x00000001,
3524 LEF_FROM_OST = 0x00000002,
3525 LEF_SET_LMV_HASH = 0x00000004,
3526 LEF_SET_LMV_ALL = 0x00000008,
3527 LEF_RECHECK_NAME_HASH = 0x00000010,
3530 static inline void lustre_set_wire_obdo(const struct obd_connect_data *ocd,
3532 const struct obdo *lobdo)
3535 wobdo->o_flags &= ~OBD_FL_LOCAL_MASK;
3539 if (unlikely(!(ocd->ocd_connect_flags & OBD_CONNECT_FID)) &&
3540 fid_seq_is_echo(ostid_seq(&lobdo->o_oi))) {
		/* Currently OBD_FL_OSTID will only be used when a 2.4 echo
		 * client communicates with a pre-2.4 server */
3543 wobdo->o_oi.oi.oi_id = fid_oid(&lobdo->o_oi.oi_fid);
3544 wobdo->o_oi.oi.oi_seq = fid_seq(&lobdo->o_oi.oi_fid);
3548 static inline void lustre_get_wire_obdo(const struct obd_connect_data *ocd,
3550 const struct obdo *wobdo)
3552 __u32 local_flags = 0;
3554 if (lobdo->o_valid & OBD_MD_FLFLAGS)
3555 local_flags = lobdo->o_flags & OBD_FL_LOCAL_MASK;
3558 if (local_flags != 0) {
3559 lobdo->o_valid |= OBD_MD_FLFLAGS;
3560 lobdo->o_flags &= ~OBD_FL_LOCAL_MASK;
3561 lobdo->o_flags |= local_flags;
3566 if (unlikely(!(ocd->ocd_connect_flags & OBD_CONNECT_FID)) &&
3567 fid_seq_is_echo(wobdo->o_oi.oi.oi_seq)) {
3569 lobdo->o_oi.oi_fid.f_seq = wobdo->o_oi.oi.oi_seq;
3570 lobdo->o_oi.oi_fid.f_oid = wobdo->o_oi.oi.oi_id;
3571 lobdo->o_oi.oi_fid.f_ver = 0;
3575 extern void lustre_swab_obdo (struct obdo *o);
3577 /* request structure for OST's */
3582 /* Key for FIEMAP to be used in get_info calls */
3583 struct ll_fiemap_info_key {
3585 struct obdo lfik_oa;
3586 struct fiemap lfik_fiemap;
3589 extern void lustre_swab_ost_body (struct ost_body *b);
3590 extern void lustre_swab_ost_last_id(__u64 *id);
3591 extern void lustre_swab_fiemap(struct fiemap *fiemap);
3593 extern void lustre_swab_lov_user_md_v1(struct lov_user_md_v1 *lum);
3594 extern void lustre_swab_lov_user_md_v3(struct lov_user_md_v3 *lum);
3595 extern void lustre_swab_lov_user_md_objects(struct lov_user_ost_data *lod,
3597 extern void lustre_swab_lov_mds_md(struct lov_mds_md *lmm);
3598 void lustre_print_user_md(unsigned int level, struct lov_user_md *lum,
3602 extern void lustre_swab_llogd_body (struct llogd_body *d);
3603 extern void lustre_swab_llog_hdr (struct llog_log_hdr *h);
3604 extern void lustre_swab_llogd_conn_body (struct llogd_conn_body *d);
3605 extern void lustre_swab_llog_rec(struct llog_rec_hdr *rec);
3606 extern void lustre_swab_llog_id(struct llog_logid *lid);
3609 extern void lustre_swab_lustre_cfg(struct lustre_cfg *lcfg);
3611 /* Functions for dumping PTLRPC fields */
3612 void dump_rniobuf(struct niobuf_remote *rnb);
3613 void dump_ioo(struct obd_ioobj *nb);
3614 void dump_obdo(struct obdo *oa);
3615 void dump_ost_body(struct ost_body *ob);
3616 void dump_rcs(__u32 *rc);
3618 #define IDX_INFO_MAGIC 0x3D37CC37
/* Index file transfer over the network. The server serializes the index into
 * a byte stream, which is sent to the client via a bulk transfer. */
3625 /* reply: see idx_info_flags below */
3628 /* request & reply: number of lu_idxpage (to be) transferred */
3632 /* request: requested attributes passed down to the iterator API */
3635 /* request & reply: index file identifier (FID) */
3636 struct lu_fid ii_fid;
	/* reply: version of the index file before starting to walk the index.
	 * Please note that the version can be modified at any time during the
	 * transfer. */
3643 /* request: hash to start with:
3644 * reply: hash of the first entry of the first lu_idxpage and hash
3645 * of the entry to read next if any */
3646 __u64 ii_hash_start;
	/* reply: size of keys in lu_idxpages, minimal one if II_FL_VARKEY is
	 * set */
	/* reply: size of records in lu_idxpages, minimal one if II_FL_VARREC
	 * is set */
3661 extern void lustre_swab_idx_info(struct idx_info *ii);
3663 #define II_END_OFF MDS_DIR_END_OFF /* all entries have been read */
3665 /* List of flags used in idx_info::ii_flags */
3666 enum idx_info_flags {
3667 II_FL_NOHASH = 1 << 0, /* client doesn't care about hash value */
3668 II_FL_VARKEY = 1 << 1, /* keys can be of variable size */
3669 II_FL_VARREC = 1 << 2, /* records can be of variable size */
3670 II_FL_NONUNQ = 1 << 3, /* index supports non-unique keys */
3671 II_FL_NOKEY = 1 << 4, /* client doesn't care about key */
3674 #define LIP_MAGIC 0x8A6D6B6C
3676 /* 4KB (= LU_PAGE_SIZE) container gathering key/record pairs */
3678 /* 16-byte header */
3681 __u16 lip_nr; /* number of entries in the container */
3682 __u64 lip_pad0; /* additional padding for future use */
3684 /* key/record pairs are stored in the remaining 4080 bytes.
3685 * depending upon the flags in idx_info::ii_flags, each key/record
3686 * pair might be preceded by:
3688 * - the key size (II_FL_VARKEY is set)
3689 * - the record size (II_FL_VARREC is set)
3691 * For the time being, we only support fixed-size key & record. */
3692 char lip_entries[0];
3694 extern void lustre_swab_lip_header(struct lu_idxpage *lip);
3696 #define LIP_HDR_SIZE (offsetof(struct lu_idxpage, lip_entries))
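/*
 * Usage sketch (illustrative only, fixed-size keys and records, i.e. neither
 * II_FL_VARKEY nor II_FL_VARREC is set; "lip", "keysize" and "recsize" are
 * hypothetical and would come from the matching idx_info):
 *
 *	char *entry = lip->lip_entries;
 *	int i;
 *
 *	for (i = 0; i < lip->lip_nr; i++) {
 *		// the key starts at entry, the record at entry + keysize
 *		entry += keysize + recsize;
 *	}
 */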
/* Gather all possible types associated with a 4KB container */
3700 struct lu_dirpage lp_dir; /* for MDS_READPAGE */
3701 struct lu_idxpage lp_idx; /* for OBD_IDX_READ */
3702 char lp_array[LU_PAGE_SIZE];
3705 /* security opcodes */
3708 SEC_CTX_INIT_CONT = 802,
3711 SEC_FIRST_OPC = SEC_CTX_INIT
3715 * capa related definitions
3717 #define CAPA_HMAC_MAX_LEN 64
3718 #define CAPA_HMAC_KEY_MAX_LEN 56
/* NB: take care when changing the sequence of elements in this struct,
 * because the offset info is used in find_capa() */
3722 struct lustre_capa {
3723 struct lu_fid lc_fid; /** fid */
3724 __u64 lc_opc; /** operations allowed */
3725 __u64 lc_uid; /** file owner */
3726 __u64 lc_gid; /** file group */
3727 __u32 lc_flags; /** HMAC algorithm & flags */
3728 __u32 lc_keyid; /** key# used for the capability */
3729 __u32 lc_timeout; /** capa timeout value (sec) */
3730 __u32 lc_expiry; /** expiry time (sec) */
3731 __u8 lc_hmac[CAPA_HMAC_MAX_LEN]; /** HMAC */
3732 } __attribute__((packed));
3734 extern void lustre_swab_lustre_capa(struct lustre_capa *c);
3736 /** lustre_capa::lc_opc */
3738 CAPA_OPC_BODY_WRITE = 1<<0, /**< write object data */
3739 CAPA_OPC_BODY_READ = 1<<1, /**< read object data */
3740 CAPA_OPC_INDEX_LOOKUP = 1<<2, /**< lookup object fid */
3741 CAPA_OPC_INDEX_INSERT = 1<<3, /**< insert object fid */
3742 CAPA_OPC_INDEX_DELETE = 1<<4, /**< delete object fid */
3743 CAPA_OPC_OSS_WRITE = 1<<5, /**< write oss object data */
3744 CAPA_OPC_OSS_READ = 1<<6, /**< read oss object data */
3745 CAPA_OPC_OSS_TRUNC = 1<<7, /**< truncate oss object */
3746 CAPA_OPC_OSS_DESTROY = 1<<8, /**< destroy oss object */
3747 CAPA_OPC_META_WRITE = 1<<9, /**< write object meta data */
3748 CAPA_OPC_META_READ = 1<<10, /**< read object meta data */
3751 #define CAPA_OPC_OSS_RW (CAPA_OPC_OSS_READ | CAPA_OPC_OSS_WRITE)
3752 #define CAPA_OPC_MDS_ONLY \
3753 (CAPA_OPC_BODY_WRITE | CAPA_OPC_BODY_READ | CAPA_OPC_INDEX_LOOKUP | \
3754 CAPA_OPC_INDEX_INSERT | CAPA_OPC_INDEX_DELETE)
3755 #define CAPA_OPC_OSS_ONLY \
3756 (CAPA_OPC_OSS_WRITE | CAPA_OPC_OSS_READ | CAPA_OPC_OSS_TRUNC | \
3757 CAPA_OPC_OSS_DESTROY)
3758 #define CAPA_OPC_MDS_DEFAULT ~CAPA_OPC_OSS_ONLY
3759 #define CAPA_OPC_OSS_DEFAULT ~(CAPA_OPC_MDS_ONLY | CAPA_OPC_OSS_ONLY)
static inline bool lovea_slot_is_dummy(const struct lov_ost_data_v1 *obj)
{
	/* a zeroed area does not care about byte order. */
	if (obj->l_ost_oi.oi.oi_id == 0 && obj->l_ost_oi.oi.oi_seq == 0 &&
	    obj->l_ost_idx == 0 && obj->l_ost_gen == 0)
		return true;
	return false;
}
3771 /* lustre_capa::lc_hmac_alg */
3773 CAPA_HMAC_ALG_SHA1 = 1, /**< sha1 algorithm */
3777 #define CAPA_FL_MASK 0x00ffffff
3778 #define CAPA_HMAC_ALG_MASK 0xff000000
3780 struct lustre_capa_key {
3781 __u64 lk_seq; /**< mds# */
3782 __u32 lk_keyid; /**< key# */
3784 __u8 lk_key[CAPA_HMAC_KEY_MAX_LEN]; /**< key */
3785 } __attribute__((packed));
3787 extern void lustre_swab_lustre_capa_key(struct lustre_capa_key *k);
3789 /** The link ea holds 1 \a link_ea_entry for each hardlink */
3790 #define LINK_EA_MAGIC 0x11EAF1DFUL
3791 struct link_ea_header {
3794 __u64 leh_len; /* total size */
3800 /** Hardlink data is name and parent fid.
3801 * Stored in this crazy struct for maximum packing and endian-neutrality
3803 struct link_ea_entry {
3804 /** __u16 stored big-endian, unaligned */
3805 unsigned char lee_reclen[2];
3806 unsigned char lee_parent_fid[sizeof(struct lu_fid)];
3808 }__attribute__((packed));
3810 /** fid2path request/reply structure */
3811 struct getinfo_fid2path {
3812 struct lu_fid gf_fid;
3817 } __attribute__((packed));
3819 void lustre_swab_fid2path (struct getinfo_fid2path *gf);
3821 /** path2parent request/reply structures */
3823 struct lu_fid gp_fid; /**< parent FID */
3824 __u32 gp_linkno; /**< hardlink number */
3825 __u32 gp_name_size; /**< size of the name field */
3826 char gp_name[0]; /**< zero-terminated link name */
3827 } __attribute__((packed));
3830 LAYOUT_INTENT_ACCESS = 0,
3831 LAYOUT_INTENT_READ = 1,
3832 LAYOUT_INTENT_WRITE = 2,
3833 LAYOUT_INTENT_GLIMPSE = 3,
3834 LAYOUT_INTENT_TRUNC = 4,
3835 LAYOUT_INTENT_RELEASE = 5,
3836 LAYOUT_INTENT_RESTORE = 6
3839 /* enqueue layout lock with intent */
3840 struct layout_intent {
3841 __u32 li_opc; /* intent operation for enqueue, read, write etc */
3847 void lustre_swab_layout_intent(struct layout_intent *li);
3850 * On the wire version of hsm_progress structure.
3852 * Contains the userspace hsm_progress and some internal fields.
3854 struct hsm_progress_kernel {
3855 /* Field taken from struct hsm_progress */
3858 struct hsm_extent hpk_extent;
3860 __u16 hpk_errval; /* positive val */
3862 /* Additional fields */
3863 __u64 hpk_data_version;
3865 } __attribute__((packed));
3867 extern void lustre_swab_hsm_user_state(struct hsm_user_state *hus);
3868 extern void lustre_swab_hsm_current_action(struct hsm_current_action *action);
3869 extern void lustre_swab_hsm_progress_kernel(struct hsm_progress_kernel *hpk);
3871 extern void lustre_swab_hsm_user_item(struct hsm_user_item *hui);
3872 extern void lustre_swab_hsm_request(struct hsm_request *hr);
3875 * OUT_UPDATE RPC Format
 * During a cross-ref operation, the master MDT, which the client sends the
 * request to, will disassemble the operation into object updates, and OSP
 * will then send these updates to the remote MDT to be executed.
 *
 * An OUT_UPDATE RPC carries a list of updates. Each update belongs to an
 * operation and applies one type of modification to an object.
3890 * update (ub_count-th)
3892 * ub_count must be less than or equal to UPDATE_PER_RPC_MAX.
3897 * rc [+ buffers] (1st)
 * rc [+ buffers] (2nd)
 * ...
 * rc [+ buffers] (ur_count-th)
3902 * ur_count must be less than or equal to UPDATE_PER_RPC_MAX and should usually
3903 * be equal to ub_count.
/*
 * Type of each update; when adding or deleting an update type, please also
 * update update_opcode in lustre/target/out_lib.c.
 */
enum update_type {
	OUT_INDEX_LOOKUP	= 9,
	OUT_INDEX_INSERT	= 10,
	OUT_INDEX_DELETE	= 11,
};
enum update_flag {
	UPDATE_FL_OST		= 0x00000001,	/* op from OST (not MDT) */
	UPDATE_FL_SYNC		= 0x00000002,	/* commit before replying */
	UPDATE_FL_COMMITTED	= 0x00000004,	/* op committed globally */
	UPDATE_FL_NOLOG		= 0x00000008	/* for idempotent updates */
};
struct object_update_param {
	__u16	oup_len;	/* length of this parameter */
	__u16	oup_padding;
	__u32	oup_padding2;
	char	oup_buf[0];
};
static inline size_t
object_update_param_size(const struct object_update_param *param)
{
	return cfs_size_round(sizeof(*param) + param->oup_len);
}
struct object_update {
	__u16		ou_type;		/* enum update_type */
	__u16		ou_params_count;	/* update parameters count */
	__u32		ou_master_index;	/* master MDT/OST index */
	__u32		ou_flags;		/* enum update_flag */
	__u32		ou_padding1;		/* padding 1 */
	__u64		ou_batchid;		/* op transno on master */
	struct lu_fid	ou_fid;			/* object to be updated */
	struct object_update_param ou_params[0]; /* update params */
};
#define UPDATE_REQUEST_MAGIC_V1	0xBDDE0001
#define UPDATE_REQUEST_MAGIC_V2	0xBDDE0002
#define UPDATE_REQUEST_MAGIC	UPDATE_REQUEST_MAGIC_V2
/* Holds the object_updates being sent to the remote OUT in a single RPC */
struct object_update_request {
	__u32			ourq_magic;
	__u16			ourq_count;	/* number of ourq_updates[] */
	__u16			ourq_padding;
	struct object_update	ourq_updates[0];
};
#define OUT_UPDATE_HEADER_MAGIC		0xBDDF0001
#define OUT_UPDATE_MAX_INLINE_SIZE	4096
/* Header for an update request between MDTs */
struct out_update_header {
	__u32	ouh_magic;
	__u32	ouh_count;
	__u32	ouh_inline_length;
	__u32	ouh_reply_size;
	__u32	ouh_inline_data[0];
};
struct out_update_buffer {
	__u32	oub_size;
	__u32	oub_padding;
};
void lustre_swab_object_update(struct object_update *ou);
void lustre_swab_object_update_request(struct object_update_request *our);
void lustre_swab_out_update_header(struct out_update_header *ouh);
void lustre_swab_out_update_buffer(struct out_update_buffer *oub);
static inline size_t
object_update_params_size(const struct object_update *update)
{
	const struct object_update_param *param;
	size_t total_size = 0;
	unsigned int i;

	param = &update->ou_params[0];
	for (i = 0; i < update->ou_params_count; i++) {
		size_t size = object_update_param_size(param);

		param = (struct object_update_param *)((char *)param + size);
		total_size += size;
	}

	return total_size;
}
static inline size_t
object_update_size(const struct object_update *update)
{
	return offsetof(struct object_update, ou_params[0]) +
	       object_update_params_size(update);
}
static inline struct object_update *
object_update_request_get(const struct object_update_request *our,
			  unsigned int index, size_t *size)
{
	void *ptr;
	unsigned int i;

	if (index >= our->ourq_count)
		return NULL;

	ptr = (void *)&our->ourq_updates[0];
	for (i = 0; i < index; i++)
		ptr += object_update_size(ptr);

	if (size != NULL)
		*size = object_update_size(ptr);

	return ptr;
}
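
/*
 * Illustrative sketch only: walking every update packed into a single
 * OUT_UPDATE request with the accessor above. The handle_one_update()
 * callback is hypothetical; the real server-side dispatch is not shown here.
 * -EPROTO assumes the usual errno definitions are visible to the caller.
 */
static inline int
object_update_request_walk_sketch(const struct object_update_request *our,
				  int (*handle_one_update)(const struct object_update *,
							   size_t))
{
	unsigned int i;

	for (i = 0; i < our->ourq_count; i++) {
		struct object_update *update;
		size_t size = 0;
		int rc;

		/* returns NULL if the index is out of range */
		update = object_update_request_get(our, i, &size);
		if (update == NULL)
			return -EPROTO;

		rc = handle_one_update(update, size);
		if (rc != 0)
			return rc;
	}

	return 0;
}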
/* the result of an object update */
struct object_update_result {
	__u32	our_rc;
	__u16	our_datalen;
	__u16	our_padding;
	__u32	our_data[0];
};
#define UPDATE_REPLY_MAGIC_V1	0x00BD0001
#define UPDATE_REPLY_MAGIC_V2	0x00BD0002
#define UPDATE_REPLY_MAGIC	UPDATE_REPLY_MAGIC_V2
/* Holds the object_update_results replied from the remote OUT. */
struct object_update_reply {
	__u32	ourp_magic;
	__u16	ourp_count;
	__u16	ourp_padding;
	__u16	ourp_lens[0];
};
void lustre_swab_object_update_result(struct object_update_result *our);
void lustre_swab_object_update_reply(struct object_update_reply *our);
static inline struct object_update_result *
object_update_result_get(const struct object_update_reply *reply,
			 unsigned int index, size_t *size)
{
	__u16 count = reply->ourp_count;
	unsigned int i;
	void *ptr;

	if (index >= count)
		return NULL;

	ptr = (char *)reply +
	      cfs_size_round(offsetof(struct object_update_reply,
				      ourp_lens[count]));
	for (i = 0; i < index; i++) {
		if (reply->ourp_lens[i] == 0)
			return NULL;
		ptr += cfs_size_round(reply->ourp_lens[i]);
	}

	if (size != NULL)
		*size = reply->ourp_lens[index];

	return ptr;
}
/* read update result */
struct out_read_reply {
	__u32	orr_size;
	__u32	orr_padding;
	__u64	orr_offset;
	char	orr_data[0];
};
static inline void orr_cpu_to_le(struct out_read_reply *orr_dst,
				 const struct out_read_reply *orr_src)
{
	orr_dst->orr_size = cpu_to_le32(orr_src->orr_size);
	orr_dst->orr_padding = cpu_to_le32(orr_src->orr_padding);
	orr_dst->orr_offset = cpu_to_le64(orr_src->orr_offset);
}
static inline void orr_le_to_cpu(struct out_read_reply *orr_dst,
				 const struct out_read_reply *orr_src)
{
	orr_dst->orr_size = le32_to_cpu(orr_src->orr_size);
	orr_dst->orr_padding = le32_to_cpu(orr_src->orr_padding);
	orr_dst->orr_offset = le64_to_cpu(orr_src->orr_offset);
}
/** layout swap request structure
 * fid1 and fid2 are in mdt_body
 */
struct mdc_swap_layouts {
	__u64	msl_flags;
} __attribute__((packed));

void lustre_swab_swap_layouts(struct mdc_swap_layouts *msl);
struct close_data {
	struct lustre_handle	cd_handle;
	struct lu_fid		cd_fid;
	__u64			cd_data_version;
	__u64			cd_reserved[8];
};

void lustre_swab_close_data(struct close_data *data);
void lustre_swab_update_ops(struct update_ops *uops, unsigned int op_count);
/* Update llog format */
struct update_op {
	struct lu_fid	uop_fid;
	__u16		uop_type;
	__u16		uop_param_count;
	__u16		uop_params_off[0];
};

struct update_ops {
	struct update_op	uops_op[0];
};
struct update_params {
	struct object_update_param	up_params[0];
};
enum update_records_flag {
	UPDATE_RECORD_CONTINUE = 1 << 0,
};
/*
 * This is the update record format used to store updates on disk. All
 * updates of an operation are stored in ur_ops, and all of the parameters
 * for those updates are stored in ur_params.
 * To keep the record small, each update in ur_ops only remembers the offset
 * of its parameters within ur_params, so duplicate parameters are stored
 * once and shared; this saves a lot of space for operations such as
 * creating a striped directory.
 */
struct update_records {
	__u64			ur_master_transno;
	__u64			ur_batchid;
	__u32			ur_flags;
	/* If the operation includes multiple updates, then ur_index
	 * means the index of this update inside the whole set of updates. */
	__u32			ur_index;
	__u32			ur_update_count;
	__u32			ur_param_count;
	struct update_ops	ur_ops;
	/* Note: ur_ops has a variable size, so the following ur_params is
	 * commented out in case someone uses it directly:
	 *
	 * struct update_params	ur_params;
	 */
};
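
/*
 * Illustrative sketch only: resolving the idx-th parameter of an update_op
 * through its offset into the packed parameter area, which is how ur_ops
 * entries share entries in ur_params. It assumes uop_params_off[] holds byte
 * offsets from the start of the packed parameters; the helper name is made
 * up for illustration.
 */
static inline const struct object_update_param *
update_op_param_sketch(const struct update_params *params,
		       const struct update_op *uop, unsigned int idx)
{
	if (idx >= uop->uop_param_count)
		return NULL;

	return (const struct object_update_param *)
	       ((const char *)params->up_params + uop->uop_params_off[idx]);
}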
struct llog_update_record {
	struct llog_rec_hdr	lur_hdr;
	struct update_records	lur_update_rec;
	/* Note: lur_update_rec has a variable size, so the following lur_tail
	 * is commented out in case someone uses it directly:
	 *
	 * struct llog_rec_tail lur_tail;