/*
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2017, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 *
 * lustre/include/lustre_fid.h
 *
 * Author: Yury Umanets <umka@clusterfs.com>
 */

#ifndef __LUSTRE_FID_H
#define __LUSTRE_FID_H

/*
 * http://wiki.lustre.org/index.php/Architecture_-_Interoperability_fids_zfs
 * describes the FID namespace and interoperability requirements for FIDs.
 * The important parts of that document are included here for reference.
 *
 * FID
 *   File IDentifier generated by client from range allocated by the SEQuence
 *   service and stored in struct lu_fid. The FID is composed of three parts:
 *   SEQuence, ObjectID, and VERsion. The SEQ component is a filesystem
 *   unique 64-bit integer, and only one client is ever assigned any SEQ value.
 *   The first 0x400 FID_SEQ_NORMAL [2^33, 2^33 + 0x400] values are reserved
 *   for system use. The OID component is a 32-bit value generated by the
 *   client on a per-SEQ basis to allow creating many unique FIDs without
 *   communication with the server. The VER component is a 32-bit value that
 *   distinguishes between different FID instantiations, such as snapshots or
 *   separate subtrees within the filesystem. FIDs with the same VER field
 *   are considered part of the same namespace.
 *
 * OLD filesystems are those upgraded from Lustre 1.x that predate FIDs, and
 * MDTs use 32-bit ldiskfs internal inode/generation numbers (IGIFs), while
 * OSTs use 64-bit Lustre object IDs and generation numbers.
 *
 * NEW filesystems are those formatted since the introduction of FIDs.
 *
 * IGIF
 *   Inode and Generation In FID, a surrogate FID used to globally identify
 *   an existing object on OLD formatted MDT file system. This would only be
 *   used on MDT0 in a DNE filesystem, because there cannot be more than one
 *   MDT in an OLD formatted filesystem. Belongs to a sequence in the
 *   [12, 2^32 - 1] range, where the inode number is stored in SEQ and the
 *   inode generation is in OID.
 *   NOTE: This assumes no more than 2^32-1 inodes exist in the MDT filesystem,
 *   which is the maximum possible for an ldiskfs backend. It also assumes
 *   that the reserved ext3/ext4/ldiskfs inode numbers [0-11] are never visible
 *   to clients, which has always been true.
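 *
 *   For example (illustrative, derived from the mapping described above), an
 *   MDT0 inode with inode number 12345 and generation 2 is addressed by the
 *   IGIF FID [0x3039:0x2:0x0], i.e. f_seq = 12345, f_oid = 2, f_ver = 0.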
 *
 * IDIF
 *   Object ID In FID, a surrogate FID used to globally identify an existing
 *   OST object on OLD formatted OST file system. Belongs to a sequence in
 *   [2^32, 2^33 - 1]. The sequence number is calculated as:
 *
 *      1 << 32 | (ost_index << 16) | ((objid >> 32) & 0xffff)
 *
 *   that is, SEQ consists of the 16-bit OST index and the high 16 bits of
 *   the object ID. The generation of unique SEQ values per OST allows the
 *   IDIF FIDs to be identified in the FLD correctly. The OID field is
 *   calculated as:
 *
 *      objid & 0xffffffff
 *
 *   that is, it consists of the low 32 bits of the object ID. For objects
 *   within the IDIF range, object ID extraction is:
 *
 *      o_id = ((fid->f_seq & 0xffff) << 32) | fid->f_oid;
 *      o_seq = 0;  // formerly group number
 *
 *   NOTE: This assumes that no more than 2^48-1 objects have ever been
 *   created on any OST, and that no more than 65535 OSTs are in use. Both
 *   are very reasonable assumptions, i.e. an IDIF can uniquely map all
 *   objects assuming a maximum creation rate of 1M objects per second for a
 *   maximum of 9 years, or combinations thereof.
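 *
 *   For example (illustrative, using the formulas above), object 0x100000002
 *   on OST index 1 maps to SEQ = 1 << 32 | 1 << 16 | 0x1 = 0x100010001 and
 *   OID = 0x2, and the extraction (0x0001 << 32) | 0x2 recovers the original
 *   object ID 0x100000002.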
 *
 * OST_MDT0
 *   Surrogate FID used to identify an existing object on OLD formatted OST
 *   filesystem. Belongs to the reserved SEQuence 0, and is used prior to
 *   the introduction of FID-on-OST, at which point IDIF will be used to
 *   identify objects as residing on a specific OST.
 *
 * LLOG
 *   For Lustre Log objects the object sequence 1 is used. This is compatible
 *   with both OLD and NEW namespaces, as this SEQ number is in the
 *   ext3/ldiskfs reserved inode range and does not conflict with IGIF
 *   sequence numbers.
 *
 * ECHO
 *   For testing OST IO performance the object sequence 2 is used. This is
 *   compatible with both OLD and NEW namespaces, as this SEQ number is in
 *   the ext3/ldiskfs reserved inode range and does not conflict with IGIF
 *   sequence numbers.
 *
 * OST_MDT1 .. OST_MAX
 *   For testing with multiple MDTs the object sequences 3 through 9 are used,
 *   allowing direct mapping of MDTs 1 through 7 respectively, for a total
 *   of 8 MDTs including OST_MDT0. This matches the legacy CMD project "group"
 *   mappings. However, this SEQ range is only for testing prior to any
 *   production DNE release, as the objects in this range conflict across all
 *   OSTs, because the OST index is not part of the FID. For production DNE
 *   usage, OST objects created by MDT1+ will use FID_SEQ_NORMAL FIDs.
 *
 * DLM OST objid to IDIF mapping
 *   For compatibility with existing OLD OST network protocol structures, the
 *   FID must map onto the o_id and o_seq in a manner that ensures existing
 *   objects are identified consistently for IO, as well as onto the LDLM
 *   namespace so that, for IDIFs, there is only a single resource name for
 *   any object in the DLM. The OLD OST object DLM resource mapping is:
 *
 *      resource[] = {o_id, o_seq, 0, 0};  // o_seq == 0 for production releases
 *
 *   The NEW OST object DLM resource mapping is the same for both MDT and OST:
 *
 *      resource[] = {SEQ, OID, VER, HASH};
 *
 *   NOTE: for mapping IDIF values to DLM resource names the o_id may be
 *   larger than the 2^33 reserved sequence numbers for IDIF, so it is
 *   possible for the o_id numbers to overlap FID SEQ numbers in the resource.
 *   However, in all production releases the OLD o_seq field is always zero,
 *   and all valid FID OID values are non-zero, so the lock resources will not
 *   collide. Even so, the MDT and OST resources are also in different LDLM
 *   namespaces.
 */
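
/*
 * Example (illustrative only, not from the original text): an OLD OST object
 * with o_id = 0x102 and o_seq = 0 is locked under the DLM resource
 * {0x102, 0, 0, 0}, while a NEW object is locked under a resource built
 * directly from its FID, see fid_build_reg_res_name() and
 * ostid_build_res_name() below.
 */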

#include <libcfs/libcfs.h>
#include <uapi/linux/lustre/lustre_fid.h>
#include <uapi/linux/lustre/lustre_idl.h>
#include <uapi/linux/lustre/lustre_ostid.h>

/* Lustre service names follow the format:
 * service name + MDT + seq name
 */
#define LUSTRE_MDT_MAXNAMELEN 80

/* Whole sequences space range and zero range definitions */
extern const struct lu_seq_range LUSTRE_SEQ_SPACE_RANGE;
extern const struct lu_seq_range LUSTRE_SEQ_ZERO_RANGE;
extern const struct lu_fid LUSTRE_BFL_FID;
extern const struct lu_fid LU_OBF_FID;
extern const struct lu_fid LU_LPF_FID;
extern const struct lu_fid LU_DOT_LUSTRE_FID;
extern const struct lu_fid LU_BACKEND_LPF_FID;

enum {
        /*
         * This is how many metadata FIDs may be allocated in one sequence (128k)
         */
        LUSTRE_METADATA_SEQ_MAX_WIDTH = 0x0000000000020000ULL,

        /*
         * This is how many data FIDs could be allocated in one sequence (4B - 1)
         */
        LUSTRE_DATA_SEQ_MAX_WIDTH = 0x00000000FFFFFFFFULL,

        /*
         * How many sequences to allocate to a client at once.
         */
        LUSTRE_SEQ_META_WIDTH = 0x0000000000000001ULL,

        /*
         * seq allocation pool size.
         */
        LUSTRE_SEQ_BATCH_WIDTH = LUSTRE_SEQ_META_WIDTH * 1000,

        /*
         * This is how many sequences may be in one super-sequence allocated to
         * MDTs.
         */
        LUSTRE_SEQ_SUPER_WIDTH = ((1ULL << 30ULL) * LUSTRE_SEQ_META_WIDTH)
};
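
/*
 * Illustrative arithmetic (not from the original text): with the defaults
 * above, a single metadata sequence covers up to 128k FIDs, and one
 * super-sequence granted to an MDT spans 2^30 sequences, i.e. roughly 2^47
 * metadata FIDs before another super-sequence must be allocated.
 */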

/** special OID for local objects */
enum local_oid {
        /** \see fld_mod_init */
        FLD_INDEX_OID = 3UL,
        /** \see fid_mod_init */
        FID_SEQ_CTL_OID = 4UL,
        FID_SEQ_SRV_OID = 5UL,
        /** \see mdd_mod_init */
        MDD_ROOT_INDEX_OID = 6UL, /* deprecated in 2.4 */
        MDD_ORPHAN_OID = 7UL, /* deprecated in 2.4 */
        MDD_LOV_OBJ_OID = 8UL,
        MDD_CAPA_KEYS_OID = 9UL,
        /** \see mdt_mod_init */
        LAST_RECV_OID = 11UL,
        OSD_FS_ROOT_OID = 13UL,
        ACCT_USER_OID = 15UL,
        ACCT_GROUP_OID = 16UL,
        LFSCK_BOOKMARK_OID = 17UL,
        OTABLE_IT_OID = 18UL,
        REPLY_DATA_OID = 21UL,
        ACCT_PROJECT_OID = 22UL,
        INDEX_BACKUP_OID = 4116UL,
        OFD_LAST_GROUP_OID = 4117UL,
        LLOG_CATALOGS_OID = 4118UL,
        MGS_CONFIGS_OID = 4119UL,
        OFD_HEALTH_CHECK_OID = 4120UL,
        MDD_LOV_OBJ_OSEQ = 4121UL,
        LFSCK_NAMESPACE_OID = 4122UL,
        REMOTE_PARENT_DIR_OID = 4123UL,
        /* This definition is obsolete:
         * SLAVE_LLOG_CATALOGS_OID = 4124UL,
         */
        BATCHID_COMMITTED_OID = 4125UL,
};

static inline void lu_local_obj_fid(struct lu_fid *fid, __u32 oid)
{
        fid->f_seq = FID_SEQ_LOCAL_FILE;
        fid->f_oid = oid;
        fid->f_ver = 0;
}

static inline void lu_local_name_obj_fid(struct lu_fid *fid, __u32 oid)
{
        fid->f_seq = FID_SEQ_LOCAL_NAME;
        fid->f_oid = oid;
        fid->f_ver = 0;
}
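
/*
 * Usage sketch (illustrative only, hypothetical helper name): build the FID
 * of a well-known local object, here the backend filesystem root.
 */
static inline void lu_example_osd_root_fid(struct lu_fid *fid)
{
        lu_local_obj_fid(fid, OSD_FS_ROOT_OID); /* [FID_SEQ_LOCAL_FILE:0xd:0x0] */
}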

/* For a new FS (>= 2.4), the root FID will be changed to
 * [FID_SEQ_ROOT:1:0]; for an existing FS (upgraded to 2.4),
 * the root FID will still be an IGIF */
static inline int fid_is_root(const struct lu_fid *fid)
{
        return unlikely(fid_seq(fid) == FID_SEQ_ROOT &&
                        fid_oid(fid) == FID_OID_ROOT);
}

static inline int fid_is_dot_lustre(const struct lu_fid *fid)
{
        return unlikely(fid_seq(fid) == FID_SEQ_DOT_LUSTRE &&
                        fid_oid(fid) == FID_OID_DOT_LUSTRE);
}

static inline int fid_is_obf(const struct lu_fid *fid)
{
        return unlikely(fid_seq(fid) == FID_SEQ_DOT_LUSTRE &&
                        fid_oid(fid) == FID_OID_DOT_LUSTRE_OBF);
}

static inline int fid_is_otable_it(const struct lu_fid *fid)
{
        return unlikely(fid_seq(fid) == FID_SEQ_LOCAL_FILE &&
                        fid_oid(fid) == OTABLE_IT_OID);
}

static inline int fid_oid_is_quota(const struct lu_fid *fid)
{
        switch (fid_oid(fid)) {
        case ACCT_USER_OID:
        case ACCT_GROUP_OID:
        case ACCT_PROJECT_OID:
                return 1;
        default:
                return 0;
        }
}

static inline int fid_is_acct(const struct lu_fid *fid)
{
        return fid_seq(fid) == FID_SEQ_LOCAL_FILE &&
               fid_oid_is_quota(fid);
}

static inline int fid_is_quota(const struct lu_fid *fid)
{
        return fid_seq(fid) == FID_SEQ_QUOTA ||
               fid_seq(fid) == FID_SEQ_QUOTA_GLB;
}

static inline int fid_is_name_llog(const struct lu_fid *fid)
{
        return fid_seq(fid) == FID_SEQ_LLOG_NAME;
}

static inline int fid_is_namespace_visible(const struct lu_fid *fid)
{
        const __u64 seq = fid_seq(fid);

        /* Here, we cannot distinguish whether a normal FID is for an OST
         * object or not. It is the caller's duty to check further if needed. */
        return (!fid_is_last_id(fid) &&
                (fid_seq_is_norm(seq) || fid_seq_is_igif(seq))) ||
               fid_is_root(fid) || fid_seq_is_dot(seq);
}

static inline int fid_seq_in_fldb(__u64 seq)
{
        return fid_seq_is_igif(seq) || fid_seq_is_norm(seq) ||
               fid_seq_is_root(seq) || fid_seq_is_dot(seq);
}

static inline void ost_layout_cpu_to_le(struct ost_layout *dst,
                                        const struct ost_layout *src)
{
        dst->ol_stripe_size = __cpu_to_le32(src->ol_stripe_size);
        dst->ol_stripe_count = __cpu_to_le32(src->ol_stripe_count);
        dst->ol_comp_start = __cpu_to_le64(src->ol_comp_start);
        dst->ol_comp_end = __cpu_to_le64(src->ol_comp_end);
        dst->ol_comp_id = __cpu_to_le32(src->ol_comp_id);
}

static inline void ost_layout_le_to_cpu(struct ost_layout *dst,
                                        const struct ost_layout *src)
{
        dst->ol_stripe_size = __le32_to_cpu(src->ol_stripe_size);
        dst->ol_stripe_count = __le32_to_cpu(src->ol_stripe_count);
        dst->ol_comp_start = __le64_to_cpu(src->ol_comp_start);
        dst->ol_comp_end = __le64_to_cpu(src->ol_comp_end);
        dst->ol_comp_id = __le32_to_cpu(src->ol_comp_id);
}

static inline void filter_fid_cpu_to_le(struct filter_fid *dst,
                                        const struct filter_fid *src, int size)
{
        fid_cpu_to_le(&dst->ff_parent, &src->ff_parent);

        if (size < sizeof(struct filter_fid)) {
                memset(&dst->ff_layout, 0, sizeof(dst->ff_layout));
        } else {
                ost_layout_cpu_to_le(&dst->ff_layout, &src->ff_layout);
                dst->ff_layout_version = cpu_to_le32(src->ff_layout_version);
                dst->ff_range = cpu_to_le32(src->ff_range);
        }

        /* XXX: Add more if filter_fid is enlarged in the future. */
}

static inline void filter_fid_le_to_cpu(struct filter_fid *dst,
                                        const struct filter_fid *src, int size)
{
        fid_le_to_cpu(&dst->ff_parent, &src->ff_parent);

        if (size < sizeof(struct filter_fid)) {
                memset(&dst->ff_layout, 0, sizeof(dst->ff_layout));
        } else {
                ost_layout_le_to_cpu(&dst->ff_layout, &src->ff_layout);
                dst->ff_layout_version = le32_to_cpu(src->ff_layout_version);
                dst->ff_range = le32_to_cpu(src->ff_range);
        }

        /* XXX: Add more if filter_fid is enlarged in the future. */
}

static inline void lu_last_id_fid(struct lu_fid *fid, __u64 seq, __u32 ost_idx)
{
        if (fid_seq_is_mdt0(seq)) {
                fid->f_seq = fid_idif_seq(0, ost_idx);
        } else {
                LASSERTF(fid_seq_is_norm(seq) || fid_seq_is_echo(seq) ||
                         fid_seq_is_idif(seq), "%#llx\n", seq);
                fid->f_seq = seq;
        }
        fid->f_oid = 0;
        fid->f_ver = 0;
}

static inline bool fid_is_md_operative(const struct lu_fid *fid)
{
        return fid_is_mdt0(fid) || fid_is_igif(fid) ||
               fid_is_norm(fid) || fid_is_root(fid);
}

/* seq client type */
enum lu_cli_type {
        LUSTRE_SEQ_METADATA = 1,
        LUSTRE_SEQ_DATA
};

/* seq manager type */
enum lu_mgr_type {
        LUSTRE_SEQ_SERVER,
        LUSTRE_SEQ_CONTROLLER
};

struct lu_server_seq;

/* Client sequence manager interface. */
struct lu_client_seq {
        /* Sequence-controller export. */
        struct obd_export *lcs_exp;
        struct mutex lcs_mutex;

        /*
         * Range of sequences allowed for allocation. When lu_client_seq is
         * used on clients, this contains the meta-sequence range; on servers
         * it contains the super-sequence range.
         */
        struct lu_seq_range lcs_space;

        /* Seq related debugfs */
        struct dentry *lcs_debugfs_entry;

        /* This holds the last allocated FID in the last obtained sequence */
        struct lu_fid lcs_fid;

        /* LUSTRE_SEQ_METADATA or LUSTRE_SEQ_DATA */
        enum lu_cli_type lcs_type;

        /*
         * Service uuid, passed from MDT + seq name to form a unique seq name
         * to use with debugfs.
         */
        char lcs_name[LUSTRE_MDT_MAXNAMELEN];

        /*
         * Sequence width, that is how many objects may be allocated in one
         * sequence. Default value for it is LUSTRE_SEQ_MAX_WIDTH.
         */
        __u64 lcs_width;

        /* Seq-server for direct talking */
        struct lu_server_seq *lcs_srv;
};

/* Server sequence manager interface. */
struct lu_server_seq {
        /* Available sequences space */
        struct lu_seq_range lss_space;

        /* keeps highwater in lsr_end for seq allocation algorithm */
        struct lu_seq_range lss_lowater_set;
        struct lu_seq_range lss_hiwater_set;

        /*
         * Device for server side seq manager needs (saving sequences to
         * backing store).
         */
        struct dt_device *lss_dev;

        /* /seq file object device */
        struct dt_object *lss_obj;

        /* Seq related debugfs */
        struct dentry *lss_debugfs_entry;

        /* LUSTRE_SEQ_SERVER or LUSTRE_SEQ_CONTROLLER */
        enum lu_mgr_type lss_type;

        /* Client interface to request controller */
        struct lu_client_seq *lss_cli;

        /* Mutex for protecting allocation */
        struct mutex lss_mutex;

        /*
         * Service uuid, passed from MDT + seq name to form a unique seq name
         * to use with debugfs.
         */
        char lss_name[LUSTRE_MDT_MAXNAMELEN];

        /*
         * Allocation chunks for super and meta sequences. Default values are
         * LUSTRE_SEQ_SUPER_WIDTH and LUSTRE_SEQ_META_WIDTH.
         */
        __u64 lss_width;

        /*
         * minimum lss_alloc_set size that should be allocated from
         * lss_space
         */
        __u64 lss_set_width;

        /* sync is needed for update operation */
        __u32 lss_need_sync;

        /*
         * Pointer to site object, required to access site fld.
         */
        struct seq_server_site *lss_site;
};

struct seq_server_site {
        struct lu_site *ss_lu;
        /**
         * mds number of this site.
         */
        __u32 ss_node_id;
        /**
         * Fid location database
         */
        struct lu_server_fld *ss_server_fld;
        struct lu_client_fld *ss_client_fld;

        /**
         * Server Seq Manager
         */
        struct lu_server_seq *ss_server_seq;

        /**
         * Controller Seq Manager
         */
        struct lu_server_seq *ss_control_seq;
        struct obd_export *ss_control_exp;

        /**
         * Client Seq Manager
         */
        struct lu_client_seq *ss_client_seq;
};

int seq_server_init(const struct lu_env *env,
                    struct lu_server_seq *seq,
                    struct dt_device *dev,
                    const char *prefix,
                    enum lu_mgr_type type,
                    struct seq_server_site *ss);

void seq_server_fini(struct lu_server_seq *seq,
                     const struct lu_env *env);

int seq_server_alloc_super(struct lu_server_seq *seq,
                           struct lu_seq_range *out,
                           const struct lu_env *env);

int seq_server_alloc_meta(struct lu_server_seq *seq,
                          struct lu_seq_range *out,
                          const struct lu_env *env);

int seq_server_set_cli(const struct lu_env *env,
                       struct lu_server_seq *seq,
                       struct lu_client_seq *cli);

int seq_server_check_and_alloc_super(const struct lu_env *env,
                                     struct lu_server_seq *seq);

void seq_client_init(struct lu_client_seq *seq,
                     struct obd_export *exp,
                     enum lu_cli_type type,
                     const char *prefix,
                     struct lu_server_seq *srv);

void seq_client_fini(struct lu_client_seq *seq);

void seq_client_flush(struct lu_client_seq *seq);

int seq_client_alloc_fid(const struct lu_env *env, struct lu_client_seq *seq,
                         struct lu_fid *fid);
int seq_client_get_seq(const struct lu_env *env, struct lu_client_seq *seq,
                       u64 *seqnr);
int seq_site_fini(const struct lu_env *env, struct seq_server_site *ss);
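
/*
 * Usage sketch (illustrative only; the helper name is hypothetical and error
 * handling is minimal): allocate the next FID from a client sequence manager.
 * lcs_fid above tracks the last FID handed out in the currently held sequence.
 */
static inline int seq_example_alloc_next(const struct lu_env *env,
                                         struct lu_client_seq *seq,
                                         struct lu_fid *fid)
{
        int rc = seq_client_alloc_fid(env, seq, fid);

        return rc < 0 ? rc : 0; /* negative return values are errors */
}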

/* Fids common stuff */
int fid_is_local(const struct lu_env *env,
                 struct lu_site *site, const struct lu_fid *fid);

int client_fid_init(struct obd_device *obd, struct obd_export *exp,
                    enum lu_cli_type type);
int client_fid_fini(struct obd_device *obd);

struct ldlm_namespace;

/*
 * Build (DLM) resource name from FID.
 *
 * NOTE: until Lustre 1.8.7/2.1.1 the fid_ver() was packed into name[2],
 * but was moved into name[1] along with the OID to avoid consuming the
 * remaining name[2,3] fields that need to be used for the quota identifier.
 */
static inline void
fid_build_reg_res_name(const struct lu_fid *fid, struct ldlm_res_id *res)
{
        memset(res, 0, sizeof(*res));
        res->name[LUSTRE_RES_ID_SEQ_OFF] = fid_seq(fid);
        res->name[LUSTRE_RES_ID_VER_OID_OFF] = fid_ver_oid(fid);
}

/*
 * Return true if resource is for object identified by FID.
 */
static inline int fid_res_name_eq(const struct lu_fid *fid,
                                  const struct ldlm_res_id *res)
{
        return res->name[LUSTRE_RES_ID_SEQ_OFF] == fid_seq(fid) &&
               res->name[LUSTRE_RES_ID_VER_OID_OFF] == fid_ver_oid(fid);
}

/*
 * Extract FID from LDLM resource. Reverse of fid_build_reg_res_name().
 */
static inline void
fid_extract_from_res_name(struct lu_fid *fid, const struct ldlm_res_id *res)
{
        fid->f_seq = res->name[LUSTRE_RES_ID_SEQ_OFF];
        fid->f_oid = (__u32)(res->name[LUSTRE_RES_ID_VER_OID_OFF]);
        fid->f_ver = (__u32)(res->name[LUSTRE_RES_ID_VER_OID_OFF] >> 32);
        LASSERT(fid_res_name_eq(fid, res));
}
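
/*
 * Usage sketch (illustrative only, hypothetical helper name): a FID packed
 * into a DLM resource name with fid_build_reg_res_name() can be recovered
 * with fid_extract_from_res_name(), i.e. the mapping round-trips.
 */
static inline void fid_example_res_roundtrip(const struct lu_fid *fid)
{
        struct ldlm_res_id res;
        struct lu_fid out;

        fid_build_reg_res_name(fid, &res);      /* res = {SEQ, VER<<32 | OID, 0, 0} */
        fid_extract_from_res_name(&out, &res);  /* out is equal to *fid again */
        LASSERT(fid_res_name_eq(&out, &res));
}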

/*
 * Build (DLM) resource identifier from global quota FID and quota ID.
 */
static inline void
fid_build_quota_res_name(const struct lu_fid *glb_fid, union lquota_id *qid,
                         struct ldlm_res_id *res)
{
        fid_build_reg_res_name(glb_fid, res);
        res->name[LUSTRE_RES_ID_QUOTA_SEQ_OFF] = fid_seq(&qid->qid_fid);
        res->name[LUSTRE_RES_ID_QUOTA_VER_OID_OFF] = fid_ver_oid(&qid->qid_fid);
}

/*
 * Extract global FID and quota ID from resource name
 */
static inline void fid_extract_from_quota_res(struct lu_fid *glb_fid,
                                              union lquota_id *qid,
                                              const struct ldlm_res_id *res)
{
        fid_extract_from_res_name(glb_fid, res);
        qid->qid_fid.f_seq = res->name[LUSTRE_RES_ID_QUOTA_SEQ_OFF];
        qid->qid_fid.f_oid = (__u32)res->name[LUSTRE_RES_ID_QUOTA_VER_OID_OFF];
        qid->qid_fid.f_ver =
                (__u32)(res->name[LUSTRE_RES_ID_QUOTA_VER_OID_OFF] >> 32);
}

static inline void
fid_build_pdo_res_name(const struct lu_fid *fid, unsigned int hash,
                       struct ldlm_res_id *res)
{
        fid_build_reg_res_name(fid, res);
        res->name[LUSTRE_RES_ID_HSH_OFF] = hash;
}

/*
 * Build DLM resource name from object id & seq, which will be removed
 * finally, when we replace ost_id with FID in the data stack.
 *
 * Currently, a resid from an old client, with res[0] = object_id and
 * res[1] = object_seq, is just the opposite of a metadata resid, where
 * res[0] = fid->f_seq and res[1] = fid->f_oid.
 * To unify the resid identification, we will reverse the data resid to
 * keep it the same as the metadata resid, i.e.
 *
 * For a resid from an old client,
 *    res[0] = objid, res[1] = 0, still keeping the original order
 *    for compatibility.
 *
 * For a new resid,
 *    res will be built from the normal FID directly, i.e. res[0] = f_seq,
 *    res[1] = f_oid + f_ver.
 */
static inline void ostid_build_res_name(const struct ost_id *oi,
                                        struct ldlm_res_id *name)
{
        memset(name, 0, sizeof(*name));
        if (fid_seq_is_mdt0(ostid_seq(oi))) {
                name->name[LUSTRE_RES_ID_SEQ_OFF] = ostid_id(oi);
                name->name[LUSTRE_RES_ID_VER_OID_OFF] = ostid_seq(oi);
        } else {
                fid_build_reg_res_name(&oi->oi_fid, name);
        }
}

/*
 * Return true if the resource is for the object identified by this id & group.
 */
static inline bool ostid_res_name_eq(const struct ost_id *oi,
                                     const struct ldlm_res_id *name)
{
        /* Note: this is just a trick to save some effort; the correct way
         * would probably be to turn both into FIDs and compare them. */
        if (fid_seq_is_mdt0(ostid_seq(oi))) {
                return name->name[LUSTRE_RES_ID_SEQ_OFF] == ostid_id(oi) &&
                       name->name[LUSTRE_RES_ID_VER_OID_OFF] == ostid_seq(oi);
        } else {
                return name->name[LUSTRE_RES_ID_SEQ_OFF] == ostid_seq(oi) &&
                       name->name[LUSTRE_RES_ID_VER_OID_OFF] == ostid_id(oi);
        }
}

/*
 * Note: we need to check oi_seq to decide where to set oi_id,
 * so oi_seq should always be set ahead of oi_id.
 */
static inline int ostid_set_id(struct ost_id *oi, __u64 oid)
{
        if (fid_seq_is_mdt0(oi->oi.oi_seq)) {
                if (oid >= IDIF_MAX_OID)
                        return -E2BIG;
                oi->oi.oi_id = oid;
        } else if (fid_is_idif(&oi->oi_fid)) {
                if (oid >= IDIF_MAX_OID)
                        return -E2BIG;
                oi->oi_fid.f_seq = fid_idif_seq(oid,
                                                fid_idif_ost_idx(&oi->oi_fid));
                oi->oi_fid.f_oid = oid;
                oi->oi_fid.f_ver = oid >> 48;
        } else {
                if (oid >= OBIF_MAX_OID)
                        return -E2BIG;
                oi->oi_fid.f_oid = oid;
        }
        return 0;
}

/* pack any OST FID into an ostid (id/seq) for the wire/disk */
static inline int fid_to_ostid(const struct lu_fid *fid, struct ost_id *ostid)
{
        int rc = 0;

        if (fid_seq_is_igif(fid->f_seq))
                return -EBADF;

        if (fid_is_idif(fid)) {
                ostid_set_seq_mdt0(ostid);
                rc = ostid_set_id(ostid, fid_idif_id(fid_seq(fid),
                                                     fid_oid(fid),
                                                     fid_ver(fid)));
        } else {
                ostid->oi_fid = *fid;
        }

        return rc;
}
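
/*
 * Usage sketch (illustrative only, hypothetical helper name): build the IDIF
 * FID for object 2 on OST index 1 and pack it back into an ost_id for the
 * wire/disk representation.
 */
static inline void ost_example_idif_to_ostid(struct ost_id *oi)
{
        struct lu_fid fid;

        fid.f_seq = fid_idif_seq(2, 1); /* IDIF SEQ for objid 2 on OST index 1 */
        fid.f_oid = 2;
        fid.f_ver = 0;

        if (fid_to_ostid(&fid, oi) != 0)        /* cannot fail for a valid IDIF FID */
                return;
        /* oi now holds the OLD form: o_seq = 0 (MDT0), o_id = 2 */
}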

/* The same as osc_build_res_name() */
static inline void ost_fid_build_resid(const struct lu_fid *fid,
                                       struct ldlm_res_id *resname)
{
        if (fid_is_mdt0(fid) || fid_is_idif(fid)) {
                struct ost_id oi;

                oi.oi.oi_id = 0; /* gcc 4.7.2 complains otherwise */
                if (fid_to_ostid(fid, &oi) != 0)
                        return;
                ostid_build_res_name(&oi, resname);
        } else {
                fid_build_reg_res_name(fid, resname);
        }
}

static inline void ost_fid_from_resid(struct lu_fid *fid,
                                      const struct ldlm_res_id *name,
                                      int ost_idx)
{
        if (fid_seq_is_mdt0(name->name[LUSTRE_RES_ID_VER_OID_OFF])) {
                /* old resid */
                struct ost_id oi;

                memset(&oi, 0, sizeof(oi));
                ostid_set_seq(&oi, name->name[LUSTRE_RES_ID_VER_OID_OFF]);
                if (ostid_set_id(&oi, name->name[LUSTRE_RES_ID_SEQ_OFF])) {
                        CERROR("Bad %llu to set " DOSTID "\n",
                               name->name[LUSTRE_RES_ID_SEQ_OFF], POSTID(&oi));
                }
                ostid_to_fid(fid, &oi, ost_idx);
        } else {
                /* new resid */
                fid_extract_from_res_name(fid, name);
        }
}

/**
 * Flatten 128-bit FID values into a 64-bit value for use as an inode number.
 * For non-IGIF FIDs this starts just over 2^32, and continues without
 * conflict until 2^64, at which point we wrap the high 24 bits of the SEQ
 * into the range where there may not be many OID values in use, to minimize
 * the risk of conflict.
 *
 * Assuming LUSTRE_SEQ_MAX_WIDTH is less than (1 << 24), which is currently
 * true, the time between re-used inode numbers is very long - 2^40 SEQ
 * numbers, or about 2^40 client mounts, if clients create fewer than 2^24
 * files/mount.
 */
static inline __u64 fid_flatten(const struct lu_fid *fid)
{
        __u64 ino;
        __u64 seq;

        if (fid_is_igif(fid)) {
                ino = lu_igif_ino(fid);
                return ino;
        }

        seq = fid_seq(fid);

        ino = (seq << 24) + ((seq >> 24) & 0xffffff0000ULL) + fid_oid(fid);

        return ino ?: fid_oid(fid);
}

static inline __u32 fid_hash(const struct lu_fid *f, int bits)
{
        /* all objects with the same ID and different versions will belong to
         * the same collision list */
        return hash_long(fid_flatten(f), bits);
}

/**
 * map fid to 32 bit value for ino on 32bit systems.
 */
static inline __u32 fid_flatten32(const struct lu_fid *fid)
{
        __u32 ino;
        __u64 seq;

        if (fid_is_igif(fid)) {
                ino = lu_igif_ino(fid);
                return ino;
        }

        seq = fid_seq(fid) - FID_SEQ_START;

        /* Map the high bits of the OID into higher bits of the inode number so
         * that inodes generated at about the same time have a reduced chance
         * of collisions. This will give a period of 2^12 = 4096 unique clients
         * (from SEQ) and up to min(LUSTRE_SEQ_MAX_WIDTH, 2^20) = 128k objects
         * (from OID), or up to 512M inodes without collisions for new files. */
        ino = ((seq & 0x000fffffULL) << 12) + ((seq >> 8) & 0xfffff000) +
              (seq >> (64 - (40 - 8)) & 0xffffff00) +
              (fid_oid(fid) & 0xff000fff) + ((fid_oid(fid) & 0x00fff000) << 8);

        return ino ?: fid_oid(fid);
}

static inline int
lu_fid_diff(const struct lu_fid *fid1, const struct lu_fid *fid2)
{
        LASSERTF(fid_seq(fid1) == fid_seq(fid2), "fid1:"DFID", fid2:"DFID"\n",
                 PFID(fid1), PFID(fid2));

        if (fid_is_idif(fid1) && fid_is_idif(fid2))
                return fid_idif_id(fid1->f_seq, fid1->f_oid, fid1->f_ver) -
                       fid_idif_id(fid2->f_seq, fid2->f_oid, fid2->f_ver);

        return fid_oid(fid1) - fid_oid(fid2);
}

static inline int fid_set_id(struct lu_fid *fid, u64 oid)
{
        if (unlikely(fid_seq_is_igif(fid->f_seq))) {
                CERROR("bad IGIF, "DFID"\n", PFID(fid));
                return -EBADF;
        }

        if (fid_is_idif(fid)) {
                if (oid >= IDIF_MAX_OID) {
                        CERROR("Too large OID %#llx to set IDIF "DFID"\n",
                               (unsigned long long)oid, PFID(fid));
                        return -EBADF;
                }
                fid->f_seq = fid_idif_seq(oid, fid_idif_ost_idx(fid));
                fid->f_oid = oid;
                fid->f_ver = oid >> 48;
        } else {
                if (oid > OBIF_MAX_OID) {
                        CERROR("Too large OID %#llx to set REG "DFID"\n",
                               (unsigned long long)oid, PFID(fid));
                        return -EBADF;
                }
                fid->f_oid = oid;
        }
        return 0;
}

#define LUSTRE_SEQ_SRV_NAME "seq_srv"
#define LUSTRE_SEQ_CTL_NAME "seq_ctl"

/* Range common stuff */
static inline void
range_cpu_to_le(struct lu_seq_range *dst, const struct lu_seq_range *src)
{
        dst->lsr_start = cpu_to_le64(src->lsr_start);
        dst->lsr_end = cpu_to_le64(src->lsr_end);
        dst->lsr_index = cpu_to_le32(src->lsr_index);
        dst->lsr_flags = cpu_to_le32(src->lsr_flags);
}

static inline void
range_le_to_cpu(struct lu_seq_range *dst, const struct lu_seq_range *src)
{
        dst->lsr_start = le64_to_cpu(src->lsr_start);
        dst->lsr_end = le64_to_cpu(src->lsr_end);
        dst->lsr_index = le32_to_cpu(src->lsr_index);
        dst->lsr_flags = le32_to_cpu(src->lsr_flags);
}

static inline void
range_cpu_to_be(struct lu_seq_range *dst, const struct lu_seq_range *src)
{
        dst->lsr_start = cpu_to_be64(src->lsr_start);
        dst->lsr_end = cpu_to_be64(src->lsr_end);
        dst->lsr_index = cpu_to_be32(src->lsr_index);
        dst->lsr_flags = cpu_to_be32(src->lsr_flags);
}

static inline void
range_be_to_cpu(struct lu_seq_range *dst, const struct lu_seq_range *src)
{
        dst->lsr_start = be64_to_cpu(src->lsr_start);
        dst->lsr_end = be64_to_cpu(src->lsr_end);
        dst->lsr_index = be32_to_cpu(src->lsr_index);
        dst->lsr_flags = be32_to_cpu(src->lsr_flags);
}

static inline void range_array_cpu_to_le(struct lu_seq_range_array *dst,
                                         const struct lu_seq_range_array *src)
{
        __u32 i;

        for (i = 0; i < src->lsra_count; i++)
                range_cpu_to_le(&dst->lsra_lsr[i], &src->lsra_lsr[i]);

        dst->lsra_count = cpu_to_le32(src->lsra_count);
}

static inline void range_array_le_to_cpu(struct lu_seq_range_array *dst,
                                         const struct lu_seq_range_array *src)
{
        __u32 i;

        dst->lsra_count = le32_to_cpu(src->lsra_count);
        for (i = 0; i < dst->lsra_count; i++)
                range_le_to_cpu(&dst->lsra_lsr[i], &src->lsra_lsr[i]);
}

#endif /* __LUSTRE_FID_H */