1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
6 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 only,
10 * as published by the Free Software Foundation.
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License version 2 for more details (a copy is included
16 * in the LICENSE file that accompanied this code).
18 * You should have received a copy of the GNU General Public License
19 * version 2 along with this program; If not, see
20 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
22 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
23 * CA 95054 USA or visit www.sun.com if you need additional information or
29 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
30 * Use is subject to license terms.
32 * Copyright (c) 2011, 2012, Whamcloud, Inc.
35 * This file is part of Lustre, http://www.lustre.org/
36 * Lustre is a trademark of Sun Microsystems, Inc.
38 * lustre/include/lustre_fid.h
40 * Author: Yury Umanets <umka@clusterfs.com>
54 #include <libcfs/libcfs.h>
55 #include <lustre/lustre_idl.h>
56 #include <lustre_req_layout.h>
57 #include <lustre_mdt.h>
63 /* Whole sequences space range and zero range definitions */
64 extern const struct lu_seq_range LUSTRE_SEQ_SPACE_RANGE;
65 extern const struct lu_seq_range LUSTRE_SEQ_ZERO_RANGE;
66 extern const struct lu_fid LUSTRE_BFL_FID;
67 extern const struct lu_fid LU_OBF_FID;
68 extern const struct lu_fid LU_DOT_LUSTRE_FID;
72 * This is how many FIDs may be allocated in one sequence (128k)
74 LUSTRE_SEQ_MAX_WIDTH = 0x0000000000020000ULL,
77 * How many sequences to allocate to a client at once.
79 LUSTRE_SEQ_META_WIDTH = 0x0000000000000001ULL,
82 * seq allocation pool size.
84 LUSTRE_SEQ_BATCH_WIDTH = LUSTRE_SEQ_META_WIDTH * 1000,
87 * This is how many sequences may be in one super-sequence allocated to
90 LUSTRE_SEQ_SUPER_WIDTH = ((1ULL << 30ULL) * LUSTRE_SEQ_META_WIDTH)
94 /** 2^6 FIDs for OI containers */
95 OSD_OI_FID_OID_BITS = 6,
96 /** reserve enough FIDs in case we want more in the future */
97 OSD_OI_FID_OID_BITS_MAX = 10,
100 /** special OID for local objects */
102 /** \see fld_mod_init */
104 /** \see fid_mod_init */
105 FID_SEQ_CTL_OID = 4UL,
106 FID_SEQ_SRV_OID = 5UL,
107 /** \see mdd_mod_init */
108 MDD_ROOT_INDEX_OID = 6UL,
109 MDD_ORPHAN_OID = 7UL,
110 MDD_LOV_OBJ_OID = 8UL,
111 MDD_CAPA_KEYS_OID = 9UL,
112 /** \see mdt_mod_init */
113 MDT_LAST_RECV_OID = 11UL,
114 OSD_FS_ROOT_OID = 13UL,
115 ACCT_USER_OID = 15UL,
116 ACCT_GROUP_OID = 16UL,
117 OFD_LAST_RECV_OID = 19UL,
118 OFD_GROUP0_LAST_OID = 20UL,
119 OFD_GROUP4K_LAST_OID = 20UL+4096,
120 OFD_LAST_GROUP_OID = 4117UL,
121 LLOG_CATALOGS_OID = 4118UL,
122 MGS_CONFIGS_OID = 4119UL,
123 OFD_HEALTH_CHECK_OID = 4120UL,
126 static inline void lu_local_obj_fid(struct lu_fid *fid, __u32 oid)
128 fid->f_seq = FID_SEQ_LOCAL_FILE;
135 LUSTRE_SEQ_CONTROLLER
143 struct lu_server_seq;
145 /* Client sequence manager interface. */
146 struct lu_client_seq {
147 /* Sequence-controller export. */
148 struct obd_export *lcs_exp;
149 cfs_mutex_t lcs_mutex;
152 * Range of sequences allowed for allocation. When using lu_client_seq on
153 * clients, this contains meta-sequence range. And for servers this
154 * contains super-sequence range.
156 struct lu_seq_range lcs_space;
158 /* Seq related proc */
159 cfs_proc_dir_entry_t *lcs_proc_dir;
161 /* This holds last allocated fid in last obtained seq */
162 struct lu_fid lcs_fid;
164 /* LUSTRE_SEQ_METADATA or LUSTRE_SEQ_DATA */
165 enum lu_cli_type lcs_type;
168 * Service uuid, passed from MDT + seq name to form unique seq name to
169 * use it with procfs.
174 * Sequence width, that is how many objects may be allocated in one
175 * sequence. Default value for it is LUSTRE_SEQ_MAX_WIDTH.
179 /* Seq-server for direct talking */
180 struct lu_server_seq *lcs_srv;
182 /* wait queue for fid allocation and update indicator */
183 cfs_waitq_t lcs_waitq;
187 /* server sequence manager interface */
188 struct lu_server_seq {
189 /* Available sequences space */
190 struct lu_seq_range lss_space;
192 /* keeps highwater in lsr_end for seq allocation algorithm */
193 struct lu_seq_range lss_lowater_set;
194 struct lu_seq_range lss_hiwater_set;
197 * Device for server side seq manager needs (saving sequences to backing
200 struct dt_device *lss_dev;
202 /* /seq file object device */
203 struct dt_object *lss_obj;
205 /* Seq related proc */
206 cfs_proc_dir_entry_t *lss_proc_dir;
208 /* LUSTRE_SEQ_SERVER or LUSTRE_SEQ_CONTROLLER */
209 enum lu_mgr_type lss_type;
211 /* Client interface to request controller */
212 struct lu_client_seq *lss_cli;
214 /* Mutex for protecting allocation */
215 cfs_mutex_t lss_mutex;
218 * Service uuid, passed from MDT + seq name to form unique seq name to
219 * use it with procfs.
224 * Allocation chunks for super and meta sequences. Default values are
225 * LUSTRE_SEQ_SUPER_WIDTH and LUSTRE_SEQ_META_WIDTH.
230 * minimum lss_alloc_set size that should be allocated from
235 /* sync is needed for update operation */
238 * Pointer to site object, required to access site fld.
240 struct md_site *lss_site;
243 int seq_query(struct com_thread_info *info);
246 int seq_server_init(struct lu_server_seq *seq,
247 struct dt_device *dev,
249 enum lu_mgr_type type,
251 const struct lu_env *env);
253 void seq_server_fini(struct lu_server_seq *seq,
254 const struct lu_env *env);
256 int seq_server_alloc_super(struct lu_server_seq *seq,
257 struct lu_seq_range *out,
258 const struct lu_env *env);
260 int seq_server_alloc_meta(struct lu_server_seq *seq,
261 struct lu_seq_range *out,
262 const struct lu_env *env);
264 int seq_server_set_cli(struct lu_server_seq *seq,
265 struct lu_client_seq *cli,
266 const struct lu_env *env);
269 int seq_client_init(struct lu_client_seq *seq,
270 struct obd_export *exp,
271 enum lu_cli_type type,
273 struct lu_server_seq *srv);
275 void seq_client_fini(struct lu_client_seq *seq);
277 void seq_client_flush(struct lu_client_seq *seq);
279 int seq_client_alloc_fid(const struct lu_env *env, struct lu_client_seq *seq,
281 int seq_client_get_seq(const struct lu_env *env, struct lu_client_seq *seq,
284 /* Fids common stuff */
285 int fid_is_local(const struct lu_env *env,
286 struct lu_site *site, const struct lu_fid *fid);
290 struct ldlm_namespace;
/**
 * Build (DLM) resource name from FID.
 *
 * NOTE: until Lustre 1.8.7/2.1.1 the fid_ver() was packed into name[2],
 * but was moved into name[1] along with the OID to avoid consuming the
 * remaining name[2,3] fields that need to be used for the quota identifier.
 */
299 static inline struct ldlm_res_id *
300 fid_build_reg_res_name(const struct lu_fid *f,
301 struct ldlm_res_id *name)
303 memset(name, 0, sizeof *name);
304 name->name[LUSTRE_RES_ID_SEQ_OFF] = fid_seq(f);
305 name->name[LUSTRE_RES_ID_VER_OID_OFF] = fid_ver_oid(f);
310 * Return true if resource is for object identified by fid.
312 static inline int fid_res_name_eq(const struct lu_fid *f,
313 const struct ldlm_res_id *name)
315 return name->name[LUSTRE_RES_ID_SEQ_OFF] == fid_seq(f) &&
316 name->name[LUSTRE_RES_ID_VER_OID_OFF] == fid_ver_oid(f);
320 static inline struct ldlm_res_id *
321 fid_build_pdo_res_name(const struct lu_fid *f,
323 struct ldlm_res_id *name)
325 fid_build_reg_res_name(f, name);
326 name->name[LUSTRE_RES_ID_HSH_OFF] = hash;
/**
 * Flatten a 128-bit FID value into a 64-bit value for use as an inode number.
 * For non-IGIF FIDs this starts just over 2^32, and continues without
 * conflict until 2^64, at which point we wrap the high 24 bits of the SEQ
 * into the range where there may not be many OID values in use, to minimize
 * the risk of conflict.
 *
 * Assuming LUSTRE_SEQ_MAX_WIDTH is less than (1 << 24), which is currently
 * true, the time between re-used inode numbers is very long - 2^40 SEQ
 * numbers, or about 2^40 client mounts, if clients create fewer than 2^24
 * files per mount.
 */
342 static inline __u64 fid_flatten(const struct lu_fid *fid)
347 if (fid_is_igif(fid)) {
348 ino = lu_igif_ino(fid);
354 ino = (seq << 24) + ((seq >> 24) & 0xffffff0000ULL) + fid_oid(fid);
356 RETURN(ino ? ino : fid_oid(fid));
359 static inline __u32 fid_hash(const struct lu_fid *f, int bits)
361 /* all objects with same id and different versions will belong to same
362 * collisions list. */
363 return cfs_hash_long(fid_flatten(f), bits);
/** Map a FID to a 32-bit value for use as an inode number on 32-bit systems. */
368 static inline __u32 fid_flatten32(const struct lu_fid *fid)
373 if (fid_is_igif(fid)) {
374 ino = lu_igif_ino(fid);
378 seq = fid_seq(fid) - FID_SEQ_START;
380 /* Map the high bits of the OID into higher bits of the inode number so
381 * that inodes generated at about the same time have a reduced chance
382 * of collisions. This will give a period of 2^12 = 1024 unique clients
383 * (from SEQ) and up to min(LUSTRE_SEQ_MAX_WIDTH, 2^20) = 128k objects
384 * (from OID), or up to 128M inodes without collisions for new files. */
385 ino = ((seq & 0x000fffffULL) << 12) + ((seq >> 8) & 0xfffff000) +
386 (seq >> (64 - (40-8)) & 0xffffff00) +
387 (fid_oid(fid) & 0xff000fff) + ((fid_oid(fid) & 0x00fff000) << 8);
389 RETURN(ino ? ino : fid_oid(fid));
392 #define LUSTRE_SEQ_SRV_NAME "seq_srv"
393 #define LUSTRE_SEQ_CTL_NAME "seq_ctl"
395 /* Range common stuff */
396 static inline void range_cpu_to_le(struct lu_seq_range *dst, const struct lu_seq_range *src)
398 dst->lsr_start = cpu_to_le64(src->lsr_start);
399 dst->lsr_end = cpu_to_le64(src->lsr_end);
400 dst->lsr_index = cpu_to_le32(src->lsr_index);
401 dst->lsr_flags = cpu_to_le32(src->lsr_flags);
404 static inline void range_le_to_cpu(struct lu_seq_range *dst, const struct lu_seq_range *src)
406 dst->lsr_start = le64_to_cpu(src->lsr_start);
407 dst->lsr_end = le64_to_cpu(src->lsr_end);
408 dst->lsr_index = le32_to_cpu(src->lsr_index);
409 dst->lsr_flags = le32_to_cpu(src->lsr_flags);
412 static inline void range_cpu_to_be(struct lu_seq_range *dst, const struct lu_seq_range *src)
414 dst->lsr_start = cpu_to_be64(src->lsr_start);
415 dst->lsr_end = cpu_to_be64(src->lsr_end);
416 dst->lsr_index = cpu_to_be32(src->lsr_index);
417 dst->lsr_flags = cpu_to_be32(src->lsr_flags);
420 static inline void range_be_to_cpu(struct lu_seq_range *dst, const struct lu_seq_range *src)
422 dst->lsr_start = be64_to_cpu(src->lsr_start);
423 dst->lsr_end = be64_to_cpu(src->lsr_end);
424 dst->lsr_index = be32_to_cpu(src->lsr_index);
425 dst->lsr_flags = be32_to_cpu(src->lsr_flags);
430 #endif /* __LINUX_FID_H */