/*
 * This is how many FIDs may be allocated in one sequence.
*/
- LUSTRE_SEQ_MAX_WIDTH = 0x0000000000004000ULL,
+ LUSTRE_SEQ_MAX_WIDTH = 0x0000000000020000ULL,
};
enum lu_cli_type {
static int ll_readdir_20(struct file *filp, void *cookie, filldir_t filldir)
{
struct inode *inode = filp->f_dentry->d_inode;
- struct ll_sb_info *sbi = ll_i2sbi(inode);
__u64 pos = filp->f_pos;
struct page *page;
struct ll_dir_chain chain;
char *name;
int namelen;
struct lu_fid fid;
- ino_t ino;
+ __u64 ino;
hash = le64_to_cpu(ent->lde_hash);
namelen = le16_to_cpu(ent->lde_namelen);
fid = ent->lde_fid;
name = ent->lde_name;
fid_le_to_cpu(&fid, &fid);
- ino = ll_fid_build_ino(sbi, (struct ll_fid*)&fid);
+ if (cfs_curproc_is_32bit())
+ ino = ll_fid_build_ino32((struct ll_fid *)&fid);
+ else
+ ino = ll_fid_build_ino((struct ll_fid *)&fid);
+
type = ll_dirent_type_get(ent);
done = filldir(cookie, name, namelen,
(loff_t)hash, ino, type);
struct lookup_intent *it, struct kstat *stat)
{
struct inode *inode = de->d_inode;
+ struct ll_inode_info *lli = ll_i2info(inode);
int res = 0;
res = ll_inode_revalidate_it(de, it);
return res;
stat->dev = inode->i_sb->s_dev;
- stat->ino = inode->i_ino;
+ if (cfs_curproc_is_32bit())
+ stat->ino = ll_fid_build_ino32((struct ll_fid *)&lli->lli_fid);
+ else
+ stat->ino = inode->i_ino;
stat->mode = inode->i_mode;
stat->nlink = inode->i_nlink;
stat->uid = inode->i_uid;
void *ll_iocontrol_register(llioc_callback_t cb, int count, unsigned int *cmd);
void ll_iocontrol_unregister(void *magic);
-ino_t ll_fid_build_ino(struct ll_sb_info *sbi,
- struct ll_fid *fid);
+__u64 ll_fid_build_ino(const struct ll_fid *fid);
+__u32 ll_fid_build_ino32(const struct ll_fid *fid);
__u32 ll_fid_build_gen(struct ll_sb_info *sbi,
struct ll_fid *fid);
}
LASSERT(sbi->ll_rootino != 0);
- root = ll_iget(sb, ll_fid_build_ino(sbi, &rootfid), &md);
+ root = ll_iget(sb, ll_fid_build_ino(&rootfid), &md);
ptlrpc_req_finished(request);
}
#endif
- inode->i_ino = ll_fid_build_ino(sbi, &body->fid1);
+ inode->i_ino = ll_fid_build_ino(&body->fid1);
inode->i_generation = ll_fid_build_gen(sbi, &body->fid1);
*ll_inode_lu_fid(inode) = *((struct lu_fid*)&md->body->fid1);
/** hashing VFS inode by FIDs.
 * IGIF will be used for compatibility if needed.
*/
- *inode =ll_iget(sb, ll_fid_build_ino(sbi, &md.body->fid1), &md);
+ *inode =ll_iget(sb, ll_fid_build_ino(&md.body->fid1), &md);
if (*inode == NULL || is_bad_inode(*inode)) {
mdc_free_lustre_md(exp, &md);
rc = -ENOMEM;
RETURN(0);
}
-/* Get an inode by inode number (already instantiated by the intent lookup).
- * Returns inode or NULL
- */
-
+/**
+ * Flatten 128-bit FID values into a 64-bit value for
+ * use as an inode number. For non-IGIF FIDs this
+ * starts just over 2^32, and continues without conflict
+ * until 2^64, at which point we wrap the high 32 bits
+ * of the SEQ into the range where there may not be many
+ * OID values in use, to minimize the risk of conflict.
+ *
+ * The time between re-used inode numbers is very long -
+ * 2^32 SEQ numbers, or about 2^32 client mounts. */
static inline __u64 fid_flatten(const struct lu_fid *fid)
-{
- return (fid_seq(fid) - 1) * LUSTRE_SEQ_MAX_WIDTH + fid_oid(fid);
+{
+ __u64 ino;
+ __u64 seq;
+
+ if (fid_is_igif(fid)) {
+ ino = lu_igif_ino(fid);
+ RETURN(ino);
+ }
+
+ seq = fid_seq(fid);
+
+ ino = (seq << 24) + ((seq >> (64-8)) & 0xffffff0000ULL) + fid_oid(fid);
+
+ RETURN(ino ? ino : fid_oid(fid));
}
-/* Build inode number on passed @fid */
-ino_t ll_fid_build_ino(struct ll_sb_info *sbi,
- struct ll_fid *fid)
+
+/**
+ * map fid to 32 bit value for ino on 32bit systems. */
+static inline __u32 fid_flatten32(const struct lu_fid *fid)
{
- ino_t ino;
- ENTRY;
+ __u32 ino;
+ __u64 seq;
- if (fid_is_igif((struct lu_fid*)fid)) {
- ino = lu_igif_ino((struct lu_fid*)fid);
+ if (fid_is_igif(fid)) {
+ ino = lu_igif_ino(fid);
RETURN(ino);
}
+ seq = fid_seq(fid) - FID_SEQ_START;
+
/*
- * Very stupid and having many downsides inode allocation algorithm
- * based on fid.
- */
- ino = fid_flatten((struct lu_fid*)fid) & 0xFFFFFFFF;
+ map the high bits of the OID into higher bits of the inode number so that
+ inodes generated at about the same time have a reduced chance of collisions.
+ This will give a period of 1024 clients and 128 k = 128M inodes without collisions.
+ */
+
+ ino = ((seq & 0x000fffffULL) << 12) + ((seq >> 8) & 0xfffff000) +
+ (seq >> (64 - (40-8)) & 0xffffff00) +
+ (fid_oid(fid) & 0xff000fff) + ((fid_oid(fid) & 0x00fff000) << 16);
- if (unlikely(ino == 0))
- /* the first result ino is 0xFFC001, so this is rarely used */
- ino = 0xffbcde;
- ino = ino | 0x80000000;
- RETURN(ino);
+ RETURN(ino ? ino : fid_oid(fid));
+}
+/**
+ * for 32 bit inode numbers directly map seq+oid to 32bit number.
+ */
+__u32 ll_fid_build_ino32(const struct ll_fid *fid)
+{
+ RETURN(fid_flatten32((struct lu_fid *)fid));
+}
+
+/**
+ * build inode number from passed @fid */
+__u64 ll_fid_build_ino(const struct ll_fid *fid)
+{
+#if BITS_PER_LONG == 32
+ RETURN(fid_flatten32((struct lu_fid *)fid));
+#else
+ RETURN(fid_flatten((struct lu_fid *)fid));
+#endif
}
__u32 ll_fid_build_gen(struct ll_sb_info *sbi, struct ll_fid *fid)