* returned page, page hash collision has to be handled. Pages in the
* hash chain, except first one, are termed "overflow pages".
*
- * Solution to index uniqueness problem is to not cache overflow
- * pages. Instead, when page hash collision is detected, all overflow pages
- * from emerging chain are immediately requested from the server and placed in
- * a special data structure (struct ll_dir_chain). This data structure is used
- * by ll_readdir() to process entries from overflow pages. When readdir
- * invocation finishes, overflow pages are discarded. If page hash collision
- * chain weren't completely processed, next call to readdir will again detect
- * page hash collision, again read overflow pages in, process next portion of
- * entries and again discard the pages. This is not as wasteful as it looks,
- * because, given reasonable hash, page hash collisions are extremely rare.
+ * Proposed (unimplemented) solution to index uniqueness problem is to
+ * not cache overflow pages. Instead, when page hash collision is
+ * detected, all overflow pages from emerging chain should be
+ * immediately requested from the server and placed in a special data
+ * structure. This data structure can be used by ll_readdir() to
+ * process entries from overflow pages. When readdir invocation
+ * finishes, overflow pages are discarded. If the page hash collision chain
+ * wasn't completely processed, the next call to readdir will again detect
+ * page hash collision, again read overflow pages in, process next
+ * portion of entries and again discard the pages. This is not as
+ * wasteful as it looks, because, given reasonable hash, page hash
+ * collisions are extremely rare.
*
* 1. directory positioning
*
*
*/
struct page *ll_get_dir_page(struct inode *dir, struct md_op_data *op_data,
- __u64 offset, struct ll_dir_chain *chain)
+ __u64 offset)
{
struct md_callback cb_op;
struct page *page;
bool is_api32 = ll_need_32bit_api(sbi);
bool is_hash64 = sbi->ll_flags & LL_SBI_64BIT_HASH;
struct page *page;
- struct ll_dir_chain chain;
bool done = false;
int rc = 0;
ENTRY;
- ll_dir_chain_init(&chain);
-
- page = ll_get_dir_page(inode, op_data, pos, &chain);
+ page = ll_get_dir_page(inode, op_data, pos);
while (rc == 0 && !done) {
struct lu_dirpage *dp;
lhash = hash;
fid_le_to_cpu(&fid, &ent->lde_fid);
ino = cl_fid_build_ino(&fid, is_api32);
- type = IFTODT(lu_dirent_type_get(ent));
+ type = S_DT(lu_dirent_type_get(ent));
/* For ll_nfs_get_name_filldir(), it will try to access
* 'ent' through 'lde_name', so the parameter 'name'
* for 'filldir()' must be part of the 'ent'. */
le32_to_cpu(dp->ldp_flags) &
LDF_COLLIDE);
next = pos;
- page = ll_get_dir_page(inode, op_data, pos,
- &chain);
+ page = ll_get_dir_page(inode, op_data, pos);
}
}
#ifdef HAVE_DIR_CONTEXT
#else
*ppos = pos;
#endif
- ll_dir_chain_fini(&chain);
RETURN(rc);
}
}
out_inode:
- if (inode != NULL)
- iput(inode);
+ iput(inode);
out_request:
ptlrpc_req_finished(request);
out_op_data:
case LUSTRE_Q_SETDEFAULT:
case LUSTRE_Q_SETQUOTAPOOL:
case LUSTRE_Q_SETINFOPOOL:
- if (!cfs_capable(CAP_SYS_ADMIN))
+ if (!capable(CAP_SYS_ADMIN))
RETURN(-EPERM);
break;
case Q_GETQUOTA:
case LUSTRE_Q_GETDEFAULT:
case LUSTRE_Q_GETQUOTAPOOL:
if (check_owner(type, id) &&
- (!cfs_capable(CAP_SYS_ADMIN)))
+ (!capable(CAP_SYS_ADMIN)))
RETURN(-EPERM);
break;
case Q_GETINFO:
int i, rc, *rcs = NULL;
ENTRY;
- if (!cfs_capable(CAP_DAC_READ_SEARCH) &&
+ if (!capable(CAP_DAC_READ_SEARCH) &&
!(ll_i2sbi(file_inode(file))->ll_flags & LL_SBI_USER_FID2PATH))
RETURN(-EPERM);
/* Only need to get the buflen */
if (copy_from_user(&lumv1, lumv1p, sizeof(lumv1)))
RETURN(-EFAULT);
- if (inode->i_sb->s_root == file_dentry(file))
+ if (is_root_inode(inode))
set_default = 1;
switch (lumv1.lmm_magic) {
RETURN(rc);
}
case LL_IOC_HSM_CT_START:
- if (!cfs_capable(CAP_SYS_ADMIN))
+ if (!capable(CAP_SYS_ADMIN))
RETURN(-EPERM);
rc = copy_and_ct_start(cmd, sbi->ll_md_exp,