* Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*
- * Copyright (c) 2011, 2013, Intel Corporation.
+ * Copyright (c) 2011, 2014, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
#define DEBUG_SUBSYSTEM S_LLITE
#include <linux/module.h>
+#include <linux/statfs.h>
#include <linux/types.h>
#include <linux/version.h>
#include <linux/mm.h>
#include <lustre_ioctl.h>
-#include <lustre_lite.h>
#include <lustre_ha.h>
#include <lustre_dlm.h>
#include <lprocfs_status.h>
/* initialize ll_cache data */
atomic_set(&sbi->ll_cache.ccc_users, 0);
sbi->ll_cache.ccc_lru_max = lru_page_max;
- atomic_set(&sbi->ll_cache.ccc_lru_left, lru_page_max);
+ atomic_long_set(&sbi->ll_cache.ccc_lru_left, lru_page_max);
spin_lock_init(&sbi->ll_cache.ccc_lru_lock);
INIT_LIST_HEAD(&sbi->ll_cache.ccc_lru);
/* turn unstable check off by default as it impacts performance */
sbi->ll_cache.ccc_unstable_check = 0;
- atomic_set(&sbi->ll_cache.ccc_unstable_nr, 0);
+ atomic_long_set(&sbi->ll_cache.ccc_unstable_nr, 0);
init_waitqueue_head(&sbi->ll_cache.ccc_unstable_waitq);
sbi->ll_ra_info.ra_max_pages_per_file = min(pages / 32,
sbi->ll_sa_max = LL_SA_RPC_DEF;
atomic_set(&sbi->ll_sa_total, 0);
atomic_set(&sbi->ll_sa_wrong, 0);
+ atomic_set(&sbi->ll_sa_running, 0);
atomic_set(&sbi->ll_agl_total, 0);
sbi->ll_flags |= LL_SBI_AGL_ENABLED;
static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
struct vfsmount *mnt)
{
- struct inode *root = 0;
+ struct inode *root = NULL;
struct ll_sb_info *sbi = ll_s2sbi(sb);
struct obd_device *obd;
struct obd_capa *oc = NULL;
struct obd_uuid *uuid;
struct md_op_data *op_data;
struct lustre_md lmd;
- obd_valid valid;
+ u64 valid;
int size, err, checksum;
ENTRY;
OBD_CONNECT_MAX_EASIZE |
OBD_CONNECT_FLOCK_DEAD |
OBD_CONNECT_DISP_STRIPE | OBD_CONNECT_LFSCK |
- OBD_CONNECT_OPEN_BY_FID;
+ OBD_CONNECT_OPEN_BY_FID |
+ OBD_CONNECT_DIR_STRIPE;
if (sbi->ll_flags & LL_SBI_SOM_PREVIEW)
data->ocd_connect_flags |= OBD_CONNECT_SOM;
RETURN(rc);
}
+/**
+ * Get the value of the default_easize parameter.
+ *
+ * \see client_obd::cl_default_mds_easize
+ *
+ * \param[in] sbi superblock info for this filesystem
+ * \param[out] lmmsize pointer to storage location for value
+ *
+ * \retval 0 on success
+ * \retval negative negated errno on failure
+ */
int ll_get_default_mdsize(struct ll_sb_info *sbi, int *lmmsize)
{
int size, rc;
RETURN(rc);
}
+/**
+ * Set the default_easize parameter to the given value.
+ *
+ * \see client_obd::cl_default_mds_easize
+ *
+ * \param[in] sbi	superblock info for this filesystem
+ * \param[in] lmmsize	the size to set
+ *
+ * \retval 0		on success
+ * \retval negative	negated errno on failure
+ */
+int ll_set_default_mdsize(struct ll_sb_info *sbi, int lmmsize)
+{
+	int rc;
+
+	/* Compare against a signed bound: with the unsigned sizeof() on the
+	 * right-hand side, a negative lmmsize would be converted to a huge
+	 * size_t, pass the lower-bound check, and (being negative) also pass
+	 * the upper-bound check, reaching the MDT with a bogus size. */
+	if (lmmsize < (int)sizeof(struct lov_mds_md) ||
+	    lmmsize > OBD_MAX_DEFAULT_EA_SIZE)
+		return -EINVAL;
+
+	rc = obd_set_info_async(NULL, sbi->ll_md_exp,
+				sizeof(KEY_DEFAULT_EASIZE), KEY_DEFAULT_EASIZE,
+				sizeof(int), &lmmsize, NULL);
+
+	RETURN(rc);
+}
+
int ll_get_max_cookiesize(struct ll_sb_info *sbi, int *lmmsize)
{
int size, rc;
" flags=0x%x, fsdata=%p, %d subdirs\n", dentry,
dentry->d_name.len, dentry->d_name.name,
dentry->d_parent->d_name.len, dentry->d_parent->d_name.name,
- dentry->d_parent, dentry->d_inode, d_count(dentry),
+ dentry->d_parent, dentry->d_inode, ll_d_count(dentry),
dentry->d_flags, dentry->d_fsdata, subdirs);
if (dentry->d_inode != NULL)
ll_dump_inode(dentry->d_inode);
void ll_kill_super(struct super_block *sb)
{
- struct ll_sb_info *sbi;
-
- ENTRY;
+ struct ll_sb_info *sbi;
+ ENTRY;
/* not init sb ?*/
- if (!(sb->s_flags & MS_ACTIVE))
- return;
+ if (!(sb->s_flags & MS_ACTIVE))
+ return;
- sbi = ll_s2sbi(sb);
- /* we need restore s_dev from changed for clustred NFS before put_super
- * because new kernels have cached s_dev and change sb->s_dev in
- * put_super not affected real removing devices */
+ sbi = ll_s2sbi(sb);
+	/* We need to restore s_dev from its changed value for clustered NFS
+	 * before put_super, because newer kernels cache s_dev and changing
+	 * sb->s_dev in put_super does not affect the real device removal. */
if (sbi) {
sb->s_dev = sbi->ll_sdev_orig;
sbi->ll_umounting = 1;
+
+ /* wait running statahead threads to quit */
+ while (atomic_read(&sbi->ll_sa_running) > 0)
+ schedule_timeout_and_set_state(TASK_UNINTERRUPTIBLE,
+ msecs_to_jiffies(MSEC_PER_SEC >> 3));
}
+
EXIT;
}
*flags &= ~tmp;
goto next;
}
-#if LUSTRE_VERSION_CODE < OBD_OCD_VERSION(2, 6, 51, 0)
- tmp = ll_set_opt("acl", s1, LL_SBI_ACL);
- if (tmp) {
- /* Ignore deprecated mount option. The client will
- * always try to mount with ACL support, whether this
- * is used depends on whether server supports it. */
- LCONSOLE_ERROR_MSG(0x152, "Ignoring deprecated "
- "mount option 'acl'.\n");
- goto next;
- }
- tmp = ll_set_opt("noacl", s1, LL_SBI_ACL);
+ tmp = ll_set_opt("remote_client", s1, LL_SBI_RMT_CLIENT);
if (tmp) {
- LCONSOLE_ERROR_MSG(0x152, "Ignoring deprecated "
- "mount option 'noacl'.\n");
+ *flags |= tmp;
goto next;
}
-#else
-#warning "{no}acl options have been deprecated since 1.8, please remove them"
-#endif
- tmp = ll_set_opt("remote_client", s1, LL_SBI_RMT_CLIENT);
- if (tmp) {
- *flags |= tmp;
- goto next;
- }
tmp = ll_set_opt("user_fid2path", s1, LL_SBI_USER_FID2PATH);
if (tmp) {
*flags |= tmp;
lli->lli_sai = NULL;
spin_lock_init(&lli->lli_sa_lock);
lli->lli_opendir_pid = 0;
+ lli->lli_sa_enabled = 0;
+ lli->lli_def_stripe_offset = -1;
} else {
mutex_init(&lli->lli_size_mutex);
lli->lli_symlink_name = NULL;
init_rwsem(&lli->lli_trunc_sem);
- mutex_init(&lli->lli_write_mutex);
+ range_lock_tree_init(&lli->lli_write_tree);
init_rwsem(&lli->lli_glimpse_sem);
lli->lli_glimpse_time = 0;
INIT_LIST_HEAD(&lli->lli_agl_list);
struct lustre_sb_info *lsi = s2lsi(sb);
struct ll_sb_info *sbi = ll_s2sbi(sb);
char *profilenm = get_profile_name(sb);
- int ccc_count, next, force = 1, rc = 0;
+ long ccc_count;
+ int next, force = 1, rc = 0;
ENTRY;
CDEBUG(D_VFSTRACE, "VFS Op: sb %p - %s\n", sb, profilenm);
if (force == 0) {
struct l_wait_info lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP, NULL);
rc = l_wait_event(sbi->ll_cache.ccc_unstable_waitq,
- atomic_read(&sbi->ll_cache.ccc_unstable_nr) == 0,
+ atomic_long_read(&sbi->ll_cache.ccc_unstable_nr) == 0,
&lwi);
}
- ccc_count = atomic_read(&sbi->ll_cache.ccc_unstable_nr);
+ ccc_count = atomic_long_read(&sbi->ll_cache.ccc_unstable_nr);
if (force == 0 && rc != -EINTR)
- LASSERTF(ccc_count == 0, "count: %i\n", ccc_count);
+ LASSERTF(ccc_count == 0, "count: %li\n", ccc_count);
/* We need to set force before the lov_disconnect in
}
}
- /* Here is where the lsm is being initialized(fill lmo_info) after
- * client retrieve MD stripe information from MDT. */
- return md_update_lsm_md(ll_i2mdexp(inode), lsm, md->body,
- ll_md_blocking_ast);
+ return 0;
}
static inline int lli_lsm_md_eq(const struct lmv_stripe_md *lsm_md1,
struct lmv_stripe_md *old_lsm = lli->lli_lsm_md;
int idx;
- CERROR("%s: lmv layout mismatch "DFID"(%p)/"DFID"(%p)"
+ CERROR("%s: inode "DFID"(%p)'s lmv layout mismatch (%p)/(%p)"
"magic:0x%x/0x%x stripe count: %d/%d master_mdt: %d/%d"
"hash_type:0x%x/0x%x layout: 0x%x/0x%x pool:%s/%s\n",
- ll_get_fsname(inode->i_sb, NULL, 0),
- PFID(&lsm->lsm_md_master_fid), lsm,
- PFID(&old_lsm->lsm_md_master_fid), old_lsm,
+ ll_get_fsname(inode->i_sb, NULL, 0), PFID(&lli->lli_fid),
+ inode, lsm, old_lsm,
lsm->lsm_md_magic, old_lsm->lsm_md_magic,
lsm->lsm_md_stripe_count,
old_lsm->lsm_md_stripe_count,
RETURN(-EIO);
}
- rc = md_update_lsm_md(ll_i2mdexp(inode), ll_i2info(inode)->lli_lsm_md,
- md->body, ll_md_blocking_ast);
-
- RETURN(rc);
+ RETURN(0);
}
void ll_clear_inode(struct inode *inode)
EXIT;
}
-int ll_md_setattr(struct dentry *dentry, struct md_op_data *op_data,
- struct md_open_data **mod)
+static int ll_md_setattr(struct dentry *dentry, struct md_op_data *op_data,
+ struct md_open_data **mod)
{
struct lustre_md md;
struct inode *inode = dentry->d_inode;
if (body->mbo_valid & OBD_MD_FLATIME) {
if (body->mbo_atime > LTIME_S(inode->i_atime))
LTIME_S(inode->i_atime) = body->mbo_atime;
- lli->lli_lvb.lvb_atime = body->mbo_atime;
+ lli->lli_atime = body->mbo_atime;
}
if (body->mbo_valid & OBD_MD_FLMTIME) {
LTIME_S(inode->i_mtime), body->mbo_mtime);
LTIME_S(inode->i_mtime) = body->mbo_mtime;
}
- lli->lli_lvb.lvb_mtime = body->mbo_mtime;
+ lli->lli_mtime = body->mbo_mtime;
}
if (body->mbo_valid & OBD_MD_FLCTIME) {
if (body->mbo_ctime > LTIME_S(inode->i_ctime))
LTIME_S(inode->i_ctime) = body->mbo_ctime;
- lli->lli_lvb.lvb_ctime = body->mbo_ctime;
+ lli->lli_ctime = body->mbo_ctime;
}
if (body->mbo_valid & OBD_MD_FLMODE)
void ll_delete_inode(struct inode *inode)
{
- struct cl_inode_info *lli = cl_i2info(inode);
+ struct ll_inode_info *lli = ll_i2info(inode);
ENTRY;
if (S_ISREG(inode->i_mode) && lli->lli_clob != NULL)
- /* discard all dirty pages before truncating them, required by
- * osc_extent implementation at LU-1030. */
- cl_sync_file_range(inode, 0, OBD_OBJECT_EOF,
- CL_FSYNC_DISCARD, 1);
-
- truncate_inode_pages(&inode->i_data, 0);
-
- /* Workaround for LU-118 */
- if (inode->i_data.nrpages) {
- spin_lock_irq(&inode->i_data.tree_lock);
- spin_unlock_irq(&inode->i_data.tree_lock);
- LASSERTF(inode->i_data.nrpages == 0,
- "inode="DFID"(%p) nrpages=%lu, see "
- "http://jira.whamcloud.com/browse/LU-118\n",
- PFID(ll_inode2fid(inode)), inode,
- inode->i_data.nrpages);
- }
- /* Workaround end */
+	/* This is the last chance to write out dirty pages;
+	 * otherwise we may lose data during umount */
+ cl_sync_file_range(inode, 0, OBD_OBJECT_EOF, CL_FSYNC_LOCAL, 1);
+
+ truncate_inode_pages_final(&inode->i_data);
+
+ LASSERTF(inode->i_data.nrpages == 0, "inode="DFID"(%p) nrpages=%lu, "
+ "see https://jira.hpdd.intel.com/browse/LU-118\n",
+ PFID(ll_inode2fid(inode)), inode, inode->i_data.nrpages);
#ifdef HAVE_SBOPS_EVICT_INODE
ll_clear_inode(inode);
ptlrpc_req_finished(req);
- RETURN(put_user(flags, (int *)arg));
+ RETURN(put_user(flags, (int __user *)arg));
}
case FSFILT_IOC_SETFLAGS: {
struct lov_stripe_md *lsm;
struct obd_info oinfo = { { { 0 } } };
struct md_op_data *op_data;
- if (get_user(flags, (int *)arg))
- RETURN(-EFAULT);
+ if (get_user(flags, (int __user *)arg))
+ RETURN(-EFAULT);
op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0,
LUSTRE_OPC_ANY, NULL);
if (IS_ERR(op_data))
RETURN(PTR_ERR(op_data));
- ((struct ll_iattr *)&op_data->op_attr)->ia_attr_flags = flags;
+ op_data->op_attr_flags = flags;
op_data->op_attr.ia_valid |= ATTR_ATTR_FLAG;
rc = md_setattr(sbi->ll_md_exp, op_data,
NULL, 0, NULL, 0, &req, NULL);
return 0;
}
+/**
+ * Clean up an open handle that is cached on the MDT side.
+ *
+ * In the open case, the client-side open handling thread may hit an error
+ * after the MDT has already granted the open. In that case the client
+ * should send a close RPC to the MDT as cleanup; otherwise the open handle
+ * would be leaked on the MDT until the client umounts or is evicted.
+ *
+ * Furthermore, if someone unlinked the file, the leaked open handle would
+ * hold a reference on the file/object and block subsequent threads that
+ * want to locate that object via FID.
+ *
+ * \param[in] sb	super block for this file-system
+ * \param[in] open_req	pointer to the original open request
+ */
+void ll_open_cleanup(struct super_block *sb, struct ptlrpc_request *open_req)
+{
+	struct mdt_body *body;
+	struct md_op_data *op_data;
+	struct ptlrpc_request *close_req = NULL;
+	struct obd_export *exp = ll_s2sbi(sb)->ll_md_exp;
+	ENTRY;
+
+	/* NOTE(review): result is not NULL-checked before the mbo_fid1
+	 * dereferences below -- assumes an open reply always carries a
+	 * valid RMF_MDT_BODY; confirm against the request-capsule format. */
+	body = req_capsule_server_get(&open_req->rq_pill, &RMF_MDT_BODY);
+	OBD_ALLOC_PTR(op_data);
+	if (op_data == NULL) {
+		CWARN("%s: cannot allocate op_data to release open handle for "
+		      DFID"\n",
+		      ll_get_fsname(sb, NULL, 0), PFID(&body->mbo_fid1));
+
+		RETURN_EXIT;
+	}
+
+	/* Rebuild just enough state from the open reply for md_close() to
+	 * identify the handle on the MDT. */
+	op_data->op_fid1 = body->mbo_fid1;
+	op_data->op_ioepoch = body->mbo_ioepoch;
+	op_data->op_handle = body->mbo_handle;
+	op_data->op_mod_time = cfs_time_current_sec();
+	/* Best-effort cleanup: the md_close() return value is deliberately
+	 * ignored -- there is no caller to report the failure to. */
+	md_close(exp, op_data, NULL, &close_req);
+	ptlrpc_req_finished(close_req);
+	ll_finish_md_op_data(op_data);
+
+	EXIT;
+}
+
int ll_prep_inode(struct inode **inode, struct ptlrpc_request *req,
struct super_block *sb, struct lookup_intent *it)
{
struct ll_sb_info *sbi = NULL;
- struct lustre_md md = { 0 };
+ struct lustre_md md = { NULL };
int rc;
ENTRY;
- LASSERT(*inode || sb);
- sbi = sb ? ll_s2sbi(sb) : ll_i2sbi(*inode);
- rc = md_get_lustre_md(sbi->ll_md_exp, req, sbi->ll_dt_exp,
- sbi->ll_md_exp, &md);
- if (rc)
- RETURN(rc);
+ LASSERT(*inode || sb);
+ sbi = sb ? ll_s2sbi(sb) : ll_i2sbi(*inode);
+ rc = md_get_lustre_md(sbi->ll_md_exp, req, sbi->ll_dt_exp,
+ sbi->ll_md_exp, &md);
+ if (rc != 0)
+ GOTO(cleanup, rc);
if (*inode) {
rc = ll_update_inode(*inode, &md);
LDLM_LOCK_PUT(lock);
}
+ GOTO(out, rc = 0);
+
out:
if (md.lsm != NULL)
obd_free_memmd(sbi->ll_dt_exp, &md.lsm);
md_free_lustre_md(sbi->ll_md_exp, &md);
- RETURN(rc);
+
+cleanup:
+ if (rc != 0 && it != NULL && it->it_op & IT_OPEN)
+ ll_open_cleanup(sb != NULL ? sb : (*inode)->i_sb, req);
+
+ return rc;
}
-int ll_obd_statfs(struct inode *inode, void *arg)
+int ll_obd_statfs(struct inode *inode, void __user *arg)
{
struct ll_sb_info *sbi = NULL;
struct obd_export *exp;
char *buf = NULL;
struct obd_ioctl_data *data = NULL;
__u32 type;
- __u32 flags;
+ __u32 __user flags; /* not user, but obd_iocontrol is abused */
int len = 0, rc;
if (!inode || !(sbi = ll_i2sbi(inode)))
/* Note we have not called client_common_fill_super yet, so
proc fns must be able to handle that! */
- rc = class_process_proc_seq_param(PARAM_LLITE, lprocfs_llite_obd_vars,
- lcfg, sb);
+ rc = class_process_proc_param(PARAM_LLITE, lprocfs_llite_obd_vars,
+ lcfg, sb);
if (rc > 0)
rc = 0;
return rc;
/* this function prepares md_op_data hint for passing ot down to MD stack. */
struct md_op_data * ll_prep_md_op_data(struct md_op_data *op_data,
- struct inode *i1, struct inode *i2,
- const char *name, int namelen,
- int mode, __u32 opc, void *data)
+ struct inode *i1, struct inode *i2,
+ const char *name, size_t namelen,
+ __u32 mode, __u32 opc, void *data)
{
LASSERT(i1 != NULL);
ll_i2gids(op_data->op_suppgids, i1, i2);
op_data->op_fid1 = *ll_inode2fid(i1);
op_data->op_capa1 = ll_mdscapa_get(i1);
- if (S_ISDIR(i1->i_mode))
+ op_data->op_default_stripe_offset = -1;
+ if (S_ISDIR(i1->i_mode)) {
op_data->op_mea1 = ll_i2info(i1)->lli_lsm_md;
+ op_data->op_default_stripe_offset =
+ ll_i2info(i1)->lli_def_stripe_offset;
+ }
if (i2) {
op_data->op_fid2 = *ll_inode2fid(i2);
op_data->op_bias = 0;
op_data->op_cli_flags = 0;
if ((opc == LUSTRE_OPC_CREATE) && (name != NULL) &&
- filename_is_volatile(name, namelen, NULL))
+ filename_is_volatile(name, namelen, &op_data->op_mds)) {
op_data->op_bias |= MDS_CREATE_VOLATILE;
- op_data->op_mds = 0;
+ } else {
+ op_data->op_mds = 0;
+ }
op_data->op_data = data;
/* When called by ll_setattr_raw, file is i1. */
if (!obd)
RETURN(-ENOENT);
- if (copy_to_user((void *)arg, obd->obd_name,
+ if (copy_to_user((void __user *)arg, obd->obd_name,
strlen(obd->obd_name) + 1))
RETURN(-EFAULT);
{
char *buf, *path = NULL;
struct dentry *dentry = NULL;
- struct ccc_object *obj = cl_inode2ccc(page->mapping->host);
+ struct vvp_object *obj = cl_inode2vvp(page->mapping->host);
/* this can be called inside spin lock so use GFP_ATOMIC. */
buf = (char *)__get_free_page(GFP_ATOMIC);
"%s: dirty page discard: %s/fid: "DFID"/%s may get corrupted "
"(rc %d)\n", ll_get_fsname(page->mapping->host->i_sb, NULL, 0),
s2lsi(page->mapping->host->i_sb)->lsi_lmd->lmd_dev,
- PFID(&obj->cob_header.coh_lu.loh_fid),
+ PFID(&obj->vob_header.coh_lu.loh_fid),
(path && !IS_ERR(path)) ? path : "", ioret);
if (dentry != NULL)
free_page((unsigned long)buf);
}
+/**
+ * Copy a user-space lov_user_md into a newly allocated kernel buffer.
+ *
+ * The fixed-size header is copied first so ll_lov_user_md_size() can
+ * compute the full, magic/stripe-dependent size; the complete structure
+ * is then copied into a buffer of that size.
+ *
+ * \param[in]  md	user-space pointer to the lov_user_md to copy
+ * \param[out] kbuf	on success, set to the allocated kernel copy;
+ *			the caller must release it with OBD_FREE()
+ *
+ * \retval positive	allocated size on success
+ * \retval negative	negated errno on failure
+ */
+ssize_t ll_copy_user_md(const struct lov_user_md __user *md,
+			struct lov_user_md **kbuf)
+{
+	struct lov_user_md lum;
+	ssize_t lum_size;
+	ENTRY;
+
+	/* First pass: read only the header to learn the total size. */
+	if (copy_from_user(&lum, md, sizeof(lum)))
+		RETURN(-EFAULT);
+
+	lum_size = ll_lov_user_md_size(&lum);
+	if (lum_size < 0)
+		RETURN(lum_size);
+
+	OBD_ALLOC(*kbuf, lum_size);
+	if (*kbuf == NULL)
+		RETURN(-ENOMEM);
+
+	/* Second pass: re-read the whole structure. Even if userspace races
+	 * and changes the contents between the two copies, at most lum_size
+	 * bytes are read, so the kernel buffer cannot be overrun. */
+	if (copy_from_user(*kbuf, md, lum_size) != 0) {
+		OBD_FREE(*kbuf, lum_size);
+		RETURN(-EFAULT);
+	}
+
+	RETURN(lum_size);
+}
+
/*
* Compute llite root squash state after a change of root squash
* configuration setting or add/remove of a lnet nid
up_write(&squash->rsi_sem);
}
+/**
+ * Parse linkea content to extract information about a given hardlink.
+ *
+ * \param[in] ldata		initialized linkea data
+ * \param[in] linkno		link identifier (0-based index)
+ * \param[out] parent_fid	the entry's parent FID
+ * \param[out] ln		entry name destination buffer
+ *
+ * \retval 0 on success
+ * \retval appropriate negative error code on failure
+ */
+static int ll_linkea_decode(struct linkea_data *ldata, unsigned int linkno,
+			    struct lu_fid *parent_fid, struct lu_name *ln)
+{
+	unsigned int idx;
+	int rc;
+	ENTRY;
+
+	rc = linkea_init(ldata);
+	if (rc < 0)
+		RETURN(rc);
+
+	if (linkno >= ldata->ld_leh->leh_reccount)
+		/* beyond last link */
+		RETURN(-ENODATA);
+
+	linkea_first_entry(ldata);
+	for (idx = 0; ldata->ld_lee != NULL; idx++) {
+		/* Unpack BEFORE the comparison so that *ln and *parent_fid
+		 * are already filled in when idx == linkno and we break. */
+		linkea_entry_unpack(ldata->ld_lee, &ldata->ld_reclen, ln,
+				    parent_fid);
+		if (idx == linkno)
+			break;
+
+		linkea_next_entry(ldata);
+	}
+
+	/* Entries ran out before reaching linkno (reccount claimed more
+	 * links than were actually present). */
+	if (idx < linkno)
+		RETURN(-ENODATA);
+
+	RETURN(0);
+}
+
+/**
+ * Get parent FID and name of an identified link. The operation is performed
+ * for a given link number, letting the caller iterate over linkno to list
+ * one or all links of an entry.
+ *
+ * \param[in] file	file descriptor against which to perform the operation
+ * \param[in,out] arg	user-filled structure containing the linkno to operate
+ *			on and the available size. It is eventually filled
+ *			with the requested information or left untouched on
+ *			error
+ *
+ * \retval 0 on success
+ * \retval appropriate negative error code on failure
+ */
+int ll_getparent(struct file *file, struct getparent __user *arg)
+{
+	struct dentry *dentry = file->f_dentry;
+	struct inode *inode = file->f_dentry->d_inode;
+	struct linkea_data *ldata;
+	struct lu_buf buf = LU_BUF_NULL;
+	struct lu_name ln;
+	struct lu_fid parent_fid;
+	__u32 linkno;
+	__u32 name_size;
+	int rc;
+
+	ENTRY;
+
+	/* Parent/name info leaks namespace structure; require either the
+	 * DAC_READ_SEARCH capability or the user_fid2path mount option. */
+	if (!cfs_capable(CFS_CAP_DAC_READ_SEARCH) &&
+	    !(ll_i2sbi(inode)->ll_flags & LL_SBI_USER_FID2PATH))
+		RETURN(-EPERM);
+
+	if (get_user(name_size, &arg->gp_name_size))
+		RETURN(-EFAULT);
+
+	if (get_user(linkno, &arg->gp_linkno))
+		RETURN(-EFAULT);
+
+	/* Sanity-bound the user-supplied buffer size. */
+	if (name_size > PATH_MAX)
+		RETURN(-EINVAL);
+
+	OBD_ALLOC(ldata, sizeof(*ldata));
+	if (ldata == NULL)
+		RETURN(-ENOMEM);
+
+	/* Allocates buf.lb_buf and wires it into ldata; released via
+	 * lu_buf_free() below. */
+	rc = linkea_data_new(ldata, &buf);
+	if (rc < 0)
+		GOTO(ldata_free, rc);
+
+	/* NOTE(review): assumes linkea_data_new() sized the buffer large
+	 * enough for the link xattr; an -ERANGE from ll_getxattr is not
+	 * retried with a bigger buffer -- confirm this is intended. */
+	rc = ll_getxattr(dentry, XATTR_NAME_LINK, buf.lb_buf, buf.lb_len);
+	if (rc < 0)
+		GOTO(lb_free, rc);
+	rc = ll_linkea_decode(ldata, linkno, &parent_fid, &ln);
+	if (rc < 0)
+		GOTO(lb_free, rc);
+
+	/* Strict '>=' leaves room for the trailing NUL written below. */
+	if (ln.ln_namelen >= name_size)
+		GOTO(lb_free, rc = -EOVERFLOW);
+
+	if (copy_to_user(&arg->gp_fid, &parent_fid, sizeof(arg->gp_fid)))
+		GOTO(lb_free, rc = -EFAULT);
+
+	if (copy_to_user(&arg->gp_name, ln.ln_name, ln.ln_namelen))
+		GOTO(lb_free, rc = -EFAULT);
+
+	/* ln_name is not NUL-terminated in the linkea; terminate the copy. */
+	if (put_user('\0', arg->gp_name + ln.ln_namelen))
+		GOTO(lb_free, rc = -EFAULT);
+
+lb_free:
+	lu_buf_free(&buf);
+ldata_free:
+	OBD_FREE(ldata, sizeof(*ldata));
+
+	RETURN(rc);
+}