static void ll_file_data_put(struct ll_file_data *fd)
{
- if (fd != NULL)
- OBD_SLAB_FREE_PTR(fd, ll_file_data_slab);
+ if (fd != NULL)
+ OBD_SLAB_FREE_PTR(fd, ll_file_data_slab);
}
-/**
- * Packs all the attributes into @op_data for the CLOSE rpc.
- */
+/* Packs all the attributes into @op_data for the CLOSE rpc. */
static void ll_prepare_close(struct inode *inode, struct md_op_data *op_data,
- struct obd_client_handle *och)
+ struct obd_client_handle *och)
{
ENTRY;
-
ll_prep_md_op_data(op_data, inode, NULL, NULL,
0, 0, LUSTRE_OPC_ANY, NULL);
test_and_clear_bit(LLIF_DATA_MODIFIED,
&ll_i2info(inode)->lli_flags))
/* For HSM: if inode data has been modified, pack it so that
- * MDT can set data dirty flag in the archive. */
+ * MDT can set data dirty flag in the archive.
+ */
op_data->op_bias |= MDS_DATA_MODIFIED;
EXIT;
}
-/**
+/*
* Perform a close, possibly with a bias.
* The meaning of "data" depends on the value of "bias".
*
struct md_op_data *op_data;
struct ptlrpc_request *req = NULL;
int rc;
- ENTRY;
+ ENTRY;
if (class_exp2obd(md_exp) == NULL) {
- CERROR("%s: invalid MDC connection handle closing "DFID"\n",
- ll_i2sbi(inode)->ll_fsname, PFID(&lli->lli_fid));
- GOTO(out, rc = 0);
+ rc = 0;
+ CERROR("%s: invalid MDC connection handle closing "DFID": rc = %d\n",
+ ll_i2sbi(inode)->ll_fsname, PFID(&lli->lli_fid), rc);
+ GOTO(out, rc);
}
OBD_ALLOC_PTR(op_data);
/* We leak openhandle and request here on error, but not much to be
- * done in OOM case since app won't retry close on error either. */
+ * done in OOM case since app won't retry close on error either.
+ */
if (op_data == NULL)
GOTO(out, rc = -ENOMEM);
struct obd_client_handle *och;
__u64 *och_usecount;
int rc = 0;
- ENTRY;
+ ENTRY;
if (fmode & FMODE_WRITE) {
och_p = &lli->lli_mds_write_och;
och_usecount = &lli->lli_open_fd_write_count;
mutex_lock(&lli->lli_och_mutex);
if (*och_usecount > 0) {
- /* There are still users of this handle, so skip
- * freeing it. */
+ /* There are still users of this handle, so skip freeing it */
mutex_unlock(&lli->lli_och_mutex);
RETURN(0);
}
mutex_unlock(&lli->lli_och_mutex);
if (och != NULL) {
- /* There might be a race and this handle may already
- * be closed. */
+ /* There might be a race and this handle may already be closed. */
rc = ll_close_inode_openhandle(inode, och, 0, NULL);
}
struct lustre_handle lockh;
enum ldlm_mode lockmode;
int rc = 0;
- ENTRY;
+ ENTRY;
/* clear group lock, if present */
if (unlikely(fd->fd_flags & LL_FILE_GROUP_LOCKED))
ll_put_grouplock(inode, file, fd->fd_grouplock.lg_gid);
mutex_unlock(&lli->lli_och_mutex);
/* Usually the lease is not released when the
- * application crashed, we need to release here. */
+ * application crashes, so we need to release it here.
+ */
rc = ll_lease_close(lease_och, inode, &lease_broken);
mutex_lock(&lli->lli_och_mutex);
GOTO(out, rc);
}
- /* Let's see if we have good enough OPEN lock on the file and if
- we can skip talking to MDS */
+ /* Let's see if we have a good enough OPEN lock on the file and if we
+ * can skip talking to the MDS
+ */
if (fd->fd_omode & FMODE_WRITE) {
lockmode = LCK_CW;
LASSERT(lli->lli_open_fd_write_count);
mutex_unlock(&lli->lli_och_mutex);
/* LU-4398: do not cache write open lock if the file has exec bit */
- if ((lockmode == LCK_CW && inode->i_mode & S_IXUGO) ||
+ if ((lockmode == LCK_CW && inode->i_mode & 0111) ||
!md_lock_match(ll_i2mdexp(inode), flags, ll_inode2fid(inode),
LDLM_IBITS, &policy, lockmode, &lockh))
rc = ll_md_real_close(inode, fd->fd_omode);
int rc;
ENTRY;
-
CDEBUG(D_VFSTRACE|D_IOTRACE,
"START file %s:"DFID"(%p), flags %o\n",
file_dentry(file)->d_name.name,
LASSERT(fd != NULL);
/* The last ref on @file, maybe not the owner pid of statahead,
- * because parent and child process can share the same file handle. */
+ * because parent and child process can share the same file handle.
+ */
if (S_ISDIR(inode->i_mode) &&
(lli->lli_opendir_key == fd || fd->fd_sai))
ll_deauthorize_statahead(inode, fd);
int rc;
ENTRY;
-
if (obj == NULL)
RETURN_EXIT;
vmpage = ll_read_cache_page(mapping, index + start,
ll_dom_read_folio, &lnb);
if (IS_ERR(vmpage)) {
- CWARN("%s: cannot fill page %lu for "DFID
- " with data: rc = %li\n",
+ CWARN("%s: cannot fill page %lu for "DFID" with data: rc = %li\n",
ll_i2sbi(inode)->ll_fsname, index + start,
PFID(lu_object_fid(&obj->co_lu)),
PTR_ERR(vmpage));
struct md_op_data *op_data;
struct ptlrpc_request *req = NULL;
int rc;
- ENTRY;
+ ENTRY;
LASSERT(parent != NULL);
LASSERT(itp->it_open_flags & MDS_OPEN_BY_FID);
/* if server supports open-by-fid, or file name is invalid, don't pack
- * name in open request */
+ * name in open request
+ */
if (CFS_FAIL_CHECK(OBD_FAIL_LLITE_OPEN_BY_NAME) ||
!(exp_connect_flags(sbi->ll_md_exp) & OBD_CONNECT_OPEN_BY_FID)) {
retry:
struct ll_file_data *fd, struct obd_client_handle *och)
{
struct inode *inode = file_inode(file);
- ENTRY;
+ ENTRY;
LASSERT(!file->private_data);
LASSERT(fd != NULL);
struct ll_file_data *fd;
ktime_t kstart = ktime_get();
int rc = 0;
- ENTRY;
+ ENTRY;
CDEBUG(D_VFSTRACE|D_IOTRACE,
"START file %s:"DFID"(%p), flags %o\n",
file_dentry(file)->d_name.name,
if (!it || !it->it_disposition) {
/* Convert f_flags into access mode. We cannot use file->f_mode,
- * because everything but O_ACCMODE mask was stripped from
- * there */
+ * because everything but O_ACCMODE mask was stripped from there
+ */
if ((oit.it_open_flags + 1) & O_ACCMODE)
oit.it_open_flags++;
if (file->f_flags & O_TRUNC)
oit.it_open_flags |= MDS_OPEN_OWNEROVERRIDE;
/* We do not want O_EXCL here, presumably we opened the file
- * already? XXX - NFS implications? */
+ * already? XXX - NFS implications?
+ */
oit.it_open_flags &= ~O_EXCL;
/* bug20584, if "it_open_flags" contains O_CREAT, file will be
* created if necessary, then "IT_CREAT" should be set to keep
- * consistent with it */
+ * consistent with it
+ */
if (oit.it_open_flags & O_CREAT)
oit.it_op |= IT_CREAT;
if (*och_p) { /* Open handle is present */
if (it_disposition(it, DISP_OPEN_OPEN)) {
/* Well, there's extra open request that we do not need,
- * let's close it somehow. This will decref request. */
+ * let's close it somehow. This will decref request.
+ */
rc = it_open_error(DISP_OPEN_OPEN, it);
if (rc) {
mutex_unlock(&lli->lli_och_mutex);
/* md_intent_lock() didn't get a request ref if there was an
* open error, so don't do cleanup on the request here
- * (bug 3430) */
- /* XXX (green): Should not we bail out on any error here, not
- * just open error? */
+ * (b=3430)
+ *
+ * XXX (green): Should not we bail out on any error here, not
+ * just open error?
+ */
rc = it_open_error(DISP_OPEN_OPEN, it);
if (rc != 0)
GOTO(out_och_free, rc);
fd = NULL;
/* Must do this outside lli_och_mutex lock to prevent deadlock where
- different kind of OPEN lock for this same inode gets cancelled
- by ldlm_cancel_lru */
+ * a different kind of OPEN lock for this same inode gets cancelled by
+ * ldlm_cancel_lru
+ */
if (!S_ISREG(inode->i_mode))
GOTO(out_och_free, rc);
cl_lov_delay_create_clear(&file->f_flags);
{
int rc;
struct lustre_handle lockh;
- ENTRY;
+ ENTRY;
switch (flag) {
case LDLM_CB_BLOCKING:
ldlm_lock2handle(lock, &lockh);
RETURN(0);
}
-/**
+/*
* When setting a lease on a file, we take ownership of the lli_mds_*_och
* and save it as fd->fd_och so as to force client to reopen the file even
* if it has an open lock in cache already.
struct obd_client_handle **och_p;
__u64 *och_usecount;
int rc = 0;
- ENTRY;
+ ENTRY;
/* Get the openhandle of the file */
mutex_lock(&lli->lli_och_mutex);
if (fd->fd_lease_och != NULL)
return rc;
}
-/**
- * Release ownership on lli_mds_*_och when putting back a file lease.
- */
+/* Release ownership on lli_mds_*_och when putting back a file lease. */
static int ll_lease_och_release(struct inode *inode, struct file *file)
{
struct ll_inode_info *lli = ll_i2info(inode);
struct obd_client_handle *old_och = NULL;
__u64 *och_usecount;
int rc = 0;
- ENTRY;
+ ENTRY;
mutex_lock(&lli->lli_och_mutex);
if (file->f_mode & FMODE_WRITE) {
och_p = &lli->lli_mds_write_och;
RETURN(rc);
}
-/**
- * Acquire a lease and open the file.
- */
+/* Acquire a lease and open the file. */
static struct obd_client_handle *
ll_lease_open(struct inode *inode, struct file *file, fmode_t fmode,
__u64 open_flags)
struct obd_client_handle *och = NULL;
int rc;
int rc2;
- ENTRY;
+ ENTRY;
if (fmode != FMODE_WRITE && fmode != FMODE_READ)
RETURN(ERR_PTR(-EINVAL));
* broken;
* LDLM_FL_EXCL: Set this flag so that it won't be matched by normal
* open in ll_md_blocking_ast(). Otherwise as ll_md_blocking_lease_ast
- * doesn't deal with openhandle, so normal openhandle will be leaked. */
+ * doesn't deal with openhandle, the normal openhandle will be leaked.
+ */
LDLM_FL_NO_LRU | LDLM_FL_EXCL);
ll_finish_md_op_data(op_data);
ptlrpc_req_finished(req);
if (!it.it_lock_mode ||
!(it.it_lock_bits & MDS_INODELOCK_OPEN)) {
/* open lock must return for lease */
- CERROR(DFID "lease granted but no open lock, %d/%llu.\n",
- PFID(ll_inode2fid(inode)), it.it_lock_mode,
- it.it_lock_bits);
- GOTO(out_close, rc = -EPROTO);
+ rc = -EPROTO;
+ CERROR("%s: "DFID" lease granted but no open lock, %d/%llu: rc = %d\n",
+ sbi->ll_fsname, PFID(ll_inode2fid(inode)),
+ it.it_lock_mode, it.it_lock_bits, rc);
+ GOTO(out_close, rc);
}
ll_intent_release(&it);
RETURN(ERR_PTR(rc));
}
-/**
+/*
* Check whether a layout swap can be done between two inodes.
*
* \param[in] inode1 First inode to check
struct swap_layouts_param slp;
const struct lu_fid *fid2;
int rc;
- ENTRY;
+ ENTRY;
CDEBUG(D_INODE, "%s: biased close of file "DFID"\n",
ll_i2sbi(inode)->ll_fsname, PFID(fid1));
/* Close the file and {swap,merge} layouts between inode & inode2.
* NB: local lease handle is released in mdc_close_intent_pack()
- * because we still need it to pack l_remote_handle to MDT. */
+ * because we still need it to pack l_remote_handle to MDT.
+ */
slp.slp_inode = inode2;
slp.slp_dv1 = lsl->sl_dv1;
slp.slp_dv2 = lsl->sl_dv2;
RETURN(rc);
}
-/**
+/*
* Release lease and close the file.
* It will check if the lease has ever broken.
*/
struct ldlm_lock *lock;
bool cancelled = true;
int rc;
- ENTRY;
+ ENTRY;
lock = ldlm_handle2lock(&och->och_lease_handle);
if (lock != NULL) {
lock_res_and_lock(lock);
return ll_lease_close_intent(och, inode, lease_broken, 0, NULL);
}
-/**
- * After lease is taken, send the RPC MDS_REINT_RESYNC to the MDT
- */
+/* After lease is taken, send the RPC MDS_REINT_RESYNC to the MDT */
static int ll_lease_file_resync(struct obd_client_handle *och,
struct inode *inode, void __user *uarg)
{
struct ll_ioc_lease_id ioc;
__u64 data_version_unused;
int rc;
- ENTRY;
+ ENTRY;
op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0,
LUSTRE_OPC_ANY, NULL);
if (IS_ERR(op_data))
/* before starting file resync, it's necessary to clean up page cache
* in client memory, otherwise once the layout version is increased,
- * writing back cached data will be denied the OSTs. */
+ * writing back cached data will be denied by the OSTs.
+ */
rc = ll_data_version(inode, &data_version_unused, LL_DV_WR_FLUSH);
if (rc)
GOTO(out, rc);
int rc = 0;
ENTRY;
-
/* Merge timestamps the most recently obtained from MDS with
* timestamps obtained from OSTs.
*
return rc;
}
-/**
+/*
* Set designated mirror for I/O.
*
* So far only read, write, and truncated can support to issue I/O to
struct ll_file_data *fd = file->private_data;
/* clear layout version for generic(non-resync) I/O in case it carries
- * stale layout version due to I/O restart */
+ * stale layout version due to I/O restart
+ */
io->ci_layout_version = 0;
/* FLR: disable non-delay for designated mirror I/O because obviously
- * only one mirror is available */
+ * only one mirror is available
+ */
if (fd->fd_designated_mirror > 0) {
io->ci_ndelay = 0;
io->ci_designated_mirror = fd->fd_designated_mirror;
file->f_path.dentry->d_name.name, io->ci_designated_mirror);
}
-/*
- * This is relatime_need_update() from Linux 5.17, which is not exported.
- */
+/* This is relatime_need_update() from Linux 5.17, which is not exported */
static int relatime_need_update(struct vfsmount *mnt, struct inode *inode,
struct timespec64 now)
{
if (!(mnt->mnt_flags & MNT_RELATIME))
return 1;
- /*
- * Is mtime younger than atime? If yes, update atime:
- */
+ /* Is mtime younger than atime? If yes, update atime: */
atime = inode_get_atime(inode);
ts = inode_get_mtime(inode);
if (timespec64_compare(&ts, &atime) >= 0)
return 1;
- /*
- * Is ctime younger than atime? If yes, update atime:
- */
+ /* Is ctime younger than atime? If yes, update atime: */
ts = inode_get_ctime(inode);
if (timespec64_compare(&ts, &atime) >= 0)
return 1;
- /*
- * Is the previous atime value older than a day? If yes,
- * update atime:
- */
+ /* Is the previous atime value older than a day? If yes, update atime */
if ((long)(now.tv_sec - atime.tv_sec) >= 24*60*60)
return 1;
- /*
- * Good, we can skip the atime update:
- */
+ /* Good, we can skip the atime update: */
return 0;
}
-/*
- * Very similar to kernel function: !__atime_needs_update()
- */
+/* Very similar to kernel function: !__atime_needs_update() */
static bool file_is_noatime(const struct file *file)
{
struct vfsmount *mnt = file->f_path.mnt;
io->ci_async_readahead = false;
/* FLR: only use non-delay I/O for read as there is only one
- * avaliable mirror for write. */
+ * available mirror for write.
+ */
io->ci_ndelay = !(iot == CIT_WRITE);
/* unaligned DIO has compat issues with some older servers, but we find
* out if there are such servers while setting up the IO, so it starts
int dio_switch = false;
ENTRY;
-
/* it doesn't make sense to switch unless it's READ or WRITE */
if (iot != CIT_WRITE && iot != CIT_READ)
RETURN(false);
int rc = 0;
ENTRY;
-
CDEBUG(D_VFSTRACE, "%s: %s ppos: %llu, bytes: %zu\n",
file_dentry(file)->d_name.name,
iot == CIT_READ ? "read" : "write", *ppos, bytes);
}
restart:
- /**
+ /*
* IO block size need be aware of cached page limit, otherwise
* if we have small max_cached_mb but large block IO issued, io
* could not be finished and blocked whole client.
args->u.normal.via_iter = vio->vui_iter;
if (partial_io) {
- /**
+ /*
* Reexpand iov count because it was zero
* after IO finish.
*/
*/
else if (is_aio) /* rc == -EIOCBQUEUED */
result = 0;
- /**
+ /*
* Drop the reference held by the llite layer on this top level
* IO context.
*
return 0;
/* NB: we can't do direct IO for fast read because it will need a lock
- * to make IO engine happy. */
+ * to make IO engine happy.
+ */
if (iocb_ki_flags_check(flags, DIRECT))
return 0;
int rc = 0;
ENTRY;
-
if (!obj)
RETURN(rc);
#endif /* HAVE_DIO_ITER */
}
-/*
- * Read from a file (through the page cache).
- */
+/* Read from a file (through the page cache) */
static ssize_t do_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
struct lu_env *env;
bool stale_data = false;
ENTRY;
-
CDEBUG(D_VFSTRACE|D_IOTRACE,
"START file %s:"DFID", ppos: %lld, count: %zu\n",
file_dentry(file)->d_name.name,
cl_env_put(env, &refcheck);
if (stale_data && result > 0) {
- /**
+ /*
* we've reached EOF before the read, the data read are cached
* stale data.
*/
return do_file_read_iter(iocb, iter);
}
-/**
+/*
* Similar trick to ll_do_fast_read, this improves write speed for tiny writes.
* If a page is already in the page cache and dirty (and some other things -
* See ll_tiny_write_begin for the instantiation of these rules), then we can
ssize_t result = 0;
ENTRY;
-
/* Restrict writes to single page and < PAGE_SIZE. See comment at top
* of function for why.
*/
RETURN(result);
}
-/*
- * Write to a file (through the page cache).
- */
+/* Write to a file (through the page cache). */
static ssize_t do_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
struct file *file = iocb->ki_filp;
int result;
ENTRY;
-
CDEBUG(D_VFSTRACE|D_IOTRACE,
"START file %s:"DFID", ppos: %lld, count: %zu\n",
file_dentry(file)->d_name.name,
if (!iov_iter_count(from))
GOTO(out, rc_normal = 0);
- /**
+ /*
* When PCC write failed, we usually do not fall back to the normal
* write path, just return the error. But there is a special case when
* returned error code is -ENOSPC due to running out of space on PCC HSM
*/
if (ll_sbi_has_tiny_write(ll_i2sbi(file_inode(file))) &&
!(flags &
- (ki_flag(DIRECT) | ki_flag(DSYNC) | ki_flag(SYNC) | ki_flag(APPEND))))
+ (ki_flag(DIRECT) | ki_flag(DSYNC) | ki_flag(SYNC) |
+ ki_flag(APPEND))))
rc_tiny = ll_do_tiny_write(iocb, from);
/* In case of error, go on and try normal write - Only stop if tiny
static ssize_t ll_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
unsigned long nr_segs, loff_t pos)
{
- struct iov_iter to;
+ struct iov_iter to;
size_t iov_count;
ssize_t result;
ENTRY;
-
result = ll_file_get_iov_count(iov, &nr_segs, &iov_count, VERIFY_READ);
if (result)
RETURN(result);
static ssize_t ll_file_read(struct file *file, char __user *buf, size_t count,
loff_t *ppos)
{
- struct iovec iov = { .iov_base = buf, .iov_len = count };
- struct kiocb kiocb;
- ssize_t result;
+ struct iovec iov = { .iov_base = buf, .iov_len = count };
+ struct kiocb kiocb;
+ ssize_t result;
ENTRY;
-
if (!count)
RETURN(0);
RETURN(result);
}
-/*
- * Write to a file (through the page cache).
- * AIO stuff
- */
+/* Write to a file (through the page cache). AIO stuff */
static ssize_t ll_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
unsigned long nr_segs, loff_t pos)
{
ssize_t result;
ENTRY;
-
result = ll_file_get_iov_count(iov, &nr_segs, &iov_count, VERIFY_WRITE);
if (result)
RETURN(result);
{
struct iovec iov = { .iov_base = (void __user *)buf,
.iov_len = count };
- struct kiocb kiocb;
- ssize_t result;
+ struct kiocb kiocb;
+ ssize_t result;
ENTRY;
-
if (!count)
RETURN(0);
.it_open_flags = flags | MDS_OPEN_BY_FID,
};
int rc;
- ENTRY;
+ ENTRY;
if ((__swab32(lum->lmm_magic) & le32_to_cpu(LOV_MAGIC_MASK)) ==
le32_to_cpu(LOV_MAGIC_MAGIC)) {
/* this code will only exist for big-endian systems */
}
int ll_lov_getstripe_ea_info(struct inode *inode, const char *filename,
- struct lov_mds_md **lmmp, int *lmm_size,
- struct ptlrpc_request **request)
+ struct lov_mds_md **lmmp, int *lmm_size,
+ struct ptlrpc_request **request)
{
struct ll_sb_info *sbi = ll_i2sbi(inode);
struct mdt_body *body;
int rc, lmmsize;
ENTRY;
-
rc = ll_get_default_mdsize(sbi, &lmmsize);
if (rc)
RETURN(rc);
lmm->lmm_stripe_count = v1->lmm_stripe_count;
lmm->lmm_stripe_size = v1->lmm_stripe_size;
- /**
+ /*
* Return valid stripe_count and stripe_size instead of 0 for
* DoM files to avoid divide-by-zero for older userspace that
* calls this ioctl, e.g. lustre ADIO driver.
struct lov_user_md *lump;
int lum_size = sizeof(*lump) + sizeof(struct lov_user_ost_data);
int rc;
- ENTRY;
+ ENTRY;
if (!capable(CAP_SYS_ADMIN))
RETURN(-EPERM);
struct lu_env *env;
__u16 refcheck;
int rc;
- ENTRY;
+ ENTRY;
/* exit before doing any work if pointer is bad */
if (unlikely(!ll_access_ok(lum, sizeof(struct lov_user_md))))
RETURN(-EFAULT);
struct lov_user_md *klum;
int lum_size, rc;
__u64 flags = FMODE_WRITE;
- ENTRY;
+ ENTRY;
rc = ll_copy_user_md(lum, &klum);
if (rc < 0)
RETURN(rc);
struct ll_file_data *fd = file->private_data;
struct ll_grouplock grouplock;
int rc;
+
ENTRY;
if (arg == 0) {
- CWARN("group id for group lock must not be 0\n");
- RETURN(-EINVAL);
+ rc = -EINVAL;
+ CWARN("%s: group id for group lock must not be 0: rc = %d\n",
+ ll_i2sbi(inode)->ll_fsname, rc);
+ RETURN(rc);
}
if (ll_file_nolock(file))
}
if (fd->fd_flags & LL_FILE_GROUP_LOCKED) {
- CWARN("group lock already existed with gid %lu\n",
- fd->fd_grouplock.lg_gid);
- GOTO(out, rc = -EINVAL);
+ rc = -EINVAL;
+ CWARN("%s: group lock already existed with gid %lu: rc = %d\n",
+ ll_i2sbi(inode)->ll_fsname, fd->fd_grouplock.lg_gid, rc);
+ GOTO(out, rc);
}
if (arg != lli->lli_group_gid && lli->lli_group_users != 0) {
if (file->f_flags & O_NONBLOCK)
}
LASSERT(fd->fd_grouplock.lg_lock == NULL);
- /**
+ /*
* XXX: group lock needs to protect all OST objects while PFL
* can add new OST objects during the IO, so we'd instantiate
* all OST objects before getting its group lock.
static int ll_put_grouplock(struct inode *inode, struct file *file,
unsigned long arg)
{
- struct ll_inode_info *lli = ll_i2info(inode);
- struct ll_file_data *fd = file->private_data;
- struct ll_grouplock grouplock;
- int rc;
- ENTRY;
+ struct ll_inode_info *lli = ll_i2info(inode);
+ struct ll_file_data *fd = file->private_data;
+ struct ll_grouplock grouplock;
+ int rc;
+ ENTRY;
mutex_lock(&lli->lli_group_mutex);
if (!(fd->fd_flags & LL_FILE_GROUP_LOCKED)) {
- CWARN("no group lock held\n");
- GOTO(out, rc = -EINVAL);
+ rc = -EINVAL;
+ CWARN("%s: no group lock held: rc = %d\n",
+ ll_i2sbi(inode)->ll_fsname, rc);
+ GOTO(out, rc);
}
LASSERT(fd->fd_grouplock.lg_lock != NULL);
if (fd->fd_grouplock.lg_gid != arg) {
- CWARN("group lock %lu doesn't match current id %lu\n",
- arg, fd->fd_grouplock.lg_gid);
- GOTO(out, rc = -EINVAL);
+ rc = -EINVAL;
+ CWARN("%s: group lock %lu not match current id %lu: rc = %d\n",
+ ll_i2sbi(inode)->ll_fsname, arg, fd->fd_grouplock.lg_gid,
+ rc);
+ GOTO(out, rc);
}
grouplock = fd->fd_grouplock;
struct inode *inode = dentry->d_inode;
struct obd_client_handle *och;
int rc;
- ENTRY;
+ ENTRY;
LASSERT(inode);
/* Root ? Do nothing. */
RETURN(rc);
}
-/**
+/*
* Get size for inode for which FIEMAP mapping is requested.
* Make the FIEMAP get_info call and returns the result.
* \param fiemap kernel buffer to hold extents
static int ll_do_fiemap(struct inode *inode, struct fiemap *fiemap,
size_t num_bytes)
{
- struct lu_env *env;
- __u16 refcheck;
- int rc = 0;
- struct ll_fiemap_info_key fmkey = { .lfik_name = KEY_FIEMAP, };
- ENTRY;
+ struct lu_env *env;
+ __u16 refcheck;
+ int rc = 0;
+ struct ll_fiemap_info_key fmkey = { .lfik_name = KEY_FIEMAP, };
+ ENTRY;
/* Checks for fiemap flags */
if (fiemap->fm_flags & ~LUSTRE_FIEMAP_FLAGS_COMPAT) {
fiemap->fm_flags &= ~LUSTRE_FIEMAP_FLAGS_COMPAT;
int rc = 0;
ENTRY;
-
if (!capable(CAP_DAC_READ_SEARCH) &&
!test_bit(LL_SBI_USER_FID2PATH, ll_i2sbi(inode)->ll_flags))
RETURN(-EPERM);
int result;
ENTRY;
-
ioc->idv_version = 0;
ioc->idv_layout_version = UINT_MAX;
return rc;
}
-/*
- * Trigger a HSM release request for the provided inode.
- */
+/* Trigger an HSM release request for the provided inode. */
int ll_hsm_release(struct inode *inode)
{
struct lu_env *env;
int rc;
ENTRY;
-
CDEBUG(D_INODE, "%s: Releasing file "DFID".\n",
ll_i2sbi(inode)->ll_fsname,
PFID(&ll_i2info(inode)->lli_fid));
if (rc != 0)
GOTO(out, rc);
- /* Release the file.
- * NB: lease lock handle is released in mdc_hsm_release_pack() because
- * we still need it to pack l_remote_handle to MDT. */
+ /* Release the file. NB: lease lock handle is released in
+ * mdc_hsm_release_pack() because we still need it to pack
+ * l_remote_handle to MDT.
+ */
rc = ll_close_inode_openhandle(inode, och, MDS_HSM_RELEASE,
&data_version);
och = NULL;
static int ll_swap_layouts(struct file *file1, struct file *file2,
struct lustre_swap_layouts *lsl)
{
- struct mdc_swap_layouts msl;
- struct md_op_data *op_data;
- __u32 gid;
- __u64 dv;
- struct ll_swap_stack *llss = NULL;
- int rc;
+ struct mdc_swap_layouts msl;
+ struct md_op_data *op_data;
+ __u32 gid;
+ __u64 dv;
+ struct ll_swap_stack *llss = NULL;
+ int rc;
OBD_ALLOC_PTR(llss);
if (llss == NULL)
}
/* ultimate check, before swapping the layouts we check if
- * dataversion has changed (if requested) */
+ * dataversion has changed (if requested)
+ */
if (llss->check_dv1) {
rc = ll_data_version(llss->inode1, &dv, 0);
if (rc)
/* struct md_op_data is used to send the swap args to the mdt
* only flags is missing, so we use struct mdc_swap_layouts
- * through the md_op_data->op_data */
- /* flags from user space have to be converted before they are send to
- * server, no flag is sent today, they are only used on the client */
+ * through the md_op_data->op_data
+ *
+ * flags from user space have to be converted before they are sent to
+ * server, no flag is sent today, they are only used on the client
+ */
msl.msl_flags = 0;
rc = -ENOMEM;
op_data = ll_prep_md_op_data(NULL, llss->inode1, llss->inode2, NULL, 0,
struct obd_export *exp = ll_i2mdexp(inode);
struct md_op_data *op_data;
int rc;
- ENTRY;
+ ENTRY;
/* Detect out-of range masks */
if ((hss->hss_setmask | hss->hss_clearmask) & ~HSM_FLAGS_MASK)
RETURN(-EINVAL);
/* Non-root users are forbidden to set or clear flags which are
- * NOT defined in HSM_USER_MASK. */
+ * NOT defined in HSM_USER_MASK.
+ */
if (((hss->hss_setmask | hss->hss_clearmask) & ~HSM_USER_MASK) &&
!capable(CAP_SYS_ADMIN))
RETURN(-EPERM);
static int ll_hsm_import(struct inode *inode, struct file *file,
struct hsm_user_import *hui)
{
- struct hsm_state_set *hss = NULL;
- struct iattr *attr = NULL;
- int rc;
- ENTRY;
+ struct hsm_state_set *hss = NULL;
+ struct iattr *attr = NULL;
+ int rc;
+ ENTRY;
if (!S_ISREG(inode->i_mode))
RETURN(-EINVAL);
if (attr == NULL)
GOTO(out, rc = -ENOMEM);
- attr->ia_mode = hui->hui_mode & (S_IRWXU | S_IRWXG | S_IRWXO);
+ attr->ia_mode = hui->hui_mode & 0777;
attr->ia_mode |= S_IFREG;
attr->ia_uid = make_kuid(&init_user_ns, hui->hui_uid);
attr->ia_gid = make_kgid(&init_user_ns, hui->hui_gid);
},
};
int rc;
- ENTRY;
+ ENTRY;
if (!capable(CAP_SYS_ADMIN))
RETURN(-EPERM);
__u16 refcheck;
ENTRY;
-
CDEBUG(D_VFSTRACE,
"Lock request: file=%pd, inode=%p, mode=%s start=%llu, end=%llu\n",
dentry, dentry->d_inode,
descr->cld_end = end >> PAGE_SHIFT;
descr->cld_mode = cl_mode;
/* CEF_MUST is used because we do not want to convert a
- * lockahead request to a lockless lock */
+ * lockahead request to a lockless lock
+ */
descr->cld_enq_flags = CEF_MUST | CEF_LOCK_NO_EXPAND;
if (ladvise->lla_peradvice_flags & LF_ASYNC)
* We convert them to positive values for userspace to make
* recognizing true errors easier.
* Note we can only return these detailed results on async requests,
- * as sync requests look the same as i/o requests for locking. */
+ * as sync requests look the same as i/o requests for locking.
+ */
if (result == -ECANCELED)
result = LLA_RESULT_DIFFERENT;
else if (result == -EEXIST)
struct ll_sb_info *sbi = ll_i2sbi(inode);
enum lu_ladvise_type advice = ladvise->lla_advice;
/* Note the peradvice flags is a 32 bit field, so per advice flags must
- * be in the first 32 bits of enum ladvise_flags */
+ * be in the first 32 bits of enum ladvise_flags
+ */
__u32 flags = ladvise->lla_peradvice_flags;
/* 3 lines at 80 characters per line, should be plenty */
int rc = 0;
case LU_LADVISE_LOCKNOEXPAND:
if (flags & ~LF_LOCKNOEXPAND_MASK) {
rc = -EINVAL;
- CDEBUG(D_VFSTRACE, "%s: Invalid flags (%x) for %s: "
- "rc = %d\n", sbi->ll_fsname, flags,
+ CDEBUG(D_VFSTRACE, "%s: Invalid flags (%x) for %s: rc = %d\n",
+ sbi->ll_fsname, flags,
ladvise_names[advice], rc);
GOTO(out, rc);
}
if (ladvise->lla_lockahead_mode >= MODE_MAX_USER ||
ladvise->lla_lockahead_mode == 0) {
rc = -EINVAL;
- CDEBUG(D_VFSTRACE, "%s: Invalid mode (%d) for %s: "
- "rc = %d\n", sbi->ll_fsname,
+ CDEBUG(D_VFSTRACE, "%s: Invalid mode (%d) for %s: rc = %d\n",
+ sbi->ll_fsname,
ladvise->lla_lockahead_mode,
ladvise_names[advice], rc);
GOTO(out, rc);
case LU_LADVISE_DONTNEED:
default:
/* Note fall through above - These checks apply to all advices
- * except LOCKNOEXPAND */
+ * except LOCKNOEXPAND
+ */
if (flags & ~LF_DEFAULT_MASK) {
rc = -EINVAL;
- CDEBUG(D_VFSTRACE, "%s: Invalid flags (%x) for %s: "
- "rc = %d\n", sbi->ll_fsname, flags,
+ CDEBUG(D_VFSTRACE, "%s: Invalid flags (%x) for %s: rc = %d\n",
+ sbi->ll_fsname, flags,
ladvise_names[advice], rc);
GOTO(out, rc);
}
if (ladvise->lla_start >= ladvise->lla_end) {
rc = -EINVAL;
- CDEBUG(D_VFSTRACE, "%s: Invalid range (%llu to %llu) "
- "for %s: rc = %d\n", sbi->ll_fsname,
+ CDEBUG(D_VFSTRACE, "%s: Invalid range (%llu to %llu) for %s: rc = %d\n",
+ sbi->ll_fsname,
ladvise->lla_start, ladvise->lla_end,
ladvise_names[advice], rc);
GOTO(out, rc);
struct cl_ladvise_io *lio;
int rc;
__u16 refcheck;
- ENTRY;
+ ENTRY;
env = cl_env_get(&refcheck);
if (IS_ERR(env))
RETURN(PTR_ERR(env));
struct fsxattr fsxattr;
ENTRY;
-
if (copy_from_user(&fsxattr, uarg, sizeof(fsxattr)))
RETURN(-EFAULT);
long rc, rc2 = 0;
ENTRY;
-
mutex_lock(&lli->lli_och_mutex);
if (fd->fd_lease_och != NULL) {
och = fd->fd_lease_och;
bool lease_broken;
fmode_t fmode;
long rc;
- ENTRY;
+ ENTRY;
switch (ioc->lil_mode) {
case LL_LEASE_WRLCK:
if (!(file->f_mode & FMODE_WRITE))
struct ll_file_data *fd = file->private_data;
void __user *uarg = (void __user *)arg;
int flags, rc;
- ENTRY;
+ ENTRY;
CDEBUG(D_VFSTRACE|D_IOCTL, "VFS Op:inode="DFID"(%pK) cmd=%x arg=%lx\n",
PFID(ll_inode2fid(inode)), inode, cmd, arg);
ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_IOCTL, 1);
CDEBUG(D_HSM,
"HSM current state %s action %s, offset = %llu, length %llu\n",
hsm_progress_state2name(hca->hca_state), action,
- hca->hca_location.offset, hca->hca_location.length);
+ hca->hca_location.offset,
+ hca->hca_location.length);
}
if (copy_to_user(uarg, hca, sizeof(*hca)))
}
case LL_IOC_FLR_SET_MIRROR: {
/* mirror I/O must be direct to avoid polluting page cache
- * by stale data. */
+ * by stale data.
+ */
if (!(file->f_flags & O_DIRECT))
RETURN(-EINVAL);
loff_t retval;
ENTRY;
-
env = cl_env_get(&refcheck);
if (IS_ERR(env))
RETURN(PTR_ERR(env));
ktime_t kstart = ktime_get();
ENTRY;
-
CDEBUG(D_VFSTRACE|D_IOTRACE,
"START file %s:"DFID", offset: %lld, type: %s\n",
file_dentry(file)->d_name.name,
LASSERT(!S_ISDIR(inode->i_mode));
/* catch async errors that were recorded back when async writeback
- * failed for pages in this mapping. */
+ * failed for pages in this mapping.
+ */
rc = lli->lli_async_rc;
lli->lli_async_rc = 0;
if (lli->lli_clob != NULL) {
}
/* The application has been told write failure already.
- * Do not report failure again. */
+ * Do not report failure again.
+ */
if (fd->fd_write_failed)
return 0;
return rc ? -EIO : 0;
}
-/**
+/*
* Called to make sure a portion of file has been written out.
* if @mode is not CL_FSYNC_LOCAL, it will send OST_SYNC RPCs to OST.
*
struct cl_fsync_io *fio;
int result;
__u16 refcheck;
- ENTRY;
+ ENTRY;
if (mode != CL_FSYNC_NONE && mode != CL_FSYNC_LOCAL &&
mode != CL_FSYNC_DISCARD && mode != CL_FSYNC_ALL &&
mode != CL_FSYNC_RECLAIM)
int rc, err;
ENTRY;
-
CDEBUG(D_VFSTRACE,
"VFS Op:inode="DFID"(%p), start %lld, end %lld, datasync %d\n",
PFID(ll_inode2fid(inode)), inode, start, end, datasync);
/* fsync's caller has already called _fdata{sync,write}, we want
- * that IO to finish before calling the osc and mdc sync methods */
+ * that IO to finish before calling the osc and mdc sync methods
+ */
rc = filemap_write_and_wait_range(inode->i_mapping, start, end);
/* catch async errors that were recorded back when async writeback
- * failed for pages in this mapping. */
+ * failed for pages in this mapping.
+ */
if (!S_ISDIR(inode->i_mode)) {
err = lli->lli_async_rc;
lli->lli_async_rc = 0;
__u64 flags = 0;
int rc;
int rc2 = 0;
- ENTRY;
+ ENTRY;
CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID" file_lock=%p\n",
PFID(ll_inode2fid(inode)), file_lock);
* I guess between lockd processes) and then compares pid.
* As such we assign pid to the owner field to make it all work,
* conflict with normal locks is unlikely since pid space and
- * pointer space for current->files are not intersecting */
+ * pointer space for current->files are not intersecting
+ */
if (file_lock->fl_lmops && file_lock->fl_lmops->lm_compare_owner)
flock.l_flock.owner = (unsigned long)file_lock->C_FLC_PID;
#endif
* order to process an unlock request we need all of the same
* information that is given with a normal read or write record
* lock request. To avoid creating another ldlm unlock (cancel)
- * message we'll treat a LCK_NL flock request as an unlock. */
+ * message we'll treat an LCK_NL flock request as an unlock.
+ */
einfo.ei_mode = LCK_NL;
break;
case F_WRLCK:
}
/* Save the old mode so that if the mode in the lock changes we
- * can decrement the appropriate reader or writer refcount. */
+ * can decrement the appropriate reader or writer refcount.
+ */
file_lock->C_FLC_TYPE = einfo.ei_mode;
op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0,
if (IS_ERR(op_data))
RETURN(PTR_ERR(op_data));
- CDEBUG(D_DLMTRACE, "inode="DFID", pid=%u, flags=%#llx, mode=%u, "
- "start=%llu, end=%llu\n", PFID(ll_inode2fid(inode)),
+ CDEBUG(D_DLMTRACE,
+ "inode="DFID", pid=%u, flags=%#llx, mode=%u, start=%llu, end=%llu\n",
+ PFID(ll_inode2fid(inode)),
flock.l_flock.pid, flags, einfo.ei_mode,
flock.l_flock.start, flock.l_flock.end);
struct mdt_body *body;
struct ptlrpc_request *req;
int rc;
- ENTRY;
+ ENTRY;
op_data = ll_prep_md_op_data(NULL, parent, NULL, name, namelen, 0,
LUSTRE_OPC_ANY, NULL);
if (IS_ERR(op_data))
struct ptlrpc_request *request = NULL;
struct obd_client_handle *och = NULL;
struct qstr qstr;
- struct mdt_body *body;
+ struct mdt_body *body;
__u64 data_version = 0;
size_t namelen = strlen(name);
int lumlen = lmv_user_md_size(lum->lum_stripe_count, lum->lum_magic);
bool locked = false;
int rc;
- ENTRY;
+ ENTRY;
CDEBUG(D_VFSTRACE, "migrate "DFID"/%s to MDT%04x stripe count %d\n",
PFID(ll_inode2fid(parent)), name,
lum->lum_stripe_offset, lum->lum_stripe_count);
OBD_CONNECT2_DIR_MIGRATE)) {
if (le32_to_cpu(lum->lum_stripe_count) > 1 ||
ll_dir_striped(child_inode)) {
- CERROR("%s: MDT doesn't support stripe directory "
- "migration!\n", ll_i2sbi(parent)->ll_fsname);
- GOTO(out_iput, rc = -EOPNOTSUPP);
+ rc = -EOPNOTSUPP;
+ CERROR("%s: MDT doesn't support stripe directory migration!: rc = %d\n",
+ ll_i2sbi(parent)->ll_fsname, rc);
+ GOTO(out_iput, rc);
}
}
op_data->op_fid3 = *ll_inode2fid(child_inode);
if (!fid_is_sane(&op_data->op_fid3)) {
- CERROR("%s: migrate %s, but FID "DFID" is insane\n",
+ rc = -EINVAL;
+ CERROR("%s: migrate %s, but FID "DFID" is insane: rc = %d\n",
ll_i2sbi(parent)->ll_fsname, name,
- PFID(&op_data->op_fid3));
- GOTO(out_data, rc = -EINVAL);
+ PFID(&op_data->op_fid3), rc);
+ GOTO(out_data, rc);
}
op_data->op_cli_flags |= CLI_MIGRATE | CLI_SET_MEA;
LASSERT(body != NULL);
/* If the server does release layout lock, then we cleanup
- * the client och here, otherwise release it in out_close: */
+ * the client och here, otherwise release it in out_close:
+ */
if (och && body->mbo_valid & OBD_MD_CLOSE_INTENT_EXECED) {
obd_mod_put(och->och_mod);
md_clear_open_replay_data(ll_i2sbi(parent)->ll_md_exp,
ll_file_noflock(struct file *file, int cmd, struct file_lock *file_lock)
{
struct ll_file_data *fd = file->private_data;
- ENTRY;
+ ENTRY;
/*
* In order to avoid flood of warning messages, only print one message
* for one file. And the entire message rate on the client is limited
RETURN(-ENOSYS);
}
-/**
+/*
* test if some locks matching bits and l_req_mode are acquired
* - bits can be in different locks
* - if found clear the common lock bits in *bits
struct lu_fid *fid;
__u64 flags;
int i;
- ENTRY;
- if (!inode)
- RETURN(0);
+ ENTRY;
+ if (!inode)
+ RETURN(0);
- fid = &ll_i2info(inode)->lli_fid;
- CDEBUG(D_INFO, "trying to match res "DFID" mode %s\n", PFID(fid),
- ldlm_lockname[mode]);
+ fid = &ll_i2info(inode)->lli_fid;
+ CDEBUG(D_INFO, "trying to match res "DFID" mode %s\n", PFID(fid),
+ ldlm_lockname[mode]);
flags = LDLM_FL_BLOCK_GRANTED | LDLM_FL_CBPENDING | LDLM_FL_TEST_LOCK;
for (i = 0; i < MDS_INODELOCK_NUMBITS && *bits != 0; i++) {
}
}
}
- RETURN(*bits == 0);
+ RETURN(*bits == 0);
}
enum ldlm_mode ll_take_md_lock(struct inode *inode, __u64 bits,
union ldlm_policy_data policy = { .l_inodebits = { bits } };
struct lu_fid *fid;
enum ldlm_mode rc;
- ENTRY;
+ ENTRY;
fid = &ll_i2info(inode)->lli_fid;
CDEBUG(D_INFO, "trying to match res "DFID"\n", PFID(fid));
clear_nlink(inode);
/* If it is striped directory, and there is bad stripe
* Let's revalidate the dentry again, instead of returning
- * error */
+ * error
+ */
if (ll_dir_striped(inode))
return 0;
/* This path cannot be hit for regular files unless in
* case of obscure races, so no need to validate
- * size. */
+ * size.
+ */
if (!S_ISREG(inode->i_mode) && !S_ISDIR(inode->i_mode))
return 0;
} else if (rc != 0) {
const char *name = NULL;
size_t namelen = 0;
int rc = 0;
- ENTRY;
+ ENTRY;
CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p),name=%s\n",
PFID(ll_inode2fid(inode)), inode, dentry->d_name.name);
}
/* Unlinked? Unhash dentry, so it is not picked up later by
- * do_lookup() -> ll_revalidate_it(). We cannot use d_drop
- * here to preserve get_cwd functionality on 2.6.
- * Bug 10503 */
+ * do_lookup() -> ll_revalidate_it(). We cannot use d_drop here to
+ * preserve get_cwd functionality on 2.6. Bug 10503
+ */
if (!dentry->d_inode->i_nlink)
d_lustre_invalidate(dentry);
RETURN(rc);
}
} else {
- /* If object isn't regular a file then don't validate size. */
- /* foreign dir is not striped dir */
+ /* If the object isn't a regular file then don't validate size.
+ * A foreign dir is not a striped dir.
+ */
if (!foreign) {
rc = ll_merge_md_attr(inode);
if (rc < 0)
int rc;
ENTRY;
-
env = cl_env_get(&refcheck);
if (IS_ERR(env))
RETURN(PTR_ERR(env));
static int ll_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
__u64 start, __u64 len)
{
- int rc;
- size_t num_bytes;
- struct fiemap *fiemap;
- unsigned int extent_count = fieinfo->fi_extents_max;
+ int rc;
+ size_t num_bytes;
+ struct fiemap *fiemap;
+ unsigned int extent_count = fieinfo->fi_extents_max;
num_bytes = sizeof(*fiemap) + (extent_count *
sizeof(struct fiemap_extent));
OBD_ALLOC_LARGE(fiemap, num_bytes);
-
if (fiemap == NULL)
RETURN(-ENOMEM);
ktime_t kstart = ktime_get();
ENTRY;
-
if (mask & MAY_NOT_BLOCK)
return -ECHILD;
if (unlikely(squash->rsi_uid != 0 &&
uid_eq(current_fsuid(), GLOBAL_ROOT_UID) &&
!test_bit(LL_SBI_NOROOTSQUASH, sbi->ll_flags))) {
- squash_id = true;
+ squash_id = true;
}
if (squash_id) {
CDEBUG(D_OTHER, "squash creds (%d:%d)=>(%d:%d)\n",
__kuid_val(current_fsuid()), __kgid_val(current_fsgid()),
squash->rsi_uid, squash->rsi_gid);
- /* update current process's credentials
- * and FS capability */
+ /* update current process's credentials and FS capability */
cred = prepare_creds();
if (cred == NULL)
RETURN(-ENOMEM);
struct lu_env *env;
int rc;
__u16 refcheck;
- ENTRY;
+ ENTRY;
if (obj == NULL)
RETURN(0);
/* it can only be allowed to match after layout is
* applied to inode otherwise false layout would be
* seen. Applying layout should happen before dropping
- * the intent lock. */
+ * the intent lock.
+ */
ldlm_lock_allow_match(lock);
rc = cl_object_layout_get(env, obj, &cl);
void *lmm;
int lmmsize;
int rc;
- ENTRY;
+ ENTRY;
CDEBUG(D_INODE, DFID" LVB_READY=%d l_lvb_data=%p l_lvb_len=%d\n",
PFID(ll_inode2fid(inode)), ldlm_is_lvb_ready(lock),
lock->l_lvb_data, lock->l_lvb_len);
* within DLM_LVB of dlm reply; otherwise if the lock was ever
* blocked and then granted via completion ast, we have to fetch
* layout here. Please note that we can't use the LVB buffer in
- * completion AST because it doesn't have a large enough buffer */
+ * completion AST because it doesn't have a large enough buffer
+ */
rc = ll_get_default_mdsize(sbi, &lmmsize);
if (rc < 0)
RETURN(rc);
return rc;
}
-/**
+/*
* Apply the layout to the inode. Layout lock is held and will be released
* in this function.
*/
int rc = 0;
bool lvb_ready;
bool wait_layout = false;
- ENTRY;
+ ENTRY;
LASSERT(lustre_handle_is_used(lockh));
lock = ldlm_handle2lock(lockh);
unlock_res_and_lock(lock);
/* checking lvb_ready is racy but this is okay. The worst case is
- * that multi processes may configure the file on the same time. */
+ * that multiple processes may configure the file at the same time.
+ */
if (lvb_ready)
GOTO(out, rc = 0);
* without res lock.
*
* set layout to file. Unlikely this will fail as old layout was
- * surely eliminated */
- memset(&conf, 0, sizeof conf);
+ * surely eliminated
+ */
+ memset(&conf, 0, sizeof(conf));
conf.coc_opc = OBJECT_CONF_SET;
conf.coc_inode = inode;
conf.coc_lock = lock;
CDEBUG(D_INODE, "%s: "DFID"(%p) wait for layout reconf\n",
sbi->ll_fsname, PFID(&lli->lli_fid), inode);
- memset(&conf, 0, sizeof conf);
+ memset(&conf, 0, sizeof(conf));
conf.coc_opc = OBJECT_CONF_WAIT;
conf.coc_inode = inode;
rc = ll_layout_conf(inode, &conf);
if (rc == -ERESTARTSYS) {
__u16 refcheck;
struct lu_env *env;
- struct cl_object * obj = lli->lli_clob;
+ struct cl_object *obj = lli->lli_clob;
env = cl_env_get(&refcheck);
if (IS_ERR(env))
*/
static int ll_layout_intent(struct inode *inode, struct layout_intent *intent)
{
- struct ll_inode_info *lli = ll_i2info(inode);
- struct ll_sb_info *sbi = ll_i2sbi(inode);
- struct md_op_data *op_data;
+ struct ll_inode_info *lli = ll_i2info(inode);
+ struct ll_sb_info *sbi = ll_i2sbi(inode);
+ struct md_op_data *op_data;
struct lookup_intent it;
struct ptlrpc_request *req;
int rc;
- ENTRY;
+ ENTRY;
op_data = ll_prep_md_op_data(NULL, inode, inode, NULL,
0, 0, LUSTRE_OPC_ANY, NULL);
if (IS_ERR(op_data))
RETURN(rc);
}
-/**
+/*
* This function checks if there exists a LAYOUT lock on the client side,
* or enqueues it if it doesn't have one in cache.
*
enum ldlm_mode mode;
int rc;
bool try = true;
- ENTRY;
+ ENTRY;
*gen = ll_layout_version_get(lli);
if (!test_bit(LL_SBI_LAYOUT_LOCK, sbi->ll_flags) ||
*gen != CL_LAYOUT_GEN_NONE)
while (1) {
/* mostly layout lock is caching on the local side, so try to
- * match it before grabbing layout lock mutex. */
+ * match it before grabbing layout lock mutex.
+ */
mode = ll_take_md_lock(inode, MDS_INODELOCK_LAYOUT, &lockh, 0,
LCK_CR | LCK_CW | LCK_PR |
LCK_PW | LCK_EX);
.lai_extent.e_end = ext->e_end,
};
int rc;
- ENTRY;
+ ENTRY;
rc = ll_layout_intent(inode, &intent);
RETURN(rc);
}
-/**
- * This function send a restore request to the MDT
- */
+/* This function sends a restore request to the MDT */
int ll_layout_restore(struct inode *inode, loff_t offset, __u64 length)
{
struct ll_inode_info *lli = ll_i2info(inode);
int len, rc;
ENTRY;
-
len = sizeof(struct hsm_user_request) +
sizeof(struct hsm_user_item);
OBD_ALLOC(hur, len);