sbi->ll_rw_stats_on = 0;
sbi->ll_statfs_max_age = OBD_STATFS_CACHE_SECONDS;
- si_meminfo(&si);
- pages = si.totalram - si.totalhigh;
+ si_meminfo(&si);
+ pages = si.totalram - si.totalhigh;
lru_page_max = pages / 2;
sbi->ll_ra_info.ra_async_max_active = ll_get_ra_async_max_active();
static void ll_free_sbi(struct super_block *sb)
{
struct ll_sb_info *sbi = ll_s2sbi(sb);
+
ENTRY;
if (sbi != NULL) {
ENTRY;
sbi->ll_md_obd = class_name2obd(md);
if (!sbi->ll_md_obd) {
- CERROR("MD %s: not setup or attached\n", md);
- RETURN(-EINVAL);
- }
+ CERROR("%s: not setup or attached: rc = %d\n", md, -EINVAL);
+ RETURN(-EINVAL);
+ }
- OBD_ALLOC_PTR(data);
- if (data == NULL)
- RETURN(-ENOMEM);
+ OBD_ALLOC_PTR(data);
+ if (data == NULL)
+ RETURN(-ENOMEM);
- OBD_ALLOC_PTR(osfs);
- if (osfs == NULL) {
- OBD_FREE_PTR(data);
- RETURN(-ENOMEM);
- }
+ OBD_ALLOC_PTR(osfs);
+ if (osfs == NULL) {
+ OBD_FREE_PTR(data);
+ RETURN(-ENOMEM);
+ }
/* pass client page size via ocd_grant_blkbits, the server should report
- * back its backend blocksize for grant calculation purpose */
+ * back its backend blocksize for grant calculation purpose
+ */
data->ocd_grant_blkbits = PAGE_SHIFT;
/* indicate MDT features supported by this client */
#ifdef HAVE_LRU_RESIZE_SUPPORT
if (test_bit(LL_SBI_LRU_RESIZE, sbi->ll_flags))
- data->ocd_connect_flags |= OBD_CONNECT_LRU_RESIZE;
+ data->ocd_connect_flags |= OBD_CONNECT_LRU_RESIZE;
#endif
data->ocd_connect_flags |= OBD_CONNECT_ACL_FLAGS;
if (CFS_FAIL_CHECK(OBD_FAIL_MDC_LIGHTWEIGHT))
/* flag mdc connection as lightweight, only used for test
- * purpose, use with care */
+ * purpose, use with care
+ */
data->ocd_connect_flags |= OBD_CONNECT_LIGHTWEIGHT;
data->ocd_ibits_known = MDS_INODELOCK_FULL;
err = obd_connect(NULL, &sbi->ll_md_exp, sbi->ll_md_obd,
&sbi->ll_sb_uuid, data, sbi->ll_cache);
if (err == -EBUSY) {
- LCONSOLE_ERROR_MSG(0x14f, "An MDT (md %s) is performing "
- "recovery, of which this client is not a "
- "part. Please wait for recovery to complete,"
- " abort, or time out.\n", md);
+ LCONSOLE_ERROR_MSG(0x14f,
+ "An MDT (md %s) is performing recovery, of which this client is not a part. Please wait for recovery to complete, abort, or time out.\n",
+ md);
GOTO(out, err);
} else if (err) {
CERROR("cannot connect to %s: rc = %d\n", md, err);
err = obd_fid_init(sbi->ll_md_exp->exp_obd, sbi->ll_md_exp,
LUSTRE_SEQ_METADATA);
if (err) {
- CERROR("%s: Can't init metadata layer FID infrastructure, "
- "rc = %d\n", sbi->ll_md_exp->exp_obd->obd_name, err);
+ CERROR("%s: Can't init metadata layer FID infrastructure: rc = %d\n",
+ sbi->ll_md_exp->exp_obd->obd_name, err);
GOTO(out_md, err);
}
/* For mount, we only need fs info from MDT0, and also in DNE, it
- * can make sure the client can be mounted as long as MDT0 is
- * avaible */
+ * can make sure the client can be mounted as long as MDT0 is available
+ */
err = obd_statfs(NULL, sbi->ll_md_exp, osfs,
ktime_get_seconds() - sbi->ll_statfs_max_age,
OBD_STATFS_FOR_MDT0);
* we can access the MDC export directly and exp_connect_flags will
* be non-zero, but if accessing an upgraded 2.1 server it will
* have the correct flags filled in.
- * XXX: fill in the LMV exp_connect_flags from MDC(s). */
+ * XXX: fill in the LMV exp_connect_flags from MDC(s).
+ */
valid = exp_connect_flags(sbi->ll_md_exp) & CLIENT_CONNECT_MDT_REQD;
if (exp_connect_flags(sbi->ll_md_exp) != 0 &&
valid != CLIENT_CONNECT_MDT_REQD) {
OBD_ALLOC_WAIT(buf, PAGE_SIZE);
obd_connect_flags2str(buf, PAGE_SIZE,
valid ^ CLIENT_CONNECT_MDT_REQD, 0, ",");
- LCONSOLE_ERROR_MSG(0x170, "Server %s does not support "
- "feature(s) needed for correct operation "
- "of this client (%s). Please upgrade "
- "server or downgrade client.\n",
+ LCONSOLE_ERROR_MSG(0x170,
+ "Server %s does not support feature(s) needed for correct operation of this client (%s). Please upgrade server or downgrade client.\n",
sbi->ll_md_exp->exp_obd->obd_name, buf);
OBD_FREE(buf, PAGE_SIZE);
GOTO(out_md_fid, err = -EPROTO);
if (test_bit(LL_SBI_USER_XATTR, sbi->ll_flags) &&
!(data->ocd_connect_flags & OBD_CONNECT_XATTR)) {
- LCONSOLE_INFO("Disabling user_xattr feature because "
- "it is not supported on the server\n");
+ LCONSOLE_INFO("Disabling user_xattr feature because it is not supported on the server\n");
clear_bit(LL_SBI_USER_XATTR, sbi->ll_flags);
}
if (data->ocd_ibits_known & MDS_INODELOCK_XATTR) {
if (!(data->ocd_connect_flags & OBD_CONNECT_MAX_EASIZE)) {
- LCONSOLE_INFO("%s: disabling xattr cache due to "
- "unknown maximum xattr size.\n", dt);
+ LCONSOLE_INFO("%s: disabling xattr cache due to unknown maximum xattr size.\n",
+ dt);
} else if (!sbi->ll_xattr_cache_set) {
/* If xattr_cache is already set (no matter 0 or 1)
- * during processing llog, it won't be enabled here. */
+ * during processing llog, it won't be enabled here.
+ */
set_bit(LL_SBI_XATTR_CACHE, sbi->ll_flags);
sbi->ll_xattr_cache_enabled = 1;
}
}
/* pass client page size via ocd_grant_blkbits, the server should report
- * back its backend blocksize for grant calculation purpose */
+ * back its backend blocksize for grant calculation purpose
+ */
data->ocd_grant_blkbits = PAGE_SHIFT;
/* indicate OST features supported by this client */
if (ll_sbi_has_encrypt(sbi))
obd_connect_set_enc(data);
- CDEBUG(D_RPCTRACE, "ocd_connect_flags: %#llx ocd_version: %d "
- "ocd_grant: %d\n", data->ocd_connect_flags,
+ CDEBUG(D_RPCTRACE, "ocd_connect_flags: %#llx ocd_version: %d ocd_grant: %d\n",
+ data->ocd_connect_flags,
data->ocd_version, data->ocd_grant);
sbi->ll_dt_obd->obd_upcall.onu_owner = &sbi->ll_lco;
err = obd_connect(NULL, &sbi->ll_dt_exp, sbi->ll_dt_obd,
&sbi->ll_sb_uuid, data, sbi->ll_cache);
if (err == -EBUSY) {
- LCONSOLE_ERROR_MSG(0x150, "An OST (dt %s) is performing "
- "recovery, of which this client is not a "
- "part. Please wait for recovery to "
- "complete, abort, or time out.\n", dt);
+ LCONSOLE_ERROR_MSG(0x150,
+ "An OST (dt %s) is performing recovery, of which this client is not a part. Please wait for recovery to complete, abort, or time out.\n",
+ dt);
GOTO(out_md, err);
} else if (err) {
CERROR("%s: Cannot connect to %s: rc = %d\n",
err = obd_fid_init(sbi->ll_dt_exp->exp_obd, sbi->ll_dt_exp,
LUSTRE_SEQ_METADATA);
if (err) {
- CERROR("%s: Can't init data layer FID infrastructure, "
- "rc = %d\n", sbi->ll_dt_exp->exp_obd->obd_name, err);
+ CERROR("%s: Can't init data layer FID infrastructure: rc = %d\n",
+ sbi->ll_dt_exp->exp_obd->obd_name, err);
GOTO(out_dt, err);
}
llcrypt_set_ops(sb, &lustre_cryptops);
#endif
- /* make root inode
- * XXX: move this to after cbd setup? */
+ /* make root inode (XXX: move this to after cbd setup?) */
valid = OBD_MD_FLGETATTR | OBD_MD_FLBLOCKS | OBD_MD_FLMODEASIZE |
OBD_MD_ENCCTX;
if (test_bit(LL_SBI_ACL, sbi->ll_flags))
err = md_getattr(sbi->ll_md_exp, op_data, &request);
- /* We need enc ctx info, so reset it in op_data to
- * prevent it from being freed.
- */
+ /* Need enc ctx info, reset in op_data to prevent it being freed. */
encctx = op_data->op_file_encctx;
encctxlen = op_data->op_file_encctx_size;
op_data->op_file_encctx = NULL;
/* We set sb->s_dev equal on all lustre clients in order to support
* NFS export clustering. NFSD requires that the FSID be the same
- * on all clients. */
- /* s_dev is also used in lt_compare() to compare two fs, but that is
- * only a node-local comparison. */
+ * on all clients.
+ *
+ * s_dev is also used in lt_compare() to compare two fs, but that is
+ * only a node-local comparison.
+ */
uuid = obd_get_uuid(sbi->ll_md_exp);
if (uuid != NULL)
sb->s_dev = get_uuid2int(uuid->uuid, strlen(uuid->uuid));
RETURN(rc);
}
-/**
- * Get the value of the default_easize parameter.
+/* Get the value of the default_easize parameter.
*
* \see client_obd::cl_default_mds_easize
*
RETURN(rc);
}
-/**
+/*
* Set the default_easize parameter to the given value.
*
* \see client_obd::cl_default_mds_easize
static void client_common_put_super(struct super_block *sb)
{
struct ll_sb_info *sbi = ll_s2sbi(sb);
+
ENTRY;
cl_sb_fini(sb);
void ll_kill_super(struct super_block *sb)
{
struct ll_sb_info *sbi;
+
ENTRY;
/* not init sb ?*/
sbi = ll_s2sbi(sb);
/* we need restore s_dev from changed for clustred NFS before put_super
* because new kernels have cached s_dev and change sb->s_dev in
- * put_super not affected real removing devices */
+ * put_super, which does not affect real device removal
+ */
if (sbi) {
sb->s_dev = sbi->ll_sdev_orig;
if (match_wildcard("context", s1) ||
match_wildcard("fscontext", s1) ||
match_wildcard("defcontext", s1) ||
- match_wildcard("rootcontext",s1))
+ match_wildcard("rootcontext", s1))
continue;
LCONSOLE_ERROR_MSG(0x152,
err = llcrypt_set_test_dummy_encryption(sb,
#ifdef HAVE_FSCRYPT_SET_TEST_DUMMY_ENC_CHAR_ARG
- args->from,
+ args->from,
#else
- &args[0],
+ &args[0],
#endif
- &lsi->lsi_dummy_enc_policy);
+ &lsi->lsi_dummy_enc_policy);
if (!err)
break;
default:
break;
}
- }
+ }
kfree(orig_opts);
RETURN(err);
}
/* Do not set lli_fid, it has been initialized already. */
fid_zero(&lli->lli_pfid);
lli->lli_mds_read_och = NULL;
- lli->lli_mds_write_och = NULL;
- lli->lli_mds_exec_och = NULL;
- lli->lli_open_fd_read_count = 0;
- lli->lli_open_fd_write_count = 0;
- lli->lli_open_fd_exec_count = 0;
+ lli->lli_mds_write_och = NULL;
+ lli->lli_mds_exec_och = NULL;
+ lli->lli_open_fd_read_count = 0;
+ lli->lli_open_fd_write_count = 0;
+ lli->lli_open_fd_exec_count = 0;
mutex_init(&lli->lli_och_mutex);
spin_lock_init(&lli->lli_agl_lock);
spin_lock_init(&lli->lli_layout_lock);
/* Profile set with LCFG_MOUNTOPT so we can find our mdc and osc obds */
lprof = class_get_profile(profilenm);
if (lprof == NULL) {
- LCONSOLE_ERROR_MSG(0x156, "The client profile '%s' could not be"
- " read from the MGS. Does that filesystem "
- "exist?\n", profilenm);
+ LCONSOLE_ERROR_MSG(0x156,
+ "The client profile '%s' could not be read from the MGS. Does that filesystem exist?\n",
+ profilenm);
GOTO(out_debugfs, err = -EINVAL);
}
CDEBUG(D_CONFIG, "Found profile %s: mdc=%s osc=%s\n", profilenm,
/* NOTE: we depend on atomic igrab() -bzzz */
lock_res_and_lock(lock);
if (lock->l_resource->lr_lvb_inode) {
- struct ll_inode_info * lli;
+ struct ll_inode_info *lli;
+
lli = ll_i2info(lock->l_resource->lr_lvb_inode);
if (lli->lli_inode_magic == LLI_INODE_MAGIC) {
inode = igrab(lock->l_resource->lr_lvb_inode);
} else {
inode = lock->l_resource->lr_lvb_inode;
LDLM_DEBUG_LIMIT(inode->i_state & I_FREEING ? D_INFO :
- D_WARNING, lock, "lr_lvb_inode %p is "
- "bogus: magic %08x",
+ D_WARNING, lock,
+ "lr_lvb_inode %p is bogus: magic %08x",
lock->l_resource->lr_lvb_inode,
lli->lli_inode_magic);
inode = NULL;
/* XXX sigh, this lsm_root initialization should be in
* LMV layer, but it needs ll_iget right now, so we
- * put this here right now. */
+ * put this here right now.
+ */
for (i = 0; i < lsm->lsm_md_stripe_count; i++) {
fid = &lsm->lsm_md_oinfo[i].lmo_fid;
LASSERT(lsm->lsm_md_oinfo[i].lmo_root == NULL);
/* Unfortunately ll_iget will call ll_update_inode,
* where the initialization of slave inode is slightly
* different, so it reset lsm_md to NULL to avoid
- * initializing lsm for slave inode. */
+ * initializing lsm for slave inode.
+ */
lsm->lsm_md_oinfo[i].lmo_root =
ll_iget_anon_dir(inode->i_sb, fid, md);
if (IS_ERR(lsm->lsm_md_oinfo[i].lmo_root)) {
{
struct ll_inode_info *lli = ll_i2info(inode);
struct lmv_stripe_object *lsm_obj = md->def_lsm_obj;
+
ENTRY;
if (!lsm_obj) {
void ll_clear_inode(struct inode *inode)
{
- struct ll_inode_info *lli = ll_i2info(inode);
- struct ll_sb_info *sbi = ll_i2sbi(inode);
+ struct ll_inode_info *lli = ll_i2info(inode);
+ struct ll_sb_info *sbi = ll_i2sbi(inode);
- ENTRY;
+ ENTRY;
CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p)\n",
PFID(ll_inode2fid(inode)), inode);
md_null_inode(sbi->ll_md_exp, ll_inode2fid(inode));
- LASSERT(!lli->lli_open_fd_write_count);
- LASSERT(!lli->lli_open_fd_read_count);
- LASSERT(!lli->lli_open_fd_exec_count);
+ LASSERT(!lli->lli_open_fd_write_count);
+ LASSERT(!lli->lli_open_fd_read_count);
+ LASSERT(!lli->lli_open_fd_exec_count);
- if (lli->lli_mds_write_och)
- ll_md_real_close(inode, FMODE_WRITE);
- if (lli->lli_mds_exec_och)
- ll_md_real_close(inode, FMODE_EXEC);
- if (lli->lli_mds_read_och)
- ll_md_real_close(inode, FMODE_READ);
+ if (lli->lli_mds_write_och)
+ ll_md_real_close(inode, FMODE_WRITE);
+ if (lli->lli_mds_exec_och)
+ ll_md_real_close(inode, FMODE_EXEC);
+ if (lli->lli_mds_read_och)
+ ll_md_real_close(inode, FMODE_READ);
- if (S_ISLNK(inode->i_mode) && lli->lli_symlink_name) {
- OBD_FREE(lli->lli_symlink_name,
- strlen(lli->lli_symlink_name) + 1);
- lli->lli_symlink_name = NULL;
- }
+ if (S_ISLNK(inode->i_mode) && lli->lli_symlink_name) {
+ OBD_FREE(lli->lli_symlink_name,
+ strlen(lli->lli_symlink_name) + 1);
+ lli->lli_symlink_name = NULL;
+ }
ll_xattr_cache_destroy(inode);
if (rc == -ENOENT) {
clear_nlink(inode);
/* Unlinked special device node? Or just a race?
- * Pretend we done everything. */
+ * Pretend we've done everything.
+ */
if (!S_ISREG(inode->i_mode) &&
!S_ISDIR(inode->i_mode)) {
ia_valid = op_data->op_attr.ia_valid;
rc = md_get_lustre_md(sbi->ll_md_exp, &request->rq_pill, sbi->ll_dt_exp,
sbi->ll_md_exp, &md);
- if (rc) {
- ptlrpc_req_finished(request);
- RETURN(rc);
- }
+ if (rc) {
+ ptlrpc_req_finished(request);
+ RETURN(rc);
+ }
ia_valid = op_data->op_attr.ia_valid;
/* inode size will be in ll_setattr_ost, can't do it now since dirty
- * cache is not cleared yet. */
+ * cache is not cleared yet.
+ */
op_data->op_attr.ia_valid &= ~(TIMES_SET_FLAGS | ATTR_SIZE);
if (S_ISREG(inode->i_mode))
ll_inode_lock(inode);
RETURN(rc);
}
-/**
- * Zero portion of page that is part of @inode.
+/* Zero portion of page that is part of @inode.
* This implies, if necessary:
* - taking cl_lock on range corresponding to concerned page
* - grabbing vm page
struct cl_io *io = NULL;
struct cl_page *clpage = NULL;
struct page *vmpage = NULL;
- unsigned from = index << PAGE_SHIFT;
+ unsigned int from = index << PAGE_SHIFT;
struct cl_lock *lock = NULL;
struct cl_lock_descr *descr = NULL;
struct cl_2queue *queue = NULL;
RETURN(rc);
}
-/**
- * Get reference file from volatile file name.
+/* Get reference file from volatile file name.
* Volatile file name may look like:
* <parent>/LUSTRE_VOLATILE_HDR:<mdt_index>:<random>:fd=<fd>
* where fd is opened descriptor of reference file.
int ll_setattr_raw(struct dentry *dentry, struct iattr *attr,
enum op_xvalid xvalid, bool hsm_import)
{
- struct inode *inode = dentry->d_inode;
- struct ll_inode_info *lli = ll_i2info(inode);
- struct md_op_data *op_data = NULL;
+ struct inode *inode = dentry->d_inode;
+ struct ll_inode_info *lli = ll_i2info(inode);
+ struct md_op_data *op_data = NULL;
ktime_t kstart = ktime_get();
int rc = 0;
(long long) attr->ia_mtime.tv_sec, attr->ia_mtime.tv_nsec,
(long long) attr->ia_ctime.tv_sec, attr->ia_ctime.tv_nsec);
-
if (attr->ia_valid & ATTR_SIZE) {
/* Check new size against VFS/VM file size limit and rlimit */
rc = inode_newsize_ok(inode, attr->ia_size);
/* The maximum Lustre file size is variable, based on the
* OST maximum object size and number of stripes. This
- * needs another check in addition to the VFS check above. */
+ * needs another check in addition to the VFS check above.
+ */
if (attr->ia_size > ll_file_maxbytes(inode)) {
- CDEBUG(D_INODE,"file "DFID" too large %llu > %llu\n",
+ CDEBUG(D_INODE, "file "DFID" too large %llu > %llu\n",
PFID(&lli->lli_fid), attr->ia_size,
ll_file_maxbytes(inode));
GOTO(clear, rc = -EFBIG);
if (!(attr->ia_valid & ATTR_ATIME_SET) &&
(attr->ia_valid & ATTR_ATIME)) {
attr->ia_atime = current_time(inode);
- attr->ia_valid |= ATTR_ATIME_SET;
- }
+ attr->ia_valid |= ATTR_ATIME_SET;
+ }
if (!(attr->ia_valid & ATTR_MTIME_SET) &&
(attr->ia_valid & ATTR_MTIME)) {
attr->ia_mtime = current_time(inode);
attr->ia_valid |= ATTR_MTIME_SET;
}
- if (attr->ia_valid & (ATTR_MTIME | ATTR_CTIME))
+ if (attr->ia_valid & (ATTR_MTIME | ATTR_CTIME))
CDEBUG(D_INODE, "setting mtime %lld, ctime %lld, now = %lld\n",
(s64)attr->ia_mtime.tv_sec, (s64)attr->ia_ctime.tv_sec,
ktime_get_real_seconds());
ll_inode_unlock(inode);
/* We always do an MDS RPC, even if we're only changing the size;
- * only the MDS knows whether truncate() should fail with -ETXTBUSY */
+ * only the MDS knows whether truncate() should fail with -ETXTBUSY
+ */
OBD_ALLOC_PTR(op_data);
if (op_data == NULL)
GOTO(out, rc = -ENOMEM);
if (!hsm_import && attr->ia_valid & ATTR_SIZE) {
- /* If we are changing file size, file content is
- * modified, flag it.
- */
+ /* If changing file size, file content is modified, flag it */
xvalid |= OP_XVALID_OWNEROVERRIDE;
op_data->op_bias |= MDS_DATA_MODIFIED;
clear_bit(LLIF_DATA_MODIFIED, &lli->lli_flags);
rc = pcc_inode_setattr(inode, attr, &cached);
if (cached) {
if (rc) {
- CERROR("%s: PCC inode "DFID" setattr failed: "
- "rc = %d\n",
+ CERROR("%s: PCC inode "DFID" setattr failed: rc = %d\n",
ll_i2sbi(inode)->ll_fsname,
PFID(&lli->lli_fid), rc);
GOTO(out, rc);
* Please notice that if the file is not released, the previous
* MDS_DATA_MODIFIED has taken effect and usually
* LLIF_DATA_MODIFIED is not set(see vvp_io_setattr_fini()).
- * This way we can save an RPC for common open + trunc
- * operation. */
+ * This way we can save an RPC for common open + trunc operation.
+ */
if (test_and_clear_bit(LLIF_DATA_MODIFIED, &lli->lli_flags)) {
struct hsm_state_set hss = {
.hss_valid = HSS_SETMASK,
rc2 = ll_hsm_state_set(inode, &hss);
/* truncate and write can happen at the same time, so that
* the file can be set modified even though the file is not
- * restored from released state, and ll_hsm_state_set() is
- * not applicable for the file, and rc2 < 0 is normal in this
- * case. */
+ * restored from released state, and ll_hsm_state_set() is not
+ * applicable for the file, and rc2 < 0 is normal in this case.
+ */
if (rc2 < 0)
CDEBUG(D_INFO, DFID "HSM set dirty failed: rc2 = %d\n",
PFID(ll_inode2fid(inode)), rc2);
* flag. ll_update_inode (called from ll_md_setattr), clears
* inode flags, so there is a gap where S_NOSEC is not set.
* This can cause a writer to take the i_mutex unnecessarily,
- * but this is safe to do and should be rare. */
+ * but this is safe to do and should be rare.
+ */
inode_has_no_xattr(inode);
}
if (((attr->ia_valid & (ATTR_MODE|ATTR_FORCE|ATTR_SIZE)) ==
(ATTR_SIZE|ATTR_MODE)) &&
(((mode & S_ISUID) && !(attr->ia_mode & S_ISUID)) ||
- (((mode & (S_ISGID|S_IXGRP)) == (S_ISGID|S_IXGRP)) &&
+ (((mode & (S_ISGID | 0010)) == (S_ISGID | 0010)) &&
!(attr->ia_mode & S_ISGID))))
attr->ia_valid |= ATTR_FORCE;
attr->ia_valid |= ATTR_KILL_SUID;
if ((attr->ia_valid & ATTR_MODE) &&
- ((mode & (S_ISGID|S_IXGRP)) == (S_ISGID|S_IXGRP)) &&
+ ((mode & (S_ISGID | 0010)) == (S_ISGID | 0010)) &&
!(attr->ia_mode & S_ISGID) &&
!(attr->ia_valid & ATTR_KILL_SGID))
attr->ia_valid |= ATTR_KILL_SGID;
if (rc)
return rc;
- statfs_unpack(sfs, &osfs);
+ statfs_unpack(sfs, &osfs);
- /* We need to downshift for all 32-bit kernels, because we can't
- * tell if the kernel is being called via sys_statfs64() or not.
- * Stop before overflowing f_bsize - in which case it is better
- * to just risk EOVERFLOW if caller is using old sys_statfs(). */
- if (sizeof(long) < 8) {
- while (osfs.os_blocks > ~0UL && sfs->f_bsize < 0x40000000) {
- sfs->f_bsize <<= 1;
+ /* We need to downshift for all 32-bit kernels, because we can't
+ * tell if the kernel is being called via sys_statfs64() or not.
+ * Stop before overflowing f_bsize - in which case it is better
+ * to just risk EOVERFLOW if caller is using old sys_statfs().
+ */
+ if (sizeof(long) < 8) {
+ while (osfs.os_blocks > ~0UL && sfs->f_bsize < 0x40000000) {
+ sfs->f_bsize <<= 1;
- osfs.os_blocks >>= 1;
- osfs.os_bfree >>= 1;
- osfs.os_bavail >>= 1;
- }
- }
+ osfs.os_blocks >>= 1;
+ osfs.os_bfree >>= 1;
+ osfs.os_bavail >>= 1;
+ }
+ }
sfs->f_blocks = osfs.os_blocks;
sfs->f_bfree = osfs.os_bfree;
LSM_SEM_CHILD,
};
-/**
- * Update directory depth and default LMV
+/* Update directory depth and default LMV
*
* Update directory depth to ROOT and inherit default LMV from parent if
* parent's default LMV is inheritable. The default LMV set with command
XA_STATE(xas, &mapping->i_pages, 0);
struct page *page;
#endif
- CWARN("%s: inode="DFID"(%p) nrpages=%lu "
- "state %#lx, lli_flags %#lx, "
- "see https://jira.whamcloud.com/browse/LU-118\n",
- ll_i2sbi(inode)->ll_fsname,
- PFID(ll_inode2fid(inode)), inode, nrpages,
- inode->i_state, ll_i2info(inode)->lli_flags);
+ CWARN("%s: inode="DFID"(%p) nrpages=%lu state %#lx, lli_flags %#lx, see https://jira.whamcloud.com/browse/LU-118\n",
+ ll_i2sbi(inode)->ll_fsname, PFID(ll_inode2fid(inode)),
+ inode, nrpages, inode->i_state,
+ ll_i2info(inode)->lli_flags);
#ifdef HAVE_XARRAY_SUPPORT
rcu_read_lock();
xas_for_each(&xas, page, ULONG_MAX) {
int ll_read_inode2(struct inode *inode, void *opaque)
{
- struct lustre_md *md = opaque;
- struct ll_inode_info *lli = ll_i2info(inode);
- int rc;
- ENTRY;
+ struct lustre_md *md = opaque;
+ struct ll_inode_info *lli = ll_i2info(inode);
+ int rc;
+
+ ENTRY;
- CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p)\n",
- PFID(&lli->lli_fid), inode);
+ CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p)\n",
+ PFID(&lli->lli_fid), inode);
- /* Core attributes from the MDS first. This is a new inode, and
- * the VFS doesn't zero times in the core inode so we have to do
- * it ourselves. They will be overwritten by either MDS or OST
+ /* Core attributes from the MDS first. This is a new inode, and
+ * the VFS doesn't zero times in the core inode so we have to do
+ * it ourselves. They will be overwritten by either MDS or OST
* attributes - we just need to make sure they aren't newer.
*/
inode_set_mtime(inode, 0, 0);
if (rc != 0)
RETURN(rc);
- /* OIDEBUG(inode); */
+ /* OIDEBUG(inode); */
#ifdef HAVE_BACKING_DEV_INFO
/* initializing backing dev info. */
inode->i_mapping->backing_dev_info = &s2lsi(inode->i_sb)->lsi_bdi;
#endif
- if (S_ISREG(inode->i_mode)) {
- struct ll_sb_info *sbi = ll_i2sbi(inode);
- inode->i_op = &ll_file_inode_operations;
- inode->i_fop = sbi->ll_fop;
- inode->i_mapping->a_ops = (struct address_space_operations *)&ll_aops;
- EXIT;
- } else if (S_ISDIR(inode->i_mode)) {
- inode->i_op = &ll_dir_inode_operations;
- inode->i_fop = &ll_dir_operations;
- EXIT;
- } else if (S_ISLNK(inode->i_mode)) {
- inode->i_op = &ll_fast_symlink_inode_operations;
- EXIT;
- } else {
- inode->i_op = &ll_special_inode_operations;
+ if (S_ISREG(inode->i_mode)) {
+ struct ll_sb_info *sbi = ll_i2sbi(inode);
+
+ inode->i_op = &ll_file_inode_operations;
+ inode->i_fop = sbi->ll_fop;
+ inode->i_mapping->a_ops = &ll_aops;
+ EXIT;
+ } else if (S_ISDIR(inode->i_mode)) {
+ inode->i_op = &ll_dir_inode_operations;
+ inode->i_fop = &ll_dir_operations;
+ EXIT;
+ } else if (S_ISLNK(inode->i_mode)) {
+ inode->i_op = &ll_fast_symlink_inode_operations;
+ EXIT;
+ } else {
+ inode->i_op = &ll_special_inode_operations;
init_special_inode(inode, inode->i_mode,
inode->i_rdev);
- EXIT;
- }
+ EXIT;
+ }
return 0;
}
void ll_delete_inode(struct inode *inode)
{
struct ll_inode_info *lli = ll_i2info(inode);
+
ENTRY;
if (S_ISREG(inode->i_mode) && lli->lli_clob != NULL) {
unsigned int cmd, void __user *uarg)
{
int rc;
+
ENTRY;
switch (cmd) {
RETURN(ll_fid2path(inode, uarg));
#ifdef OBD_IOC_GETNAME_OLD
case_OBD_IOC_DEPRECATED_FT(OBD_IOC_GETNAME_OLD,
- ll_i2sbi(inode)->ll_md_exp->exp_obd->obd_name,
- 2, 16);
+ ll_i2sbi(inode)->ll_md_exp->exp_obd->obd_name,
+ 2, 16);
#endif
case OBD_IOC_GETDTNAME:
case OBD_IOC_GETMDNAME:
struct obd_device *obd;
struct obd_ioctl_data *ioc_data;
int cnt;
+
ENTRY;
CDEBUG(D_VFSTRACE, "VFS Op: superblock %p count %d active %d\n", sb,
OBD_ALLOC_PTR(ioc_data);
if (ioc_data) {
obd_iocontrol(OBD_IOC_SET_ACTIVE, sbi->ll_md_exp,
- sizeof *ioc_data, ioc_data, NULL);
+ sizeof(*ioc_data), ioc_data, NULL);
obd_iocontrol(OBD_IOC_SET_ACTIVE, sbi->ll_dt_exp,
- sizeof *ioc_data, ioc_data, NULL);
+ sizeof(*ioc_data), ioc_data, NULL);
OBD_FREE_PTR(ioc_data);
}
return 0;
}
-/**
- * Cleanup the open handle that is cached on MDT-side.
+/* Cleanup the open handle that is cached on MDT-side.
*
* For open case, the client side open handling thread may hit error
* after the MDT grant the open. Under such case, the client should
struct md_op_data *op_data;
struct ptlrpc_request *close_req = NULL;
struct obd_export *exp = ll_s2sbi(sb)->ll_md_exp;
+
ENTRY;
body = req_capsule_server_get(pill, &RMF_MDT_BODY);
*inode = ll_iget(sb, cl_fid_build_ino(fid1, api32), &md);
if (IS_ERR(*inode)) {
- lmd_clear_acl(&md);
- rc = IS_ERR(*inode) ? PTR_ERR(*inode) : -ENOMEM;
- *inode = NULL;
- CERROR("new_inode -fatal: rc %d\n", rc);
- GOTO(out, rc);
- }
- }
+ lmd_clear_acl(&md);
+ rc = IS_ERR(*inode) ? PTR_ERR(*inode) : -ENOMEM;
+ *inode = NULL;
+ CERROR("%s: new_inode - fatal error: rc = %d\n",
+ sbi->ll_fsname, rc);
+ GOTO(out, rc);
+ }
+ }
/* Handling piggyback layout lock.
* Layout lock can be piggybacked by getattr and open request.
* 1. proc1: mdt returns a lsm but not granting layout
* 2. layout was changed by another client
* 3. proc2: refresh layout and layout lock granted
- * 4. proc1: to apply a stale layout */
+ * 4. proc1: to apply a stale layout
+ */
if (it != NULL && it->it_lock_mode != 0) {
struct lustre_handle lockh;
struct ldlm_lock *lock;
RETURN(0);
}
-/**
+/*
* Get obd name by cmd, and copy out to user space
*/
int ll_get_obd_name(struct inode *inode, unsigned int cmd, void __user *uarg)
{
struct ll_sb_info *sbi = ll_i2sbi(inode);
struct obd_device *obd;
+
ENTRY;
if (cmd == OBD_IOC_GETNAME_OLD || cmd == OBD_IOC_GETDTNAME)
free_page((unsigned long)db);
}
-static char* ll_d_path(struct dentry *dentry, char *buf, int bufsize)
+static char *ll_d_path(struct dentry *dentry, char *buf, int bufsize)
{
char *path = NULL;
/* The below message is checked in recovery-small.sh test_24b */
CDEBUG(D_WARNING,
- "%s: dirty page discard: %s/fid: "DFID"/%s may get corrupted "
- "(rc %d)\n", ll_i2sbi(inode)->ll_fsname,
+ "%s: dirty page discard: %s/fid: "DFID"/%s may get corrupted (rc %d)\n",
+ ll_i2sbi(inode)->ll_fsname,
s2lsi(inode->i_sb)->lsi_lmd->lmd_dev,
PFID(ll_inode2fid(inode)),
(path && !IS_ERR(path)) ? path : "", ioret);
{
struct lov_user_md lum;
ssize_t lum_size;
+
ENTRY;
if (copy_from_user(&lum, md, sizeof(lum)))
spin_unlock(&squash->rsi_lock);
}
-/**
+/*
* Parse linkea content to extract information about a given hardlink
*
* \param[in] ldata - Initialized linkea data
{
unsigned int idx;
int rc;
+
ENTRY;
rc = linkea_init_with_rec(ldata);
RETURN(0);
}
-/**
+/*
* Get parent FID and name of an identified link. Operation is performed for
* a given link number, letting the caller iterate over linkno to list one or
* all links of an entry.