{
struct lu_env *env;
struct ll_inode_info *lli;
- struct cl_object *clob;
- struct lu_site *site;
- struct lu_fid *fid;
+ struct cl_object *clob;
+ struct lu_site *site;
+ struct lu_fid *fid;
struct cl_object_conf conf = {
.coc_inode = inode,
.u = {
.coc_layout = md->layout,
}
};
- int result = 0;
+ int result = 0;
__u16 refcheck;
LASSERT(md->body->mbo_valid & OBD_MD_FLID);
env = cl_inode_fini_env;
}
- /*
- * cl_object cache is a slave to inode cache (which, in turn
- * is a slave to dentry cache), don't keep cl_object in memory
- * when its master is evicted.
- */
- cl_object_kill(env, clob);
- lu_object_ref_del(&clob->co_lu, "inode", inode);
- cl_object_put_last(env, clob);
- lli->lli_clob = NULL;
+ /*
+ * cl_object cache is a slave to inode cache (which, in turn
+ * is a slave to dentry cache), don't keep cl_object in memory
+ * when its master is evicted.
+ */
+ cl_object_kill(env, clob);
+ lu_object_ref_del(&clob->co_lu, "inode", inode);
+ cl_object_put_last(env, clob);
+ lli->lli_clob = NULL;
if (emergency)
mutex_unlock(&cl_inode_fini_guard);
else
#include "llite_internal.h"
#include "vvp_internal.h"
-/*****************************************************************************
- *
- * Object operations.
- *
- */
-
+/* Object operations. */
int vvp_object_invariant(const struct cl_object *obj)
{
struct inode *inode = vvp_object_inode(obj);
}
static int vvp_attr_get(const struct lu_env *env, struct cl_object *obj,
- struct cl_attr *attr)
+ struct cl_attr *attr)
{
struct inode *inode = vvp_object_inode(obj);
}
static int vvp_attr_update(const struct lu_env *env, struct cl_object *obj,
- const struct cl_attr *attr, unsigned valid)
+ const struct cl_attr *attr, unsigned int valid)
{
struct inode *inode = vvp_object_inode(obj);
* page may be stale due to layout change, and the process
* will never be notified.
* This operation is expensive but mmap processes have to pay
- * a price themselves. */
+ * a price themselves.
+ */
unmap_mapping_range(conf->coc_inode->i_mapping,
0, OBD_OBJECT_EOF, 0);
pcc_layout_invalidate(conf->coc_inode);
{
struct inode *inode = vvp_object_inode(obj);
int rc;
+
ENTRY;
rc = cl_sync_file_range(inode, 0, OBD_OBJECT_EOF, CL_FSYNC_LOCAL, 1);
struct vvp_object *cl_inode2vvp(struct inode *inode)
{
struct ll_inode_info *lli = ll_i2info(inode);
- struct cl_object *obj = lli->lli_clob;
- struct lu_object *lu;
+ struct cl_object *obj = lli->lli_clob;
+ struct lu_object *lu;
- LASSERT(obj != NULL);
- lu = lu_object_locate(obj->co_lu.lo_header, &vvp_device_type);
- LASSERT(lu != NULL);
+ LASSERT(obj != NULL);
+ lu = lu_object_locate(obj->co_lu.lo_header, &vvp_device_type);
+ LASSERT(lu != NULL);
return lu2vvp(lu);
}
/* When setxattr() is called with a size of 0 the value is
* unconditionally replaced by "". When removexattr() is
- * called we get a NULL value and XATTR_REPLACE for flags. */
+ * called we get a NULL value and XATTR_REPLACE for flags.
+ */
if (!value && flags == XATTR_REPLACE)
valid = OBD_MD_FLXATTRRM;
else
clear_bit(LL_SBI_USER_XATTR, sbi->ll_flags);
}
out:
- ptlrpc_req_finished(req);
+ ptlrpc_req_finished(req);
RETURN(rc);
}
* using a hash or a tree structure instead of list for faster lookups.
*/
struct ll_xattr_entry {
- struct list_head xe_list; /* protected with
- * lli_xattrs_list_rwsem */
+ struct list_head xe_list; /* protected by lli_xattrs_list_rwsem */
char *xe_name; /* xattr name, \0-terminated */
char *xe_value; /* xattr value */
- unsigned xe_namelen; /* strlen(xe_name) + 1 */
- unsigned xe_vallen; /* xattr value length */
+ unsigned int xe_namelen; /* strlen(xe_name) + 1 */
+ unsigned int xe_vallen; /* xattr value length */
};
static struct kmem_cache *xattr_kmem;
static int ll_xattr_cache_add(struct list_head *cache,
const char *xattr_name,
const char *xattr_val,
- unsigned xattr_val_len)
+ unsigned int xattr_val_len)
{
struct ll_xattr_entry *xattr;
RETURN(0);
while (ll_xattr_cache_del(&lli->lli_xattrs, NULL) == 0)
- /* empty loop */ ;
+ /* empty loop */;
clear_bit(LLIF_XATTR_CACHE_FILLED, &lli->lli_flags);
clear_bit(LLIF_XATTR_CACHE, &lli->lli_flags);
mutex_lock(&lli->lli_xattrs_enq_lock);
/* inode may have been shrunk and recreated, so data is gone, match lock
- * only when data exists. */
+ * only when data exists.
+ */
if (ll_xattr_cache_filled(lli)) {
/* Try matching first. */
mode = ll_take_md_lock(inode, MDS_INODELOCK_XATTR, &lockh, 0,
*req = oit->it_request;
if (rc < 0) {
- CDEBUG(D_CACHE, "md_intent_lock failed with %d for fid "DFID"\n",
- rc, PFID(ll_inode2fid(inode)));
+ CDEBUG(D_CACHE,
+ "md_intent_lock failed with %d for fid "DFID"\n", rc,
+ PFID(ll_inode2fid(inode)));
mutex_unlock(&lli->lli_xattrs_enq_lock);
RETURN(rc);
}