* A mutex serializing calls to slp_inode_fini() under extreme memory
* pressure, when environments cannot be allocated.
*/
-static DEFINE_MUTEX(ccc_inode_fini_guard);
+static CFS_DEFINE_MUTEX(ccc_inode_fini_guard);
static int dummy_refcheck;
int ccc_global_init(struct lu_device_type *device_type)
*/
struct lu_object *ccc_object_alloc(const struct lu_env *env,
- const struct lu_object_header *_,
+ const struct lu_object_header *unused,
struct lu_device *dev,
const struct cl_object_operations *clops,
const struct lu_object_operations *luops)
int ccc_lock_init(const struct lu_env *env,
struct cl_object *obj, struct cl_lock *lock,
- const struct cl_io *_,
+ const struct cl_io *unused,
const struct cl_lock_operations *lkops)
{
struct ccc_lock *clk;
{
}
-void ccc_transient_page_own(const struct lu_env *env,
+int ccc_transient_page_own(const struct lu_env *env,
const struct cl_page_slice *slice,
- struct cl_io *_)
+ struct cl_io *unused,
+ int nonblock)
{
ccc_transient_page_verify(slice->cpl_page);
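+ /* owning a transient page always succeeds; the nonblock flag is ignored */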
+ return 0;
}
void ccc_transient_page_assume(const struct lu_env *env,
const struct cl_page_slice *slice,
- struct cl_io *_)
+ struct cl_io *unused)
{
ccc_transient_page_verify(slice->cpl_page);
}
void ccc_transient_page_unassume(const struct lu_env *env,
const struct cl_page_slice *slice,
- struct cl_io *_)
+ struct cl_io *unused)
{
ccc_transient_page_verify(slice->cpl_page);
}
void ccc_transient_page_disown(const struct lu_env *env,
const struct cl_page_slice *slice,
- struct cl_io *_)
+ struct cl_io *unused)
{
ccc_transient_page_verify(slice->cpl_page);
}
void ccc_transient_page_discard(const struct lu_env *env,
const struct cl_page_slice *slice,
- struct cl_io *_)
+ struct cl_io *unused)
{
struct cl_page *page = slice->cpl_page;
int ccc_transient_page_prep(const struct lu_env *env,
const struct cl_page_slice *slice,
- struct cl_io *_)
+ struct cl_io *unused)
{
ENTRY;
/* transient page should always be sent. */
int ccc_lock_enqueue(const struct lu_env *env,
const struct cl_lock_slice *slice,
- struct cl_io *_, __u32 enqflags)
+ struct cl_io *unused, __u32 enqflags)
{
CLOBINVRNT(env, slice->cls_obj, ccc_object_invariant(slice->cls_obj));
return 0;
* cached lock "fits" into io.
*
* \param slice lock to be checked
- *
* \param io IO that wants a lock.
*
* \see lov_lock_fits_into().
*/
if (cio->cui_glimpse)
result = descr->cld_mode != CLM_WRITE;
+
/*
* Also, don't match incomplete write locks for read, otherwise read
* would enqueue missing sub-locks in the write mode.
- *
- * XXX this is a candidate for generic locking policy, to be moved
- * into cl_lock_lookup().
*/
else if (need->cld_mode != descr->cld_mode)
result = lock->cll_state >= CLS_ENQUEUED;
if (rc == 0) {
if (lock->cll_descr.cld_start == 0 &&
lock->cll_descr.cld_end == CL_PAGE_EOF) {
- cl_isize_write(inode, attr->cat_kms);
+ cl_isize_write_nolock(inode, attr->cat_kms);
CDEBUG(D_INODE, DFID" updating i_size %llu\n",
PFID(lu_object_fid(&obj->co_lu)),
(__u64)cl_isize_read(inode));
cl_inode_mtime(inode) = attr->cat_mtime;
cl_inode_atime(inode) = attr->cat_atime;
cl_inode_ctime(inode) = attr->cat_ctime;
- } else
- CL_LOCK_DEBUG(D_ERROR, env, lock, "attr_get: %i\n", rc);
+ } else {
+ CL_LOCK_DEBUG(D_INFO, env, lock, "attr_get: %i\n", rc);
+ }
cl_object_attr_unlock(obj);
cl_isize_unlock(inode, 0);
}
CDEBUG(D_VFSTRACE, "lock: %i [%lu, %lu]\n", mode, start, end);
memset(&cio->cui_link, 0, sizeof cio->cui_link);
- descr->cld_mode = mode;
+
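+ /* If the file already holds a group lock, enqueue in CLM_GROUP mode
+ * with that lock's group id instead of the caller's requested mode. */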
+ if (cio->cui_fd && (cio->cui_fd->fd_flags & LL_FILE_GROUP_LOCKED)) {
+ descr->cld_mode = CLM_GROUP;
+ descr->cld_gid = cio->cui_fd->fd_grouplock.cg_gid;
+ } else {
+ descr->cld_mode = mode;
+ }
descr->cld_obj = obj;
descr->cld_start = start;
descr->cld_end = end;
- cio->cui_link.cill_enq_flags = enqflags;
+ descr->cld_enq_flags = enqflags;
cl_io_lock_add(env, io, &cio->cui_link);
RETURN(0);
}
size_t size = io->u.ci_rw.crw_count;
cio->cui_iov_olen = 0;
- if (cl_io_is_sendfile(io) || size == cio->cui_tot_count)
+ if (!cl_is_normalio(env, io) || size == cio->cui_tot_count)
return;
if (cio->cui_tot_nrsegs == 0)
CLOBINVRNT(env, obj, ccc_object_invariant(obj));
- if (!cl_io_is_sendfile(io) && io->ci_continue) {
+ if (cl_is_normalio(env, io) && io->ci_continue) {
/* update the iov */
LASSERT(cio->cui_tot_nrsegs >= cio->cui_nrsegs);
LASSERT(cio->cui_tot_count >= nob);
*/
if (cl_isize_read(inode) < kms) {
if (vfslock)
- cl_isize_write(inode, kms);
- else
cl_isize_write_nolock(inode, kms);
+ else
+ cl_isize_write(inode, kms);
}
}
}
*
* - o_generation
*
- * - and IO epoch (stored in o_easize),
+ * - o_ioepoch,
*
* and capability.
*/
if (slice->crs_req->crq_type == CRT_WRITE) {
if (flags & OBD_MD_FLEPOCH) {
oa->o_valid |= OBD_MD_FLEPOCH;
- oa->o_easize = cl_i2info(inode)->lli_ioepoch;
+ oa->o_ioepoch = cl_i2info(inode)->lli_ioepoch;
valid_flags |= OBD_MD_FLMTIME|OBD_MD_FLCTIME|
OBD_MD_FLUID|OBD_MD_FLGID|
OBD_MD_FLFID|OBD_MD_FLGENER;
oinfo.oi_oa = oa;
oinfo.oi_md = lsm;
+ oinfo.oi_capa = capa;
/* XXX: this looks unnecessary now. */
rc = obd_setattr_rqset(cl_i2sbi(inode)->ll_dt_exp, &oinfo,
env = cl_env_get(&refcheck);
emergency = IS_ERR(env);
if (emergency) {
- mutex_lock(&ccc_inode_fini_guard);
+ cfs_mutex_lock(&ccc_inode_fini_guard);
LASSERT(ccc_inode_fini_env != NULL);
cl_env_implant(ccc_inode_fini_env, &refcheck);
env = ccc_inode_fini_env;
lli->lli_clob = NULL;
if (emergency) {
cl_env_unplant(ccc_inode_fini_env, &refcheck);
- mutex_unlock(&ccc_inode_fini_guard);
+ cfs_mutex_unlock(&ccc_inode_fini_guard);
} else
cl_env_put(env, &refcheck);
cl_env_reexit(cookie);
}
return type;
}
+
+/**
+ * Build inode number from passed @fid.
+ */
+ino_t cl_fid_build_ino(const struct lu_fid *fid)
+{
+ ino_t ino;
+ ENTRY;
+
+ if (fid_is_igif(fid)) {
+ ino = lu_igif_ino(fid);
+ RETURN(ino);
+ }
+
+ /* Simplistic inode allocation algorithm based on the fid; it has
+ * many downsides. */
+ ino = fid_flatten(fid) & 0xFFFFFFFF;
+
+ if (unlikely(ino == 0))
+ /* the first result ino is 0xFFC001, so this is rarely used */
+ ino = 0xffbcde;
+ ino = ino | 0x80000000;
+ RETURN(ino);
+}
+
+/**
+ * Build inode generation from passed @fid. If the FID overflows the 32-bit
+ * inode number, return a non-zero generation to distinguish them.
+ */
+__u32 cl_fid_build_gen(const struct lu_fid *fid)
+{
+ __u32 gen;
+ ENTRY;
+
+ if (fid_is_igif(fid)) {
+ gen = lu_igif_gen(fid);
+ RETURN(gen);
+ }
+
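+ /* the high 32 bits of the flattened fid, which do not fit into the
+ * 32-bit inode number built by cl_fid_build_ino(), become the generation */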
+ gen = (fid_flatten(fid) >> 32);
+ RETURN(gen);
+}
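+
+/*
+ * Illustrative sketch only (hypothetical caller, not part of this change):
+ * a VFS-facing layer could combine the two helpers when instantiating an
+ * inode from a fid, e.g.
+ *
+ *     inode->i_ino        = cl_fid_build_ino(fid);
+ *     inode->i_generation = cl_fid_build_gen(fid);
+ *
+ * so that igif fids map back to their native ino/generation, while other
+ * fids get a flattened 32-bit ino plus the overflow bits as the generation.
+ */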