*/
struct lu_object *ccc_object_alloc(const struct lu_env *env,
- const struct lu_object_header *_,
+ const struct lu_object_header *unused,
struct lu_device *dev,
const struct cl_object_operations *clops,
const struct lu_object_operations *luops)
int ccc_lock_init(const struct lu_env *env,
struct cl_object *obj, struct cl_lock *lock,
- const struct cl_io *_,
+ const struct cl_io *unused,
const struct cl_lock_operations *lkops)
{
struct ccc_lock *clk;
{
}
-void ccc_transient_page_own(const struct lu_env *env,
+int ccc_transient_page_own(const struct lu_env *env,
const struct cl_page_slice *slice,
- struct cl_io *_)
+ struct cl_io *unused,
+ int nonblock)
{
ccc_transient_page_verify(slice->cpl_page);
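+ /* a transient page is private to the IO that created it, so owning it
+ * always succeeds and the nonblock flag needs no special handling */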
+ return 0;
}
void ccc_transient_page_assume(const struct lu_env *env,
const struct cl_page_slice *slice,
- struct cl_io *_)
+ struct cl_io *unused)
{
ccc_transient_page_verify(slice->cpl_page);
}
void ccc_transient_page_unassume(const struct lu_env *env,
const struct cl_page_slice *slice,
- struct cl_io *_)
+ struct cl_io *unused)
{
ccc_transient_page_verify(slice->cpl_page);
}
void ccc_transient_page_disown(const struct lu_env *env,
const struct cl_page_slice *slice,
- struct cl_io *_)
+ struct cl_io *unused)
{
ccc_transient_page_verify(slice->cpl_page);
}
void ccc_transient_page_discard(const struct lu_env *env,
const struct cl_page_slice *slice,
- struct cl_io *_)
+ struct cl_io *unused)
{
struct cl_page *page = slice->cpl_page;
int ccc_transient_page_prep(const struct lu_env *env,
const struct cl_page_slice *slice,
- struct cl_io *_)
+ struct cl_io *unused)
{
ENTRY;
/* transient page should always be sent. */
int ccc_lock_enqueue(const struct lu_env *env,
const struct cl_lock_slice *slice,
- struct cl_io *_, __u32 enqflags)
+ struct cl_io *unused, __u32 enqflags)
{
CLOBINVRNT(env, slice->cls_obj, ccc_object_invariant(slice->cls_obj));
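+ /* nothing to enqueue at this layer; the layers below perform the
+ * actual lock enqueue */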
return 0;
* cached lock "fits" into io.
*
* \param slice lock to be checked
- *
* \param io IO that wants a lock.
*
* \see lov_lock_fits_into().
* doesn't enqueue CLM_WRITE sub-locks.
*/
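+ /* a glimpse only needs attributes, so only a phantom-mode cached lock
+ * is treated as a fit for it */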
if (cio->cui_glimpse)
- result = descr->cld_mode != CLM_WRITE;
+ result = descr->cld_mode == CLM_PHANTOM;
+
/*
* Also, don't match incomplete write locks for read, otherwise read
* would enqueue missing sub-locks in the write mode.
oinfo.oi_oa = oa;
oinfo.oi_md = lsm;
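+ /* also pass the capability down with the request, assuming capa is the
+ * capability the caller already obtained for this inode */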
+ oinfo.oi_capa = capa;
/* XXX: this looks unnecessary now. */
rc = obd_setattr_rqset(cl_i2sbi(inode)->ll_dt_exp, &oinfo,
}
return type;
}
+
+/**
+ * build the inode number from the passed @fid */
+ino_t cl_fid_build_ino(struct lu_fid *fid)
+{
+ ino_t ino;
+ ENTRY;
+
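+ /* an igif fid embeds the inode number and generation of a pre-2.0
+ * filesystem object, so the original inode number can be reused directly */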
+ if (fid_is_igif(fid)) {
+ ino = lu_igif_ino(fid);
+ RETURN(ino);
+ }
+
+ /* A crude inode allocation algorithm based on the fid;
+ * it has many downsides. */
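+ /* fid_flatten() packs the 128-bit fid into 64 bits; the low 32 bits
+ * become the inode number while the high 32 bits become the generation
+ * (see cl_fid_build_gen() below) */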
+ ino = fid_flatten(fid) & 0xFFFFFFFF;
+
+ if (unlikely(ino == 0))
+ /* the first result ino is 0xFFC001, so this fallback is rarely used */
+ ino = 0xffbcde;
+ ino = ino | 0x80000000;
+ RETURN(ino);
+}
+
+/**
+ * build the inode generation from the passed @fid. If the fid overflows the
+ * 32-bit inode number, return a non-zero generation so that colliding inode
+ * numbers can still be told apart. */
+__u32 cl_fid_build_gen(struct lu_fid *fid)
+{
+ __u32 gen;
+ ENTRY;
+
+ if (fid_is_igif(fid)) {
+ gen = lu_igif_gen(fid);
+ RETURN(gen);
+ }
+
+ gen = (fid_flatten(fid) >> 32);
+ RETURN(gen);
+}
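+
+/*
+ * Example with hypothetical values: if fid_flatten(fid) == 0x0000000200000123
+ * (and the fid is not an igif), cl_fid_build_ino() returns 0x80000123 and
+ * cl_fid_build_gen() returns 0x2, so two fids that share the same low 32 bits
+ * are still told apart by their generations.
+ */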