-/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
- * vim:expandtab:shiftwidth=8:tabstop=8:
- *
+/*
* GPL HEADER START
*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
# include <linux/fs.h>
# include <linux/sched.h>
# include <linux/mm.h>
-# include <linux/smp_lock.h>
# include <linux/quotaops.h>
# include <linux/highmem.h>
# include <linux/pagemap.h>
* A mutex serializing calls to slp_inode_fini() under extreme memory
* pressure, when environments cannot be allocated.
*/
-static CFS_DEFINE_MUTEX(ccc_inode_fini_guard);
+static DEFINE_MUTEX(ccc_inode_fini_guard);
static int dummy_refcheck;
int ccc_global_init(struct lu_device_type *device_type)
return 0;
}
+static void ccc_object_size_lock(struct cl_object *obj)
+{
+ struct inode *inode = ccc_object_inode(obj);
+
+ cl_isize_lock(inode);
+ cl_object_attr_lock(obj);
+}
+
+static void ccc_object_size_unlock(struct cl_object *obj)
+{
+ struct inode *inode = ccc_object_inode(obj);
+
+ cl_object_attr_unlock(obj);
+ cl_isize_unlock(inode);
+}
+
/*****************************************************************************
*
* Page operations.
* of finding lock in the cache.
*/
if (state == CLS_HELD && lock->cll_state < CLS_HELD) {
- int rc;
-
- obj = slice->cls_obj;
- inode = ccc_object_inode(obj);
- attr = ccc_env_thread_attr(env);
-
- /* vmtruncate()->ll_truncate() first sets the i_size and then
- * the kms under both a DLM lock and the
- * ll_inode_size_lock(). If we don't get the
- * ll_inode_size_lock() here we can match the DLM lock and
- * reset i_size from the kms before the truncating path has
- * updated the kms. generic_file_write can then trust the
- * stale i_size when doing appending writes and effectively
- * cancel the result of the truncate. Getting the
- * ll_inode_size_lock() after the enqueue maintains the DLM
- * -> ll_inode_size_lock() acquiring order. */
- cl_isize_lock(inode, 0);
- cl_object_attr_lock(obj);
+ int rc;
+
+ obj = slice->cls_obj;
+ inode = ccc_object_inode(obj);
+ attr = ccc_env_thread_attr(env);
+
+ /* vmtruncate() sets the i_size
+ * under both a DLM lock and the
+ * ll_inode_size_lock(). If we don't get the
+ * ll_inode_size_lock() here we can match the DLM lock and
+ * reset i_size. generic_file_write can then trust the
+ * stale i_size when doing appending writes and effectively
+ * cancel the result of the truncate. Getting the
+ * ll_inode_size_lock() after the enqueue maintains the DLM
+ * -> ll_inode_size_lock() acquiring order. */
+ ccc_object_size_lock(obj);
rc = cl_object_attr_get(env, obj, attr);
if (rc == 0) {
if (lock->cll_descr.cld_start == 0 &&
} else {
CL_LOCK_DEBUG(D_INFO, env, lock, "attr_get: %d\n", rc);
}
- cl_object_attr_unlock(obj);
- cl_isize_unlock(inode, 0);
- }
- EXIT;
+ ccc_object_size_unlock(obj);
+ }
+ EXIT;
}
/*****************************************************************************
}
}
-static void ccc_object_size_lock(struct cl_object *obj)
-{
- struct inode *inode = ccc_object_inode(obj);
-
- cl_isize_lock(inode, 0);
- cl_object_attr_lock(obj);
-}
-
-static void ccc_object_size_unlock(struct cl_object *obj)
-{
- struct inode *inode = ccc_object_inode(obj);
-
- cl_object_attr_unlock(obj);
- cl_isize_unlock(inode, 0);
-}
-
/**
* Helper function that if necessary adjusts file size (inode->i_size), when
* position at the offset \a pos is accessed. File size can be arbitrary stale
struct obdo *oa;
obd_flag valid_flags;
- oa = attr->cra_oa;
- inode = ccc_object_inode(obj);
- valid_flags = OBD_MD_FLTYPE|OBD_MD_FLATIME;
-
- if (flags != (obd_valid)~0ULL)
- valid_flags |= OBD_MD_FLMTIME|OBD_MD_FLCTIME|OBD_MD_FLATIME;
- else {
- LASSERT(attr->cra_capa == NULL);
- attr->cra_capa = cl_capa_lookup(inode,
- slice->crs_req->crq_type);
- }
-
- if (slice->crs_req->crq_type == CRT_WRITE) {
- if (flags & OBD_MD_FLEPOCH) {
- oa->o_valid |= OBD_MD_FLEPOCH;
- oa->o_ioepoch = cl_i2info(inode)->lli_ioepoch;
- valid_flags |= OBD_MD_FLMTIME|OBD_MD_FLCTIME|
- OBD_MD_FLUID|OBD_MD_FLGID;
- }
- }
- obdo_from_inode(oa, inode, &cl_i2info(inode)->lli_fid,
- valid_flags & flags);
+ oa = attr->cra_oa;
+ inode = ccc_object_inode(obj);
+ valid_flags = OBD_MD_FLTYPE;
+
+ if ((flags & OBD_MD_FLOSSCAPA) != 0) {
+ LASSERT(attr->cra_capa == NULL);
+ attr->cra_capa = cl_capa_lookup(inode,
+ slice->crs_req->crq_type);
+ }
+
+ if (slice->crs_req->crq_type == CRT_WRITE) {
+ if (flags & OBD_MD_FLEPOCH) {
+ oa->o_valid |= OBD_MD_FLEPOCH;
+ oa->o_ioepoch = cl_i2info(inode)->lli_ioepoch;
+ valid_flags |= OBD_MD_FLMTIME | OBD_MD_FLCTIME |
+ OBD_MD_FLUID | OBD_MD_FLGID;
+ }
+ }
+ obdo_from_inode(oa, inode, valid_flags & flags);
+ obdo_set_parent_fid(oa, &cl_i2info(inode)->lli_fid);
+#ifdef __KERNEL__
+ memcpy(attr->cra_jobid, cl_i2info(inode)->lli_jobid,
+ JOBSTATS_JOBID_SIZE);
+#endif
}
const struct cl_req_operations ccc_req_ops = {
io->u.ci_setattr.sa_valid = attr->ia_valid;
io->u.ci_setattr.sa_capa = capa;
+again:
if (cl_io_init(env, io, CIT_SETATTR, io->ci_obj) == 0) {
struct ccc_io *cio = ccc_env_io(env);
result = io->ci_result;
}
cl_io_fini(env, io);
- cl_env_put(env, &refcheck);
- RETURN(result);
+ if (unlikely(io->ci_need_restart))
+ goto again;
+ cl_env_put(env, &refcheck);
+ RETURN(result);
}
/*****************************************************************************
env = cl_env_get(&refcheck);
emergency = IS_ERR(env);
if (emergency) {
- cfs_mutex_lock(&ccc_inode_fini_guard);
+ mutex_lock(&ccc_inode_fini_guard);
LASSERT(ccc_inode_fini_env != NULL);
cl_env_implant(ccc_inode_fini_env, &refcheck);
env = ccc_inode_fini_env;
lli->lli_clob = NULL;
if (emergency) {
cl_env_unplant(ccc_inode_fini_env, &refcheck);
- cfs_mutex_unlock(&ccc_inode_fini_guard);
+ mutex_unlock(&ccc_inode_fini_guard);
} else
cl_env_put(env, &refcheck);
cl_env_reexit(cookie);
gen = (fid_flatten(fid) >> 32);
RETURN(gen);
}
+
+/* The lsm is unreliable after the HSM implementation, as the layout can be
+ * changed at any time. This only supports old, non-clio-ized interfaces. It
+ * will cause a deadlock if clio operations are called with this extra layout
+ * refcount held: if the layout changed during the IO, ll_layout_refresh()
+ * must wait for the refcount to drop to zero before destroying the old layout.
+ *
+ * Note that the lsm returned by this function may be invalid unless it is
+ * called under the layout lock - MDS_INODELOCK_LAYOUT. */
+struct lov_stripe_md *ccc_inode_lsm_get(struct inode *inode)
+{
+ return lov_lsm_get(cl_i2info(inode)->lli_clob);
+}
+
+inline void ccc_inode_lsm_put(struct inode *inode, struct lov_stripe_md *lsm)
+{
+        lov_lsm_put(cl_i2info(inode)->lli_clob, lsm);
+}