* This file is part of Lustre, http://www.lustre.org/
* Lustre is a trademark of Sun Microsystems, Inc.
*
- * cl code shared between vvp and liblustre (and other Lustre clients in the
- * future).
- *
* Author: Nikita Danilov <nikita.danilov@sun.com>
*/
#include <cl_object.h>
#include "llite_internal.h"
-
-static const struct cl_req_operations ccc_req_ops;
-
-/*
- * ccc_ prefix stands for "Common Client Code".
- */
-
-static struct kmem_cache *ccc_thread_kmem;
-static struct kmem_cache *ccc_req_kmem;
-
-static struct lu_kmem_descr ccc_caches[] = {
- {
- .ckd_cache = &ccc_thread_kmem,
- .ckd_name = "ccc_thread_kmem",
- .ckd_size = sizeof (struct ccc_thread_info),
- },
- {
- .ckd_cache = &ccc_req_kmem,
- .ckd_name = "ccc_req_kmem",
- .ckd_size = sizeof (struct ccc_req)
- },
- {
- .ckd_cache = NULL
- }
-};
-
-/*****************************************************************************
- *
- * Vvp device and device type functions.
- *
- */
-
-void *ccc_key_init(const struct lu_context *ctx, struct lu_context_key *key)
-{
- struct ccc_thread_info *info;
-
- OBD_SLAB_ALLOC_PTR_GFP(info, ccc_thread_kmem, GFP_NOFS);
- if (info == NULL)
- info = ERR_PTR(-ENOMEM);
- return info;
-}
-
-void ccc_key_fini(const struct lu_context *ctx,
- struct lu_context_key *key, void *data)
-{
- struct ccc_thread_info *info = data;
- OBD_SLAB_FREE_PTR(info, ccc_thread_kmem);
-}
-
-struct lu_context_key ccc_key = {
- .lct_tags = LCT_CL_THREAD,
- .lct_init = ccc_key_init,
- .lct_fini = ccc_key_fini
-};
-
-int ccc_req_init(const struct lu_env *env, struct cl_device *dev,
- struct cl_req *req)
-{
- struct ccc_req *vrq;
- int result;
-
- OBD_SLAB_ALLOC_PTR_GFP(vrq, ccc_req_kmem, GFP_NOFS);
- if (vrq != NULL) {
- cl_req_slice_add(req, &vrq->crq_cl, dev, &ccc_req_ops);
- result = 0;
- } else
- result = -ENOMEM;
- return result;
-}
+#include "vvp_internal.h"
/**
- * An `emergency' environment used by ccc_inode_fini() when cl_env_get()
- * fails. Access to this environment is serialized by ccc_inode_fini_guard
+ * An `emergency' environment used by cl_inode_fini() when cl_env_get()
+ * fails. Access to this environment is serialized by cl_inode_fini_guard
* mutex.
*/
-static struct lu_env *ccc_inode_fini_env = NULL;
+struct lu_env *cl_inode_fini_env;
+int cl_inode_fini_refcheck;
/**
- * A mutex serializing calls to slp_inode_fini() under extreme memory
+ * A mutex serializing calls to cl_inode_fini() under extreme memory
* pressure, when environments cannot be allocated.
*/
-static DEFINE_MUTEX(ccc_inode_fini_guard);
-static int dummy_refcheck;
-
-int ccc_global_init(struct lu_device_type *device_type)
-{
- int result;
-
- result = lu_kmem_init(ccc_caches);
- if (result)
- return result;
-
- result = lu_device_type_init(device_type);
- if (result)
- goto out_kmem;
-
- ccc_inode_fini_env = cl_env_alloc(&dummy_refcheck,
- LCT_REMEMBER|LCT_NOREF);
- if (IS_ERR(ccc_inode_fini_env)) {
- result = PTR_ERR(ccc_inode_fini_env);
- goto out_device;
- }
-
- ccc_inode_fini_env->le_ctx.lc_cookie = 0x4;
- return 0;
-out_device:
- lu_device_type_fini(device_type);
-out_kmem:
- lu_kmem_fini(ccc_caches);
- return result;
-}
-
-void ccc_global_fini(struct lu_device_type *device_type)
-{
- if (ccc_inode_fini_env != NULL) {
- cl_env_put(ccc_inode_fini_env, &dummy_refcheck);
- ccc_inode_fini_env = NULL;
- }
- lu_device_type_fini(device_type);
- lu_kmem_fini(ccc_caches);
-}
-
-/*****************************************************************************
- *
- * Transfer operations.
- *
- */
+static DEFINE_MUTEX(cl_inode_fini_guard);
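+
+/*
+ * Note: cl_inode_fini_env and cl_inode_fini_refcheck replace the former
+ * file-local ccc_inode_fini_env/dummy_refcheck pair that ccc_global_init()
+ * used to set up with cl_env_alloc(); they are exported now, presumably so
+ * that the client module init path can pre-allocate the environment instead.
+ * cl_inode_fini() below tries cl_env_get() first and falls back to this
+ * pre-allocated environment, under cl_inode_fini_guard, only when that
+ * allocation fails.
+ */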
-void ccc_req_completion(const struct lu_env *env,
- const struct cl_req_slice *slice, int ioret)
-{
- struct ccc_req *vrq;
-
- if (ioret > 0)
- cl_stats_tally(slice->crs_dev, slice->crs_req->crq_type, ioret);
-
- vrq = cl2ccc_req(slice);
- OBD_SLAB_FREE_PTR(vrq, ccc_req_kmem);
-}
-
-/**
- * Implementation of struct cl_req_operations::cro_attr_set() for ccc
- * layer. ccc is responsible for
- *
- * - o_[mac]time
- *
- * - o_mode
- *
- * - o_parent_seq
- *
- * - o_[ug]id
- *
- * - o_parent_oid
- *
- * - o_parent_ver
- *
- * - o_ioepoch,
- *
- * and capability.
- */
-void ccc_req_attr_set(const struct lu_env *env,
- const struct cl_req_slice *slice,
- const struct cl_object *obj,
- struct cl_req_attr *attr, u64 flags)
-{
- struct inode *inode;
- struct obdo *oa;
- u32 valid_flags;
-
- oa = attr->cra_oa;
- inode = vvp_object_inode(obj);
- valid_flags = OBD_MD_FLTYPE;
-
- if ((flags & OBD_MD_FLOSSCAPA) != 0) {
- LASSERT(attr->cra_capa == NULL);
- attr->cra_capa = cl_capa_lookup(inode,
- slice->crs_req->crq_type);
- }
-
- if (slice->crs_req->crq_type == CRT_WRITE) {
- if (flags & OBD_MD_FLEPOCH) {
- oa->o_valid |= OBD_MD_FLEPOCH;
- oa->o_ioepoch = ll_i2info(inode)->lli_ioepoch;
- valid_flags |= OBD_MD_FLMTIME | OBD_MD_FLCTIME |
- OBD_MD_FLUID | OBD_MD_FLGID;
- }
- }
- obdo_from_inode(oa, inode, valid_flags & flags);
- obdo_set_parent_fid(oa, &ll_i2info(inode)->lli_fid);
- if (OBD_FAIL_CHECK(OBD_FAIL_LFSCK_INVALID_PFID))
- oa->o_parent_oid++;
- memcpy(attr->cra_jobid, ll_i2info(inode)->lli_jobid,
- LUSTRE_JOBID_SIZE);
-}
-
-static const struct cl_req_operations ccc_req_ops = {
- .cro_attr_set = ccc_req_attr_set,
- .cro_completion = ccc_req_completion
-};
-
-int cl_setattr_ost(struct inode *inode, const struct iattr *attr,
- struct obd_capa *capa)
+int cl_setattr_ost(struct cl_object *obj, const struct iattr *attr,
+ unsigned int attr_flags)
{
struct lu_env *env;
struct cl_io *io;
if (IS_ERR(env))
RETURN(PTR_ERR(env));
- io = ccc_env_thread_io(env);
- io->ci_obj = ll_i2info(inode)->lli_clob;
+ io = vvp_env_thread_io(env);
+ io->ci_obj = obj;
+ io->ci_verify_layout = 1;
io->u.ci_setattr.sa_attr.lvb_atime = LTIME_S(attr->ia_atime);
io->u.ci_setattr.sa_attr.lvb_mtime = LTIME_S(attr->ia_mtime);
io->u.ci_setattr.sa_attr.lvb_ctime = LTIME_S(attr->ia_ctime);
io->u.ci_setattr.sa_attr.lvb_size = attr->ia_size;
+ io->u.ci_setattr.sa_attr_flags = attr_flags;
io->u.ci_setattr.sa_valid = attr->ia_valid;
- io->u.ci_setattr.sa_parent_fid = ll_inode2fid(inode);
- io->u.ci_setattr.sa_capa = capa;
+ io->u.ci_setattr.sa_parent_fid = lu_object_fid(&obj->co_lu);
again:
if (cl_io_init(env, io, CIT_SETATTR, io->ci_obj) == 0) {
cl_io_fini(env, io);
if (unlikely(io->ci_need_restart))
goto again;
- /* HSM import case: file is released, cannot be restored
- * no need to fail except if restore registration failed
- * with -ENODATA */
- if (result == -ENODATA && io->ci_restore_needed &&
- io->ci_result != -ENODATA)
- result = 0;
+
cl_env_put(env, &refcheck);
RETURN(result);
}
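+
+/*
+ * Callers now pass the cl_object and attr flags directly instead of an
+ * inode and capability.  For illustration only (assuming the caller still
+ * reaches the object through ll_i2info(inode)->lli_clob, as set up by
+ * cl_file_inode_init() below), a call would look like:
+ *
+ *	rc = cl_setattr_ost(ll_i2info(inode)->lli_clob, attr, flags);
+ */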
-/*****************************************************************************
- *
- * Type conversions.
- *
- */
-
-struct ccc_req *cl2ccc_req(const struct cl_req_slice *slice)
-{
- return container_of0(slice, struct ccc_req, crq_cl);
-}
-
/**
* Initialize or update CLIO structures for regular files when new
* meta-data arrives from the server.
struct cl_object *clob;
struct lu_site *site;
struct lu_fid *fid;
- struct cl_object_conf conf = {
- .coc_inode = inode,
- .u = {
- .coc_md = md
- }
- };
+ struct cl_object_conf conf = {
+ .coc_inode = inode,
+ .u = {
+ .coc_layout = md->layout,
+ }
+ };
int result = 0;
int refcheck;
* locked by I_NEW bit.
*/
lli->lli_clob = clob;
- lli->lli_has_smd = lsm_has_objects(md->lsm);
lu_object_ref_add(&clob->co_lu, "inode", inode);
} else
result = PTR_ERR(clob);
bkt = lu_site_bkt_from_fid(site, &header->loh_fid);
- init_waitqueue_entry_current(&waiter);
+ init_waitqueue_entry(&waiter, current);
add_wait_queue(&bkt->lsb_marche_funebre, &waiter);
while (1) {
set_current_state(TASK_UNINTERRUPTIBLE);
if (atomic_read(&header->loh_ref) == 1)
break;
- waitq_wait(&waiter, TASK_UNINTERRUPTIBLE);
+ schedule();
}
set_current_state(TASK_RUNNING);
cookie = cl_env_reenter();
env = cl_env_get(&refcheck);
emergency = IS_ERR(env);
- if (emergency) {
- mutex_lock(&ccc_inode_fini_guard);
- LASSERT(ccc_inode_fini_env != NULL);
- cl_env_implant(ccc_inode_fini_env, &refcheck);
- env = ccc_inode_fini_env;
- }
+ if (emergency) {
+ mutex_lock(&cl_inode_fini_guard);
+ LASSERT(cl_inode_fini_env != NULL);
+ cl_env_implant(cl_inode_fini_env, &refcheck);
+ env = cl_inode_fini_env;
+ }
+
/*
* cl_object cache is a slave to inode cache (which, in turn
* is a slave to dentry cache), don't keep cl_object in memory
lu_object_ref_del(&clob->co_lu, "inode", inode);
cl_object_put_last(env, clob);
lli->lli_clob = NULL;
- if (emergency) {
- cl_env_unplant(ccc_inode_fini_env, &refcheck);
- mutex_unlock(&ccc_inode_fini_guard);
- } else
- cl_env_put(env, &refcheck);
+ if (emergency) {
+ cl_env_unplant(cl_inode_fini_env, &refcheck);
+ mutex_unlock(&cl_inode_fini_guard);
+ } else {
+ cl_env_put(env, &refcheck);
+ }
+
cl_env_reexit(cookie);
}
}
/**
- * return IF_* type for given lu_dirent entry.
- * IF_* flag shld be converted to particular OS file type in
- * platform llite module.
- */
-__u16 ll_dirent_type_get(struct lu_dirent *ent)
-{
- __u16 type = 0;
- struct luda_type *lt;
- int len = 0;
-
- if (le32_to_cpu(ent->lde_attrs) & LUDA_TYPE) {
- const unsigned align = sizeof(struct luda_type) - 1;
-
- len = le16_to_cpu(ent->lde_namelen);
- len = (len + align) & ~align;
- lt = (void *)ent->lde_name + len;
- type = IFTODT(le16_to_cpu(lt->lt_type));
- }
- return type;
-}
-
-/**
- * build inode number from passed @fid */
+ * Build inode number from the passed @fid.
+ */
__u64 cl_fid_build_ino(const struct lu_fid *fid, int api32)
{
gen = (fid_flatten(fid) >> 32);
RETURN(gen);
}
-
-/* lsm is unreliable after hsm implementation as layout can be changed at
- * any time. This is only to support old, non-clio-ized interfaces. It will
- * cause deadlock if clio operations are called with this extra layout refcount
- * because in case the layout changed during the IO, ll_layout_refresh() will
- * have to wait for the refcount to become zero to destroy the older layout.
- *
- * Notice that the lsm returned by this function may not be valid unless called
- * inside layout lock - MDS_INODELOCK_LAYOUT. */
-struct lov_stripe_md *ccc_inode_lsm_get(struct inode *inode)
-{
- return lov_lsm_get(ll_i2info(inode)->lli_clob);
-}
-
-void inline ccc_inode_lsm_put(struct inode *inode, struct lov_stripe_md *lsm)
-{
- lov_lsm_put(ll_i2info(inode)->lli_clob, lsm);
-}