-/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
- * vim:expandtab:shiftwidth=8:tabstop=8:
- *
+/*
* GPL HEADER START
*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* GPL HEADER END
*/
/*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Whamcloud, Inc.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
#include <sys/stat.h>
#include <sys/queue.h>
#include <fcntl.h>
-# include <sysio.h>
-# ifdef HAVE_XTIO_H
-# include <xtio.h>
-# endif
-# include <fs.h>
-# include <mount.h>
-# include <inode.h>
-# ifdef HAVE_FILE_H
-# include <file.h>
-# endif
# include <liblustre.h>
#endif
{
struct ccc_thread_info *info;
- OBD_SLAB_ALLOC_PTR(info, ccc_thread_kmem);
+ OBD_SLAB_ALLOC_PTR_GFP(info, ccc_thread_kmem, CFS_ALLOC_IO);
if (info == NULL)
info = ERR_PTR(-ENOMEM);
return info;
{
struct ccc_session *session;
- OBD_SLAB_ALLOC_PTR(session, ccc_session_kmem);
+ OBD_SLAB_ALLOC_PTR_GFP(session, ccc_session_kmem, CFS_ALLOC_IO);
if (session == NULL)
session = ERR_PTR(-ENOMEM);
return session;
struct ccc_req *vrq;
int result;
- OBD_SLAB_ALLOC_PTR(vrq, ccc_req_kmem);
+ OBD_SLAB_ALLOC_PTR_GFP(vrq, ccc_req_kmem, CFS_ALLOC_IO);
if (vrq != NULL) {
cl_req_slice_add(req, &vrq->crq_cl, dev, &ccc_req_ops);
result = 0;
* A mutex serializing calls to slp_inode_fini() under extreme memory
* pressure, when environments cannot be allocated.
*/
-static DEFINE_MUTEX(ccc_inode_fini_guard);
+static CFS_DEFINE_MUTEX(ccc_inode_fini_guard);
static int dummy_refcheck;
int ccc_global_init(struct lu_device_type *device_type)
int result;
result = lu_kmem_init(ccc_caches);
- if (result == 0) {
- result = lu_device_type_init(device_type);
- ccc_inode_fini_env = cl_env_alloc(&dummy_refcheck,
- LCT_REMEMBER|LCT_NOREF);
- if (IS_ERR(ccc_inode_fini_env))
- result = PTR_ERR(ccc_inode_fini_env);
- else
- ccc_inode_fini_env->le_ctx.lc_cookie = 0x4;
+ if (result)
+ return result;
+
+ result = lu_device_type_init(device_type);
+ if (result)
+ goto out_kmem;
+
+ ccc_inode_fini_env = cl_env_alloc(&dummy_refcheck,
+ LCT_REMEMBER|LCT_NOREF);
+ if (IS_ERR(ccc_inode_fini_env)) {
+ result = PTR_ERR(ccc_inode_fini_env);
+ goto out_device;
}
+
+ ccc_inode_fini_env->le_ctx.lc_cookie = 0x4;
+ return 0;
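+/* error unwind, in reverse order of initialization */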
+out_device:
+ lu_device_type_fini(device_type);
+out_kmem:
+ lu_kmem_fini(ccc_caches);
return result;
}
*/
struct lu_object *ccc_object_alloc(const struct lu_env *env,
- const struct lu_object_header *_,
+ const struct lu_object_header *unused,
struct lu_device *dev,
const struct cl_object_operations *clops,
const struct lu_object_operations *luops)
struct ccc_object *vob;
struct lu_object *obj;
- OBD_SLAB_ALLOC_PTR(vob, ccc_object_kmem);
+ OBD_SLAB_ALLOC_PTR_GFP(vob, ccc_object_kmem, CFS_ALLOC_IO);
if (vob != NULL) {
struct cl_object_header *hdr;
int ccc_lock_init(const struct lu_env *env,
struct cl_object *obj, struct cl_lock *lock,
- const struct cl_io *_,
+ const struct cl_io *unused,
const struct cl_lock_operations *lkops)
{
struct ccc_lock *clk;
CLOBINVRNT(env, obj, ccc_object_invariant(obj));
- OBD_SLAB_ALLOC_PTR(clk, ccc_lock_kmem);
+ OBD_SLAB_ALLOC_PTR_GFP(clk, ccc_lock_kmem, CFS_ALLOC_IO);
if (clk != NULL) {
cl_lock_slice_add(lock, &clk->clk_cl, obj, lkops);
result = 0;
lvb->lvb_mtime = cl_inode_mtime(inode);
lvb->lvb_atime = cl_inode_atime(inode);
lvb->lvb_ctime = cl_inode_ctime(inode);
+ /*
+ * LU-417: Add the dirty pages' block count, lest i_blocks report 0 and
+ * "cp" or "tar" on a remote node think the file is completely sparse
+ * and skip it.
+ */
+ if (lvb->lvb_size > 0 && lvb->lvb_blocks == 0)
+ lvb->lvb_blocks = dirty_cnt(inode);
RETURN(0);
}
return 0;
}
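+/* Helpers taking and releasing both the inode size lock and the cl_object
+ * attribute lock; the size lock is taken first and released last. */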
+static void ccc_object_size_lock(struct cl_object *obj)
+{
+ struct inode *inode = ccc_object_inode(obj);
+
+ cl_isize_lock(inode);
+ cl_object_attr_lock(obj);
+}
+
+static void ccc_object_size_unlock(struct cl_object *obj)
+{
+ struct inode *inode = ccc_object_inode(obj);
+
+ cl_object_attr_unlock(obj);
+ cl_isize_unlock(inode);
+}
+
/*****************************************************************************
*
* Page operations.
const struct cl_page_slice *slice,
struct cl_io *io)
{
- struct ccc_io *vio = ccc_env_io(env);
+ struct ccc_io *cio = ccc_env_io(env);
struct cl_lock_descr *desc = &ccc_env_info(env)->cti_descr;
struct cl_page *page = slice->cpl_page;
if (io->ci_type == CIT_READ || io->ci_type == CIT_WRITE ||
io->ci_type == CIT_FAULT) {
- if (vio->cui_fd->fd_flags & LL_FILE_GROUP_LOCKED)
+ if (cio->cui_fd->fd_flags & LL_FILE_GROUP_LOCKED)
result = -EBUSY;
else {
desc->cld_start = page->cp_index;
{
}
-void ccc_transient_page_own(const struct lu_env *env,
+int ccc_transient_page_own(const struct lu_env *env,
const struct cl_page_slice *slice,
- struct cl_io *_)
+ struct cl_io *unused,
+ int nonblock)
{
ccc_transient_page_verify(slice->cpl_page);
+ return 0;
}
void ccc_transient_page_assume(const struct lu_env *env,
const struct cl_page_slice *slice,
- struct cl_io *_)
+ struct cl_io *unused)
{
ccc_transient_page_verify(slice->cpl_page);
}
void ccc_transient_page_unassume(const struct lu_env *env,
const struct cl_page_slice *slice,
- struct cl_io *_)
+ struct cl_io *unused)
{
ccc_transient_page_verify(slice->cpl_page);
}
void ccc_transient_page_disown(const struct lu_env *env,
const struct cl_page_slice *slice,
- struct cl_io *_)
+ struct cl_io *unused)
{
ccc_transient_page_verify(slice->cpl_page);
}
void ccc_transient_page_discard(const struct lu_env *env,
const struct cl_page_slice *slice,
- struct cl_io *_)
+ struct cl_io *unused)
{
struct cl_page *page = slice->cpl_page;
int ccc_transient_page_prep(const struct lu_env *env,
const struct cl_page_slice *slice,
- struct cl_io *_)
+ struct cl_io *unused)
{
ENTRY;
/* transient page should always be sent. */
*
*/
+void ccc_lock_delete(const struct lu_env *env,
+ const struct cl_lock_slice *slice)
+{
+ CLOBINVRNT(env, slice->cls_obj, ccc_object_invariant(slice->cls_obj));
+}
+
void ccc_lock_fini(const struct lu_env *env, struct cl_lock_slice *slice)
{
struct ccc_lock *clk = cl2ccc_lock(slice);
-
- CLOBINVRNT(env, slice->cls_obj, ccc_object_invariant(slice->cls_obj));
OBD_SLAB_FREE_PTR(clk, ccc_lock_kmem);
}
int ccc_lock_enqueue(const struct lu_env *env,
const struct cl_lock_slice *slice,
- struct cl_io *_, __u32 enqflags)
+ struct cl_io *unused, __u32 enqflags)
{
CLOBINVRNT(env, slice->cls_obj, ccc_object_invariant(slice->cls_obj));
return 0;
* cached lock "fits" into io.
*
* \param slice lock to be checked
- *
* \param io IO that wants a lock.
*
* \see lov_lock_fits_into().
*/
if (cio->cui_glimpse)
result = descr->cld_mode != CLM_WRITE;
+
/*
* Also, don't match incomplete write locks for read, otherwise read
* would enqueue missing sub-locks in the write mode.
- *
- * XXX this is a candidate for generic locking policy, to be moved
- * into cl_lock_lookup().
*/
else if (need->cld_mode != descr->cld_mode)
result = lock->cll_state >= CLS_ENQUEUED;
obj = slice->cls_obj;
inode = ccc_object_inode(obj);
- attr = &ccc_env_info(env)->cti_attr;
+ attr = ccc_env_thread_attr(env);
/* vmtruncate()->ll_truncate() first sets the i_size and then
* the kms under both a DLM lock and the
* cancel the result of the truncate. Getting the
* ll_inode_size_lock() after the enqueue maintains the DLM
* -> ll_inode_size_lock() acquiring order. */
- cl_isize_lock(inode, 0);
- cl_object_attr_lock(obj);
+ ccc_object_size_lock(obj);
rc = cl_object_attr_get(env, obj, attr);
if (rc == 0) {
if (lock->cll_descr.cld_start == 0 &&
lock->cll_descr.cld_end == CL_PAGE_EOF) {
- cl_isize_write(inode, attr->cat_kms);
- CDEBUG(D_INODE, DFID" updating i_size %llu\n",
+ cl_isize_write_nolock(inode, attr->cat_kms);
+ CDEBUG(D_INODE|D_VFSTRACE,
+ DFID" updating i_size "LPU64"\n",
PFID(lu_object_fid(&obj->co_lu)),
(__u64)cl_isize_read(inode));
}
cl_inode_mtime(inode) = attr->cat_mtime;
cl_inode_atime(inode) = attr->cat_atime;
cl_inode_ctime(inode) = attr->cat_ctime;
- } else
- CL_LOCK_DEBUG(D_ERROR, env, lock, "attr_get: %i\n", rc);
- cl_object_attr_unlock(obj);
- cl_isize_unlock(inode, 0);
- }
- EXIT;
+ } else {
+ CL_LOCK_DEBUG(D_INFO, env, lock, "attr_get: %d\n", rc);
+ }
+ ccc_object_size_unlock(obj);
+ }
+ EXIT;
}
/*****************************************************************************
__u32 enqflags, enum cl_lock_mode mode,
pgoff_t start, pgoff_t end)
{
- struct ccc_io *vio = ccc_env_io(env);
- struct cl_lock_descr *descr = &vio->cui_link.cill_descr;
+ struct ccc_io *cio = ccc_env_io(env);
+ struct cl_lock_descr *descr = &cio->cui_link.cill_descr;
struct cl_object *obj = io->ci_obj;
CLOBINVRNT(env, obj, ccc_object_invariant(obj));
ENTRY;
- CDEBUG(D_VFSTRACE, "lock: %i [%lu, %lu]\n", mode, start, end);
+ CDEBUG(D_VFSTRACE, "lock: %d [%lu, %lu]\n", mode, start, end);
- memset(&vio->cui_link, 0, sizeof vio->cui_link);
- descr->cld_mode = mode;
+ memset(&cio->cui_link, 0, sizeof cio->cui_link);
+
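+ /* an IO under an active group lock must take a CLM_GROUP lock
+ * with the group's gid, regardless of the requested mode */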
+ if (cio->cui_fd && (cio->cui_fd->fd_flags & LL_FILE_GROUP_LOCKED)) {
+ descr->cld_mode = CLM_GROUP;
+ descr->cld_gid = cio->cui_fd->fd_grouplock.cg_gid;
+ } else {
+ descr->cld_mode = mode;
+ }
descr->cld_obj = obj;
descr->cld_start = start;
descr->cld_end = end;
+ descr->cld_enq_flags = enqflags;
- vio->cui_link.cill_enq_flags = enqflags;
- cl_io_lock_add(env, io, &vio->cui_link);
+ cl_io_lock_add(env, io, &cio->cui_link);
RETURN(0);
}
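+/*
+ * Clip the iovec array so that it covers exactly crw_count bytes of this
+ * read or write, saving the original length of the last (partially used)
+ * segment in cui_iov_olen so that ccc_io_advance() can restore it.
+ */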
+void ccc_io_update_iov(const struct lu_env *env,
+ struct ccc_io *cio, struct cl_io *io)
+{
+ int i;
+ size_t size = io->u.ci_rw.crw_count;
+
+ cio->cui_iov_olen = 0;
+ if (!cl_is_normalio(env, io))
+ return;
+
+ for (i = 0; i < cio->cui_tot_nrsegs; i++) {
+ struct iovec *iv = &cio->cui_iov[i];
+
+ if (iv->iov_len < size)
+ size -= iv->iov_len;
+ else {
+ if (iv->iov_len > size) {
+ cio->cui_iov_olen = iv->iov_len;
+ iv->iov_len = size;
+ }
+ break;
+ }
+ }
+
+ cio->cui_nrsegs = i + 1;
+}
+
int ccc_io_one_lock(const struct lu_env *env, struct cl_io *io,
__u32 enqflags, enum cl_lock_mode mode,
loff_t start, loff_t end)
{
struct cl_object *obj = io->ci_obj;
-
return ccc_io_one_lock_index(env, io, enqflags, mode,
cl_index(obj, start), cl_index(obj, end));
}
ccc_object_invariant(ios->cis_io->ci_obj));
}
-static void ccc_object_size_lock(struct cl_object *obj, int vfslock)
+void ccc_io_advance(const struct lu_env *env,
+ const struct cl_io_slice *ios,
+ size_t nob)
{
- struct inode *inode = ccc_object_inode(obj);
+ struct ccc_io *cio = cl2ccc_io(env, ios);
+ struct cl_io *io = ios->cis_io;
+ struct cl_object *obj = ios->cis_io->ci_obj;
- if (vfslock)
- cl_isize_lock(inode, 0);
- cl_object_attr_lock(obj);
-}
+ CLOBINVRNT(env, obj, ccc_object_invariant(obj));
-static void ccc_object_size_unlock(struct cl_object *obj, int vfslock)
-{
- struct inode *inode = ccc_object_inode(obj);
+ if (cl_is_normalio(env, io) && io->ci_continue) {
+ /* update the iov */
+ LASSERT(cio->cui_tot_nrsegs >= cio->cui_nrsegs);
+ LASSERT(cio->cui_tot_count >= nob);
+
+ cio->cui_iov += cio->cui_nrsegs;
+ cio->cui_tot_nrsegs -= cio->cui_nrsegs;
+ cio->cui_tot_count -= nob;
- cl_object_attr_unlock(obj);
- if (vfslock)
- cl_isize_unlock(inode, 0);
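+ /* restore the iovec segment clipped by ccc_io_update_iov():
+ * skip the bytes just consumed and put back the remainder of
+ * the original segment length */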
+ if (cio->cui_iov_olen) {
+ struct iovec *iv;
+
+ cio->cui_iov--;
+ cio->cui_tot_nrsegs++;
+ iv = &cio->cui_iov[0];
+ iv->iov_base += iv->iov_len;
+ LASSERT(cio->cui_iov_olen > iv->iov_len);
+ iv->iov_len = cio->cui_iov_olen - iv->iov_len;
+ }
+ }
}
/**
* protect consistency between inode size and cl_object
* attributes. cl_object_size_lock() protects consistency between cl_attr's of
* top-object and sub-objects.
- *
- * In page fault path cl_isize_lock cannot be taken, client has to live with
- * the resulting races.
*/
int ccc_prep_size(const struct lu_env *env, struct cl_object *obj,
- struct cl_io *io, loff_t pos, int vfslock)
+ struct cl_io *io, loff_t start, size_t count, int *exceed)
{
- struct cl_attr *attr = &ccc_env_info(env)->cti_attr;
+ struct cl_attr *attr = ccc_env_thread_attr(env);
struct inode *inode = ccc_object_inode(obj);
+ loff_t pos = start + count - 1;
loff_t kms;
int result;
* ll_inode_size_lock(). This guarantees that short reads are handled
* correctly in the face of concurrent writes and truncates.
*/
- ccc_object_size_lock(obj, vfslock);
+ ccc_object_size_lock(obj);
result = cl_object_attr_get(env, obj, attr);
if (result == 0) {
kms = attr->cat_kms;
* return a short read (B) or some zeroes at the end
* of the buffer (C)
*/
- ccc_object_size_unlock(obj, vfslock);
- return cl_glimpse_lock(env, io, inode, obj);
+ ccc_object_size_unlock(obj);
+ result = cl_glimpse_lock(env, io, inode, obj, 0);
+ if (result == 0 && exceed != NULL) {
+ /* If the objective page index exceeds the end-of-file
+ * page index, return directly. Do not expect the
+ * kernel to check such a case correctly;
+ * linux-2.6.18-128.1.1 missed doing that.
+ * --bug 17336 */
+ loff_t size = cl_isize_read(inode);
+ unsigned long cur_index = start >> CFS_PAGE_SHIFT;
+
+ if ((size == 0 && cur_index != 0) ||
+ (((size - 1) >> CFS_PAGE_SHIFT) < cur_index))
+ *exceed = 1;
+ }
+ return result;
} else {
/*
* region is within kms and, hence, within real file
* which will always be >= the kms value here.
* b=11081
*/
- /*
- * XXX in a page fault path, change inode size without
- * ll_inode_size_lock() held! there is a race
- * condition with truncate path. (see ll_extent_lock)
- */
- /*
- * XXX i_size_write() is not used because it is not
- * safe to take the ll_inode_size_lock() due to a
- * potential lock inversion (bug 6077). And since
- * it's not safe to use i_size_write() without a
- * covering mutex we do the assignment directly. It
- * is not critical that the size be correct.
- */
if (cl_isize_read(inode) < kms) {
- if (vfslock)
- cl_isize_write(inode, kms);
- else
- cl_isize_write_nolock(inode, kms);
+ cl_isize_write_nolock(inode, kms);
+ CDEBUG(D_VFSTRACE,
+ DFID" updating i_size "LPU64"\n",
+ PFID(lu_object_fid(&obj->co_lu)),
+ (__u64)cl_isize_read(inode));
+
}
}
}
- ccc_object_size_unlock(obj, vfslock);
+ ccc_object_size_unlock(obj);
return result;
}
{
struct ccc_req *vrq;
+ if (ioret > 0)
+ cl_stats_tally(slice->crs_dev, slice->crs_req->crq_type, ioret);
+
vrq = cl2ccc_req(slice);
OBD_SLAB_FREE_PTR(vrq, ccc_req_kmem);
}
*
* - o_mode
*
- * - o_fid (filled with inode number?!)
+ * - o_parent_seq
*
* - o_[ug]id
*
- * - o_generation
+ * - o_parent_oid
+ *
+ * - o_parent_ver
*
- * - and IO epoch (stored in o_easize),
+ * - o_ioepoch,
*
* and capability.
*/
struct obdo *oa;
obd_flag valid_flags;
- oa = attr->cra_oa;
- inode = ccc_object_inode(obj);
- valid_flags = OBD_MD_FLTYPE|OBD_MD_FLATIME;
-
- if (flags != (obd_valid)~0ULL)
- valid_flags |= OBD_MD_FLMTIME|OBD_MD_FLCTIME|OBD_MD_FLATIME;
- else {
- LASSERT(attr->cra_capa == NULL);
- attr->cra_capa = cl_capa_lookup(inode,
- slice->crs_req->crq_type);
- }
-
- if (slice->crs_req->crq_type == CRT_WRITE) {
- if (flags & OBD_MD_FLEPOCH) {
- oa->o_valid |= OBD_MD_FLEPOCH;
- oa->o_easize = cl_i2info(inode)->lli_ioepoch;
- valid_flags |= OBD_MD_FLMTIME|OBD_MD_FLCTIME|
- OBD_MD_FLUID|OBD_MD_FLGID|
- OBD_MD_FLFID|OBD_MD_FLGENER;
- }
- }
- obdo_from_inode(oa, inode, valid_flags & flags);
+ oa = attr->cra_oa;
+ inode = ccc_object_inode(obj);
+ valid_flags = OBD_MD_FLTYPE;
+
+ if ((flags & OBD_MD_FLOSSCAPA) != 0) {
+ LASSERT(attr->cra_capa == NULL);
+ attr->cra_capa = cl_capa_lookup(inode,
+ slice->crs_req->crq_type);
+ }
+
+ if (slice->crs_req->crq_type == CRT_WRITE) {
+ if (flags & OBD_MD_FLEPOCH) {
+ oa->o_valid |= OBD_MD_FLEPOCH;
+ oa->o_ioepoch = cl_i2info(inode)->lli_ioepoch;
+ valid_flags |= OBD_MD_FLMTIME | OBD_MD_FLCTIME |
+ OBD_MD_FLUID | OBD_MD_FLGID;
+ }
+ }
+ obdo_from_inode(oa, inode, valid_flags & flags);
+ obdo_set_parent_fid(oa, &cl_i2info(inode)->lli_fid);
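+ /* tag the request with the ID of the job that originated this IO,
+ * for per-job statistics on the server (jobstats) */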
+#ifdef __KERNEL__
+ memcpy(attr->cra_jobid, cl_i2info(inode)->lli_jobid,
+ JOBSTATS_JOBID_SIZE);
+#endif
}
const struct cl_req_operations ccc_req_ops = {
.cro_completion = ccc_req_completion
};
-/* Setattr helpers */
-int cl_setattr_do_truncate(struct inode *inode, loff_t size,
- struct obd_capa *capa)
+int cl_setattr_ost(struct inode *inode, const struct iattr *attr,
+ struct obd_capa *capa)
{
struct lu_env *env;
struct cl_io *io;
if (IS_ERR(env))
RETURN(PTR_ERR(env));
- io = &ccc_env_info(env)->cti_io;
+ io = ccc_env_thread_io(env);
io->ci_obj = cl_i2info(inode)->lli_clob;
- io->u.ci_truncate.tr_size = size;
- io->u.ci_truncate.tr_capa = capa;
- if (cl_io_init(env, io, CIT_TRUNC, io->ci_obj) == 0)
+
+ io->u.ci_setattr.sa_attr.lvb_atime = LTIME_S(attr->ia_atime);
+ io->u.ci_setattr.sa_attr.lvb_mtime = LTIME_S(attr->ia_mtime);
+ io->u.ci_setattr.sa_attr.lvb_ctime = LTIME_S(attr->ia_ctime);
+ io->u.ci_setattr.sa_attr.lvb_size = attr->ia_size;
+ io->u.ci_setattr.sa_valid = attr->ia_valid;
+ io->u.ci_setattr.sa_capa = capa;
+
+ if (cl_io_init(env, io, CIT_SETATTR, io->ci_obj) == 0) {
+ struct ccc_io *cio = ccc_env_io(env);
+
+ if (attr->ia_valid & ATTR_FILE)
+ /* populate the file descriptor for ftruncate to honor
+ * the group lock - see LU-787 */
+ cio->cui_fd = cl_iattr2fd(inode, attr);
+
result = cl_io_loop(env, io);
- else
+ } else {
result = io->ci_result;
+ }
cl_io_fini(env, io);
cl_env_put(env, &refcheck);
RETURN(result);
}
-int cl_setattr_ost(struct inode *inode, struct obd_capa *capa)
-{
- struct cl_inode_info *lli = cl_i2info(inode);
- struct lov_stripe_md *lsm = lli->lli_smd;
- int rc;
- obd_flag flags;
- struct obd_info oinfo = { { { 0 } } };
- struct obdo *oa;
-
- OBDO_ALLOC(oa);
- if (oa) {
- oa->o_id = lsm->lsm_object_id;
- oa->o_gr = lsm->lsm_object_gr;
- oa->o_valid = OBD_MD_FLID | OBD_MD_FLGROUP;
-
- flags = OBD_MD_FLTYPE | OBD_MD_FLATIME |
- OBD_MD_FLMTIME | OBD_MD_FLCTIME |
- OBD_MD_FLFID | OBD_MD_FLGENER |
- OBD_MD_FLGROUP;
-
- obdo_from_inode(oa, inode, flags);
-
- oinfo.oi_oa = oa;
- oinfo.oi_md = lsm;
-
- /* XXX: this looks unnecessary now. */
- rc = obd_setattr_rqset(cl_i2sbi(inode)->ll_dt_exp, &oinfo,
- NULL);
- if (rc)
- CERROR("obd_setattr_async fails: rc=%d\n", rc);
- OBDO_FREE(oa);
- } else {
- rc = -ENOMEM;
- }
- return rc;
-}
-
-
/*****************************************************************************
*
* Type conversions.
}
/**
- * Initializes or updates CLIO part when new meta-data arrives from the
- * server.
+ * Initialize or update CLIO structures for regular files when new
+ * meta-data arrives from the server.
*
- * - allocates cl_object if necessary,
- * - updated layout, if object was already here.
+ * \param inode regular file inode
+ * \param md new file metadata from MDS
+ * - allocates cl_object if necessary,
+ * - updates the layout, if the object was already there.
*/
-int cl_inode_init(struct inode *inode, struct lustre_md *md)
+int cl_file_inode_init(struct inode *inode, struct lustre_md *md)
{
struct lu_env *env;
struct cl_inode_info *lli;
struct cl_object *clob;
struct lu_site *site;
struct lu_fid *fid;
- const struct cl_object_conf conf = {
+ struct cl_object_conf conf = {
.coc_inode = inode,
.u = {
.coc_md = md
int result = 0;
int refcheck;
- /* LASSERT(inode->i_state & I_NEW); */
LASSERT(md->body->valid & OBD_MD_FLID);
-
- if (!S_ISREG(cl_inode_mode(inode)))
- return 0;
+ LASSERT(S_ISREG(cl_inode_mode(inode)));
env = cl_env_get(&refcheck);
if (IS_ERR(env))
LASSERT(fid_is_sane(fid));
if (lli->lli_clob == NULL) {
+ /* clob is a slave of the inode: an empty lli_clob means the inode
+ * is new, so no clob exists in the cache with the given fid and it
+ * is unnecessary to perform lookup-alloc-lookup-insert; just
+ * alloc and insert directly. */
+#ifdef __KERNEL__
+ LASSERT(inode->i_state & I_NEW);
+#endif
+ conf.coc_lu.loc_flags = LOC_F_NEW;
clob = cl_object_find(env, lu2cl_dev(site->ls_top_dev),
fid, &conf);
if (!IS_ERR(clob)) {
return result;
}
+/**
+ * Wait for others to drop their references to the object first, then drop
+ * the last one ourselves, which causes the object to be destroyed
+ * immediately. Must be called after cl_object_kill() against this object.
+ *
+ * The reason we want to do this is: destroying the top object will wait for
+ * its sub-objects to be destroyed first, so we cannot let the bottom layer
+ * (e.g. from ASTs) initiate destruction of the top object, which may
+ * deadlock. See bz22520.
+ */
+static void cl_object_put_last(struct lu_env *env, struct cl_object *obj)
+{
+ struct lu_object_header *header = obj->co_lu.lo_header;
+ cfs_waitlink_t waiter;
+
+ if (unlikely(cfs_atomic_read(&header->loh_ref) != 1)) {
+ struct lu_site *site = obj->co_lu.lo_dev->ld_site;
+ struct lu_site_bkt_data *bkt;
+
+ bkt = lu_site_bkt_from_fid(site, &header->loh_fid);
+
+ cfs_waitlink_init(&waiter);
+ cfs_waitq_add(&bkt->lsb_marche_funebre, &waiter);
+
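+ /* lsb_marche_funebre is signalled each time an object in this
+ * bucket is freed; sleep on it until we hold the only remaining
+ * reference */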
+ while (1) {
+ cfs_set_current_state(CFS_TASK_UNINT);
+ if (cfs_atomic_read(&header->loh_ref) == 1)
+ break;
+ cfs_waitq_wait(&waiter, CFS_TASK_UNINT);
+ }
+
+ cfs_set_current_state(CFS_TASK_RUNNING);
+ cfs_waitq_del(&bkt->lsb_marche_funebre, &waiter);
+ }
+
+ cl_object_put(env, obj);
+}
+
void cl_inode_fini(struct inode *inode)
{
struct lu_env *env;
int emergency;
if (clob != NULL) {
- struct lu_object_header *head = clob->co_lu.lo_header;
void *cookie;
cookie = cl_env_reenter();
env = cl_env_get(&refcheck);
emergency = IS_ERR(env);
if (emergency) {
- mutex_lock(&ccc_inode_fini_guard);
+ cfs_mutex_lock(&ccc_inode_fini_guard);
LASSERT(ccc_inode_fini_env != NULL);
cl_env_implant(ccc_inode_fini_env, &refcheck);
env = ccc_inode_fini_env;
*/
cl_object_kill(env, clob);
lu_object_ref_del(&clob->co_lu, "inode", inode);
- /* XXX temporary: this is racy */
- LASSERT(atomic_read(&head->loh_ref) == 1);
- cl_object_put(env, clob);
+ cl_object_put_last(env, clob);
lli->lli_clob = NULL;
if (emergency) {
cl_env_unplant(ccc_inode_fini_env, &refcheck);
- mutex_unlock(&ccc_inode_fini_guard);
+ cfs_mutex_unlock(&ccc_inode_fini_guard);
} else
cl_env_put(env, &refcheck);
cl_env_reexit(cookie);
}
}
+
+/**
+ * Return the IF_* type for a given lu_dirent entry.
+ * The IF_* flag should be converted to the particular OS file type in the
+ * platform llite module.
+ */
+ */
+__u16 ll_dirent_type_get(struct lu_dirent *ent)
+{
+ __u16 type = 0;
+ struct luda_type *lt;
+ int len = 0;
+
+ if (le32_to_cpu(ent->lde_attrs) & LUDA_TYPE) {
+ const unsigned align = sizeof(struct luda_type) - 1;
+
+ len = le16_to_cpu(ent->lde_namelen);
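+ /* struct luda_type follows the name, padded to its own
+ * alignment */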
+ len = (len + align) & ~align;
+ lt = (void *) ent->lde_name + len;
+ type = CFS_IFTODT(le16_to_cpu(lt->lt_type));
+ }
+ return type;
+}
+
+/**
+ * Build an inode number from the passed @fid.
+ */
+__u64 cl_fid_build_ino(const struct lu_fid *fid, int api32)
+{
+ if (BITS_PER_LONG == 32 || api32)
+ RETURN(fid_flatten32(fid));
+ else
+ RETURN(fid_flatten(fid));
+}
+
+/**
+ * Build an inode generation from the passed @fid. If the FID overflows the
+ * 32-bit inode number, return a non-zero generation to distinguish the
+ * colliding inode numbers. */
+__u32 cl_fid_build_gen(const struct lu_fid *fid)
+{
+ __u32 gen;
+ ENTRY;
+
+ if (fid_is_igif(fid)) {
+ gen = lu_igif_gen(fid);
+ RETURN(gen);
+ }
+
+ gen = (fid_flatten(fid) >> 32);
+ RETURN(gen);
+}
+
+struct lov_stripe_md *ccc_inode_lsm_get(struct inode *inode)
+{
+ return lov_lsm_get(cl_i2info(inode)->lli_clob);
+}
+
+inline void ccc_inode_lsm_put(struct inode *inode, struct lov_stripe_md *lsm)
+{
+ lov_lsm_put(cl_i2info(inode)->lli_clob, lsm);
+}