X-Git-Url: https://git.whamcloud.com/?a=blobdiff_plain;f=lustre%2Fllite%2Flcommon_cl.c;h=f5ba1a842fd79d13febf7e4aa4b17e8cbfa44b06;hb=refs%2Fchanges%2F72%2F40772%2F10;hp=d1c5edb4ae037dfacfc1a3d0b54f76445a6f3bf8;hpb=72057a3af19ee02d9a686bd7e7d074917e381310;p=fs%2Flustre-release.git

diff --git a/lustre/llite/lcommon_cl.c b/lustre/llite/lcommon_cl.c
index d1c5edb..f5ba1a8 100644
--- a/lustre/llite/lcommon_cl.c
+++ b/lustre/llite/lcommon_cl.c
@@ -23,7 +23,7 @@
  * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
  * Use is subject to license terms.
  *
- * Copyright (c) 2011, 2015, Intel Corporation.
+ * Copyright (c) 2011, 2017, Intel Corporation.
  */
 /*
  * This file is part of Lustre, http://www.lustre.org/
@@ -47,7 +47,6 @@
 #include 
 #include 
 #include 
-#include 
 #include 
 #include 
@@ -69,45 +68,52 @@ __u16 cl_inode_fini_refcheck;
 static DEFINE_MUTEX(cl_inode_fini_guard);
 
 int cl_setattr_ost(struct cl_object *obj, const struct iattr *attr,
-		   unsigned int attr_flags)
+		   enum op_xvalid xvalid, unsigned int attr_flags)
 {
-	struct lu_env *env;
-	struct cl_io *io;
-	int result;
-	__u16 refcheck;
+	struct lu_env *env;
+	struct cl_io *io;
+	int result;
+	__u16 refcheck;
 
-	ENTRY;
+	ENTRY;
 
-	env = cl_env_get(&refcheck);
-	if (IS_ERR(env))
-		RETURN(PTR_ERR(env));
+	env = cl_env_get(&refcheck);
+	if (IS_ERR(env))
+		RETURN(PTR_ERR(env));
 
 	io = vvp_env_thread_io(env);
 	io->ci_obj = obj;
 	io->ci_verify_layout = 1;
 
-	io->u.ci_setattr.sa_attr.lvb_atime = LTIME_S(attr->ia_atime);
-	io->u.ci_setattr.sa_attr.lvb_mtime = LTIME_S(attr->ia_mtime);
-	io->u.ci_setattr.sa_attr.lvb_ctime = LTIME_S(attr->ia_ctime);
+	io->u.ci_setattr.sa_attr.lvb_atime = attr->ia_atime.tv_sec;
+	io->u.ci_setattr.sa_attr.lvb_mtime = attr->ia_mtime.tv_sec;
+	io->u.ci_setattr.sa_attr.lvb_ctime = attr->ia_ctime.tv_sec;
 	io->u.ci_setattr.sa_attr.lvb_size = attr->ia_size;
 	io->u.ci_setattr.sa_attr_flags = attr_flags;
-	io->u.ci_setattr.sa_valid = attr->ia_valid;
+	io->u.ci_setattr.sa_avalid = attr->ia_valid;
+	io->u.ci_setattr.sa_xvalid = xvalid;
 	io->u.ci_setattr.sa_parent_fid = lu_object_fid(&obj->co_lu);
-
+	if (attr->ia_valid & ATTR_SIZE)
+		io->u.ci_setattr.sa_subtype = CL_SETATTR_TRUNC;
 again:
-	if (cl_io_init(env, io, CIT_SETATTR, io->ci_obj) == 0) {
+	if (attr->ia_valid & ATTR_FILE)
+		ll_io_set_mirror(io, attr->ia_file);
+
+	if (cl_io_init(env, io, CIT_SETATTR, io->ci_obj) == 0) {
 		struct vvp_io *vio = vvp_env_io(env);
 
 		if (attr->ia_valid & ATTR_FILE)
-			/* populate the file descriptor for ftruncate to honor
-			 * group lock - see LU-787 */
-			vio->vui_fd = LUSTRE_FPRIVATE(attr->ia_file);
-
-		result = cl_io_loop(env, io);
-	} else {
-		result = io->ci_result;
-	}
-	cl_io_fini(env, io);
+			/*
+			 * populate the file descriptor for ftruncate to honor
+			 * group lock - see LU-787
+			 */
+			vio->vui_fd = attr->ia_file->private_data;
+
+		result = cl_io_loop(env, io);
+	} else {
+		result = io->ci_result;
+	}
+	cl_io_fini(env, io);
 	if (unlikely(io->ci_need_restart))
 		goto again;
@@ -149,37 +155,51 @@ int cl_file_inode_init(struct inode *inode, struct lustre_md *md)
 	site = ll_i2sbi(inode)->ll_site;
 	lli = ll_i2info(inode);
-	fid = &lli->lli_fid;
-	LASSERT(fid_is_sane(fid));
-
-	if (lli->lli_clob == NULL) {
-		/* clob is slave of inode, empty lli_clob means for new inode,
-		 * there is no clob in cache with the given fid, so it is
-		 * unnecessary to perform lookup-alloc-lookup-insert, just
-		 * alloc and insert directly. */
-		LASSERT(inode->i_state & I_NEW);
-		conf.coc_lu.loc_flags = LOC_F_NEW;
-		clob = cl_object_find(env, lu2cl_dev(site->ls_top_dev),
-				      fid, &conf);
-		if (!IS_ERR(clob)) {
-			/*
-			 * No locking is necessary, as new inode is
-			 * locked by I_NEW bit.
-			 */
-			lli->lli_clob = clob;
-			lu_object_ref_add(&clob->co_lu, "inode", inode);
-		} else
-			result = PTR_ERR(clob);
+	fid = &lli->lli_fid;
+	LASSERT(fid_is_sane(fid));
+
+	if (lli->lli_clob == NULL) {
+		/* clob is slave of inode, empty lli_clob means for new inode,
+		 * there is no clob in cache with the given fid, so it is
+		 * unnecessary to perform lookup-alloc-lookup-insert, just
+		 * alloc and insert directly.
+		 */
+		if (!(inode->i_state & I_NEW)) {
+			result = -EIO;
+			CERROR("%s: unexpected not-NEW inode "DFID": rc = %d\n",
+			       ll_i2sbi(inode)->ll_fsname, PFID(fid), result);
+			goto out;
+		}
+
+		conf.coc_lu.loc_flags = LOC_F_NEW;
+		clob = cl_object_find(env, lu2cl_dev(site->ls_top_dev),
+				      fid, &conf);
+		if (!IS_ERR(clob)) {
+			/*
+			 * No locking is necessary, as new inode is
+			 * locked by I_NEW bit.
+			 */
+			lli->lli_clob = clob;
+			lu_object_ref_add(&clob->co_lu, "inode", inode);
+		} else {
+			result = PTR_ERR(clob);
+		}
 	} else {
 		result = cl_conf_set(env, lli->lli_clob, &conf);
+		if (result == -EBUSY) {
+			/* ignore the error since I/O will handle it later */
+			result = 0;
+		}
 	}
-	cl_env_put(env, &refcheck);
+	if (result != 0)
+		CERROR("%s: failed to initialize cl_object "DFID": rc = %d\n",
+		       ll_i2sbi(inode)->ll_fsname, PFID(fid), result);
+
+out:
+	cl_env_put(env, &refcheck);
 
-	if (result != 0)
-		CERROR("Failure to initialize cl object "DFID": %d\n",
-		       PFID(fid), result);
-	return result;
+	return result;
 }
 
 /**
@@ -194,26 +214,14 @@ int cl_file_inode_init(struct inode *inode, struct lustre_md *md)
 static void cl_object_put_last(struct lu_env *env, struct cl_object *obj)
 {
 	struct lu_object_header *header = obj->co_lu.lo_header;
-	wait_queue_t waiter;
 
 	if (unlikely(atomic_read(&header->loh_ref) != 1)) {
 		struct lu_site *site = obj->co_lu.lo_dev->ld_site;
-		struct lu_site_bkt_data *bkt;
+		wait_queue_head_t *wq;
 
-		bkt = lu_site_bkt_from_fid(site, &header->loh_fid);
-
-		init_waitqueue_entry(&waiter, current);
-		add_wait_queue(&bkt->lsb_marche_funebre, &waiter);
-
-		while (1) {
-			set_current_state(TASK_UNINTERRUPTIBLE);
-			if (atomic_read(&header->loh_ref) == 1)
-				break;
-			schedule();
-		}
+		wq = lu_site_wq_from_fid(site, &header->loh_fid);
 
-		set_current_state(TASK_RUNNING);
-		remove_wait_queue(&bkt->lsb_marche_funebre, &waiter);
+		wait_event(*wq, atomic_read(&header->loh_ref) == 1);
 	}
 
 	cl_object_put(env, obj);
@@ -253,28 +261,28 @@ void cl_inode_fini(struct inode *inode)
 }
 
 /**
- * build inode number from passed @fid */
+ * build inode number from passed @fid.
+ *
+ * For 32-bit systems or syscalls limit the inode number to a 32-bit value
+ * to avoid EOVERFLOW errors. This will inevitably result in inode number
+ * collisions, but fid_flatten32() tries hard to avoid this if possible.
+ */
 __u64 cl_fid_build_ino(const struct lu_fid *fid, int api32)
 {
-	if (BITS_PER_LONG == 32 || api32)
-		RETURN(fid_flatten32(fid));
-	else
-		RETURN(fid_flatten(fid));
+	if (BITS_PER_LONG == 32 || api32)
+		RETURN(fid_flatten32(fid));
+
+	RETURN(fid_flatten(fid));
 }
 
 /**
  * build inode generation from passed @fid. If our FID overflows the 32-bit
- * inode number then return a non-zero generation to distinguish them. */
+ * inode number then return a non-zero generation to distinguish them.
+ */
 __u32 cl_fid_build_gen(const struct lu_fid *fid)
 {
-	__u32 gen;
-	ENTRY;
-
-	if (fid_is_igif(fid)) {
-		gen = lu_igif_gen(fid);
-		RETURN(gen);
-	}
+	if (fid_is_igif(fid))
+		RETURN(lu_igif_gen(fid));
 
-	gen = (fid_flatten(fid) >> 32);
-	RETURN(gen);
+	RETURN(fid_flatten(fid) >> 32);
 }
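For context on the last hunk: the new comment explains that cl_fid_build_ino() must fall back to a 32-bit flattening of the FID for 32-bit hosts or 32-bit syscalls (to avoid EOVERFLOW), and cl_fid_build_gen() then returns the high bits that did not fit, so the (inode number, generation) pair can still tell colliding files apart. The standalone C sketch below illustrates only that idea and is not Lustre code: struct demo_fid, flatten64(), flatten32(), build_ino() and build_gen() are simplified stand-ins for struct lu_fid, fid_flatten(), fid_flatten32(), cl_fid_build_ino() and cl_fid_build_gen(), whose real definitions live in Lustre headers outside this patch, and the IGIF special case is omitted.

/*
 * Standalone sketch (not Lustre code): maps a FID-like identifier to an
 * inode number and shows why a non-zero generation is returned when the
 * 64-bit flattening does not fit into a 32-bit inode number.
 */
#include <stdint.h>
#include <stdio.h>

struct demo_fid {			/* simplified stand-in for struct lu_fid */
	uint64_t f_seq;
	uint32_t f_oid;
	uint32_t f_ver;
};

/* 64-bit flattening: stand-in for fid_flatten(); unique per (seq, oid). */
static uint64_t flatten64(const struct demo_fid *fid)
{
	return (fid->f_seq << 32) | fid->f_oid;
}

/* 32-bit flattening: stand-in for fid_flatten32(); collisions are possible. */
static uint32_t flatten32(const struct demo_fid *fid)
{
	return (uint32_t)(fid->f_seq ^ (fid->f_seq >> 32)) ^ fid->f_oid;
}

/* Same decision as cl_fid_build_ino(): 32-bit host or 32-bit API folds down. */
static uint64_t build_ino(const struct demo_fid *fid, int api32)
{
	if (sizeof(long) * 8 == 32 || api32)
		return flatten32(fid);

	return flatten64(fid);
}

/*
 * Same idea as cl_fid_build_gen(): return the high bits a 32-bit inode
 * number cannot hold, so (ino, generation) stays unique even when two
 * FIDs collide on the 32-bit ino alone.
 */
static uint32_t build_gen(const struct demo_fid *fid)
{
	return (uint32_t)(flatten64(fid) >> 32);
}

int main(void)
{
	struct demo_fid fid = { .f_seq = 0x200000401ULL, .f_oid = 0x1234, .f_ver = 0 };

	printf("ino(64)=%#llx ino(32)=%#x gen=%#x\n",
	       (unsigned long long)build_ino(&fid, 0),
	       (unsigned int)(uint32_t)build_ino(&fid, 1),
	       (unsigned int)build_gen(&fid));
	return 0;
}

Compiled with a plain C compiler, the sketch prints the full 64-bit inode number, the folded 32-bit one, and a non-zero generation carrying the bits that were folded away.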