/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2014, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * cl code shared between vvp and liblustre (and other Lustre clients in the
 * future).
 *
 *   Author: Nikita Danilov <nikita.danilov@sun.com>
 */
#define DEBUG_SUBSYSTEM S_LLITE

#include <libcfs/libcfs.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/quotaops.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/rbtree.h>

#include <obd.h>
#include <obd_support.h>
#include <lustre_fid.h>
#include <lustre_dlm.h>
#include <lustre_ver.h>
#include <lustre_mdc.h>
#include <cl_object.h>

#include "llite_internal.h"
static const struct cl_req_operations ccc_req_ops;

/*
 * ccc_ prefix stands for "Common Client Code".
 */

static struct kmem_cache *ccc_thread_kmem;
static struct kmem_cache *ccc_session_kmem;
static struct kmem_cache *ccc_req_kmem;
static struct lu_kmem_descr ccc_caches[] = {
	{
		.ckd_cache = &ccc_thread_kmem,
		.ckd_name  = "ccc_thread_kmem",
		.ckd_size  = sizeof (struct ccc_thread_info),
	},
	{
		.ckd_cache = &ccc_session_kmem,
		.ckd_name  = "ccc_session_kmem",
		.ckd_size  = sizeof (struct ccc_session)
	},
	{
		.ckd_cache = &ccc_req_kmem,
		.ckd_name  = "ccc_req_kmem",
		.ckd_size  = sizeof (struct ccc_req)
	},
	{
		.ckd_cache = NULL
	}
};
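
/*
 * The descriptor table above is consumed wholesale by lu_kmem_init() and
 * lu_kmem_fini() in ccc_global_init()/ccc_global_fini() below; the
 * terminating entry with a NULL .ckd_cache ends the walk.
 */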
/*****************************************************************************
 *
 * Vvp device and device type functions.
 *
 */
void *ccc_key_init(const struct lu_context *ctx, struct lu_context_key *key)
{
	struct ccc_thread_info *info;

	OBD_SLAB_ALLOC_PTR_GFP(info, ccc_thread_kmem, GFP_NOFS);
	if (info == NULL)
		info = ERR_PTR(-ENOMEM);
	return info;
}
void ccc_key_fini(const struct lu_context *ctx,
		  struct lu_context_key *key, void *data)
{
	struct ccc_thread_info *info = data;

	OBD_SLAB_FREE_PTR(info, ccc_thread_kmem);
}
void *ccc_session_key_init(const struct lu_context *ctx,
			   struct lu_context_key *key)
{
	struct ccc_session *session;

	OBD_SLAB_ALLOC_PTR_GFP(session, ccc_session_kmem, GFP_NOFS);
	if (session == NULL)
		session = ERR_PTR(-ENOMEM);
	return session;
}
void ccc_session_key_fini(const struct lu_context *ctx,
			  struct lu_context_key *key, void *data)
{
	struct ccc_session *session = data;

	OBD_SLAB_FREE_PTR(session, ccc_session_kmem);
}
struct lu_context_key ccc_key = {
	.lct_tags = LCT_CL_THREAD,
	.lct_init = ccc_key_init,
	.lct_fini = ccc_key_fini
};
struct lu_context_key ccc_session_key = {
	.lct_tags = LCT_SESSION,
	.lct_init = ccc_session_key_init,
	.lct_fini = ccc_session_key_fini
};
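
/*
 * Illustrative sketch (not part of the original file): once ccc_key is
 * registered, per-thread scratch space is reached through the generic
 * lu_context API, e.g.:
 *
 *	struct ccc_thread_info *info =
 *		lu_context_key_get(&env->le_ctx, &ccc_key);
 *
 * Helpers such as ccc_env_info()/ccc_env_io() are assumed to be thin
 * wrappers around exactly this lookup.
 */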
int ccc_req_init(const struct lu_env *env, struct cl_device *dev,
		 struct cl_req *req)
{
	struct ccc_req *vrq;
	int result;

	OBD_SLAB_ALLOC_PTR_GFP(vrq, ccc_req_kmem, GFP_NOFS);
	if (vrq != NULL) {
		cl_req_slice_add(req, &vrq->crq_cl, dev, &ccc_req_ops);
		result = 0;
	} else
		result = -ENOMEM;
	return result;
}
/**
 * An `emergency' environment used by ccc_inode_fini() when cl_env_get()
 * fails. Access to this environment is serialized by the
 * ccc_inode_fini_guard mutex.
 */
static struct lu_env *ccc_inode_fini_env = NULL;

/**
 * A mutex serializing calls to slp_inode_fini() under extreme memory
 * pressure, when environments cannot be allocated.
 */
static DEFINE_MUTEX(ccc_inode_fini_guard);
static int dummy_refcheck;
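
/*
 * Usage sketch (mirrors cl_inode_fini() further below): a caller that
 * cannot allocate its own environment borrows the shared emergency one
 * under the guard mutex:
 *
 *	mutex_lock(&ccc_inode_fini_guard);
 *	cl_env_implant(ccc_inode_fini_env, &refcheck);
 *	... use ccc_inode_fini_env ...
 *	cl_env_unplant(ccc_inode_fini_env, &refcheck);
 *	mutex_unlock(&ccc_inode_fini_guard);
 */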
int ccc_global_init(struct lu_device_type *device_type)
{
	int result;

	result = lu_kmem_init(ccc_caches);
	if (result != 0)
		return result;

	result = lu_device_type_init(device_type);
	if (result != 0)
		goto out_kmem;

	ccc_inode_fini_env = cl_env_alloc(&dummy_refcheck,
					  LCT_REMEMBER|LCT_NOREF);
	if (IS_ERR(ccc_inode_fini_env)) {
		result = PTR_ERR(ccc_inode_fini_env);
		goto out_device;
	}

	ccc_inode_fini_env->le_ctx.lc_cookie = 0x4;
	return 0;
out_device:
	lu_device_type_fini(device_type);
out_kmem:
	lu_kmem_fini(ccc_caches);
	return result;
}
void ccc_global_fini(struct lu_device_type *device_type)
{
	if (ccc_inode_fini_env != NULL) {
		cl_env_put(ccc_inode_fini_env, &dummy_refcheck);
		ccc_inode_fini_env = NULL;
	}
	lu_device_type_fini(device_type);
	lu_kmem_fini(ccc_caches);
}
static void vvp_object_size_lock(struct cl_object *obj)
{
	struct inode *inode = vvp_object_inode(obj);

	ll_inode_size_lock(inode);
	cl_object_attr_lock(obj);
}
static void vvp_object_size_unlock(struct cl_object *obj)
{
	struct inode *inode = vvp_object_inode(obj);

	cl_object_attr_unlock(obj);
	ll_inode_size_unlock(inode);
}
/*****************************************************************************
 *
 * Io operations.
 *
 */
void ccc_io_fini(const struct lu_env *env, const struct cl_io_slice *ios)
{
	struct cl_io *io = ios->cis_io;

	CLOBINVRNT(env, io->ci_obj, vvp_object_invariant(io->ci_obj));
}
int ccc_io_one_lock_index(const struct lu_env *env, struct cl_io *io,
			  __u32 enqflags, enum cl_lock_mode mode,
			  pgoff_t start, pgoff_t end)
{
	struct ccc_io *cio = ccc_env_io(env);
	struct cl_lock_descr *descr = &cio->cui_link.cill_descr;
	struct cl_object *obj = io->ci_obj;

	CLOBINVRNT(env, obj, vvp_object_invariant(obj));

	CDEBUG(D_VFSTRACE, "lock: %d [%lu, %lu]\n", mode, start, end);

	memset(&cio->cui_link, 0, sizeof cio->cui_link);

	if (cio->cui_fd && (cio->cui_fd->fd_flags & LL_FILE_GROUP_LOCKED)) {
		descr->cld_mode = CLM_GROUP;
		descr->cld_gid = cio->cui_fd->fd_grouplock.cg_gid;
	} else {
		descr->cld_mode = mode;
	}
	descr->cld_obj = obj;
	descr->cld_start = start;
	descr->cld_end = end;
	descr->cld_enq_flags = enqflags;

	cl_io_lock_add(env, io, &cio->cui_link);
	RETURN(0);
}
void ccc_io_update_iov(const struct lu_env *env,
		       struct ccc_io *cio, struct cl_io *io)
{
	int i;
	size_t size = io->u.ci_rw.crw_count;

	cio->cui_iov_olen = 0;
	if (!cl_is_normalio(env, io) || cio->cui_tot_nrsegs == 0)
		return;

	for (i = 0; i < cio->cui_tot_nrsegs; i++) {
		struct iovec *iv = &cio->cui_iov[i];

		if (iv->iov_len < size)
			size -= iv->iov_len;
		else {
			if (iv->iov_len > size) {
				cio->cui_iov_olen = iv->iov_len;
				iv->iov_len = size;
			}
			break;
		}
	}

	cio->cui_nrsegs = i + 1;
	LASSERTF(cio->cui_tot_nrsegs >= cio->cui_nrsegs,
		 "tot_nrsegs: %lu, nrsegs: %lu\n",
		 cio->cui_tot_nrsegs, cio->cui_nrsegs);
}
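
/*
 * Worked example (illustrative): with three 4096-byte iovecs and
 * crw_count == 6144, the loop stops at i == 1 with size == 2048;
 * cui_iov_olen records the original 4096 and iov_len is clipped to
 * 2048, so cui_nrsegs == 2 covers exactly this chunk of the io.
 */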
int ccc_io_one_lock(const struct lu_env *env, struct cl_io *io,
		    __u32 enqflags, enum cl_lock_mode mode,
		    loff_t start, loff_t end)
{
	struct cl_object *obj = io->ci_obj;

	return ccc_io_one_lock_index(env, io, enqflags, mode,
				     cl_index(obj, start), cl_index(obj, end));
}
void ccc_io_end(const struct lu_env *env, const struct cl_io_slice *ios)
{
	CLOBINVRNT(env, ios->cis_io->ci_obj,
		   vvp_object_invariant(ios->cis_io->ci_obj));
}
void ccc_io_advance(const struct lu_env *env,
		    const struct cl_io_slice *ios,
		    size_t nob)
{
	struct ccc_io *cio = cl2ccc_io(env, ios);
	struct cl_io *io = ios->cis_io;
	struct cl_object *obj = ios->cis_io->ci_obj;

	CLOBINVRNT(env, obj, vvp_object_invariant(obj));

	if (!cl_is_normalio(env, io))
		return;

	LASSERT(cio->cui_tot_nrsegs >= cio->cui_nrsegs);
	LASSERT(cio->cui_tot_count >= nob);

	cio->cui_iov += cio->cui_nrsegs;
	cio->cui_tot_nrsegs -= cio->cui_nrsegs;
	cio->cui_tot_count -= nob;

	/* update the iov */
	if (cio->cui_iov_olen > 0) {
		struct iovec *iv;

		cio->cui_iov--;
		cio->cui_tot_nrsegs++;
		iv = &cio->cui_iov[0];
		if (io->ci_continue) {
			iv->iov_base += iv->iov_len;
			LASSERT(cio->cui_iov_olen > iv->iov_len);
			iv->iov_len = cio->cui_iov_olen - iv->iov_len;
		} else {
			/* restore the iov_len, in case of restart io. */
			iv->iov_len = cio->cui_iov_olen;
		}
		cio->cui_iov_olen = 0;
	}
}
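
/*
 * Continuing the example above: after the 6144-byte chunk completes with
 * io->ci_continue set, the clipped iovec is rewound one slot, iov_base
 * advances past the 2048 bytes already done, and iov_len becomes
 * 4096 - 2048 == 2048, i.e. the not-yet-transferred tail of that iovec.
 */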
/**
 * Helper function that, if necessary, adjusts the file size (inode->i_size)
 * when the position at offset \a pos is accessed. The file size can be
 * arbitrarily stale on a Lustre client, but the client at least knows KMS
 * (known minimum size). If the accessed area is inside [0, KMS], set the
 * file size to KMS; otherwise glimpse the file size.
 *
 * Locking: cl_isize_lock is used to serialize changes to the inode size and
 * to protect consistency between the inode size and the cl_object
 * attributes. cl_object_size_lock() protects consistency between the
 * cl_attr's of the top-object and sub-objects.
 */
int ccc_prep_size(const struct lu_env *env, struct cl_object *obj,
		  struct cl_io *io, loff_t start, size_t count, int *exceed)
{
	struct cl_attr *attr = ccc_env_thread_attr(env);
	struct inode *inode = vvp_object_inode(obj);
	loff_t pos = start + count - 1;
	loff_t kms;
	int result;

	/*
	 * Consistency guarantees: the following possibilities exist for the
	 * relation between the region being accessed and the real file size
	 * at this moment:
	 *
	 * (A): the region is completely inside of the file;
	 *
	 * (B-x): x bytes of the region are inside of the file, the rest is
	 * outside;
	 *
	 * (C): the region is completely outside of the file.
	 *
	 * This classification is stable under the DLM lock already acquired
	 * by the caller, because to change the class, another client has to
	 * take a DLM lock conflicting with our lock. Also, any updates to
	 * ->i_size by other threads on this client are serialized by
	 * ll_inode_size_lock(). This guarantees that short reads are handled
	 * correctly in the face of concurrent writes and truncates.
	 */
	vvp_object_size_lock(obj);
	result = cl_object_attr_get(env, obj, attr);
	if (result == 0) {
		kms = attr->cat_kms;
		if (pos > kms) {
			/*
			 * A glimpse is necessary to determine whether we
			 * return a short read (B) or some zeroes at the end
			 * of the buffer (C).
			 */
			vvp_object_size_unlock(obj);
			result = cl_glimpse_lock(env, io, inode, obj, 0);
			if (result == 0 && exceed != NULL) {
				/* If the objective page index exceeds the
				 * end-of-file page index, return directly.
				 * Do not expect the kernel to check such a
				 * case correctly; linux-2.6.18-128.1.1
				 * misses it. --bug 17336 */
				loff_t size = i_size_read(inode);
				unsigned long cur_index = start >>
							  PAGE_CACHE_SHIFT;

				if ((size == 0 && cur_index != 0) ||
				    (((size - 1) >> PAGE_CACHE_SHIFT) <
				     cur_index))
					*exceed = 1;
			}
			return result;
		} else {
			/*
			 * The region is within kms and, hence, within the
			 * real file size (A). We need to increase i_size to
			 * cover the read region so that generic_file_read()
			 * will do its job, but that doesn't mean the kms
			 * size is _correct_, it is only the _minimum_ size.
			 * If someone does a stat they will get the correct
			 * size which will always be >= the kms value here.
			 * b=11081
			 */
			if (i_size_read(inode) < kms) {
				i_size_write(inode, kms);
				CDEBUG(D_VFSTRACE,
				       DFID" updating i_size "LPU64"\n",
				       PFID(lu_object_fid(&obj->co_lu)),
				       (__u64)i_size_read(inode));
			}
		}
	}

	vvp_object_size_unlock(obj);

	return result;
}
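
/*
 * Illustrative numbers: with kms == 1 MB, a 4 KB read at offset 512 KB
 * falls inside [0, kms], so i_size is raised to 1 MB locally and no RPC
 * is needed (case (A)); a read at offset 2 MB exceeds kms and triggers
 * the glimpse to learn the authoritative size (case (B) or (C)).
 */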
/*****************************************************************************
 *
 * Transfer operations.
 *
 */
void ccc_req_completion(const struct lu_env *env,
			const struct cl_req_slice *slice, int ioret)
{
	struct ccc_req *vrq;

	if (ioret > 0)
		cl_stats_tally(slice->crs_dev, slice->crs_req->crq_type, ioret);

	vrq = cl2ccc_req(slice);
	OBD_SLAB_FREE_PTR(vrq, ccc_req_kmem);
}
/**
 * Implementation of struct cl_req_operations::cro_attr_set() for the ccc
 * layer. ccc is responsible for the request attributes that come from the
 * inode: object type, times, ownership, the IO epoch for writes, the
 * parent FID and the jobid, as well as for OSS capability lookup.
 */
void ccc_req_attr_set(const struct lu_env *env,
		      const struct cl_req_slice *slice,
		      const struct cl_object *obj,
		      struct cl_req_attr *attr, u64 flags)
{
	struct inode *inode;
	struct obdo *oa;
	u64 valid_flags;

	oa = attr->cra_oa;
	inode = vvp_object_inode(obj);
	valid_flags = OBD_MD_FLTYPE;

	if ((flags & OBD_MD_FLOSSCAPA) != 0) {
		LASSERT(attr->cra_capa == NULL);
		attr->cra_capa = cl_capa_lookup(inode,
						slice->crs_req->crq_type);
	}

	if (slice->crs_req->crq_type == CRT_WRITE) {
		if (flags & OBD_MD_FLEPOCH) {
			oa->o_valid |= OBD_MD_FLEPOCH;
			oa->o_ioepoch = ll_i2info(inode)->lli_ioepoch;
			valid_flags |= OBD_MD_FLMTIME | OBD_MD_FLCTIME |
				       OBD_MD_FLUID | OBD_MD_FLGID;
		}
	}
	obdo_from_inode(oa, inode, valid_flags & flags);
	obdo_set_parent_fid(oa, &ll_i2info(inode)->lli_fid);
	if (OBD_FAIL_CHECK(OBD_FAIL_LFSCK_INVALID_PFID))
		oa->o_parent_oid++;
	memcpy(attr->cra_jobid, ll_i2info(inode)->lli_jobid,
	       LUSTRE_JOBID_SIZE);
}
static const struct cl_req_operations ccc_req_ops = {
	.cro_attr_set   = ccc_req_attr_set,
	.cro_completion = ccc_req_completion
};
int cl_setattr_ost(struct inode *inode, const struct iattr *attr,
		   struct obd_capa *capa)
{
	struct lu_env *env;
	struct cl_io *io;
	int result;
	int refcheck;

	env = cl_env_get(&refcheck);
	if (IS_ERR(env))
		RETURN(PTR_ERR(env));

	io = ccc_env_thread_io(env);
	io->ci_obj = ll_i2info(inode)->lli_clob;

	io->u.ci_setattr.sa_attr.lvb_atime = LTIME_S(attr->ia_atime);
	io->u.ci_setattr.sa_attr.lvb_mtime = LTIME_S(attr->ia_mtime);
	io->u.ci_setattr.sa_attr.lvb_ctime = LTIME_S(attr->ia_ctime);
	io->u.ci_setattr.sa_attr.lvb_size = attr->ia_size;
	io->u.ci_setattr.sa_valid = attr->ia_valid;
	io->u.ci_setattr.sa_capa = capa;

again:
	if (cl_io_init(env, io, CIT_SETATTR, io->ci_obj) == 0) {
		struct ccc_io *cio = ccc_env_io(env);

		if (attr->ia_valid & ATTR_FILE)
			/* populate the file descriptor for ftruncate to honor
			 * group lock - see LU-787 */
			cio->cui_fd = LUSTRE_FPRIVATE(attr->ia_file);

		result = cl_io_loop(env, io);
	} else
		result = io->ci_result;
	cl_io_fini(env, io);
	if (unlikely(io->ci_need_restart))
		goto again;
	/* HSM import case: file is released, cannot be restored
	 * no need to fail except if restore registration failed
	 * with -ENODATA */
	if (result == -ENODATA && io->ci_restore_needed &&
	    io->ci_result != -ENODATA)
		result = 0;
	cl_env_put(env, &refcheck);
	RETURN(result);
}
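
/*
 * Usage sketch (hypothetical caller, not from this file): a truncate path
 * would fill a struct iattr and let this helper drive the CLIO setattr
 * state machine:
 *
 *	struct iattr ia = { .ia_valid = ATTR_SIZE, .ia_size = new_size };
 *	int rc = cl_setattr_ost(inode, &ia, NULL);
 */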
/*****************************************************************************
 *
 * Misc helpers.
 *
 */
struct ccc_io *cl2ccc_io(const struct lu_env *env,
			 const struct cl_io_slice *slice)
{
	struct ccc_io *cio;

	cio = container_of(slice, struct ccc_io, cui_cl);
	LASSERT(cio == ccc_env_io(env));
	return cio;
}
struct ccc_req *cl2ccc_req(const struct cl_req_slice *slice)
{
	return container_of0(slice, struct ccc_req, crq_cl);
}
/**
 * Initialize or update CLIO structures for regular files when new
 * meta-data arrives from the server.
 *
 * \param inode regular file inode
 * \param md    new file metadata from MDS
 *
 * - allocates cl_object if necessary,
 * - updates layout, if the object was already here.
 */
int cl_file_inode_init(struct inode *inode, struct lustre_md *md)
{
	struct lu_env *env;
	struct ll_inode_info *lli;
	struct cl_object *clob;
	struct lu_site *site;
	struct lu_fid *fid;
	struct cl_object_conf conf = {
		.coc_inode = inode,
		.u = {
			.coc_md = md
		}
	};
	int result = 0;
	int refcheck;

	LASSERT(md->body->mbo_valid & OBD_MD_FLID);
	LASSERT(S_ISREG(inode->i_mode));

	env = cl_env_get(&refcheck);
	if (IS_ERR(env))
		return PTR_ERR(env);

	site = ll_i2sbi(inode)->ll_site;
	lli = ll_i2info(inode);
	fid = &lli->lli_fid;
	LASSERT(fid_is_sane(fid));

	if (lli->lli_clob == NULL) {
		/* clob is slave of inode, empty lli_clob means for new inode,
		 * there is no clob in cache with the given fid, so it is
		 * unnecessary to perform lookup-alloc-lookup-insert, just
		 * alloc and insert directly. */
		LASSERT(inode->i_state & I_NEW);
		conf.coc_lu.loc_flags = LOC_F_NEW;
		clob = cl_object_find(env, lu2cl_dev(site->ls_top_dev),
				      fid, &conf);
		if (!IS_ERR(clob)) {
			/*
			 * No locking is necessary, as the new inode is
			 * locked by the I_NEW bit.
			 */
			lli->lli_clob = clob;
			lli->lli_has_smd = lsm_has_objects(md->lsm);
			lu_object_ref_add(&clob->co_lu, "inode", inode);
		} else
			result = PTR_ERR(clob);
	} else {
		result = cl_conf_set(env, lli->lli_clob, &conf);
	}

	cl_env_put(env, &refcheck);

	if (result != 0)
		CERROR("Failure to initialize cl object "DFID": %d\n",
		       PFID(fid), result);
	return result;
}
/**
 * Wait for others to drop their references to the object first, then drop
 * the last one, which causes the object to be destroyed immediately.
 * Must be called after cl_object_kill() against this object.
 *
 * The reason we want to do this is: destroying the top object will wait
 * for sub-objects to be destroyed first, so we can't let the bottom layer
 * (e.g. from ASTs) initiate destruction of the top object, as that may
 * deadlock. See bz22520.
 */
static void cl_object_put_last(struct lu_env *env, struct cl_object *obj)
{
	struct lu_object_header *header = obj->co_lu.lo_header;
	wait_queue_t waiter;

	if (unlikely(atomic_read(&header->loh_ref) != 1)) {
		struct lu_site *site = obj->co_lu.lo_dev->ld_site;
		struct lu_site_bkt_data *bkt;

		bkt = lu_site_bkt_from_fid(site, &header->loh_fid);

		init_waitqueue_entry_current(&waiter);
		add_wait_queue(&bkt->lsb_marche_funebre, &waiter);

		while (1) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			if (atomic_read(&header->loh_ref) == 1)
				break;
			waitq_wait(&waiter, TASK_UNINTERRUPTIBLE);
		}

		set_current_state(TASK_RUNNING);
		remove_wait_queue(&bkt->lsb_marche_funebre, &waiter);
	}

	cl_object_put(env, obj);
}
void cl_inode_fini(struct inode *inode)
{
	struct lu_env *env;
	struct ll_inode_info *lli = ll_i2info(inode);
	struct cl_object *clob = lli->lli_clob;
	int refcheck;
	int emergency;
	void *cookie;

	if (clob != NULL) {
		cookie = cl_env_reenter();
		env = cl_env_get(&refcheck);
		emergency = IS_ERR(env);
		if (emergency) {
			mutex_lock(&ccc_inode_fini_guard);
			LASSERT(ccc_inode_fini_env != NULL);
			cl_env_implant(ccc_inode_fini_env, &refcheck);
			env = ccc_inode_fini_env;
		}
		/*
		 * cl_object cache is a slave to inode cache (which, in turn
		 * is a slave to dentry cache), don't keep cl_object in memory
		 * when its master is evicted.
		 */
		cl_object_kill(env, clob);
		lu_object_ref_del(&clob->co_lu, "inode", inode);
		cl_object_put_last(env, clob);
		lli->lli_clob = NULL;
		if (emergency) {
			cl_env_unplant(ccc_inode_fini_env, &refcheck);
			mutex_unlock(&ccc_inode_fini_guard);
		} else
			cl_env_put(env, &refcheck);
		cl_env_reexit(cookie);
	}
}
/**
 * Return the IF_* type for a given lu_dirent entry.
 * The IF_* flag should be converted to the particular OS file type by the
 * platform llite module.
 */
__u16 ll_dirent_type_get(struct lu_dirent *ent)
{
	__u16 type = 0;
	struct luda_type *lt;
	int len = 0;

	if (le32_to_cpu(ent->lde_attrs) & LUDA_TYPE) {
		const unsigned align = sizeof(struct luda_type) - 1;

		len = le16_to_cpu(ent->lde_namelen);
		len = (len + align) & ~align;
		lt = (void *)ent->lde_name + len;
		type = IFTODT(le16_to_cpu(lt->lt_type));
	}
	return type;
}
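
/*
 * Illustrative arithmetic: struct luda_type holds a single __u16, so
 * align == 1 and a 5-byte name rounds up to len == 6; lt then lands on
 * the 2-byte boundary immediately after the padded name.
 */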
/**
 * build inode number from passed @fid.
 */
__u64 cl_fid_build_ino(const struct lu_fid *fid, int api32)
{
	if (BITS_PER_LONG == 32 || api32)
		RETURN(fid_flatten32(fid));
	else
		RETURN(fid_flatten(fid));
}
/**
 * build inode generation from passed @fid. If our FID overflows the 32-bit
 * inode number then return a non-zero generation to distinguish them.
 */
__u32 cl_fid_build_gen(const struct lu_fid *fid)
{
	__u32 gen;

	if (fid_is_igif(fid)) {
		gen = lu_igif_gen(fid);
		RETURN(gen);
	}

	gen = (fid_flatten(fid) >> 32);
	RETURN(gen);
}
/* lsm is unreliable after hsm implementation as layout can be changed at
 * any time. This is only to support old, non-clio-ized interfaces. It will
 * cause a deadlock if clio operations are called with this extra layout
 * refcount, because in case the layout changes during the IO,
 * ll_layout_refresh() will have to wait for the refcount to become zero
 * to destroy the older layout.
 *
 * Notice that the lsm returned by this function may not be valid unless
 * called inside the layout lock - MDS_INODELOCK_LAYOUT. */
struct lov_stripe_md *ccc_inode_lsm_get(struct inode *inode)
{
	return lov_lsm_get(ll_i2info(inode)->lli_clob);
}
inline void ccc_inode_lsm_put(struct inode *inode, struct lov_stripe_md *lsm)
{
	lov_lsm_put(ll_i2info(inode)->lli_clob, lsm);
}
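
/*
 * Typical get/put pairing (sketch): the reference must be dropped before
 * any clio operation is started on the file, per the caveat above:
 *
 *	struct lov_stripe_md *lsm = ccc_inode_lsm_get(inode);
 *	if (lsm != NULL) {
 *		... read-only peek at the striping ...
 *		ccc_inode_lsm_put(inode, lsm);
 *	}
 */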