/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2013, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * cl code shared between vvp and liblustre (and other Lustre clients in the
 * future).
 *
 *   Author: Nikita Danilov <nikita.danilov@sun.com>
 */
#define DEBUG_SUBSYSTEM S_LLITE

#ifdef __KERNEL__
# include <libcfs/libcfs.h>
# include <linux/fs.h>
# include <linux/sched.h>
# include <linux/mm.h>
# include <linux/quotaops.h>
# include <linux/highmem.h>
# include <linux/pagemap.h>
# include <linux/rbtree.h>
#else /* __KERNEL__ */
#include <sys/types.h>
#include <sys/queue.h>
# include <liblustre.h>
#endif

#include <obd_support.h>
#include <lustre_fid.h>
#include <lustre_lite.h>
#include <lustre_dlm.h>
#include <lustre_ver.h>
#include <lustre_mdc.h>
#include <cl_object.h>

#ifdef __KERNEL__
#include "../llite/llite_internal.h"
#else /* __KERNEL__ */
#include "../liblustre/llite_lib.h"
#endif
const struct cl_req_operations ccc_req_ops;

/*
 * ccc_ prefix stands for "Common Client Code".
 */
static struct kmem_cache *ccc_lock_kmem;
static struct kmem_cache *ccc_object_kmem;
static struct kmem_cache *ccc_thread_kmem;
static struct kmem_cache *ccc_session_kmem;
static struct kmem_cache *ccc_req_kmem;
static struct lu_kmem_descr ccc_caches[] = {
        {
                .ckd_cache = &ccc_lock_kmem,
                .ckd_name  = "ccc_lock_kmem",
                .ckd_size  = sizeof(struct ccc_lock)
        },
        {
                .ckd_cache = &ccc_object_kmem,
                .ckd_name  = "ccc_object_kmem",
                .ckd_size  = sizeof(struct ccc_object)
        },
        {
                .ckd_cache = &ccc_thread_kmem,
                .ckd_name  = "ccc_thread_kmem",
                .ckd_size  = sizeof(struct ccc_thread_info),
        },
        {
                .ckd_cache = &ccc_session_kmem,
                .ckd_name  = "ccc_session_kmem",
                .ckd_size  = sizeof(struct ccc_session)
        },
        {
                .ckd_cache = &ccc_req_kmem,
                .ckd_name  = "ccc_req_kmem",
                .ckd_size  = sizeof(struct ccc_req)
        },
        {
                .ckd_cache = NULL
        }
};
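
/*
 * Note: the NULL .ckd_cache entry terminates the table; lu_kmem_init()
 * creates one kmem_cache per listed entry and lu_kmem_fini() releases
 * them again (see ccc_global_init() and ccc_global_fini() below).
 */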
/*****************************************************************************
 *
 * Vvp device and device type functions.
 *
 */
void *ccc_key_init(const struct lu_context *ctx,
                   struct lu_context_key *key)
{
        struct ccc_thread_info *info;

        OBD_SLAB_ALLOC_PTR_GFP(info, ccc_thread_kmem, __GFP_IO);
        if (info == NULL)
                info = ERR_PTR(-ENOMEM);
        return info;
}

void ccc_key_fini(const struct lu_context *ctx,
                  struct lu_context_key *key, void *data)
{
        struct ccc_thread_info *info = data;
        OBD_SLAB_FREE_PTR(info, ccc_thread_kmem);
}
void *ccc_session_key_init(const struct lu_context *ctx,
                           struct lu_context_key *key)
{
        struct ccc_session *session;

        OBD_SLAB_ALLOC_PTR_GFP(session, ccc_session_kmem, __GFP_IO);
        if (session == NULL)
                session = ERR_PTR(-ENOMEM);
        return session;
}

void ccc_session_key_fini(const struct lu_context *ctx,
                          struct lu_context_key *key, void *data)
{
        struct ccc_session *session = data;
        OBD_SLAB_FREE_PTR(session, ccc_session_kmem);
}
struct lu_context_key ccc_key = {
        .lct_tags = LCT_CL_THREAD,
        .lct_init = ccc_key_init,
        .lct_fini = ccc_key_fini
};

struct lu_context_key ccc_session_key = {
        .lct_tags = LCT_SESSION,
        .lct_init = ccc_session_key_init,
        .lct_fini = ccc_session_key_fini
};

/* type constructor/destructor: ccc_type_{init,fini,start,stop}(). */
// LU_TYPE_INIT_FINI(ccc, &ccc_key, &ccc_session_key);
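
/*
 * ccc_key and ccc_session_key attach per-thread (LCT_CL_THREAD) and
 * per-session (LCT_SESSION) state to a lu_context. A minimal usage sketch,
 * assuming an environment whose context was set up with these keys
 * registered:
 *
 *      struct ccc_thread_info *info = lu_context_key_get(&env->le_ctx,
 *                                                        &ccc_key);
 *
 * Helpers such as ccc_env_io() and ccc_env_thread_attr(), used throughout
 * this file, presumably reduce to this key lookup.
 */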
int ccc_device_init(const struct lu_env *env, struct lu_device *d,
                    const char *name, struct lu_device *next)
{
        struct ccc_device *vdv;
        int rc;
        ENTRY;

        vdv = lu2ccc_dev(d);
        vdv->cdv_next = lu2cl_dev(next);

        LASSERT(d->ld_site != NULL && next->ld_type != NULL);
        next->ld_site = d->ld_site;
        rc = next->ld_type->ldt_ops->ldto_device_init(
                env, next, next->ld_type->ldt_name, NULL);
        if (rc == 0) {
                lu_device_get(next);
                lu_ref_add(&next->ld_reference, "lu-stack", &lu_site_init);
        }
        RETURN(rc);
}
struct lu_device *ccc_device_fini(const struct lu_env *env,
                                  struct lu_device *d)
{
        return cl2lu_dev(lu2ccc_dev(d)->cdv_next);
}
struct lu_device *ccc_device_alloc(const struct lu_env *env,
                                   struct lu_device_type *t,
                                   struct lustre_cfg *cfg,
                                   const struct lu_device_operations *luops,
                                   const struct cl_device_operations *clops)
{
        struct ccc_device *vdv;
        struct lu_device *lud;
        struct cl_site *site;
        int rc;
        ENTRY;

        OBD_ALLOC_PTR(vdv);
        if (vdv == NULL)
                RETURN(ERR_PTR(-ENOMEM));

        lud = &vdv->cdv_cl.cd_lu_dev;
        cl_device_init(&vdv->cdv_cl, t);
        ccc2lu_dev(vdv)->ld_ops = luops;
        vdv->cdv_cl.cd_ops = clops;

        OBD_ALLOC_PTR(site);
        if (site != NULL) {
                rc = cl_site_init(site, &vdv->cdv_cl);
                if (rc == 0)
                        rc = lu_site_init_finish(&site->cs_lu);
                else {
                        LASSERT(lud->ld_site == NULL);
                        CERROR("Cannot init lu_site, rc %d.\n", rc);
                        OBD_FREE_PTR(site);
                }
        } else
                rc = -ENOMEM;
        if (rc != 0) {
                ccc_device_free(env, lud);
                lud = ERR_PTR(rc);
        }
        RETURN(lud);
}
struct lu_device *ccc_device_free(const struct lu_env *env,
                                  struct lu_device *d)
{
        struct ccc_device *vdv = lu2ccc_dev(d);
        struct cl_site *site = lu2cl_site(d->ld_site);
        struct lu_device *next = cl2lu_dev(vdv->cdv_next);

        if (d->ld_site != NULL) {
                cl_site_fini(site);
                OBD_FREE_PTR(site);
        }
        cl_device_fini(lu2cl_dev(d));
        OBD_FREE_PTR(vdv);
        return next;
}
int ccc_req_init(const struct lu_env *env, struct cl_device *dev,
                 struct cl_req *req)
{
        struct ccc_req *vrq;
        int result;

        OBD_SLAB_ALLOC_PTR_GFP(vrq, ccc_req_kmem, __GFP_IO);
        if (vrq != NULL) {
                cl_req_slice_add(req, &vrq->crq_cl, dev, &ccc_req_ops);
                result = 0;
        } else
                result = -ENOMEM;
        return result;
}
/**
 * An `emergency' environment used by ccc_inode_fini() when cl_env_get()
 * fails. Access to this environment is serialized by the
 * ccc_inode_fini_guard mutex.
 */
static struct lu_env *ccc_inode_fini_env = NULL;

/**
 * A mutex serializing calls to slp_inode_fini() under extreme memory
 * pressure, when environments cannot be allocated.
 */
static DEFINE_MUTEX(ccc_inode_fini_guard);
static int dummy_refcheck;
int ccc_global_init(struct lu_device_type *device_type)
{
        int result;

        result = lu_kmem_init(ccc_caches);
        if (result)
                return result;

        result = lu_device_type_init(device_type);
        if (result)
                goto out_kmem;

        ccc_inode_fini_env = cl_env_alloc(&dummy_refcheck,
                                          LCT_REMEMBER|LCT_NOREF);
        if (IS_ERR(ccc_inode_fini_env)) {
                result = PTR_ERR(ccc_inode_fini_env);
                goto out_device;
        }

        ccc_inode_fini_env->le_ctx.lc_cookie = 0x4;
        return 0;
out_device:
        lu_device_type_fini(device_type);
out_kmem:
        lu_kmem_fini(ccc_caches);
        return result;
}
void ccc_global_fini(struct lu_device_type *device_type)
{
        if (ccc_inode_fini_env != NULL) {
                cl_env_put(ccc_inode_fini_env, &dummy_refcheck);
                ccc_inode_fini_env = NULL;
        }
        lu_device_type_fini(device_type);
        lu_kmem_fini(ccc_caches);
}
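
/*
 * Note that ccc_global_fini() releases resources in the reverse of the
 * order in which ccc_global_init() acquired them, mirroring the
 * out_device/out_kmem error paths above: the emergency environment first,
 * then the device type, then the kmem caches.
 */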
/*****************************************************************************
 *
 * Object operations.
 *
 */
struct lu_object *ccc_object_alloc(const struct lu_env *env,
                                   const struct lu_object_header *unused,
                                   struct lu_device *dev,
                                   const struct cl_object_operations *clops,
                                   const struct lu_object_operations *luops)
{
        struct ccc_object *vob;
        struct lu_object *obj;

        OBD_SLAB_ALLOC_PTR_GFP(vob, ccc_object_kmem, __GFP_IO);
        if (vob != NULL) {
                struct cl_object_header *hdr;

                obj = ccc2lu(vob);
                hdr = &vob->cob_header;
                cl_object_header_init(hdr);
                hdr->coh_page_bufsize = cfs_size_round(sizeof(struct cl_page));

                lu_object_init(obj, &hdr->coh_lu, dev);
                lu_object_add_top(&hdr->coh_lu, obj);

                vob->cob_cl.co_ops = clops;
                obj->lo_ops = luops;
        } else
                obj = NULL;
        return obj;
}
int ccc_object_init0(const struct lu_env *env,
                     struct ccc_object *vob,
                     const struct cl_object_conf *conf)
{
        vob->cob_inode = conf->coc_inode;
        vob->cob_transient_pages = 0;
        cl_object_page_init(&vob->cob_cl, sizeof(struct ccc_page));
        return 0;
}
int ccc_object_init(const struct lu_env *env, struct lu_object *obj,
                    const struct lu_object_conf *conf)
{
        struct ccc_device *dev = lu2ccc_dev(obj->lo_dev);
        struct ccc_object *vob = lu2ccc(obj);
        struct lu_object *below;
        struct lu_device *under;
        int result;

        under = &dev->cdv_next->cd_lu_dev;
        below = under->ld_ops->ldo_object_alloc(env, obj->lo_header, under);
        if (below != NULL) {
                const struct cl_object_conf *cconf;

                cconf = lu2cl_conf(conf);
                CFS_INIT_LIST_HEAD(&vob->cob_pending_list);
                lu_object_add(obj, below);
                result = ccc_object_init0(env, vob, cconf);
        } else
                result = -ENOMEM;
        return result;
}
void ccc_object_free(const struct lu_env *env, struct lu_object *obj)
{
        struct ccc_object *vob = lu2ccc(obj);

        lu_object_fini(obj);
        lu_object_header_fini(obj->lo_header);
        OBD_SLAB_FREE_PTR(vob, ccc_object_kmem);
}
int ccc_lock_init(const struct lu_env *env,
                  struct cl_object *obj, struct cl_lock *lock,
                  const struct cl_io *unused,
                  const struct cl_lock_operations *lkops)
{
        struct ccc_lock *clk;
        int result;

        CLOBINVRNT(env, obj, ccc_object_invariant(obj));

        OBD_SLAB_ALLOC_PTR_GFP(clk, ccc_lock_kmem, __GFP_IO);
        if (clk != NULL) {
                cl_lock_slice_add(lock, &clk->clk_cl, obj, lkops);
                result = 0;
        } else
                result = -ENOMEM;
        return result;
}
int ccc_attr_set(const struct lu_env *env, struct cl_object *obj,
                 const struct cl_attr *attr, unsigned valid)
{
        return 0;
}
int ccc_object_glimpse(const struct lu_env *env,
                       const struct cl_object *obj, struct ost_lvb *lvb)
{
        struct inode *inode = ccc_object_inode(obj);

        ENTRY;
        lvb->lvb_mtime = cl_inode_mtime(inode);
        lvb->lvb_atime = cl_inode_atime(inode);
        lvb->lvb_ctime = cl_inode_ctime(inode);
        /*
         * LU-417: Add dirty pages block count lest i_blocks reports 0, some
         * "cp" or "tar" on remote node may think it's a completely sparse
         * file and skip it.
         */
        if (lvb->lvb_size > 0 && lvb->lvb_blocks == 0)
                lvb->lvb_blocks = dirty_cnt(inode);
        RETURN(0);
}
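
/*
 * A sketch of how the LVB filled above is consumed (assumption: the glimpse
 * AST path): when another client stats the file, the server glimpses the
 * client holding the lock, and this method supplies the local
 * mtime/atime/ctime plus a non-zero block count, so that e.g. "cp" does not
 * treat a file whose dirty pages are not yet flushed as completely sparse
 * (see LU-417 above).
 */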
int ccc_conf_set(const struct lu_env *env, struct cl_object *obj,
                 const struct cl_object_conf *conf)
{
        /* TODO: destroy all pages attached to this object. */
        return 0;
}
static void ccc_object_size_lock(struct cl_object *obj)
{
        struct inode *inode = ccc_object_inode(obj);

        cl_isize_lock(inode);
        cl_object_attr_lock(obj);
}

static void ccc_object_size_unlock(struct cl_object *obj)
{
        struct inode *inode = ccc_object_inode(obj);

        cl_object_attr_unlock(obj);
        cl_isize_unlock(inode);
}
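
/*
 * The two helpers above fix the lock ordering used in this file:
 * cl_isize_lock(inode) is always taken before cl_object_attr_lock(obj) and
 * released in the reverse order. ccc_prep_size() below relies on this
 * pairing when it drops both locks before calling cl_glimpse_lock().
 */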
/*****************************************************************************
 *
 * Page operations.
 *
 */
int ccc_fail(const struct lu_env *env, const struct cl_page_slice *slice)
{
        /*
         * Cached read?
         */
        LBUG();
        return 0;
}
void ccc_transient_page_verify(const struct cl_page *page)
{
}
int ccc_transient_page_own(const struct lu_env *env,
                           const struct cl_page_slice *slice,
                           struct cl_io *unused,
                           int nonblock)
{
        ccc_transient_page_verify(slice->cpl_page);
        return 0;
}
void ccc_transient_page_assume(const struct lu_env *env,
                               const struct cl_page_slice *slice,
                               struct cl_io *unused)
{
        ccc_transient_page_verify(slice->cpl_page);
}

void ccc_transient_page_unassume(const struct lu_env *env,
                                 const struct cl_page_slice *slice,
                                 struct cl_io *unused)
{
        ccc_transient_page_verify(slice->cpl_page);
}

void ccc_transient_page_disown(const struct lu_env *env,
                               const struct cl_page_slice *slice,
                               struct cl_io *unused)
{
        ccc_transient_page_verify(slice->cpl_page);
}
void ccc_transient_page_discard(const struct lu_env *env,
                                const struct cl_page_slice *slice,
                                struct cl_io *unused)
{
        struct cl_page *page = slice->cpl_page;

        ccc_transient_page_verify(slice->cpl_page);

        /*
         * For transient pages, remove the page from the radix tree.
         */
        cl_page_delete(env, page);
}
int ccc_transient_page_prep(const struct lu_env *env,
                            const struct cl_page_slice *slice,
                            struct cl_io *unused)
{
        ENTRY;
        /* transient pages should always be sent. */
        RETURN(0);
}
/*****************************************************************************
 *
 * Lock operations.
 *
 */
void ccc_lock_delete(const struct lu_env *env,
                     const struct cl_lock_slice *slice)
{
        CLOBINVRNT(env, slice->cls_obj, ccc_object_invariant(slice->cls_obj));
}

void ccc_lock_fini(const struct lu_env *env, struct cl_lock_slice *slice)
{
        struct ccc_lock *clk = cl2ccc_lock(slice);
        OBD_SLAB_FREE_PTR(clk, ccc_lock_kmem);
}
int ccc_lock_enqueue(const struct lu_env *env,
                     const struct cl_lock_slice *slice,
                     struct cl_io *unused, __u32 enqflags)
{
        CLOBINVRNT(env, slice->cls_obj, ccc_object_invariant(slice->cls_obj));
        return 0;
}

int ccc_lock_unuse(const struct lu_env *env, const struct cl_lock_slice *slice)
{
        CLOBINVRNT(env, slice->cls_obj, ccc_object_invariant(slice->cls_obj));
        return 0;
}

int ccc_lock_wait(const struct lu_env *env, const struct cl_lock_slice *slice)
{
        CLOBINVRNT(env, slice->cls_obj, ccc_object_invariant(slice->cls_obj));
        return 0;
}
/**
 * Implementation of cl_lock_operations::clo_fits_into() methods for the ccc
 * layer. This function is executed every time io finds an existing lock in
 * the lock cache while creating a new lock. It has to decide whether the
 * cached lock "fits" into the io.
 *
 * \param slice lock to be checked
 * \param io    IO that wants a lock.
 *
 * \see lov_lock_fits_into().
 */
int ccc_lock_fits_into(const struct lu_env *env,
                       const struct cl_lock_slice *slice,
                       const struct cl_lock_descr *need,
                       const struct cl_io *io)
{
        const struct cl_lock       *lock  = slice->cls_lock;
        const struct cl_lock_descr *descr = &lock->cll_descr;
        const struct ccc_io        *cio   = ccc_env_io(env);
        int                         result;

        ENTRY;
        /*
         * Work around DLM peculiarity: it assumes that glimpse
         * (LDLM_FL_HAS_INTENT) lock is always LCK_PR, and returns a read
         * lock when asked for an LCK_PW lock with LDLM_FL_HAS_INTENT flag
         * set. Make sure that glimpse doesn't get a CLM_WRITE top-lock, so
         * that it doesn't enqueue CLM_WRITE sub-locks.
         */
        if (cio->cui_glimpse)
                result = descr->cld_mode != CLM_WRITE;
        /*
         * Also, don't match incomplete write locks for read, otherwise read
         * would enqueue missing sub-locks in the write mode.
         */
        else if (need->cld_mode != descr->cld_mode)
                result = lock->cll_state >= CLS_ENQUEUED;
        else
                result = 1;
        RETURN(result);
}
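
/*
 * Worked example (a sketch, not an exhaustive case analysis): a glimpse io
 * (cui_glimpse set) asked to match a cached CLM_WRITE lock gets result == 0,
 * forcing a fresh, non-write enqueue; a CLM_READ io asked to match a
 * CLM_WRITE lock that has not yet reached CLS_ENQUEUED also gets 0, so the
 * read does not piggyback on a write lock whose sub-locks may be missing.
 */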
/**
 * Implements cl_lock_operations::clo_state() method for the ccc layer,
 * invoked whenever lock state changes. Transfers into the inode object
 * attributes that might have been updated as a result of acquiring the
 * lock.
 */
void ccc_lock_state(const struct lu_env *env,
                    const struct cl_lock_slice *slice,
                    enum cl_lock_state state)
{
        struct cl_lock *lock = slice->cls_lock;
        ENTRY;

        /*
         * Refresh inode attributes when the lock is moving into CLS_HELD
         * state, and only when this is a result of a real enqueue, rather
         * than of finding the lock in the cache.
         */
        if (state == CLS_HELD && lock->cll_state < CLS_HELD) {
                struct cl_object *obj;
                struct inode     *inode;

                obj   = slice->cls_obj;
                inode = ccc_object_inode(obj);

                /* vmtruncate() sets the i_size
                 * under both a DLM lock and the
                 * ll_inode_size_lock(). If we don't get the
                 * ll_inode_size_lock() here we can match the DLM lock and
                 * reset i_size. generic_file_write can then trust the
                 * stale i_size when doing appending writes and effectively
                 * cancel the result of the truncate. Getting the
                 * ll_inode_size_lock() after the enqueue maintains the DLM
                 * -> ll_inode_size_lock() acquiring order. */
                if (lock->cll_descr.cld_start == 0 &&
                    lock->cll_descr.cld_end == CL_PAGE_EOF)
                        cl_merge_lvb(env, inode);
        }
        EXIT;
}
/*****************************************************************************
 *
 * io operations.
 *
 */

void ccc_io_fini(const struct lu_env *env, const struct cl_io_slice *ios)
{
        struct cl_io *io = ios->cis_io;

        CLOBINVRNT(env, io->ci_obj, ccc_object_invariant(io->ci_obj));
}
int ccc_io_one_lock_index(const struct lu_env *env, struct cl_io *io,
                          __u32 enqflags, enum cl_lock_mode mode,
                          pgoff_t start, pgoff_t end)
{
        struct ccc_io        *cio   = ccc_env_io(env);
        struct cl_lock_descr *descr = &cio->cui_link.cill_descr;
        struct cl_object     *obj   = io->ci_obj;

        CLOBINVRNT(env, obj, ccc_object_invariant(obj));
        ENTRY;

        CDEBUG(D_VFSTRACE, "lock: %d [%lu, %lu]\n", mode, start, end);

        memset(&cio->cui_link, 0, sizeof(cio->cui_link));

        if (cio->cui_fd && (cio->cui_fd->fd_flags & LL_FILE_GROUP_LOCKED)) {
                descr->cld_mode = CLM_GROUP;
                descr->cld_gid  = cio->cui_fd->fd_grouplock.cg_gid;
        } else {
                descr->cld_mode = mode;
        }
        descr->cld_obj   = obj;
        descr->cld_start = start;
        descr->cld_end   = end;
        descr->cld_enq_flags = enqflags;

        cl_io_lock_add(env, io, &cio->cui_link);
        RETURN(0);
}
void ccc_io_update_iov(const struct lu_env *env,
                       struct ccc_io *cio, struct cl_io *io)
{
        int i;
        size_t size = io->u.ci_rw.crw_count;

        cio->cui_iov_olen = 0;
        if (!cl_is_normalio(env, io) || cio->cui_tot_nrsegs == 0)
                return;

        for (i = 0; i < cio->cui_tot_nrsegs; i++) {
                struct iovec *iv = &cio->cui_iov[i];

                if (iv->iov_len < size)
                        size -= iv->iov_len;
                else {
                        if (iv->iov_len > size) {
                                cio->cui_iov_olen = iv->iov_len;
                                iv->iov_len = size;
                        }
                        break;
                }
        }

        cio->cui_nrsegs = i + 1;
        LASSERTF(cio->cui_tot_nrsegs >= cio->cui_nrsegs,
                 "tot_nrsegs: %lu, nrsegs: %lu\n",
                 cio->cui_tot_nrsegs, cio->cui_nrsegs);
}
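
/*
 * Worked example (hypothetical numbers): for a 100-byte write described by
 * iovecs of 60 and 80 bytes, the loop above leaves the first segment alone,
 * records cui_iov_olen = 80 for the second, trims it to iov_len = 40, and
 * sets cui_nrsegs = 2, so that exactly crw_count bytes are submitted.
 */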
int ccc_io_one_lock(const struct lu_env *env, struct cl_io *io,
                    __u32 enqflags, enum cl_lock_mode mode,
                    loff_t start, loff_t end)
{
        struct cl_object *obj = io->ci_obj;
        return ccc_io_one_lock_index(env, io, enqflags, mode,
                                     cl_index(obj, start), cl_index(obj, end));
}
void ccc_io_end(const struct lu_env *env, const struct cl_io_slice *ios)
{
        CLOBINVRNT(env, ios->cis_io->ci_obj,
                   ccc_object_invariant(ios->cis_io->ci_obj));
}
void ccc_io_advance(const struct lu_env *env,
                    const struct cl_io_slice *ios,
                    size_t nob)
{
        struct ccc_io    *cio = cl2ccc_io(env, ios);
        struct cl_io     *io  = ios->cis_io;
        struct cl_object *obj = ios->cis_io->ci_obj;

        CLOBINVRNT(env, obj, ccc_object_invariant(obj));

        if (!cl_is_normalio(env, io))
                return;

        LASSERT(cio->cui_tot_nrsegs >= cio->cui_nrsegs);
        LASSERT(cio->cui_tot_count  >= nob);

        cio->cui_iov        += cio->cui_nrsegs;
        cio->cui_tot_nrsegs -= cio->cui_nrsegs;
        cio->cui_tot_count  -= nob;

        /* update the iov */
        if (cio->cui_iov_olen > 0) {
                struct iovec *iv;

                cio->cui_iov--;
                cio->cui_tot_nrsegs++;
                iv = &cio->cui_iov[0];
                if (io->ci_continue) {
                        iv->iov_base += iv->iov_len;
                        LASSERT(cio->cui_iov_olen > iv->iov_len);
                        iv->iov_len = cio->cui_iov_olen - iv->iov_len;
                } else {
                        /* restore the iov_len, in case of restart io. */
                        iv->iov_len = cio->cui_iov_olen;
                }
                cio->cui_iov_olen = 0;
        }
}
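
/*
 * Continuing the example given above ccc_io_update_iov(): after those 100
 * bytes complete with io->ci_continue set, the trimmed second segment is
 * revived as the head of the remaining iovec: iov_base advances past the 40
 * bytes already transferred and iov_len becomes 80 - 40 = 40, the tail
 * recorded in cui_iov_olen. Without ci_continue (a restarted io) the full
 * 80-byte iov_len is simply restored.
 */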
/**
 * Helper function that, if necessary, adjusts the file size (inode->i_size)
 * when the position at offset \a pos is accessed. File size can be
 * arbitrarily stale on a Lustre client, but the client at least knows KMS.
 * If the accessed area is inside [0, KMS], set the file size to KMS;
 * otherwise glimpse the file size.
 *
 * Locking: cl_isize_lock is used to serialize changes to inode size and to
 * protect consistency between inode size and cl_object
 * attributes. cl_object_size_lock() protects consistency between cl_attr's
 * of top-object and sub-objects.
 */
int ccc_prep_size(const struct lu_env *env, struct cl_object *obj,
                  struct cl_io *io, loff_t start, size_t count, int *exceed)
{
        struct cl_attr *attr  = ccc_env_thread_attr(env);
        struct inode   *inode = ccc_object_inode(obj);
        loff_t          pos   = start + count - 1;
        loff_t kms;
        int result;

        /*
         * Consistency guarantees: following possibilities exist for the
         * relation between region being accessed and real file size at this
         * moment:
         *
         * (A): the region is completely inside of the file;
         *
         * (B-x): x bytes of region are inside of the file, the rest is
         * outside;
         *
         * (C): the region is completely outside of the file.
         *
         * This classification is stable under DLM lock already acquired by
         * the caller, because to change the class, another client has to
         * take a DLM lock conflicting with our lock. Also, any updates to
         * ->i_size by other threads on this client are serialized by
         * ll_inode_size_lock(). This guarantees that short reads are handled
         * correctly in the face of concurrent writes and truncates.
         */
        ccc_object_size_lock(obj);
        result = cl_object_attr_get(env, obj, attr);
        if (result == 0) {
                kms = attr->cat_kms;
                if (pos > kms) {
                        /*
                         * A glimpse is necessary to determine whether we
                         * return a short read (B) or some zeroes at the end
                         * of the buffer (C).
                         */
                        ccc_object_size_unlock(obj);
                        result = cl_glimpse_lock(env, io, inode, obj, 0);
                        if (result == 0 && exceed != NULL) {
                                /* If the objective page index exceeds the
                                 * end-of-file page index, return directly.
                                 * Do not expect the kernel to check such a
                                 * case correctly; linux-2.6.18-128.1.1 fails
                                 * to do so.
                                 * --bug 17336 */
                                loff_t size = cl_isize_read(inode);
                                unsigned long cur_index = start >>
                                                          PAGE_CACHE_SHIFT;

                                if ((size == 0 && cur_index != 0) ||
                                    (((size - 1) >> PAGE_CACHE_SHIFT) <
                                     cur_index))
                                        *exceed = 1;
                        }
                        return result;
                } else {
                        /*
                         * The region is within kms and, hence, within the
                         * real file size (A). We need to increase i_size to
                         * cover the read region so that generic_file_read()
                         * will do its job, but that doesn't mean the kms
                         * size is _correct_, it is only the _minimum_ size.
                         * If someone does a stat they will get the correct
                         * size which will always be >= the kms value here.
                         * b=11081
                         */
                        if (cl_isize_read(inode) < kms) {
                                cl_isize_write_nolock(inode, kms);
                                CDEBUG(D_VFSTRACE,
                                       DFID" updating i_size "LPU64"\n",
                                       PFID(lu_object_fid(&obj->co_lu)),
                                       (__u64)cl_isize_read(inode));
                        }
                }
        }
        ccc_object_size_unlock(obj);
        return result;
}
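
/*
 * Example of the (A)/(B)/(C) classification above, assuming kms == 1000:
 * a read of [0, 512) is case (A), so i_size is raised to at least kms with
 * typically no extra RPC; a read of [900, 1100) may be (B) or (C) depending
 * on what other clients have written, so a glimpse lock is taken to learn
 * the authoritative size before deciding how short the read should be.
 */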
/*****************************************************************************
 *
 * Transfer operations.
 *
 */

void ccc_req_completion(const struct lu_env *env,
                        const struct cl_req_slice *slice, int ioret)
{
        struct ccc_req *vrq;

        if (ioret > 0)
                cl_stats_tally(slice->crs_dev, slice->crs_req->crq_type, ioret);

        vrq = cl2ccc_req(slice);
        OBD_SLAB_FREE_PTR(vrq, ccc_req_kmem);
}
/**
 * Implementation of struct cl_req_operations::cro_attr_set() for the ccc
 * layer. ccc is responsible for
 *
 *    - o_[mac]time
 *
 *    - o_mode
 *
 *    - o_parent_seq
 *
 *    - o_[ug]id
 *
 *    - o_parent_oid
 *
 *    - o_parent_ver
 *
 *    - o_ioepoch,
 *
 *  and capability.
 */
void ccc_req_attr_set(const struct lu_env *env,
                      const struct cl_req_slice *slice,
                      const struct cl_object *obj,
                      struct cl_req_attr *attr, obd_valid flags)
{
        struct inode *inode;
        struct obdo  *oa;
        obd_flag      valid_flags;

        oa = attr->cra_oa;
        inode = ccc_object_inode(obj);
        valid_flags = OBD_MD_FLTYPE;

        if ((flags & OBD_MD_FLOSSCAPA) != 0) {
                LASSERT(attr->cra_capa == NULL);
                attr->cra_capa = cl_capa_lookup(inode,
                                                slice->crs_req->crq_type);
        }

        if (slice->crs_req->crq_type == CRT_WRITE) {
                if (flags & OBD_MD_FLEPOCH) {
                        oa->o_valid |= OBD_MD_FLEPOCH;
                        oa->o_ioepoch = cl_i2info(inode)->lli_ioepoch;
                        valid_flags |= OBD_MD_FLMTIME | OBD_MD_FLCTIME |
                                       OBD_MD_FLUID | OBD_MD_FLGID;
                }
        }
        obdo_from_inode(oa, inode, valid_flags & flags);
        obdo_set_parent_fid(oa, &cl_i2info(inode)->lli_fid);
        memcpy(attr->cra_jobid, cl_i2info(inode)->lli_jobid,
               JOBSTATS_JOBID_SIZE);
}
const struct cl_req_operations ccc_req_ops = {
        .cro_attr_set   = ccc_req_attr_set,
        .cro_completion = ccc_req_completion
};
int cl_setattr_ost(struct inode *inode, const struct iattr *attr,
                   struct obd_capa *capa)
{
        struct lu_env *env;
        struct cl_io  *io;
        int            result;
        int            refcheck;

        ENTRY;

        env = cl_env_get(&refcheck);
        if (IS_ERR(env))
                RETURN(PTR_ERR(env));

        io = ccc_env_thread_io(env);
        io->ci_obj = cl_i2info(inode)->lli_clob;

        io->u.ci_setattr.sa_attr.lvb_atime = LTIME_S(attr->ia_atime);
        io->u.ci_setattr.sa_attr.lvb_mtime = LTIME_S(attr->ia_mtime);
        io->u.ci_setattr.sa_attr.lvb_ctime = LTIME_S(attr->ia_ctime);
        io->u.ci_setattr.sa_attr.lvb_size = attr->ia_size;
        io->u.ci_setattr.sa_valid = attr->ia_valid;
        io->u.ci_setattr.sa_capa = capa;

again:
        if (cl_io_init(env, io, CIT_SETATTR, io->ci_obj) == 0) {
                struct ccc_io *cio = ccc_env_io(env);

                if (attr->ia_valid & ATTR_FILE)
                        /* populate the file descriptor for ftruncate to honor
                         * group lock - see LU-787 */
                        cio->cui_fd = cl_iattr2fd(inode, attr);

                result = cl_io_loop(env, io);
        } else {
                result = io->ci_result;
        }
        cl_io_fini(env, io);
        if (unlikely(io->ci_need_restart))
                goto again;
        /* HSM import case: file is released, cannot be restored;
         * no need to fail except if restore registration failed
         * with -ENODATA */
        if (result == -ENODATA && io->ci_restore_needed &&
            io->ci_result != -ENODATA)
                result = 0;
        cl_env_put(env, &refcheck);
        RETURN(result);
}
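
/*
 * A typical caller sketch (assuming the llite setattr path): truncate
 * arrives as an iattr with ATTR_SIZE set, llite packs it into this
 * CIT_SETATTR io, and a group-locked ftruncate additionally carries
 * ATTR_FILE so that the group lock owner recorded in cui_fd is honored
 * (see LU-787 above).
 */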
/*****************************************************************************
 *
 * Type conversions.
 *
 */

struct lu_device *ccc2lu_dev(struct ccc_device *vdv)
{
        return &vdv->cdv_cl.cd_lu_dev;
}

struct ccc_device *lu2ccc_dev(const struct lu_device *d)
{
        return container_of0(d, struct ccc_device, cdv_cl.cd_lu_dev);
}

struct ccc_device *cl2ccc_dev(const struct cl_device *d)
{
        return container_of0(d, struct ccc_device, cdv_cl);
}

struct lu_object *ccc2lu(struct ccc_object *vob)
{
        return &vob->cob_cl.co_lu;
}

struct ccc_object *lu2ccc(const struct lu_object *obj)
{
        return container_of0(obj, struct ccc_object, cob_cl.co_lu);
}

struct ccc_object *cl2ccc(const struct cl_object *obj)
{
        return container_of0(obj, struct ccc_object, cob_cl);
}

struct ccc_lock *cl2ccc_lock(const struct cl_lock_slice *slice)
{
        return container_of(slice, struct ccc_lock, clk_cl);
}

struct ccc_io *cl2ccc_io(const struct lu_env *env,
                         const struct cl_io_slice *slice)
{
        struct ccc_io *cio;

        cio = container_of(slice, struct ccc_io, cui_cl);
        LASSERT(cio == ccc_env_io(env));
        return cio;
}

struct ccc_req *cl2ccc_req(const struct cl_req_slice *slice)
{
        return container_of0(slice, struct ccc_req, crq_cl);
}

struct page *cl2vm_page(const struct cl_page_slice *slice)
{
        return cl2ccc_page(slice)->cpg_page;
}
/*****************************************************************************
 *
 * Accessors.
 *
 */

int ccc_object_invariant(const struct cl_object *obj)
{
        struct inode         *inode = ccc_object_inode(obj);
        struct cl_inode_info *lli   = cl_i2info(inode);

        return (S_ISREG(cl_inode_mode(inode)) ||
                /* i_mode of unlinked inode is zeroed. */
                cl_inode_mode(inode) == 0) && lli->lli_clob == obj;
}

struct inode *ccc_object_inode(const struct cl_object *obj)
{
        return cl2ccc(obj)->cob_inode;
}
/**
 * Returns a pointer to the cl_page associated with \a vmpage, without
 * acquiring an additional reference to the resulting page. This is an
 * unsafe version of cl_vmpage_page() that can only be used under the
 * vmpage lock.
 */
struct cl_page *ccc_vmpage_page_transient(struct page *vmpage)
{
        KLASSERT(PageLocked(vmpage));
        return (struct cl_page *)vmpage->private;
}
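
/*
 * Note: this relies on the cl_page pointer being cached in vmpage->private
 * while the page is covered by clio; holding the vmpage lock keeps that
 * association stable, which is what makes this "unsafe" variant of
 * cl_vmpage_page() safe to use here without taking a reference.
 */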
/**
 * Initialize or update CLIO structures for regular files when new
 * meta-data arrives from the server.
 *
 * \param inode regular file inode
 * \param md    new file metadata from MDS
 * - allocates cl_object if necessary,
 * - updates layout, if object was already here.
 */
int cl_file_inode_init(struct inode *inode, struct lustre_md *md)
{
        struct lu_env        *env;
        struct cl_inode_info *lli;
        struct cl_object     *clob;
        struct lu_site       *site;
        struct lu_fid        *fid;
        struct cl_object_conf conf = {
                .coc_inode = inode,
                .u = {
                        .coc_md = md
                }
        };
        int result = 0;
        int refcheck;

        LASSERT(md->body->valid & OBD_MD_FLID);
        LASSERT(S_ISREG(cl_inode_mode(inode)));

        env = cl_env_get(&refcheck);
        if (IS_ERR(env))
                return PTR_ERR(env);

        site = cl_i2sbi(inode)->ll_site;
        lli  = cl_i2info(inode);
        fid  = &lli->lli_fid;
        LASSERT(fid_is_sane(fid));

        if (lli->lli_clob == NULL) {
                /* clob is slave of inode, empty lli_clob means for new inode,
                 * there is no clob in cache with the given fid, so it is
                 * unnecessary to perform lookup-alloc-lookup-insert, just
                 * alloc and insert directly. */
#ifdef __KERNEL__
                LASSERT(inode->i_state & I_NEW);
#endif
                conf.coc_lu.loc_flags = LOC_F_NEW;
                clob = cl_object_find(env, lu2cl_dev(site->ls_top_dev),
                                      fid, &conf);
                if (!IS_ERR(clob)) {
                        /*
                         * No locking is necessary, as the new inode is
                         * locked by the I_NEW bit.
                         */
                        lli->lli_clob = clob;
                        lli->lli_has_smd = lsm_has_objects(md->lsm);
                        lu_object_ref_add(&clob->co_lu, "inode", inode);
                } else
                        result = PTR_ERR(clob);
        } else {
                result = cl_conf_set(env, lli->lli_clob, &conf);
        }

        cl_env_put(env, &refcheck);

        if (result != 0)
                CERROR("Failure to initialize cl object "DFID": %d\n",
                       PFID(fid), result);
        return result;
}
/**
 * Wait for other users to drop their references to the object first, then
 * drop the last one, which leads to the object being destroyed immediately.
 * Must be called after cl_object_kill() against this object.
 *
 * The reason we want to do this is: destroying the top object will wait for
 * sub-objects being destroyed first, so we can't let the bottom layer (e.g.
 * from ASTs) initiate destruction of the top object, which may deadlock.
 * See bz22520.
 */
static void cl_object_put_last(struct lu_env *env, struct cl_object *obj)
{
        struct lu_object_header *header = obj->co_lu.lo_header;
        wait_queue_t             waiter;

        if (unlikely(atomic_read(&header->loh_ref) != 1)) {
                struct lu_site          *site = obj->co_lu.lo_dev->ld_site;
                struct lu_site_bkt_data *bkt;

                bkt = lu_site_bkt_from_fid(site, &header->loh_fid);

                init_waitqueue_entry_current(&waiter);
                add_wait_queue(&bkt->lsb_marche_funebre, &waiter);

                while (1) {
                        set_current_state(TASK_UNINTERRUPTIBLE);
                        if (atomic_read(&header->loh_ref) == 1)
                                break;
                        waitq_wait(&waiter, TASK_UNINTERRUPTIBLE);
                }

                set_current_state(TASK_RUNNING);
                remove_wait_queue(&bkt->lsb_marche_funebre, &waiter);
        }

        cl_object_put(env, obj);
}
void cl_inode_fini(struct inode *inode)
{
        struct lu_env        *env;
        struct cl_inode_info *lli  = cl_i2info(inode);
        struct cl_object     *clob = lli->lli_clob;
        int refcheck;
        int emergency;

        if (clob != NULL) {
                void *cookie;

                cookie = cl_env_reenter();
                env = cl_env_get(&refcheck);
                emergency = IS_ERR(env);
                if (emergency) {
                        mutex_lock(&ccc_inode_fini_guard);
                        LASSERT(ccc_inode_fini_env != NULL);
                        cl_env_implant(ccc_inode_fini_env, &refcheck);
                        env = ccc_inode_fini_env;
                }
                /*
                 * cl_object cache is a slave to inode cache (which, in turn
                 * is a slave to dentry cache), don't keep cl_object in memory
                 * when its master is evicted.
                 */
                cl_object_kill(env, clob);
                lu_object_ref_del(&clob->co_lu, "inode", inode);
                cl_object_put_last(env, clob);
                lli->lli_clob = NULL;
                if (emergency) {
                        cl_env_unplant(ccc_inode_fini_env, &refcheck);
                        mutex_unlock(&ccc_inode_fini_guard);
                } else
                        cl_env_put(env, &refcheck);
                cl_env_reexit(cookie);
        }
}
/**
 * return IF_* type for given lu_dirent entry.
 * IF_* flag should be converted to the particular OS file type in the
 * platform llite module.
 */
__u16 ll_dirent_type_get(struct lu_dirent *ent)
{
        __u16 type = 0;
        struct luda_type *lt;
        int len = 0;

        if (le32_to_cpu(ent->lde_attrs) & LUDA_TYPE) {
                const unsigned align = sizeof(struct luda_type) - 1;

                len = le16_to_cpu(ent->lde_namelen);
                len = (len + align) & ~align;
                lt = (void *)ent->lde_name + len;
                type = IFTODT(le16_to_cpu(lt->lt_type));
        }
        return type;
}
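
/*
 * Alignment arithmetic example: with sizeof(struct luda_type) == 2 (an
 * assumption about the wire format), align == 1 and a 5-byte name gives
 * len = (5 + 1) & ~1 = 6, so the luda_type record always starts on a
 * 2-byte boundary right after the padded name.
 */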
/**
 * build inode number from passed @fid
 */
__u64 cl_fid_build_ino(const struct lu_fid *fid, int api32)
{
        if (BITS_PER_LONG == 32 || api32)
                RETURN(fid_flatten32(fid));
        else
                RETURN(fid_flatten(fid));
}

/**
 * build inode generation from passed @fid. If our FID overflows the 32-bit
 * inode number then return a non-zero generation to distinguish them.
 */
__u32 cl_fid_build_gen(const struct lu_fid *fid)
{
        __u32 gen;
        ENTRY;

        if (fid_is_igif(fid)) {
                gen = lu_igif_gen(fid);
                RETURN(gen);
        }

        gen = (fid_flatten(fid) >> 32);
        RETURN(gen);
}
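
/*
 * Sketch of the mapping (see fid_flatten()/fid_flatten32() for the exact
 * bit layout): the 128-bit FID is hashed down to a 64-bit inode number, or
 * to 32 bits for 32-bit clients or when the 32-bit API is requested
 * (api32); cl_fid_build_gen() above then returns the bits lost in the
 * 32-bit squeeze (or the IGIF generation), so that the ino+generation pair
 * still identifies the file.
 */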
/* lsm is unreliable after hsm implementation as layout can be changed at
 * any time. This is only to support old, non-clio-ized interfaces. It will
 * cause a deadlock if clio operations are called with this extra layout
 * refcount, because if the layout changes during the IO,
 * ll_layout_refresh() will have to wait for the refcount to become zero
 * to destroy the older layout.
 *
 * Notice that the lsm returned by this function may not be valid unless
 * called inside layout lock - MDS_INODELOCK_LAYOUT. */
struct lov_stripe_md *ccc_inode_lsm_get(struct inode *inode)
{
        return lov_lsm_get(cl_i2info(inode)->lli_clob);
}

inline void ccc_inode_lsm_put(struct inode *inode, struct lov_stripe_md *lsm)
{
        lov_lsm_put(cl_i2info(inode)->lli_clob, lsm);
}