/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * cl code shared between vvp and liblustre (and other Lustre clients in the
 * future).
 *
 *   Author: Nikita Danilov <nikita.danilov@sun.com>
 */
#define DEBUG_SUBSYSTEM S_LLITE

#ifdef __KERNEL__
# include <libcfs/libcfs.h>
# include <linux/fs.h>
# include <linux/sched.h>
# include <linux/mm.h>
# include <linux/smp_lock.h>
# include <linux/quotaops.h>
# include <linux/highmem.h>
# include <linux/pagemap.h>
# include <linux/rbtree.h>
#else /* __KERNEL__ */
#include <sys/types.h>
#include <sys/queue.h>
# include <liblustre.h>
#endif /* __KERNEL__ */
#include <obd_support.h>
#include <lustre_fid.h>
#include <lustre_lite.h>
#include <lustre_dlm.h>
#include <lustre_ver.h>
#include <lustre_mdc.h>
#include <cl_object.h>

#include <lclient.h>

#ifdef __KERNEL__
#include "../llite/llite_internal.h"
#else
#include "../liblustre/llite_lib.h"
#endif
const struct cl_req_operations ccc_req_ops;

/*
 * ccc_ prefix stands for "Common Client Code".
 */

static cfs_mem_cache_t *ccc_lock_kmem;
static cfs_mem_cache_t *ccc_object_kmem;
static cfs_mem_cache_t *ccc_thread_kmem;
static cfs_mem_cache_t *ccc_session_kmem;
static cfs_mem_cache_t *ccc_req_kmem;
static struct lu_kmem_descr ccc_caches[] = {
        {
                .ckd_cache = &ccc_lock_kmem,
                .ckd_name  = "ccc_lock_kmem",
                .ckd_size  = sizeof (struct ccc_lock)
        },
        {
                .ckd_cache = &ccc_object_kmem,
                .ckd_name  = "ccc_object_kmem",
                .ckd_size  = sizeof (struct ccc_object)
        },
        {
                .ckd_cache = &ccc_thread_kmem,
                .ckd_name  = "ccc_thread_kmem",
                .ckd_size  = sizeof (struct ccc_thread_info),
        },
        {
                .ckd_cache = &ccc_session_kmem,
                .ckd_name  = "ccc_session_kmem",
                .ckd_size  = sizeof (struct ccc_session)
        },
        {
                .ckd_cache = &ccc_req_kmem,
                .ckd_name  = "ccc_req_kmem",
                .ckd_size  = sizeof (struct ccc_req)
        },
        {
                .ckd_cache = NULL
        }
};
/*****************************************************************************
 *
 * Vvp device and device type functions.
 *
 */
void *ccc_key_init(const struct lu_context *ctx,
                   struct lu_context_key *key)
{
        struct ccc_thread_info *info;

        OBD_SLAB_ALLOC_PTR_GFP(info, ccc_thread_kmem, CFS_ALLOC_IO);
        if (info == NULL)
                info = ERR_PTR(-ENOMEM);
        return info;
}
void ccc_key_fini(const struct lu_context *ctx,
                  struct lu_context_key *key, void *data)
{
        struct ccc_thread_info *info = data;
        OBD_SLAB_FREE_PTR(info, ccc_thread_kmem);
}
void *ccc_session_key_init(const struct lu_context *ctx,
                           struct lu_context_key *key)
{
        struct ccc_session *session;

        OBD_SLAB_ALLOC_PTR_GFP(session, ccc_session_kmem, CFS_ALLOC_IO);
        if (session == NULL)
                session = ERR_PTR(-ENOMEM);
        return session;
}
void ccc_session_key_fini(const struct lu_context *ctx,
                          struct lu_context_key *key, void *data)
{
        struct ccc_session *session = data;
        OBD_SLAB_FREE_PTR(session, ccc_session_kmem);
}
struct lu_context_key ccc_key = {
        .lct_tags = LCT_CL_THREAD,
        .lct_init = ccc_key_init,
        .lct_fini = ccc_key_fini
};

struct lu_context_key ccc_session_key = {
        .lct_tags = LCT_SESSION,
        .lct_init = ccc_session_key_init,
        .lct_fini = ccc_session_key_fini
};
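
/*
 * Example (illustrative sketch, not part of this file): data attached to
 * these keys is fetched from an environment with lu_context_key_get();
 * helpers such as ccc_env_info()/ccc_env_io() used throughout this file
 * are thin wrappers of roughly this shape:
 *
 *      struct ccc_thread_info *info;
 *
 *      info = lu_context_key_get(&env->le_ctx, &ccc_key);
 *      LASSERT(info != NULL);
 */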
/* type constructor/destructor: ccc_type_{init,fini,start,stop}(). */
// LU_TYPE_INIT_FINI(ccc, &ccc_key, &ccc_session_key);
int ccc_device_init(const struct lu_env *env, struct lu_device *d,
                    const char *name, struct lu_device *next)
{
        struct ccc_device *vdv;
        int rc;
        ENTRY;

        vdv = lu2ccc_dev(d);
        vdv->cdv_next = lu2cl_dev(next);

        LASSERT(d->ld_site != NULL && next->ld_type != NULL);
        next->ld_site = d->ld_site;
        rc = next->ld_type->ldt_ops->ldto_device_init(
                env, next, next->ld_type->ldt_name, NULL);
        if (rc == 0) {
                lu_device_get(next);
                lu_ref_add(&next->ld_reference, "lu-stack", &lu_site_init);
        }
        RETURN(rc);
}
struct lu_device *ccc_device_fini(const struct lu_env *env,
                                  struct lu_device *d)
{
        return cl2lu_dev(lu2ccc_dev(d)->cdv_next);
}
struct lu_device *ccc_device_alloc(const struct lu_env *env,
                                   struct lu_device_type *t,
                                   struct lustre_cfg *cfg,
                                   const struct lu_device_operations *luops,
                                   const struct cl_device_operations *clops)
{
        struct ccc_device *vdv;
        struct lu_device  *lud;
        struct cl_site    *site;
        int rc;
        ENTRY;

        OBD_ALLOC_PTR(vdv);
        if (vdv == NULL)
                RETURN(ERR_PTR(-ENOMEM));

        lud = &vdv->cdv_cl.cd_lu_dev;
        cl_device_init(&vdv->cdv_cl, t);
        ccc2lu_dev(vdv)->ld_ops = luops;
        vdv->cdv_cl.cd_ops = clops;

        OBD_ALLOC_PTR(site);
        if (site != NULL) {
                rc = cl_site_init(site, &vdv->cdv_cl);
                if (rc == 0)
                        rc = lu_site_init_finish(&site->cs_lu);
                else {
                        LASSERT(lud->ld_site == NULL);
                        CERROR("Cannot init lu_site, rc %d.\n", rc);
                        OBD_FREE_PTR(site);
                }
        } else
                rc = -ENOMEM;
        if (rc != 0) {
                ccc_device_free(env, lud);
                lud = ERR_PTR(rc);
        }
        RETURN(lud);
}
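
/*
 * Example (sketch; vvp_lu_ops/vvp_cl_ops stand for a concrete layer's
 * operation tables): a client layer builds its device on top of this
 * helper by supplying its own operations, roughly:
 *
 *      static struct lu_device *vvp_device_alloc(const struct lu_env *env,
 *                                                struct lu_device_type *t,
 *                                                struct lustre_cfg *cfg)
 *      {
 *              return ccc_device_alloc(env, t, cfg, &vvp_lu_ops, &vvp_cl_ops);
 *      }
 */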
struct lu_device *ccc_device_free(const struct lu_env *env,
                                  struct lu_device *d)
{
        struct ccc_device *vdv  = lu2ccc_dev(d);
        struct cl_site    *site = lu2cl_site(d->ld_site);
        struct lu_device  *next = cl2lu_dev(vdv->cdv_next);

        if (d->ld_site != NULL) {
                cl_site_fini(site);
                OBD_FREE_PTR(site);
        }
        cl_device_fini(lu2cl_dev(d));
        OBD_FREE_PTR(vdv);
        return next;
}
int ccc_req_init(const struct lu_env *env, struct cl_device *dev,
                 struct cl_req *req)
{
        struct ccc_req *vrq;
        int result;

        OBD_SLAB_ALLOC_PTR_GFP(vrq, ccc_req_kmem, CFS_ALLOC_IO);
        if (vrq != NULL) {
                cl_req_slice_add(req, &vrq->crq_cl, dev, &ccc_req_ops);
                result = 0;
        } else
                result = -ENOMEM;
        return result;
}
/**
 * An `emergency' environment used by ccc_inode_fini() when cl_env_get()
 * fails. Access to this environment is serialized by the
 * ccc_inode_fini_guard mutex.
 */
static struct lu_env *ccc_inode_fini_env = NULL;

/**
 * A mutex serializing calls to slp_inode_fini() under extreme memory
 * pressure, when environments cannot be allocated.
 */
static CFS_DEFINE_MUTEX(ccc_inode_fini_guard);
static int dummy_refcheck;
int ccc_global_init(struct lu_device_type *device_type)
{
        int result;

        result = lu_kmem_init(ccc_caches);
        if (result == 0) {
                result = lu_device_type_init(device_type);
                ccc_inode_fini_env = cl_env_alloc(&dummy_refcheck,
                                                  LCT_REMEMBER|LCT_NOREF);
                if (IS_ERR(ccc_inode_fini_env))
                        result = PTR_ERR(ccc_inode_fini_env);
                else
                        ccc_inode_fini_env->le_ctx.lc_cookie = 0x4;
        }
        return result;
}
void ccc_global_fini(struct lu_device_type *device_type)
{
        if (ccc_inode_fini_env != NULL) {
                cl_env_put(ccc_inode_fini_env, &dummy_refcheck);
                ccc_inode_fini_env = NULL;
        }
        lu_device_type_fini(device_type);
        lu_kmem_fini(ccc_caches);
}
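
/*
 * Example (hypothetical wiring, for illustration only): a client module
 * would typically call the two functions above from its setup/teardown
 * hooks, where my_device_type is that client's struct lu_device_type:
 *
 *      static int __init my_client_init(void)
 *      {
 *              return ccc_global_init(&my_device_type);
 *      }
 *
 *      static void __exit my_client_exit(void)
 *      {
 *              ccc_global_fini(&my_device_type);
 *      }
 */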
/*****************************************************************************
 *
 * Object operations.
 *
 */
struct lu_object *ccc_object_alloc(const struct lu_env *env,
                                   const struct lu_object_header *unused,
                                   struct lu_device *dev,
                                   const struct cl_object_operations *clops,
                                   const struct lu_object_operations *luops)
{
        struct ccc_object *vob;
        struct lu_object  *obj;

        OBD_SLAB_ALLOC_PTR_GFP(vob, ccc_object_kmem, CFS_ALLOC_IO);
        if (vob != NULL) {
                struct cl_object_header *hdr;

                obj = ccc2lu(vob);
                hdr = &vob->cob_header;
                cl_object_header_init(hdr);
                lu_object_init(obj, &hdr->coh_lu, dev);
                lu_object_add_top(&hdr->coh_lu, obj);

                vob->cob_cl.co_ops = clops;
                obj->lo_ops = luops;
        } else
                obj = NULL;
        return obj;
}
int ccc_object_init0(const struct lu_env *env,
                     struct ccc_object *vob,
                     const struct cl_object_conf *conf)
{
        vob->cob_inode = conf->coc_inode;
        vob->cob_transient_pages = 0;
        return 0;
}
int ccc_object_init(const struct lu_env *env, struct lu_object *obj,
                    const struct lu_object_conf *conf)
{
        struct ccc_device *dev = lu2ccc_dev(obj->lo_dev);
        struct ccc_object *vob = lu2ccc(obj);
        struct lu_object  *below;
        struct lu_device  *under;
        int result;

        under = &dev->cdv_next->cd_lu_dev;
        below = under->ld_ops->ldo_object_alloc(env, obj->lo_header, under);
        if (below != NULL) {
                const struct cl_object_conf *cconf;

                cconf = lu2cl_conf(conf);
                CFS_INIT_LIST_HEAD(&vob->cob_pending_list);
                lu_object_add(obj, below);
                result = ccc_object_init0(env, vob, cconf);
        } else
                result = -ENOMEM;
        return result;
}
void ccc_object_free(const struct lu_env *env, struct lu_object *obj)
{
        struct ccc_object *vob = lu2ccc(obj);

        lu_object_fini(obj);
        lu_object_header_fini(obj->lo_header);
        OBD_SLAB_FREE_PTR(vob, ccc_object_kmem);
}
int ccc_lock_init(const struct lu_env *env,
                  struct cl_object *obj, struct cl_lock *lock,
                  const struct cl_io *unused,
                  const struct cl_lock_operations *lkops)
{
        struct ccc_lock *clk;
        int result;

        CLOBINVRNT(env, obj, ccc_object_invariant(obj));

        OBD_SLAB_ALLOC_PTR_GFP(clk, ccc_lock_kmem, CFS_ALLOC_IO);
        if (clk != NULL) {
                cl_lock_slice_add(lock, &clk->clk_cl, obj, lkops);
                result = 0;
        } else
                result = -ENOMEM;
        return result;
}
int ccc_attr_set(const struct lu_env *env, struct cl_object *obj,
                 const struct cl_attr *attr, unsigned valid)
{
        return 0;
}
int ccc_object_glimpse(const struct lu_env *env,
                       const struct cl_object *obj, struct ost_lvb *lvb)
{
        struct inode *inode = ccc_object_inode(obj);

        ENTRY;
        lvb->lvb_mtime = cl_inode_mtime(inode);
        lvb->lvb_atime = cl_inode_atime(inode);
        lvb->lvb_ctime = cl_inode_ctime(inode);
        RETURN(0);
}
int ccc_conf_set(const struct lu_env *env, struct cl_object *obj,
                 const struct cl_object_conf *conf)
{
        /* TODO: destroy all pages attached to this object. */
        return 0;
}
/*****************************************************************************
 *
 * Page operations.
 *
 */
cfs_page_t *ccc_page_vmpage(const struct lu_env *env,
                            const struct cl_page_slice *slice)
{
        return cl2vm_page(slice);
}
int ccc_page_is_under_lock(const struct lu_env *env,
                           const struct cl_page_slice *slice,
                           struct cl_io *io)
{
        struct ccc_io        *cio  = ccc_env_io(env);
        struct cl_lock_descr *desc = &ccc_env_info(env)->cti_descr;
        struct cl_page       *page = slice->cpl_page;

        int result;

        ENTRY;

        if (io->ci_type == CIT_READ || io->ci_type == CIT_WRITE ||
            io->ci_type == CIT_FAULT) {
                if (cio->cui_fd->fd_flags & LL_FILE_GROUP_LOCKED)
                        result = -EBUSY;
                else {
                        desc->cld_start = page->cp_index;
                        desc->cld_end   = page->cp_index;
                        desc->cld_obj   = page->cp_obj;
                        desc->cld_mode  = CLM_READ;
                        result = cl_queue_match(&io->ci_lockset.cls_done,
                                                desc) ? -EBUSY : 0;
                }
        } else
                result = 0;
        RETURN(result);
}
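
/*
 * For illustration: a read touching page index 7 builds the single-page
 * descriptor [7, 7] in CLM_READ mode on page->cp_obj and asks
 * cl_queue_match() whether a lock in the io's "done" lockset already
 * covers it; -EBUSY here means "the page is under a lock of this io".
 */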
int ccc_fail(const struct lu_env *env, const struct cl_page_slice *slice)
{
        /*
         * Cached read?
         */
        LBUG();
        return 0;
}
void ccc_transient_page_verify(const struct cl_page *page)
{
}
int ccc_transient_page_own(const struct lu_env *env,
                           const struct cl_page_slice *slice,
                           struct cl_io *unused,
                           int nonblock)
{
        ccc_transient_page_verify(slice->cpl_page);
        return 0;
}
void ccc_transient_page_assume(const struct lu_env *env,
                               const struct cl_page_slice *slice,
                               struct cl_io *unused)
{
        ccc_transient_page_verify(slice->cpl_page);
}

void ccc_transient_page_unassume(const struct lu_env *env,
                                 const struct cl_page_slice *slice,
                                 struct cl_io *unused)
{
        ccc_transient_page_verify(slice->cpl_page);
}

void ccc_transient_page_disown(const struct lu_env *env,
                               const struct cl_page_slice *slice,
                               struct cl_io *unused)
{
        ccc_transient_page_verify(slice->cpl_page);
}
void ccc_transient_page_discard(const struct lu_env *env,
                                const struct cl_page_slice *slice,
                                struct cl_io *unused)
{
        struct cl_page *page = slice->cpl_page;

        ccc_transient_page_verify(slice->cpl_page);

        /*
         * For transient pages, remove it from the radix tree.
         */
        cl_page_delete(env, page);
}
int ccc_transient_page_prep(const struct lu_env *env,
                            const struct cl_page_slice *slice,
                            struct cl_io *unused)
{
        ENTRY;
        /* transient page should always be sent. */
        RETURN(0);
}
/*****************************************************************************
 *
 * Lock operations.
 *
 */
void ccc_lock_delete(const struct lu_env *env,
                     const struct cl_lock_slice *slice)
{
        CLOBINVRNT(env, slice->cls_obj, ccc_object_invariant(slice->cls_obj));
}

void ccc_lock_fini(const struct lu_env *env, struct cl_lock_slice *slice)
{
        struct ccc_lock *clk = cl2ccc_lock(slice);
        OBD_SLAB_FREE_PTR(clk, ccc_lock_kmem);
}
int ccc_lock_enqueue(const struct lu_env *env,
                     const struct cl_lock_slice *slice,
                     struct cl_io *unused, __u32 enqflags)
{
        CLOBINVRNT(env, slice->cls_obj, ccc_object_invariant(slice->cls_obj));
        return 0;
}

int ccc_lock_unuse(const struct lu_env *env, const struct cl_lock_slice *slice)
{
        CLOBINVRNT(env, slice->cls_obj, ccc_object_invariant(slice->cls_obj));
        return 0;
}

int ccc_lock_wait(const struct lu_env *env, const struct cl_lock_slice *slice)
{
        CLOBINVRNT(env, slice->cls_obj, ccc_object_invariant(slice->cls_obj));
        return 0;
}
/**
 * Implementation of cl_lock_operations::clo_fits_into() methods for ccc
 * layer. This function is executed every time io finds an existing lock in
 * the lock cache while creating a new lock. This function has to decide
 * whether the cached lock "fits" into io.
 *
 * \param slice lock to be checked
 * \param io    IO that wants a lock.
 *
 * \see lov_lock_fits_into().
 */
int ccc_lock_fits_into(const struct lu_env *env,
                       const struct cl_lock_slice *slice,
                       const struct cl_lock_descr *need,
                       const struct cl_io *io)
{
        const struct cl_lock       *lock  = slice->cls_lock;
        const struct cl_lock_descr *descr = &lock->cll_descr;
        const struct ccc_io        *cio   = ccc_env_io(env);
        int                         result;

        ENTRY;
        /*
         * Work around DLM peculiarity: it assumes that glimpse
         * (LDLM_FL_HAS_INTENT) lock is always LCK_PR, and returns a read
         * lock when asked for an LCK_PW lock with the LDLM_FL_HAS_INTENT
         * flag set. Make sure that glimpse doesn't get a CLM_WRITE
         * top-lock, so that it doesn't enqueue CLM_WRITE sub-locks.
         */
        if (cio->cui_glimpse)
                result = descr->cld_mode != CLM_WRITE;
        /*
         * Also, don't match incomplete write locks for read, otherwise read
         * would enqueue missing sub-locks in the write mode.
         */
        else if (need->cld_mode != descr->cld_mode)
                result = lock->cll_state >= CLS_ENQUEUED;
        else
                result = 1;
        RETURN(result);
}
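
/*
 * For illustration: a glimpse io (cui_glimpse set) that finds a cached
 * CLM_WRITE lock gets result == 0 from the first branch, so the cached
 * write lock is rejected and glimpse never rides on a CLM_WRITE top-lock.
 * A CLM_READ request against a cached CLM_WRITE lock is accepted only once
 * that lock has reached at least CLS_ENQUEUED, per the second branch.
 */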
/**
 * Implements cl_lock_operations::clo_state() method for ccc layer, invoked
 * whenever lock state changes. Transfers object attributes that might have
 * been updated as a result of lock acquisition into the inode.
 */
void ccc_lock_state(const struct lu_env *env,
                    const struct cl_lock_slice *slice,
                    enum cl_lock_state state)
{
        struct cl_lock   *lock;
        struct cl_object *obj;
        struct inode     *inode;
        struct cl_attr   *attr;

        ENTRY;
        lock = slice->cls_lock;

        /*
         * Refresh inode attributes when the lock is moving into CLS_HELD
         * state, and only when this is a result of real enqueue, rather than
         * of finding lock in the cache.
         */
        if (state == CLS_HELD && lock->cll_state < CLS_HELD) {
                int rc;

                obj   = slice->cls_obj;
                inode = ccc_object_inode(obj);
                attr  = ccc_env_thread_attr(env);

                /* vmtruncate()->ll_truncate() first sets the i_size and then
                 * the kms under both a DLM lock and the
                 * ll_inode_size_lock(). If we don't get the
                 * ll_inode_size_lock() here we can match the DLM lock and
                 * reset i_size from the kms before the truncating path has
                 * updated the kms. generic_file_write can then trust the
                 * stale i_size when doing appending writes and effectively
                 * cancel the result of the truncate. Getting the
                 * ll_inode_size_lock() after the enqueue maintains the DLM
                 * -> ll_inode_size_lock() acquiring order. */
                cl_isize_lock(inode, 0);
                cl_object_attr_lock(obj);
                rc = cl_object_attr_get(env, obj, attr);
                if (rc == 0) {
                        if (lock->cll_descr.cld_start == 0 &&
                            lock->cll_descr.cld_end == CL_PAGE_EOF) {
                                cl_isize_write_nolock(inode, attr->cat_kms);
                                CDEBUG(D_INODE, DFID" updating i_size "LPU64"\n",
                                       PFID(lu_object_fid(&obj->co_lu)),
                                       (__u64)cl_isize_read(inode));
                        }
                        cl_inode_mtime(inode) = attr->cat_mtime;
                        cl_inode_atime(inode) = attr->cat_atime;
                        cl_inode_ctime(inode) = attr->cat_ctime;
                } else
                        CL_LOCK_DEBUG(D_INFO, env, lock, "attr_get: %i\n", rc);
                cl_object_attr_unlock(obj);
                cl_isize_unlock(inode, 0);
        }
        EXIT;
}
/*****************************************************************************
 *
 * io operations.
 *
 */
void ccc_io_fini(const struct lu_env *env, const struct cl_io_slice *ios)
{
        struct cl_io *io = ios->cis_io;

        CLOBINVRNT(env, io->ci_obj, ccc_object_invariant(io->ci_obj));
}
int ccc_io_one_lock_index(const struct lu_env *env, struct cl_io *io,
                          __u32 enqflags, enum cl_lock_mode mode,
                          pgoff_t start, pgoff_t end)
{
        struct ccc_io        *cio   = ccc_env_io(env);
        struct cl_lock_descr *descr = &cio->cui_link.cill_descr;
        struct cl_object     *obj   = io->ci_obj;

        CLOBINVRNT(env, obj, ccc_object_invariant(obj));
        ENTRY;

        CDEBUG(D_VFSTRACE, "lock: %i [%lu, %lu]\n", mode, start, end);

        memset(&cio->cui_link, 0, sizeof cio->cui_link);

        if (cio->cui_fd && (cio->cui_fd->fd_flags & LL_FILE_GROUP_LOCKED)) {
                descr->cld_mode = CLM_GROUP;
                descr->cld_gid  = cio->cui_fd->fd_grouplock.cg_gid;
        } else {
                descr->cld_mode = mode;
        }
        descr->cld_obj   = obj;
        descr->cld_start = start;
        descr->cld_end   = end;
        descr->cld_enq_flags = enqflags;

        cl_io_lock_add(env, io, &cio->cui_link);
        RETURN(0);
}
void ccc_io_update_iov(const struct lu_env *env,
                       struct ccc_io *cio, struct cl_io *io)
{
        int i;
        size_t size = io->u.ci_rw.crw_count;

        cio->cui_iov_olen = 0;
        if (!cl_is_normalio(env, io))
                return;

        for (i = 0; i < cio->cui_tot_nrsegs; i++) {
                struct iovec *iv = &cio->cui_iov[i];

                if (iv->iov_len < size)
                        size -= iv->iov_len;
                else {
                        if (iv->iov_len > size) {
                                cio->cui_iov_olen = iv->iov_len;
                                iv->iov_len = size;
                        }
                        break;
                }
        }

        cio->cui_nrsegs = i + 1;
}
int ccc_io_one_lock(const struct lu_env *env, struct cl_io *io,
                    __u32 enqflags, enum cl_lock_mode mode,
                    loff_t start, loff_t end)
{
        struct cl_object *obj = io->ci_obj;
        return ccc_io_one_lock_index(env, io, enqflags, mode,
                                     cl_index(obj, start), cl_index(obj, end));
}
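
/*
 * Worked example (assuming a 4096-byte CFS_PAGE_SIZE): locking the byte
 * range [0, 8191] maps through cl_index() to the page-index range [0, 1],
 * which ccc_io_one_lock_index() then records in the io's lock set.
 */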
void ccc_io_end(const struct lu_env *env, const struct cl_io_slice *ios)
{
        CLOBINVRNT(env, ios->cis_io->ci_obj,
                   ccc_object_invariant(ios->cis_io->ci_obj));
}
void ccc_io_advance(const struct lu_env *env,
                    const struct cl_io_slice *ios,
                    size_t nob)
{
        struct ccc_io    *cio = cl2ccc_io(env, ios);
        struct cl_io     *io  = ios->cis_io;
        struct cl_object *obj = ios->cis_io->ci_obj;

        CLOBINVRNT(env, obj, ccc_object_invariant(obj));

        if (cl_is_normalio(env, io) && io->ci_continue) {
                /* update the iov */
                LASSERT(cio->cui_tot_nrsegs >= cio->cui_nrsegs);
                LASSERT(cio->cui_tot_count  >= nob);

                cio->cui_iov        += cio->cui_nrsegs;
                cio->cui_tot_nrsegs -= cio->cui_nrsegs;
                cio->cui_tot_count  -= nob;

                if (cio->cui_iov_olen) {
                        struct iovec *iv;

                        cio->cui_iov--;
                        cio->cui_tot_nrsegs++;
                        iv = &cio->cui_iov[0];
                        iv->iov_base += iv->iov_len;
                        /* iov_len is the length actually consumed;
                         * cui_iov_olen is the original length. */
                        LASSERT(cio->cui_iov_olen > iv->iov_len);
                        iv->iov_len = cio->cui_iov_olen - iv->iov_len;
                        cio->cui_iov_olen = 0;
                }
        }
}
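
/*
 * Worked example of the cui_iov_olen dance: suppose the current segment is
 * { .iov_base = b, .iov_len = 100 } but only 40 bytes fit into this chunk.
 * ccc_io_update_iov() records cui_iov_olen = 100 and trims iov_len to 40;
 * once the chunk completes, ccc_io_advance() steps back to that segment and
 * rewrites it as { .iov_base = b + 40, .iov_len = 60 }, so the next
 * iteration resumes exactly where the previous one stopped.
 */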
static void ccc_object_size_lock(struct cl_object *obj, int vfslock)
{
        struct inode *inode = ccc_object_inode(obj);

        if (vfslock)
                cl_isize_lock(inode, 0);
        cl_object_attr_lock(obj);
}

static void ccc_object_size_unlock(struct cl_object *obj, int vfslock)
{
        struct inode *inode = ccc_object_inode(obj);

        cl_object_attr_unlock(obj);
        if (vfslock)
                cl_isize_unlock(inode, 0);
}
/**
 * Helper function that, if necessary, adjusts file size (inode->i_size) when
 * the position at offset \a pos is accessed. File size can be arbitrarily
 * stale on a Lustre client, but the client at least knows the KMS. If the
 * accessed area is inside [0, KMS], set file size to KMS, otherwise glimpse
 * file size.
 *
 * Locking: cl_isize_lock is used to serialize changes to inode size and to
 * protect consistency between inode size and cl_object
 * attributes. cl_object_size_lock() protects consistency between cl_attr's of
 * top-object and sub-objects.
 *
 * In the page fault path cl_isize_lock cannot be taken; the client has to
 * live with the resulting races.
 */
int ccc_prep_size(const struct lu_env *env, struct cl_object *obj,
                  struct cl_io *io, loff_t start, size_t count, int vfslock,
                  int *exceed)
{
        struct cl_attr *attr  = ccc_env_thread_attr(env);
        struct inode   *inode = ccc_object_inode(obj);
        loff_t          pos   = start + count - 1;
        loff_t          kms;
        int             result;

        /*
         * Consistency guarantees: following possibilities exist for the
         * relation between region being accessed and real file size at this
         * moment:
         *
         * (A): the region is completely inside of the file;
         *
         * (B-x): x bytes of region are inside of the file, the rest is
         * outside;
         *
         * (C): the region is completely outside of the file.
         *
         * This classification is stable under DLM lock already acquired by
         * the caller, because to change the class, other client has to take
         * DLM lock conflicting with our lock. Also, any updates to ->i_size
         * by other threads on this client are serialized by
         * ll_inode_size_lock(). This guarantees that short reads are handled
         * correctly in the face of concurrent writes and truncates.
         */
        ccc_object_size_lock(obj, vfslock);
        result = cl_object_attr_get(env, obj, attr);
        if (result == 0) {
                kms = attr->cat_kms;
                if (pos > kms) {
                        /*
                         * A glimpse is necessary to determine whether we
                         * return a short read (B) or some zeroes at the end
                         * of the buffer (C).
                         */
                        ccc_object_size_unlock(obj, vfslock);
                        result = cl_glimpse_lock(env, io, inode, obj);
                        if (result == 0 && exceed != NULL) {
                                /* If the objective page index exceeds the
                                 * end-of-file page index, return directly.
                                 * Do not expect the kernel to check such a
                                 * case correctly; linux-2.6.18-128.1.1
                                 * misses doing that.
                                 */
                                loff_t size = cl_isize_read(inode);
                                unsigned long cur_index = start >> CFS_PAGE_SHIFT;

                                if ((size == 0 && cur_index != 0) ||
                                    (((size - 1) >> CFS_PAGE_SHIFT) < cur_index))
                                        *exceed = 1;
                        }
                        return result;
                } else {
                        /*
                         * region is within kms and, hence, within real file
                         * size (A). We need to increase i_size to cover the
                         * read region so that generic_file_read() will do its
                         * job, but that doesn't mean the kms size is
                         * _correct_, it is only the _minimum_ size. If
                         * someone does a stat they will get the correct size
                         * which will always be >= the kms value here.
                         */
                        /*
                         * XXX in a page fault path, change inode size without
                         * ll_inode_size_lock() held! there is a race
                         * condition with truncate path. (see ll_extent_lock)
                         */
                        /*
                         * XXX i_size_write() is not used because it is not
                         * safe to take the ll_inode_size_lock() due to a
                         * potential lock inversion (bug 6077). And since
                         * it's not safe to use i_size_write() without a
                         * covering mutex we do the assignment directly. It
                         * is not critical that the size be correct.
                         */
                        if (cl_isize_read(inode) < kms) {
                                if (vfslock)
                                        cl_isize_write_nolock(inode, kms);
                                else
                                        cl_isize_write(inode, kms);
                        }
                }
        }
        ccc_object_size_unlock(obj, vfslock);
        return result;
}
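
/*
 * Example (sketch of a typical read-side caller; names are illustrative,
 * not a fixed API):
 *
 *      int exceed = 0;
 *      int result;
 *
 *      result = ccc_prep_size(env, obj, io, pos, cnt, 0, &exceed);
 *      if (result != 0 || exceed != 0)
 *              return result;
 *
 * after which i_size is known to cover [pos, pos + cnt - 1] and the
 * generic read path can safely be entered.
 */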
/*****************************************************************************
 *
 * Transfer operations.
 *
 */
void ccc_req_completion(const struct lu_env *env,
                        const struct cl_req_slice *slice, int ioret)
{
        struct ccc_req *vrq;

        vrq = cl2ccc_req(slice);
        OBD_SLAB_FREE_PTR(vrq, ccc_req_kmem);
}
/**
 * Implementation of struct cl_req_operations::cro_attr_set() for ccc
 * layer. ccc is responsible for
 *
 *    - o_[mac]time
 *
 *    - o_mode
 *
 *    - o_fid (filled with inode number?!)
 *
 *    - o_[ug]id
 *
 *    - o_generation
 *
 *    - and IO epoch (stored in o_easize),
 *
 * and capability.
 */
void ccc_req_attr_set(const struct lu_env *env,
                      const struct cl_req_slice *slice,
                      const struct cl_object *obj,
                      struct cl_req_attr *attr, obd_valid flags)
{
        struct inode *inode;
        struct obdo  *oa;
        obd_flag      valid_flags;

        oa = attr->cra_oa;
        inode = ccc_object_inode(obj);
        valid_flags = OBD_MD_FLTYPE|OBD_MD_FLATIME;

        if (flags != (obd_valid)~0ULL)
                valid_flags |= OBD_MD_FLMTIME|OBD_MD_FLCTIME|OBD_MD_FLATIME;
        else {
                LASSERT(attr->cra_capa == NULL);
                attr->cra_capa = cl_capa_lookup(inode,
                                                slice->crs_req->crq_type);
        }

        if (slice->crs_req->crq_type == CRT_WRITE) {
                if (flags & OBD_MD_FLEPOCH) {
                        oa->o_valid |= OBD_MD_FLEPOCH;
                        oa->o_ioepoch = cl_i2info(inode)->lli_ioepoch;
                        valid_flags |= OBD_MD_FLMTIME|OBD_MD_FLCTIME|
                                       OBD_MD_FLUID|OBD_MD_FLGID|
                                       OBD_MD_FLFID|OBD_MD_FLGENER;
                }
        }
        obdo_from_inode(oa, inode, valid_flags & flags);
}
const struct cl_req_operations ccc_req_ops = {
        .cro_attr_set   = ccc_req_attr_set,
        .cro_completion = ccc_req_completion
};
int cl_setattr_ost(struct inode *inode, const struct iattr *attr,
                   struct obd_capa *capa)
{
        struct lu_env *env;
        struct cl_io  *io;
        int            result;
        int            refcheck;

        ENTRY;

        env = cl_env_get(&refcheck);
        if (IS_ERR(env))
                RETURN(PTR_ERR(env));

        io = &ccc_env_info(env)->cti_io;
        io->ci_obj = cl_i2info(inode)->lli_clob;

        io->u.ci_setattr.sa_attr.lvb_atime = LTIME_S(attr->ia_atime);
        io->u.ci_setattr.sa_attr.lvb_mtime = LTIME_S(attr->ia_mtime);
        io->u.ci_setattr.sa_attr.lvb_ctime = LTIME_S(attr->ia_ctime);
        io->u.ci_setattr.sa_attr.lvb_size = attr->ia_size;
        io->u.ci_setattr.sa_valid = attr->ia_valid;
        io->u.ci_setattr.sa_capa = capa;

        if (cl_io_init(env, io, CIT_SETATTR, io->ci_obj) == 0)
                result = cl_io_loop(env, io);
        else
                result = io->ci_result;
        cl_io_fini(env, io);
        cl_env_put(env, &refcheck);
        RETURN(result);
}
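
/*
 * Example (hypothetical caller): a truncate-like path hands its iattr
 * straight to this helper, passing a NULL capability when capability
 * support is not in use:
 *
 *      struct iattr attr = { .ia_valid = ATTR_SIZE, .ia_size = new_size };
 *
 *      rc = cl_setattr_ost(inode, &attr, NULL);
 */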
/*****************************************************************************
 *
 * Conversions.
 *
 */
struct lu_device *ccc2lu_dev(struct ccc_device *vdv)
{
        return &vdv->cdv_cl.cd_lu_dev;
}

struct ccc_device *lu2ccc_dev(const struct lu_device *d)
{
        return container_of0(d, struct ccc_device, cdv_cl.cd_lu_dev);
}

struct ccc_device *cl2ccc_dev(const struct cl_device *d)
{
        return container_of0(d, struct ccc_device, cdv_cl);
}

struct lu_object *ccc2lu(struct ccc_object *vob)
{
        return &vob->cob_cl.co_lu;
}

struct ccc_object *lu2ccc(const struct lu_object *obj)
{
        return container_of0(obj, struct ccc_object, cob_cl.co_lu);
}

struct ccc_object *cl2ccc(const struct cl_object *obj)
{
        return container_of0(obj, struct ccc_object, cob_cl);
}

struct ccc_lock *cl2ccc_lock(const struct cl_lock_slice *slice)
{
        return container_of(slice, struct ccc_lock, clk_cl);
}

struct ccc_io *cl2ccc_io(const struct lu_env *env,
                         const struct cl_io_slice *slice)
{
        struct ccc_io *cio;

        cio = container_of(slice, struct ccc_io, cui_cl);
        LASSERT(cio == ccc_env_io(env));
        return cio;
}

struct ccc_req *cl2ccc_req(const struct cl_req_slice *slice)
{
        return container_of0(slice, struct ccc_req, crq_cl);
}

cfs_page_t *cl2vm_page(const struct cl_page_slice *slice)
{
        return cl2ccc_page(slice)->cpg_page;
}
/*****************************************************************************
 *
 * Misc.
 *
 */
int ccc_object_invariant(const struct cl_object *obj)
{
        struct inode         *inode = ccc_object_inode(obj);
        struct cl_inode_info *lli   = cl_i2info(inode);

        return (S_ISREG(cl_inode_mode(inode)) ||
                /* i_mode of unlinked inode is zeroed. */
                cl_inode_mode(inode) == 0) && lli->lli_clob == obj;
}

struct inode *ccc_object_inode(const struct cl_object *obj)
{
        return cl2ccc(obj)->cob_inode;
}
/**
 * Returns a pointer to cl_page associated with \a vmpage, without acquiring
 * additional reference to the resulting page. This is an unsafe version of
 * cl_vmpage_page() that can only be used under vmpage lock.
 */
struct cl_page *ccc_vmpage_page_transient(cfs_page_t *vmpage)
{
        KLASSERT(PageLocked(vmpage));
        return (struct cl_page *)vmpage->private;
}
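
/*
 * Example (sketch): the caller must hold the vmpage lock for the whole
 * time the returned cl_page is used:
 *
 *      lock_page(vmpage);
 *      page = ccc_vmpage_page_transient(vmpage);
 *      if (page != NULL)
 *              ... use page; the vmpage lock keeps it valid ...
 *      unlock_page(vmpage);
 */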
/**
 * Initializes or updates CLIO part when new meta-data arrives from the
 * server:
 *
 *     - allocates cl_object if necessary,
 *     - updates layout, if the object was already there.
 */
int cl_inode_init(struct inode *inode, struct lustre_md *md)
{
        struct lu_env        *env;
        struct cl_inode_info *lli;
        struct cl_object     *clob;
        struct lu_site       *site;
        struct lu_fid        *fid;
        const struct cl_object_conf conf = {
                .coc_inode = inode,
                .u = {
                        .coc_md = md
                }
        };
        int result = 0;
        int refcheck;

        /* LASSERT(inode->i_state & I_NEW); */
        LASSERT(md->body->valid & OBD_MD_FLID);

        if (!S_ISREG(cl_inode_mode(inode)))
                return 0;

        env = cl_env_get(&refcheck);
        if (IS_ERR(env))
                return PTR_ERR(env);

        site = cl_i2sbi(inode)->ll_site;
        lli  = cl_i2info(inode);
        fid  = &lli->lli_fid;
        LASSERT(fid_is_sane(fid));

        if (lli->lli_clob == NULL) {
                clob = cl_object_find(env, lu2cl_dev(site->ls_top_dev),
                                      fid, &conf);
                if (!IS_ERR(clob)) {
                        /*
                         * No locking is necessary, as new inode is
                         * locked by I_NEW bit.
                         *
                         * XXX not true for call from ll_update_inode().
                         */
                        lli->lli_clob = clob;
                        lu_object_ref_add(&clob->co_lu, "inode", inode);
                } else
                        result = PTR_ERR(clob);
        } else
                result = cl_conf_set(env, lli->lli_clob, &conf);
        cl_env_put(env, &refcheck);

        if (result != 0)
                CERROR("Failure to initialize cl object "DFID": %d\n",
                       PFID(fid), result);
        return result;
}
void cl_inode_fini(struct inode *inode)
{
        struct lu_env        *env;
        struct cl_inode_info *lli  = cl_i2info(inode);
        struct cl_object     *clob = lli->lli_clob;
        int refcheck;
        int emergency;

        if (clob != NULL) {
                void *cookie;

                cookie = cl_env_reenter();
                env = cl_env_get(&refcheck);
                emergency = IS_ERR(env);
                if (emergency) {
                        cfs_mutex_lock(&ccc_inode_fini_guard);
                        LASSERT(ccc_inode_fini_env != NULL);
                        cl_env_implant(ccc_inode_fini_env, &refcheck);
                        env = ccc_inode_fini_env;
                }
                /*
                 * cl_object cache is a slave to inode cache (which, in turn
                 * is a slave to dentry cache), don't keep cl_object in memory
                 * when its master is evicted.
                 */
                cl_object_kill(env, clob);
                lu_object_ref_del(&clob->co_lu, "inode", inode);
                cl_object_put(env, clob);
                lli->lli_clob = NULL;
                if (emergency) {
                        cl_env_unplant(ccc_inode_fini_env, &refcheck);
                        cfs_mutex_unlock(&ccc_inode_fini_guard);
                } else
                        cl_env_put(env, &refcheck);
                cl_env_reexit(cookie);
        }
}
/**
 * Returns the IF_* type for a given lu_dirent entry.
 * The IF_* flag should be converted to the particular OS file type in the
 * platform llite module.
 */
__u16 ll_dirent_type_get(struct lu_dirent *ent)
{
        __u16 type = 0;
        struct luda_type *lt;
        int len = 0;

        if (le32_to_cpu(ent->lde_attrs) & LUDA_TYPE) {
                const unsigned align = sizeof(struct luda_type) - 1;

                len = le16_to_cpu(ent->lde_namelen);
                len = (len + align) & ~align;
                lt = (void *) ent->lde_name + len;
                type = CFS_IFTODT(le16_to_cpu(lt->lt_type));
        }
        return type;
}
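
/*
 * Worked example: struct luda_type is a single __u16, so align is 1; a
 * 5-byte name gives len = (5 + 1) & ~1 = 6, and lt then points at the
 * 2-byte-aligned slot right after the padded name, where the server packed
 * the type. (This assumes luda_type remains a bare __u16.)
 */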
/**
 * Build inode number from passed @fid.
 */
ino_t cl_fid_build_ino(const struct lu_fid *fid)
{
        ino_t ino;
        ENTRY;

        if (fid_is_igif(fid)) {
                ino = lu_igif_ino(fid);
                RETURN(ino);
        }

        /*
         * A very stupid inode-allocation algorithm with many downsides,
         * based on the flattened fid.
         */
        ino = fid_flatten(fid) & 0xFFFFFFFF;

        if (unlikely(ino == 0))
                /* the first result ino is 0xFFC001, so this is rarely used */
                ino = 0xffbcde;
        ino = ino | 0x80000000;
        RETURN(ino);
}
/**
 * Build inode generation from passed @fid. If our FID overflows the 32-bit
 * inode number then return a non-zero generation to distinguish them.
 */
__u32 cl_fid_build_gen(const struct lu_fid *fid)
{
        __u32 gen;
        ENTRY;

        if (fid_is_igif(fid)) {
                gen = lu_igif_gen(fid);
                RETURN(gen);
        }

        gen = (fid_flatten(fid) >> 32);
        RETURN(gen);
}
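
/*
 * Worked example (hypothetical flattened fid): if fid_flatten() yields
 * 0x123456789A, then
 *
 *      ino = (0x123456789A & 0xFFFFFFFF) | 0x80000000 = 0xB456789A
 *      gen = 0x123456789A >> 32                       = 0x12
 *
 * so two fids that collide in the low 32 inode bits can still be told
 * apart by generation.
 */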