/*
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 *
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2013, Intel Corporation.
 *
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * Implementation of cl_io for VVP layer.
 *
 * Author: Nikita Danilov <nikita.danilov@sun.com>
 * Author: Jinshan Xiong <jinshan.xiong@whamcloud.com>
 */
#define DEBUG_SUBSYSTEM S_LLITE

#include "vvp_internal.h"

static struct vvp_io *cl2vvp_io(const struct lu_env *env,
                                const struct cl_io_slice *slice);
/**
 * True if \a io is a normal io; false for splice_{read,write}.
 */
int cl_is_normalio(const struct lu_env *env, const struct cl_io *io)
        struct vvp_io *vio = vvp_env_io(env);

        LASSERT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE);

        return vio->cui_io_subtype == IO_NORMAL;
/**
 * For swapping layout, the file's layout may have changed.
 * To avoid populating pages on a wrong stripe, we have to verify the
 * correctness of the layout. This works because processes swapping
 * layouts have to acquire the group lock.
 */
static bool can_populate_pages(const struct lu_env *env, struct cl_io *io,
                               struct inode *inode)
        struct ll_inode_info *lli = ll_i2info(inode);
        struct ccc_io *cio = ccc_env_io(env);

        switch (io->ci_type) {
                /* no lock is needed here to check lli_layout_gen, as we hold
                 * the extent lock and the group lock has to be held to swap
                 * layouts */
                if (ll_layout_version_get(lli) != cio->cui_layout_gen) {
                        io->ci_need_restart = 1;
                        /* this will return a short read/write to the
                         * application */

                /* fault is okay because we've already got a page. */
/*****************************************************************************

static int vvp_io_write_iter_init(const struct lu_env *env,
                                  const struct cl_io_slice *ios)
        struct ccc_io *cio = cl2ccc_io(env, ios);

        cl_page_list_init(&cio->u.write.cui_queue);
        cio->u.write.cui_written = 0;
        cio->u.write.cui_from = 0;
        cio->u.write.cui_to = PAGE_SIZE;
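        /* Illustrative note (not in the original source): cui_from and
         * cui_to appear to bound the dirty byte range within the first and
         * last queued page respectively; they start out covering a whole
         * page and are narrowed as pages are queued and committed (see
         * vvp_io_write_commit() below). */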
static void vvp_io_write_iter_fini(const struct lu_env *env,
                                   const struct cl_io_slice *ios)
        struct ccc_io *cio = cl2ccc_io(env, ios);

        LASSERT(cio->u.write.cui_queue.pl_nr == 0);
static int vvp_io_fault_iter_init(const struct lu_env *env,
                                  const struct cl_io_slice *ios)
        struct vvp_io *vio = cl2vvp_io(env, ios);
        struct inode *inode = ccc_object_inode(ios->cis_obj);

                cl2ccc_io(env, ios)->cui_fd->fd_file->f_dentry->d_inode);
        vio->u.fault.ft_mtime = LTIME_S(inode->i_mtime);
static void vvp_io_fini(const struct lu_env *env, const struct cl_io_slice *ios)
        struct cl_io *io = ios->cis_io;
        struct cl_object *obj = io->ci_obj;
        struct ccc_io *cio = cl2ccc_io(env, ios);
        struct inode *inode = ccc_object_inode(obj);

        CLOBINVRNT(env, obj, ccc_object_invariant(obj));

        CDEBUG(D_VFSTRACE, DFID" ignore/verify layout %d/%d, layout version %d "
               "restore needed %d\n",
               PFID(lu_object_fid(&obj->co_lu)),
               io->ci_ignore_layout, io->ci_verify_layout,
               cio->cui_layout_gen, io->ci_restore_needed);

        if (io->ci_restore_needed == 1) {

                /* the file was detected as released; we need to restore it
                 * before finishing this io
                 */
                rc = ll_layout_restore(inode, 0, OBD_OBJECT_EOF);
                        /* if restore registration failed, do not restart;
                         * we will return -ENODATA */
                        /* The layout will change after restore, so we need to
                         * block on the layout lock held by the MDT.
                         * As the MDT will not send the new layout in the lvb
                         * (see LU-3124), we have to fetch it explicitly; all
                         * of this is done by ll_layout_refresh().
                         */
                        io->ci_restore_needed = 0;
                        io->ci_need_restart = 1;
                        io->ci_verify_layout = 1;

                        io->ci_restore_needed = 1;
                        io->ci_need_restart = 0;
                        io->ci_verify_layout = 0;

        if (!io->ci_ignore_layout && io->ci_verify_layout) {

                /* check layout version */
                ll_layout_refresh(inode, &gen);
                io->ci_need_restart = cio->cui_layout_gen != gen;
                if (io->ci_need_restart) {
                               DFID" layout changed from %d to %d.\n",
                               PFID(lu_object_fid(&obj->co_lu)),
                               cio->cui_layout_gen, gen);
                        /* today a successful restore is the only possible
                         * case */
                        /* restore was done, clear restoring state */
                        ll_i2info(ccc_object_inode(obj))->lli_flags &=
                                                ~LLIF_FILE_RESTORING;
static void vvp_io_fault_fini(const struct lu_env *env,
                              const struct cl_io_slice *ios)
        struct cl_io *io = ios->cis_io;
        struct cl_page *page = io->u.ci_fault.ft_page;

        CLOBINVRNT(env, io->ci_obj, ccc_object_invariant(io->ci_obj));

                lu_ref_del(&page->cp_reference, "fault", io);
                cl_page_put(env, page);
                io->u.ci_fault.ft_page = NULL;

        vvp_io_fini(env, ios);
static enum cl_lock_mode vvp_mode_from_vma(struct vm_area_struct *vma)
        /*
         * We only want to hold PW locks if the mmap() can generate
         * writes back to the file, and that only happens in shared
         * writable vmas.
         */
        if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_WRITE))
                return CLM_WRITE;
        return CLM_READ;
static int vvp_mmap_locks(const struct lu_env *env,
                          struct ccc_io *vio, struct cl_io *io)
        struct ccc_thread_info *cti = ccc_env_info(env);
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        struct cl_lock_descr *descr = &cti->cti_descr;
        ldlm_policy_data_t policy;

        LASSERT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE);

        if (!cl_is_normalio(env, io))

        if (vio->cui_iov == NULL) /* nfs or loopback device write */

        /* No MM (e.g. NFS)? Then no vmas either. */

        for (seg = 0; seg < vio->cui_nrsegs; seg++) {
                const struct iovec *iv = &vio->cui_iov[seg];

                addr = (unsigned long)iv->iov_base;

                count += addr & (~CFS_PAGE_MASK);
                addr &= CFS_PAGE_MASK;
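                /* Illustrative note (not in the original source): the two
                 * lines above round the user address down to a page boundary
                 * and grow the count to compensate, so the whole page span
                 * covering the buffer is matched.  E.g. with 4 KiB pages,
                 * addr = 0x12345 and count = 100 become addr = 0x12000 and
                 * count = 100 + 0x345 = 937. */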
                down_read(&mm->mmap_sem);
                while ((vma = our_vma(mm, addr, count)) != NULL) {
                        struct inode *inode = vma->vm_file->f_dentry->d_inode;
                        int flags = CEF_MUST;

                        if (ll_file_nolock(vma->vm_file)) {
                                 * For no lock case, a lockless lock will be

                         * XXX: Required lock mode can be weakened: CIT_WRITE
                         * io only ever reads user level buffer, and CIT_READ

                        policy_from_vma(&policy, vma, addr, count);
                        descr->cld_mode = vvp_mode_from_vma(vma);
                        descr->cld_obj = ll_i2info(inode)->lli_clob;
                        descr->cld_start = cl_index(descr->cld_obj,
                                                    policy.l_extent.start);
                        descr->cld_end = cl_index(descr->cld_obj,
                                                  policy.l_extent.end);
                        descr->cld_enq_flags = flags;
                        result = cl_io_lock_alloc_add(env, io, descr);

                        CDEBUG(D_VFSTRACE, "lock: %d: [%lu, %lu]\n",
                               descr->cld_mode, descr->cld_start,

                                up_read(&mm->mmap_sem);

                        if (vma->vm_end - addr >= count)

                        count -= vma->vm_end - addr;

                up_read(&mm->mmap_sem);
static int vvp_io_rw_lock(const struct lu_env *env, struct cl_io *io,
                          enum cl_lock_mode mode, loff_t start, loff_t end)
        struct ccc_io *cio = ccc_env_io(env);

        LASSERT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE);

        ccc_io_update_iov(env, cio, io);

        if (io->u.ci_rw.crw_nonblock)
                ast_flags |= CEF_NONBLOCK;
        result = vvp_mmap_locks(env, cio, io);
                result = ccc_io_one_lock(env, io, ast_flags, mode, start, end);
static int vvp_io_read_lock(const struct lu_env *env,
                            const struct cl_io_slice *ios)
        struct cl_io *io = ios->cis_io;
        struct cl_io_rw_common *rd = &io->u.ci_rd.rd;

        result = vvp_io_rw_lock(env, io, CLM_READ, rd->crw_pos,
                                rd->crw_pos + rd->crw_count - 1);
static int vvp_io_fault_lock(const struct lu_env *env,
                             const struct cl_io_slice *ios)
        struct cl_io *io = ios->cis_io;
        struct vvp_io *vio = cl2vvp_io(env, ios);
        /*
         * XXX LDLM_FL_CBPENDING
         */
        return ccc_io_one_lock_index
                (env, io, 0, vvp_mode_from_vma(vio->u.fault.ft_vma),
                 io->u.ci_fault.ft_index, io->u.ci_fault.ft_index);
static int vvp_io_write_lock(const struct lu_env *env,
                             const struct cl_io_slice *ios)
        struct cl_io *io = ios->cis_io;

        if (io->u.ci_wr.wr_append) {
                end = OBD_OBJECT_EOF;
        } else {
                start = io->u.ci_wr.wr.crw_pos;
                end = start + io->u.ci_wr.wr.crw_count - 1;

        return vvp_io_rw_lock(env, io, CLM_WRITE, start, end);
static int vvp_io_setattr_iter_init(const struct lu_env *env,
                                    const struct cl_io_slice *ios)

/**
 * Implementation of cl_io_operations::cio_lock() method for CIT_SETATTR io.
 *
 * Handles the "lockless io" mode when extent locking is done by the server.
 */
static int vvp_io_setattr_lock(const struct lu_env *env,
                               const struct cl_io_slice *ios)
        struct ccc_io *cio = ccc_env_io(env);
        struct cl_io *io = ios->cis_io;

        if (cl_io_is_trunc(io)) {
                new_size = io->u.ci_setattr.sa_attr.lvb_size;
                        enqflags = CEF_DISCARD_DATA;
                if ((io->u.ci_setattr.sa_attr.lvb_mtime >=
                     io->u.ci_setattr.sa_attr.lvb_ctime) ||
                    (io->u.ci_setattr.sa_attr.lvb_atime >=
                     io->u.ci_setattr.sa_attr.lvb_ctime))

        cio->u.setattr.cui_local_lock = SETATTR_EXTENT_LOCK;
        return ccc_io_one_lock(env, io, enqflags, CLM_WRITE,
                               new_size, OBD_OBJECT_EOF);
static int vvp_do_vmtruncate(struct inode *inode, size_t size)
        /*
         * Only ll_inode_size_lock is taken at this level.
         */
        ll_inode_size_lock(inode);
        result = inode_newsize_ok(inode, size);
                ll_inode_size_unlock(inode);

        i_size_write(inode, size);

        ll_truncate_pagecache(inode, size);
        ll_inode_size_unlock(inode);
static int vvp_io_setattr_trunc(const struct lu_env *env,
                                const struct cl_io_slice *ios,
                                struct inode *inode, loff_t size)
        inode_dio_wait(inode);
static int vvp_io_setattr_time(const struct lu_env *env,
                               const struct cl_io_slice *ios)
        struct cl_io *io = ios->cis_io;
        struct cl_object *obj = io->ci_obj;
        struct cl_attr *attr = ccc_env_thread_attr(env);
        unsigned valid = CAT_CTIME;

        cl_object_attr_lock(obj);
        attr->cat_ctime = io->u.ci_setattr.sa_attr.lvb_ctime;
        if (io->u.ci_setattr.sa_valid & ATTR_ATIME_SET) {
                attr->cat_atime = io->u.ci_setattr.sa_attr.lvb_atime;
        if (io->u.ci_setattr.sa_valid & ATTR_MTIME_SET) {
                attr->cat_mtime = io->u.ci_setattr.sa_attr.lvb_mtime;

        result = cl_object_attr_set(env, obj, attr, valid);
        cl_object_attr_unlock(obj);
static int vvp_io_setattr_start(const struct lu_env *env,
                                const struct cl_io_slice *ios)
        struct cl_io *io = ios->cis_io;
        struct inode *inode = ccc_object_inode(io->ci_obj);

        mutex_lock(&inode->i_mutex);
        if (cl_io_is_trunc(io))
                result = vvp_io_setattr_trunc(env, ios, inode,
                                              io->u.ci_setattr.sa_attr.lvb_size);
        else
                result = vvp_io_setattr_time(env, ios);

static void vvp_io_setattr_end(const struct lu_env *env,
                               const struct cl_io_slice *ios)
        struct cl_io *io = ios->cis_io;
        struct inode *inode = ccc_object_inode(io->ci_obj);

        if (cl_io_is_trunc(io)) {
                /* Truncate in-memory pages; they must be clean pages because
                 * osc has already been notified to destroy the osc_extents. */
                vvp_do_vmtruncate(inode, io->u.ci_setattr.sa_attr.lvb_size);
                inode_dio_write_done(inode);

        mutex_unlock(&inode->i_mutex);
static void vvp_io_setattr_fini(const struct lu_env *env,
                                const struct cl_io_slice *ios)
        vvp_io_fini(env, ios);
static int vvp_io_read_start(const struct lu_env *env,
                             const struct cl_io_slice *ios)
        struct vvp_io *vio = cl2vvp_io(env, ios);
        struct ccc_io *cio = cl2ccc_io(env, ios);
        struct cl_io *io = ios->cis_io;
        struct cl_object *obj = io->ci_obj;
        struct inode *inode = ccc_object_inode(obj);
        struct ll_ra_read *bead = &vio->cui_bead;
        struct file *file = cio->cui_fd->fd_file;

        loff_t pos = io->u.ci_rd.rd.crw_pos;
        long cnt = io->u.ci_rd.rd.crw_count;
        long tot = cio->cui_tot_count;

        CLOBINVRNT(env, obj, ccc_object_invariant(obj));

        CDEBUG(D_VFSTRACE, "read: -> [%lli, %lli)\n", pos, pos + cnt);

        if (!can_populate_pages(env, io, inode))

        result = ccc_prep_size(env, obj, io, pos, tot, &exceed);
        else if (exceed != 0)

        LU_OBJECT_HEADER(D_INODE, env, &obj->co_lu,
                         "Read ino %lu, %lu bytes, offset %lld, size %llu\n",
                         inode->i_ino, cnt, pos, i_size_read(inode));

        /* turn off the kernel's read-ahead */
        cio->cui_fd->fd_file->f_ra.ra_pages = 0;

        /* initialize read-ahead window once per syscall */
        if (!vio->cui_ra_window_set) {
                vio->cui_ra_window_set = 1;
                bead->lrr_start = cl_index(obj, pos);
                bead->lrr_count = cl_index(obj, tot + PAGE_CACHE_SIZE - 1);
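                /* Illustrative note (not in the original source): lrr_count
                 * is the total read size rounded up to whole pages.  E.g.
                 * with 4 KiB pages, a 10000-byte read gives
                 * cl_index(obj, 10000 + 4095) = 3 pages. */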
                ll_ra_read_in(file, bead);

        switch (vio->cui_io_subtype) {
                LASSERT(cio->cui_iocb->ki_pos == pos);
                result = generic_file_aio_read(cio->cui_iocb,
                                               cio->cui_iov, cio->cui_nrsegs,
                                               cio->cui_iocb->ki_pos);

                result = generic_file_splice_read(file, &pos,
                                                  vio->u.splice.cui_pipe, cnt,
                                                  vio->u.splice.cui_flags);
                /* LU-1109: do splice read stripe by stripe, otherwise it
                 * may make nfsd stuck if this read occupies all the internal
                 * pipe

                CERROR("Wrong IO type %u\n", vio->cui_io_subtype);

                io->ci_nob += result;
                ll_rw_stats_tally(ll_i2sbi(inode), current->pid, cio->cui_fd,
static void vvp_io_read_fini(const struct lu_env *env, const struct cl_io_slice *ios)
        struct vvp_io *vio = cl2vvp_io(env, ios);
        struct ccc_io *cio = cl2ccc_io(env, ios);

        if (vio->cui_ra_window_set)
                ll_ra_read_ex(cio->cui_fd->fd_file, &vio->cui_bead);

        vvp_io_fini(env, ios);
static int vvp_io_commit_sync(const struct lu_env *env, struct cl_io *io,
                              struct cl_page_list *plist, int from, int to)
        struct cl_2queue *queue = &io->ci_queue;
        struct cl_page *page;
        unsigned int bytes = 0;

        if (plist->pl_nr == 0)

        if (from > 0 || to != PAGE_SIZE) {
                page = cl_page_list_first(plist);
                if (plist->pl_nr == 1) {
                        cl_page_clip(env, page, from, to);
                } else {
                        cl_page_clip(env, page, from, PAGE_SIZE);
                        if (to != PAGE_SIZE) {
                                page = cl_page_list_last(plist);
                                cl_page_clip(env, page, 0, to);

        cl_2queue_init(queue);
        cl_page_list_splice(plist, &queue->c2_qin);
        rc = cl_io_submit_sync(env, io, CRT_WRITE, queue, 0);

        /* plist is not sorted any more */
        cl_page_list_splice(&queue->c2_qin, plist);
        cl_page_list_splice(&queue->c2_qout, plist);
        cl_2queue_fini(env, queue);

        /* calculate bytes */
        bytes = plist->pl_nr << PAGE_SHIFT;
        bytes -= from + PAGE_SIZE - to;
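        /* Illustrative note (not in the original source): with 4 KiB pages,
         * committing 3 pages with from = 512 and to = 1024 yields
         * 3 * 4096 - (512 + 4096 - 1024) = 8704 bytes, i.e. whole pages
         * minus the unused head of the first page and tail of the last. */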
        while (plist->pl_nr > 0) {
                page = cl_page_list_first(plist);
                cl_page_list_del(env, plist, page);

                cl_page_clip(env, page, 0, PAGE_SIZE);

                SetPageUptodate(cl_page_vmpage(page));
                cl_page_disown(env, io, page);

                /* held in ll_cl_init() */
                lu_ref_del(&page->cp_reference, "cl_io", io);
                cl_page_put(env, page);

        RETURN(bytes > 0 ? bytes : rc);
static void write_commit_callback(const struct lu_env *env, struct cl_io *io,
                                  struct cl_page *page)
        struct page *vmpage = page->cp_vmpage;
        struct cl_object *clob = cl_io_top(io)->ci_obj;

        SetPageUptodate(vmpage);
        set_page_dirty(vmpage);

        cp = cl2ccc_page(cl_object_page_slice(clob, page));
        vvp_write_pending(cl2ccc(clob), cp);

        cl_page_disown(env, io, page);

        /* held in ll_cl_init() */
        lu_ref_del(&page->cp_reference, "cl_io", cl_io_top(io));
        cl_page_put(env, page);
/* make sure the page list is contiguous */
static bool page_list_sanity_check(struct cl_object *obj,
                                   struct cl_page_list *plist)
        struct cl_page *page;
        pgoff_t index = CL_PAGE_EOF;

        cl_page_list_for_each(page, plist) {
                struct ccc_page *cp = cl_object_page_slice(obj, page);

                if (index == CL_PAGE_EOF) {
                        index = ccc_index(cp);

                if (index == ccc_index(cp))
/* Return how many bytes have been queued or written */
int vvp_io_write_commit(const struct lu_env *env, struct cl_io *io)
        struct cl_object *obj = io->ci_obj;
        struct inode *inode = ccc_object_inode(obj);
        struct ccc_io *cio = ccc_env_io(env);
        struct cl_page_list *queue = &cio->u.write.cui_queue;
        struct cl_page *page;

        unsigned int npages = cio->u.write.cui_queue.pl_nr;

        CDEBUG(D_VFSTRACE, "commit async pages: %d, from %d, to %d\n",
               npages, cio->u.write.cui_from, cio->u.write.cui_to);

        LASSERT(page_list_sanity_check(obj, queue));

        /* submit IO with async write */
        rc = cl_io_commit_async(env, io, queue,
                                cio->u.write.cui_from, cio->u.write.cui_to,
                                write_commit_callback);
        npages -= queue->pl_nr; /* already committed pages */

        /* calculate how many bytes were written */
        bytes = npages << PAGE_SHIFT;
        bytes -= cio->u.write.cui_from;
        if (queue->pl_nr == 0) /* last page */
                bytes -= PAGE_SIZE - cio->u.write.cui_to;
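        /* Illustrative note (not in the original source): with 4 KiB pages,
         * if 2 pages were committed with cui_from = 100 and cui_to = 300 and
         * the queue is now empty, the write covered
         * 2 * 4096 - 100 - (4096 - 300) = 4296 bytes. */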
        LASSERTF(bytes > 0, "bytes = %d, pages = %d\n", bytes, npages);

        cio->u.write.cui_written += bytes;

        CDEBUG(D_VFSTRACE, "Committed %d pages %d bytes, tot: %ld\n",
               npages, bytes, cio->u.write.cui_written);

        /* the first page must have been written. */
        cio->u.write.cui_from = 0;

        LASSERT(page_list_sanity_check(obj, queue));
        LASSERT(ergo(rc == 0, queue->pl_nr == 0));

        /* out of quota, try sync write */
        if (rc == -EDQUOT && !cl_io_is_mkwrite(io)) {
                rc = vvp_io_commit_sync(env, io, queue,
                                        cio->u.write.cui_from,
                                        cio->u.write.cui_to);
                        cio->u.write.cui_written += rc;

        /* update inode size */
        ll_merge_lvb(env, inode);

        /* The pages left in the queue failed to commit; discard them
         * unless they were dirtied before. */
        while (queue->pl_nr > 0) {
                page = cl_page_list_first(queue);
                cl_page_list_del(env, queue, page);

                if (!PageDirty(cl_page_vmpage(page)))
                        cl_page_discard(env, io, page);

                cl_page_disown(env, io, page);

                /* held in ll_cl_init() */
                lu_ref_del(&page->cp_reference, "cl_io", io);
                cl_page_put(env, page);

        cl_page_list_fini(env, queue);
static int vvp_io_write_start(const struct lu_env *env,
                              const struct cl_io_slice *ios)
        struct ccc_io *cio = cl2ccc_io(env, ios);
        struct cl_io *io = ios->cis_io;
        struct cl_object *obj = io->ci_obj;
        struct inode *inode = ccc_object_inode(obj);

        loff_t pos = io->u.ci_wr.wr.crw_pos;
        size_t cnt = io->u.ci_wr.wr.crw_count;

        if (!can_populate_pages(env, io, inode))

        if (cl_io_is_append(io)) {
                /*
                 * PARALLEL IO: this has to be changed for parallel IO doing
                 * out-of-order writes.
                 */
                pos = io->u.ci_wr.wr.crw_pos = i_size_read(inode);
                cio->cui_iocb->ki_pos = pos;

                LASSERT(cio->cui_iocb->ki_pos == pos);

        CDEBUG(D_VFSTRACE, "write: [%lli, %lli)\n", pos, pos + (long long)cnt);

        if (cio->cui_iov == NULL) {
                /* from a temp io in ll_cl_init(). */
        } else {
                /*
                 * When using the locked AIO function (generic_file_aio_write())
                 * testing has shown the inode mutex to be a limiting factor
                 * with multi-threaded single shared file performance. To get
                 * around this, we now use the lockless version. To maintain
                 * consistency, proper locking to protect against writes,
                 * truncates, etc. is handled in the higher layers of lustre.
                 */
                result = __generic_file_aio_write(cio->cui_iocb,
                                                  cio->cui_iov, cio->cui_nrsegs,
                                                  &cio->cui_iocb->ki_pos);
                if (result > 0 || result == -EIOCBQUEUED) {
                        err = generic_write_sync(cio->cui_iocb->ki_filp,
                        if (err < 0 && result > 0)

                result = vvp_io_write_commit(env, io);

        if (cio->u.write.cui_written > 0) {
                result = cio->u.write.cui_written;
                io->ci_nob += result;

                CDEBUG(D_VFSTRACE, "write: nob %zd, result: %zd\n",

                struct ll_inode_info *lli = ll_i2info(inode);

                spin_lock(&lli->lli_lock);
                lli->lli_flags |= LLIF_DATA_MODIFIED;
                spin_unlock(&lli->lli_lock);

                ll_rw_stats_tally(ll_i2sbi(inode), current->pid,
                                  cio->cui_fd, pos, result, WRITE);
static int vvp_io_kernel_fault(struct vvp_fault_io *cfio)
        struct vm_fault *vmf = cfio->fault.ft_vmf;

        cfio->fault.ft_flags = filemap_fault(cfio->ft_vma, vmf);
        cfio->fault.ft_flags_valid = 1;

                LL_CDEBUG_PAGE(D_PAGE, vmf->page, "got addr %p type NOPAGE\n",
                               vmf->virtual_address);
                if (unlikely(!(cfio->fault.ft_flags & VM_FAULT_LOCKED))) {
                        lock_page(vmf->page);
                        cfio->fault.ft_flags |= VM_FAULT_LOCKED;

                cfio->ft_vmpage = vmf->page;

        if (cfio->fault.ft_flags & VM_FAULT_SIGBUS) {
                CDEBUG(D_PAGE, "got addr %p - SIGBUS\n", vmf->virtual_address);

        if (cfio->fault.ft_flags & VM_FAULT_OOM) {
                CDEBUG(D_PAGE, "got addr %p - OOM\n", vmf->virtual_address);

        if (cfio->fault.ft_flags & VM_FAULT_RETRY)

        CERROR("unknown error in page fault %d!\n", cfio->fault.ft_flags);
static void mkwrite_commit_callback(const struct lu_env *env, struct cl_io *io,
                                    struct cl_page *page)
        struct cl_object *clob = cl_io_top(io)->ci_obj;

        set_page_dirty(page->cp_vmpage);

        cp = cl2ccc_page(cl_object_page_slice(clob, page));
        vvp_write_pending(cl2ccc(clob), cp);
static int vvp_io_fault_start(const struct lu_env *env,
                              const struct cl_io_slice *ios)
        struct vvp_io *vio = cl2vvp_io(env, ios);
        struct cl_io *io = ios->cis_io;
        struct cl_object *obj = io->ci_obj;
        struct inode *inode = ccc_object_inode(obj);
        struct cl_fault_io *fio = &io->u.ci_fault;
        struct vvp_fault_io *cfio = &vio->u.fault;

        struct page *vmpage = NULL;
        struct cl_page *page;

        if (fio->ft_executable &&
            LTIME_S(inode->i_mtime) != vio->u.fault.ft_mtime)
                      " changed while waiting for the page fault lock\n",
                      PFID(lu_object_fid(&obj->co_lu)));

        /* offset of the last byte on the page */
        offset = cl_offset(obj, fio->ft_index + 1) - 1;
        LASSERT(cl_index(obj, offset) == fio->ft_index);
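        /* Illustrative note (not in the original source): with 4 KiB pages,
         * ft_index = 2 gives offset = cl_offset(obj, 3) - 1 = 12287, the
         * file offset of the last byte of the faulting page. */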
        result = ccc_prep_size(env, obj, io, 0, offset + 1, NULL);

        /* must return a locked page */
        if (fio->ft_mkwrite) {
                LASSERT(cfio->ft_vmpage != NULL);
                lock_page(cfio->ft_vmpage);
        } else {
                result = vvp_io_kernel_fault(cfio);

        vmpage = cfio->ft_vmpage;
        LASSERT(PageLocked(vmpage));

        if (OBD_FAIL_CHECK(OBD_FAIL_LLITE_FAULT_TRUNC_RACE))
                ll_invalidate_page(vmpage);

        size = i_size_read(inode);
        /* Though we already hold a cl_lock on this page, it can still be
         * truncated locally. */
        if (unlikely((vmpage->mapping != inode->i_mapping) ||
                     (page_offset(vmpage) > size))) {
                CDEBUG(D_PAGE, "llite: fault and truncate race happened!\n");

                /* return +1 to stop cl_io_loop() and ll_fault() will catch
                GOTO(out, result = +1);

        last_index = cl_index(obj, size - 1);

        if (fio->ft_mkwrite) {
                /*
                 * Capture the size while holding the lli_trunc_sem from
                 * above; we want to make sure that we complete the mkwrite
                 * action while holding this lock. We also need to make sure
                 * that we are not past the end of the file.
                 */
                if (last_index < fio->ft_index) {
                               "llite: mkwrite and truncate race happened: "
                               vmpage->mapping, fio->ft_index, last_index);
                        /*
                         * We need to return if we are
                         * past the end of the file. This will propagate
                         * up the call stack to ll_page_mkwrite where
                         * we will return VM_FAULT_NOPAGE. Any non-negative
                         * value returned here will be silently
                         * converted to 0. If the vmpage->mapping is null
                         * the error code would be converted back to ENODATA
                         * in ll_page_mkwrite0. Thus we return -ENODATA
                         * to handle both cases.
                         */
                        GOTO(out, result = -ENODATA);

        page = cl_page_find(env, obj, fio->ft_index, vmpage, CPT_CACHEABLE);
        if (IS_ERR(page))
                GOTO(out, result = PTR_ERR(page));

        /* if the page is going to be written, we should add it into cache
        if (fio->ft_mkwrite) {
                wait_on_page_writeback(vmpage);
                if (!PageDirty(vmpage)) {
                        struct cl_page_list *plist = &io->ci_queue.c2_qin;
                        struct ccc_page *cp = cl_object_page_slice(obj, page);

                        /* vvp_page_assume() calls wait_on_page_writeback(). */
                        cl_page_assume(env, io, page);

                        cl_page_list_init(plist);
                        cl_page_list_add(plist, page);

                        if (last_index == ccc_index(cp))
                                to = size & ~CFS_PAGE_MASK;
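                        /* Illustrative note (not in the original source),
                         * assuming CFS_PAGE_MASK == ~(PAGE_SIZE - 1): for a
                         * partially mapped last page, to is the number of
                         * valid bytes within it; e.g. a file size of 10000
                         * with 4 KiB pages gives to = 10000 & 4095 = 1808. */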
                        /* Do not set the Dirty bit here so that in case IO is
                         * started before the page is really made dirty, we
                         * still have a chance to detect it. */
                        result = cl_io_commit_async(env, io, plist, 0, to,
                                                    mkwrite_commit_callback);
                        LASSERT(cl_page_is_owned(page, io));
                        cl_page_list_fini(env, plist);

                                cl_page_discard(env, io, page);
                                cl_page_disown(env, io, page);

                                cl_page_put(env, page);

                                /* we're in big trouble, what can we do now? */
                                if (result == -EDQUOT)

                        cl_page_disown(env, io, page);

        /*
         * The ft_index is only used in the case of
         * a mkwrite action. We need to check that
         * our assertions are correct, since
         * we should have caught this above.
         */
        LASSERT(!fio->ft_mkwrite || fio->ft_index <= last_index);
        if (fio->ft_index == last_index)
                /*
                 * The last page is mapped partially.
                 */
                fio->ft_nob = size - cl_offset(obj, fio->ft_index);
        else
                fio->ft_nob = cl_page_size(obj);

        lu_ref_add(&page->cp_reference, "fault", io);
        fio->ft_page = page;

        /* return an unlocked vmpage to avoid deadlocking */
                unlock_page(vmpage);
        cfio->fault.ft_flags &= ~VM_FAULT_LOCKED;
static int vvp_io_fsync_start(const struct lu_env *env,
                              const struct cl_io_slice *ios)
        /* we should mark the TOWRITE bit on each dirty page in the radix tree
         * to verify pages have been written, but this is difficult because of
static int vvp_io_read_page(const struct lu_env *env,
                            const struct cl_io_slice *ios,
                            const struct cl_page_slice *slice)
        struct cl_io *io = ios->cis_io;
        struct ccc_page *cp = cl2ccc_page(slice);
        struct cl_page *page = slice->cpl_page;
        struct inode *inode = ccc_object_inode(slice->cpl_obj);
        struct ll_sb_info *sbi = ll_i2sbi(inode);
        struct ll_file_data *fd = cl2ccc_io(env, ios)->cui_fd;
        struct ll_readahead_state *ras = &fd->fd_ras;
        struct cl_2queue *queue = &io->ci_queue;

        if (sbi->ll_ra_info.ra_max_pages_per_file > 0 &&
            sbi->ll_ra_info.ra_max_pages > 0)
                ras_update(sbi, inode, ras, ccc_index(cp),
                           cp->cpg_defer_uptodate);

        if (cp->cpg_defer_uptodate) {
                cp->cpg_ra_used = 1;
                cl_page_export(env, page, 1);

        /*
         * Add the page into the queue even when it is marked uptodate above.
         * This will unlock it automatically as part of cl_page_list_disown().
         */
        cl_2queue_add(queue, page);
        if (sbi->ll_ra_info.ra_max_pages_per_file > 0 &&
            sbi->ll_ra_info.ra_max_pages > 0)
                ll_readahead(env, io, &queue->c2_qin, ras,
                             cp->cpg_defer_uptodate);
static const struct cl_io_operations vvp_io_ops = {
        .op = {
                [CIT_READ] = {
                        .cio_fini      = vvp_io_read_fini,
                        .cio_lock      = vvp_io_read_lock,
                        .cio_start     = vvp_io_read_start,
                        .cio_advance   = ccc_io_advance
                },
                [CIT_WRITE] = {
                        .cio_fini      = vvp_io_fini,
                        .cio_iter_init = vvp_io_write_iter_init,
                        .cio_iter_fini = vvp_io_write_iter_fini,
                        .cio_lock      = vvp_io_write_lock,
                        .cio_start     = vvp_io_write_start,
                        .cio_advance   = ccc_io_advance
                },
                [CIT_SETATTR] = {
                        .cio_fini      = vvp_io_setattr_fini,
                        .cio_iter_init = vvp_io_setattr_iter_init,
                        .cio_lock      = vvp_io_setattr_lock,
                        .cio_start     = vvp_io_setattr_start,
                        .cio_end       = vvp_io_setattr_end
                },
                [CIT_FAULT] = {
                        .cio_fini      = vvp_io_fault_fini,
                        .cio_iter_init = vvp_io_fault_iter_init,
                        .cio_lock      = vvp_io_fault_lock,
                        .cio_start     = vvp_io_fault_start,
                        .cio_end       = ccc_io_end
                },
                [CIT_FSYNC] = {
                        .cio_start     = vvp_io_fsync_start,
                        .cio_fini      = vvp_io_fini
                },
                [CIT_MISC] = {
                        .cio_fini      = vvp_io_fini
                }
        },
        .cio_read_page = vvp_io_read_page,
};
int vvp_io_init(const struct lu_env *env, struct cl_object *obj,
                struct cl_io *io)
        struct vvp_io *vio = vvp_env_io(env);
        struct ccc_io *cio = ccc_env_io(env);
        struct inode *inode = ccc_object_inode(obj);

        CLOBINVRNT(env, obj, ccc_object_invariant(obj));

        CDEBUG(D_VFSTRACE, DFID" ignore/verify layout %d/%d, layout version %d "
               "restore needed %d\n",
               PFID(lu_object_fid(&obj->co_lu)),
               io->ci_ignore_layout, io->ci_verify_layout,
               cio->cui_layout_gen, io->ci_restore_needed);

        CL_IO_SLICE_CLEAN(cio, cui_cl);
        cl_io_slice_add(io, &cio->cui_cl, obj, &vvp_io_ops);
        vio->cui_ra_window_set = 0;

        if (io->ci_type == CIT_READ || io->ci_type == CIT_WRITE) {
                struct ll_inode_info *lli = ll_i2info(inode);

                count = io->u.ci_rw.crw_count;
                /* "If nbyte is 0, read() will return 0 and have no other
                 * results." -- Single Unix Spec */

                cio->cui_tot_count = count;
                cio->cui_tot_nrsegs = 0;

                /* for read/write, we store the jobid in the inode, and
                 * it'll be fetched by osc when building RPC.
                 *
                 * it's not accurate if the file is shared by different
                 */
                lustre_get_jobid(lli->lli_jobid);
        } else if (io->ci_type == CIT_SETATTR) {
                if (!cl_io_is_trunc(io))
                        io->ci_lockreq = CILR_MANDATORY;

        /* ignore layout change for generic CIT_MISC but not for glimpse;
         * the io context for glimpse must set ci_verify_layout to true,
         * see cl_glimpse_size0() for details. */
        if (io->ci_type == CIT_MISC && !io->ci_verify_layout)
                io->ci_ignore_layout = 1;

        /* Enqueue the layout lock and get the layout version. We need to do
         * this even for operations requiring to open the file, such as read
         * and write, because it might not grant the layout lock in IT_OPEN. */
        if (result == 0 && !io->ci_ignore_layout) {
                result = ll_layout_refresh(inode, &cio->cui_layout_gen);
                if (result == -ENOENT)
                        /* If the inode on the MDS has been removed, but the
                         * objects on the OSTs haven't been destroyed (async
                         * unlink), layout fetch will return -ENOENT; we
                         * ignore this error and continue with the dirty
                         * flush. LU-3230. */
                        result = 0;
                if (result < 0)
                        CERROR("%s: refresh file layout " DFID " error %d.\n",
                               ll_get_fsname(inode->i_sb, NULL, 0),
                               PFID(lu_object_fid(&obj->co_lu)), result);
static struct vvp_io *cl2vvp_io(const struct lu_env *env,
                                const struct cl_io_slice *slice)
        /* Calling just for the assertion */
        cl2ccc_io(env, slice);
        return vvp_env_io(env);