lustre/llite/vvp_io.c
1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.gnu.org/licenses/gpl-2.0.html
19  *
20  * GPL HEADER END
21  */
22 /*
23  * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
24  * Use is subject to license terms.
25  *
26  * Copyright (c) 2011, 2017, Intel Corporation.
27  */
28 /*
29  * This file is part of Lustre, http://www.lustre.org/
30  *
31  * Implementation of cl_io for VVP layer.
32  *
33  *   Author: Nikita Danilov <nikita.danilov@sun.com>
34  *   Author: Jinshan Xiong <jinshan.xiong@whamcloud.com>
35  */
36
37 #define DEBUG_SUBSYSTEM S_LLITE
38
39 #include <obd.h>
40 #include <linux/pagevec.h>
41 #include <linux/memcontrol.h>
42 #include <linux/falloc.h>
43
44 #include "llite_internal.h"
45 #include "vvp_internal.h"
46 #include <libcfs/linux/linux-misc.h>
47
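/* Convert a cl_io_slice back to its enclosing vvp_io; this must be the
 * per-environment vvp_io, which the assertion below verifies. */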
48 static struct vvp_io *cl2vvp_io(const struct lu_env *env,
49                                 const struct cl_io_slice *slice)
50 {
51         struct vvp_io *vio;
52
53         vio = container_of(slice, struct vvp_io, vui_cl);
54         LASSERT(vio == vvp_env_io(env));
55
56         return vio;
57 }
58
59 /**
60  * Used when swapping layouts. The file's layout may have changed since
61  * this I/O started, so verify the layout generation to avoid populating
62  * pages into the wrong stripe. This works because any process swapping
63  * layouts must hold the group lock.
64  */
65 static bool can_populate_pages(const struct lu_env *env, struct cl_io *io,
66                                 struct inode *inode)
67 {
68         struct ll_inode_info    *lli = ll_i2info(inode);
69         struct vvp_io           *vio = vvp_env_io(env);
70         bool rc = true;
71
72         switch (io->ci_type) {
73         case CIT_READ:
74         case CIT_WRITE:
75                 /* no lock is needed to check lli_layout_gen here: we hold
76                  * the extent lock, and the GROUP lock must be held to swap layouts */
77                 if (ll_layout_version_get(lli) != vio->vui_layout_gen ||
78                     OBD_FAIL_CHECK_RESET(OBD_FAIL_LLITE_LOST_LAYOUT, 0)) {
79                         io->ci_need_restart = 1;
80                         /* this will cause a short read/write */
81                         io->ci_continue = 0;
82                         rc = false;
83                 }
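                /* fallthrough */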
84         case CIT_FAULT:
85                 /* a fault is okay because we already have the page. */
86         default:
87                 break;
88         }
89
90         return rc;
91 }
92
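/* Take the inode size lock and then the cl_object attribute lock; the
 * paired vvp_object_size_unlock() below releases them in reverse order. */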
93 static void vvp_object_size_lock(struct cl_object *obj)
94 {
95         struct inode *inode = vvp_object_inode(obj);
96
97         ll_inode_size_lock(inode);
98         cl_object_attr_lock(obj);
99 }
100
101 static void vvp_object_size_unlock(struct cl_object *obj)
102 {
103         struct inode *inode = vvp_object_inode(obj);
104
105         cl_object_attr_unlock(obj);
106         ll_inode_size_unlock(inode);
107 }
108
109 /**
110  * Helper function that, if necessary, adjusts the file size (inode->i_size)
111  * when the position at offset \a pos is accessed. The file size can be
112  * arbitrarily stale on a Lustre client, but the client at least knows the
113  * KMS. If the accessed area is inside [0, KMS], set the file size to KMS; otherwise glimpse it.
114  *
115  * Locking: i_size_lock is used to serialize changes to inode size and to
116  * protect consistency between inode size and cl_object
117  * attributes. cl_object_size_lock() protects consistency between cl_attr's of
118  * top-object and sub-objects.
119  */
120 static int vvp_prep_size(const struct lu_env *env, struct cl_object *obj,
121                          struct cl_io *io, loff_t start, size_t count,
122                          int *exceed)
123 {
124         struct cl_attr *attr  = vvp_env_thread_attr(env);
125         struct inode   *inode = vvp_object_inode(obj);
126         loff_t          pos   = start + count - 1;
127         loff_t kms;
128         int result;
129
130         /*
131          * Consistency guarantees: following possibilities exist for the
132          * relation between region being accessed and real file size at this
133          * moment:
134          *
135          *  (A): the region is completely inside of the file;
136          *
137          *  (B-x): x bytes of region are inside of the file, the rest is
138          *  outside;
139          *
140          *  (C): the region is completely outside of the file.
141          *
142          * This classification is stable under DLM lock already acquired by
143          * the caller, because to change the class, other client has to take
144          * DLM lock conflicting with our lock. Also, any updates to ->i_size
145          * by other threads on this client are serialized by
146          * ll_inode_size_lock(). This guarantees that short reads are handled
147          * correctly in the face of concurrent writes and truncates.
148          */
149         vvp_object_size_lock(obj);
150         result = cl_object_attr_get(env, obj, attr);
151         if (result == 0) {
152                 kms = attr->cat_kms;
153                 if (pos > kms || !attr->cat_kms_valid) {
154                         /*
155                          * A glimpse is necessary to determine whether we
156                          * return a short read (B) or some zeroes at the end
157                          * of the buffer (C)
158                          */
159                         vvp_object_size_unlock(obj);
160                         result = cl_glimpse_lock(env, io, inode, obj, 0);
161                         if (result == 0 && exceed != NULL) {
162                                 /* If the accessed page index exceeds the
163                                  * end-of-file page index, return directly.
164                                  * Do not expect the kernel to check this
165                                  * case correctly; linux-2.6.18-128.1.1
166                                  * fails to do so. --bug 17336 */
167                                 loff_t size = i_size_read(inode);
168                                 unsigned long cur_index = start >>
169                                         PAGE_SHIFT;
170
171                                 if ((size == 0 && cur_index != 0) ||
172                                     (((size - 1) >> PAGE_SHIFT) <
173                                      cur_index))
174                                         *exceed = 1;
175                         }
176
177                         return result;
178                 } else {
179                         /*
180                          * region is within kms and, hence, within real file
181                          * size (A). We need to increase i_size to cover the
182                          * read region so that generic_file_read() will do its
183                          * job, but that doesn't mean the kms size is
184                          * _correct_, it is only the _minimum_ size. If
185                          * someone does a stat they will get the correct size
186                          * which will always be >= the kms value here.
187                          * b=11081
188                          */
189                         if (i_size_read(inode) < kms) {
190                                 i_size_write(inode, kms);
191                                 CDEBUG(D_VFSTRACE,
192                                        DFID" updating i_size %llu\n",
193                                        PFID(lu_object_fid(&obj->co_lu)),
194                                        (__u64)i_size_read(inode));
195                         }
196                 }
197         }
198
199         vvp_object_size_unlock(obj);
200
201         return result;
202 }
203
204 /*****************************************************************************
205  *
206  * io operations.
207  *
208  */
209
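/* Build a cl_lock descriptor for the page range [start, end] and add it to
 * this I/O's lock set; if the file holds a group lock, the request is
 * converted to a matching CLM_GROUP lock. */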
210 static int vvp_io_one_lock_index(const struct lu_env *env, struct cl_io *io,
211                                  __u32 enqflags, enum cl_lock_mode mode,
212                                  pgoff_t start, pgoff_t end)
213 {
214         struct vvp_io          *vio   = vvp_env_io(env);
215         struct cl_lock_descr   *descr = &vio->vui_link.cill_descr;
216         struct cl_object       *obj   = io->ci_obj;
217
218         CLOBINVRNT(env, obj, vvp_object_invariant(obj));
219         ENTRY;
220
221         CDEBUG(D_VFSTRACE, "lock: %d [%lu, %lu]\n", mode, start, end);
222
223         memset(&vio->vui_link, 0, sizeof vio->vui_link);
224
225         if (vio->vui_fd && (vio->vui_fd->fd_flags & LL_FILE_GROUP_LOCKED)) {
226                 descr->cld_mode = CLM_GROUP;
227                 descr->cld_gid  = vio->vui_fd->fd_grouplock.lg_gid;
228                 enqflags |= CEF_LOCK_MATCH;
229         } else {
230                 descr->cld_mode  = mode;
231         }
232
233         descr->cld_obj   = obj;
234         descr->cld_start = start;
235         descr->cld_end   = end;
236         descr->cld_enq_flags = enqflags;
237
238         cl_io_lock_add(env, io, &vio->vui_link);
239
240         RETURN(0);
241 }
242
243 static int vvp_io_one_lock(const struct lu_env *env, struct cl_io *io,
244                            __u32 enqflags, enum cl_lock_mode mode,
245                            loff_t start, loff_t end)
246 {
247         struct cl_object *obj = io->ci_obj;
248
249         return vvp_io_one_lock_index(env, io, enqflags, mode,
250                                      cl_index(obj, start), cl_index(obj, end));
251 }
252
253 static int vvp_io_write_iter_init(const struct lu_env *env,
254                                   const struct cl_io_slice *ios)
255 {
256         struct vvp_io *vio = cl2vvp_io(env, ios);
257
258         cl_page_list_init(&vio->u.readwrite.vui_queue);
259         vio->u.readwrite.vui_written = 0;
260         vio->u.readwrite.vui_from = 0;
261         vio->u.readwrite.vui_to = PAGE_SIZE;
262
263         return 0;
264 }
265
266 static int vvp_io_read_iter_init(const struct lu_env *env,
267                                  const struct cl_io_slice *ios)
268 {
269         struct vvp_io *vio = cl2vvp_io(env, ios);
270
271         vio->u.readwrite.vui_read = 0;
272
273         return 0;
274 }
275
276 static void vvp_io_write_iter_fini(const struct lu_env *env,
277                                    const struct cl_io_slice *ios)
278 {
279         struct vvp_io *vio = cl2vvp_io(env, ios);
280
281         LASSERT(vio->u.readwrite.vui_queue.pl_nr == 0);
282 }
283
284 static int vvp_io_fault_iter_init(const struct lu_env *env,
285                                   const struct cl_io_slice *ios)
286 {
287         struct vvp_io *vio   = cl2vvp_io(env, ios);
288         struct inode  *inode = vvp_object_inode(ios->cis_obj);
289
290         LASSERT(inode == file_inode(vio->vui_fd->fd_file));
291
292         return 0;
293 }
294
295 static void vvp_io_fini(const struct lu_env *env, const struct cl_io_slice *ios)
296 {
297         struct cl_io     *io  = ios->cis_io;
298         struct cl_object *obj = io->ci_obj;
299         struct vvp_io    *vio = cl2vvp_io(env, ios);
300         struct inode     *inode = vvp_object_inode(obj);
301         __u32             gen = 0;
302         int rc;
303         ENTRY;
304
305         CLOBINVRNT(env, obj, vvp_object_invariant(obj));
306
307         CDEBUG(D_VFSTRACE, DFID" ignore/verify layout %d/%d, layout version %d "
308                            "need write layout %d, restore needed %d\n",
309                PFID(lu_object_fid(&obj->co_lu)),
310                io->ci_ignore_layout, io->ci_verify_layout,
311                vio->vui_layout_gen, io->ci_need_write_intent,
312                io->ci_restore_needed);
313
314         if (io->ci_restore_needed) {
315                 /* the file was detected as released; we need to restore it
316                  * before finishing the I/O
317                  */
318                 rc = ll_layout_restore(inode, 0, OBD_OBJECT_EOF);
319                 /* if restore registration failed, do not restart;
320                  * we will return -ENODATA */
321                 /* The layout will change after the restore, so we need to
322                  * block on the layout lock held by the MDT. Since the MDT
323                  * will not send the new layout in the lvb (see LU-3124),
324                  * we have to fetch it explicitly; all of this is done by
325                  * ll_layout_refresh().
326                  * Even if ll_layout_restore() returns zero, that does not
327                  * mean the restore was successful. Therefore it sets
328                  * ci_verify_layout so that the layout is re-checked at
329                  * the end of this function.
330                  */
331                 if (rc) {
332                         io->ci_restore_needed = 1;
333                         io->ci_need_restart = 0;
334                         io->ci_verify_layout = 0;
335                         io->ci_result = rc;
336                         GOTO(out, rc);
337                 }
338
339                 io->ci_restore_needed = 0;
340
341                 /* Even if ll_layout_restore() returns zero, it doesn't mean
342                  * the restore was successful. Therefore verify whether the
343                  * layout changed and restart the I/O accordingly.
344                  */
345                 ll_layout_refresh(inode, &gen);
346                 io->ci_need_restart = vio->vui_layout_gen != gen;
347                 if (io->ci_need_restart) {
348                         CDEBUG(D_VFSTRACE,
349                                DFID" layout changed from %d to %d.\n",
350                                PFID(lu_object_fid(&obj->co_lu)),
351                                vio->vui_layout_gen, gen);
352                         /* currently a successful restore is the only
353                          * possible case */
354                         /* restore was done, clear restoring state */
355                         clear_bit(LLIF_FILE_RESTORING,
356                                   &ll_i2info(vvp_object_inode(obj))->lli_flags);
357                 }
358                 GOTO(out, 0);
359         }
360
361         /**
362          * A dynamic layout change is needed; send a layout
363          * intent RPC.
364          */
365         if (io->ci_need_write_intent) {
366                 enum layout_intent_opc opc = LAYOUT_INTENT_WRITE;
367
368                 io->ci_need_write_intent = 0;
369
370                 LASSERT(io->ci_type == CIT_WRITE || cl_io_is_fallocate(io) ||
371                         cl_io_is_trunc(io) || cl_io_is_mkwrite(io));
372
373                 CDEBUG(D_VFSTRACE, DFID" write layout, type %u "DEXT"\n",
374                        PFID(lu_object_fid(&obj->co_lu)), io->ci_type,
375                        PEXT(&io->ci_write_intent));
376
377                 if (cl_io_is_trunc(io))
378                         opc = LAYOUT_INTENT_TRUNC;
379
380                 rc = ll_layout_write_intent(inode, opc, &io->ci_write_intent);
381                 io->ci_result = rc;
382                 if (!rc)
383                         io->ci_need_restart = 1;
384                 GOTO(out, rc);
385         }
386
387         if (!io->ci_need_restart &&
388             !io->ci_ignore_layout && io->ci_verify_layout) {
389                 /* check layout version */
390                 ll_layout_refresh(inode, &gen);
391                 io->ci_need_restart = vio->vui_layout_gen != gen;
392                 if (io->ci_need_restart) {
393                         CDEBUG(D_VFSTRACE,
394                                DFID" layout changed from %d to %d.\n",
395                                PFID(lu_object_fid(&obj->co_lu)),
396                                vio->vui_layout_gen, gen);
397                 }
398                 GOTO(out, 0);
399         }
400 out:
401         EXIT;
402 }
403
404 static void vvp_io_fault_fini(const struct lu_env *env,
405                               const struct cl_io_slice *ios)
406 {
407         struct cl_io   *io   = ios->cis_io;
408         struct cl_page *page = io->u.ci_fault.ft_page;
409
410         CLOBINVRNT(env, io->ci_obj, vvp_object_invariant(io->ci_obj));
411
412         if (page != NULL) {
413                 lu_ref_del(&page->cp_reference, "fault", io);
414                 cl_page_put(env, page);
415                 io->u.ci_fault.ft_page = NULL;
416         }
417         vvp_io_fini(env, ios);
418 }
419
420 static enum cl_lock_mode vvp_mode_from_vma(struct vm_area_struct *vma)
421 {
422         /*
423          * we only want to hold PW locks if the mmap() can generate
424          * writes back to the file and that only happens in shared
425          * writable vmas
426          */
427         if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_WRITE))
428                 return CLM_WRITE;
429         return CLM_READ;
430 }
431
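/* Walk the iovecs of the user buffer and, for each VMA backing them that
 * maps a Lustre file, enqueue a CEF_MUST DLM lock covering the mapped
 * range, so that page faults on such buffers during this I/O are served
 * under a lock we already hold. */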
432 static int vvp_mmap_locks(const struct lu_env *env,
433                           struct vvp_io *vio, struct cl_io *io)
434 {
435         struct vvp_thread_info *vti = vvp_env_info(env);
436         struct mm_struct *mm = current->mm;
437         struct vm_area_struct *vma;
438         struct cl_lock_descr *descr = &vti->vti_descr;
439         union ldlm_policy_data policy;
440         struct iovec iov;
441         struct iov_iter i;
442         unsigned long addr;
443         ssize_t count;
444         int result = 0;
445         ENTRY;
446
447         LASSERT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE);
448
449         /* NFS or loopback device write */
450         if (vio->vui_iter == NULL)
451                 RETURN(0);
452
453         /* No mm (e.g. NFS)? Then no VMAs either. */
454         if (mm == NULL)
455                 RETURN(0);
456
457         if (!iter_is_iovec(vio->vui_iter) && !iov_iter_is_kvec(vio->vui_iter))
458                 RETURN(0);
459
460         for (i = *vio->vui_iter;
461              iov_iter_count(&i);
462              iov_iter_advance(&i, iov.iov_len)) {
463                 iov = iov_iter_iovec(&i);
464                 addr = (unsigned long)iov.iov_base;
465                 count = iov.iov_len;
466
467                 if (count == 0)
468                         continue;
469
470                 count += addr & ~PAGE_MASK;
471                 addr &= PAGE_MASK;
472
473                 mmap_read_lock(mm);
474                 while ((vma = our_vma(mm, addr, count)) != NULL) {
475                         struct dentry *de = file_dentry(vma->vm_file);
476                         struct inode *inode = de->d_inode;
477                         int flags = CEF_MUST;
478
479                         if (ll_file_nolock(vma->vm_file)) {
480                                 /*
481                                  * "nolock" mode is not allowed for mmapped files
482                                  */
483                                 result = -EINVAL;
484                                 break;
485                         }
486
487                         /*
488                          * XXX: the required lock mode can be weakened: CIT_WRITE
489                          * I/O only ever reads the user-level buffer, and CIT_READ
490                          * only writes to it.
491                          */
492                         policy_from_vma(&policy, vma, addr, count);
493                         descr->cld_mode = vvp_mode_from_vma(vma);
494                         descr->cld_obj = ll_i2info(inode)->lli_clob;
495                         descr->cld_start = cl_index(descr->cld_obj,
496                                                     policy.l_extent.start);
497                         descr->cld_end = cl_index(descr->cld_obj,
498                                                   policy.l_extent.end);
499                         descr->cld_enq_flags = flags;
500                         result = cl_io_lock_alloc_add(env, io, descr);
501
502                         CDEBUG(D_VFSTRACE, "lock: %d: [%lu, %lu]\n",
503                                descr->cld_mode, descr->cld_start,
504                                descr->cld_end);
505
506                         if (result < 0)
507                                 break;
508
509                         if (vma->vm_end - addr >= count)
510                                 break;
511
512                         count -= vma->vm_end - addr;
513                         addr = vma->vm_end;
514                 }
515                 mmap_read_unlock(mm);
516                 if (result < 0)
517                         break;
518         }
519         RETURN(result);
520 }
521
522 static void vvp_io_advance(const struct lu_env *env,
523                            const struct cl_io_slice *ios,
524                            size_t nob)
525 {
526         struct cl_object *obj = ios->cis_io->ci_obj;
527         struct vvp_io *vio = cl2vvp_io(env, ios);
528
529         CLOBINVRNT(env, obj, vvp_object_invariant(obj));
530
531         /*
532          * Since kernel 3.16 (commit 26978b8b4) the VFS reverts the iov_iter
533          * to its original position even if the I/O succeeded, so instead
534          * of relying on the VFS, we advance the iov_iter ourselves.
535          */
536         iov_iter_advance(vio->vui_iter, nob);
537         CDEBUG(D_VFSTRACE, "advancing %ld bytes\n", nob);
538         vio->vui_tot_count -= nob;
539         iov_iter_reexpand(vio->vui_iter, vio->vui_tot_count);
540 }
541
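/* Trim the iov_iter so it covers no more than the byte count of this I/O. */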
542 static void vvp_io_update_iov(const struct lu_env *env,
543                               struct vvp_io *vio, struct cl_io *io)
544 {
545         size_t size = io->u.ci_rw.crw_count;
546
547         if (!vio->vui_iter)
548                 return;
549
550         iov_iter_truncate(vio->vui_iter, size);
551 }
552
553 static int vvp_io_rw_lock(const struct lu_env *env, struct cl_io *io,
554                           enum cl_lock_mode mode, loff_t start, loff_t end)
555 {
556         struct vvp_io *vio = vvp_env_io(env);
557         int result;
558         int ast_flags = 0;
559
560         LASSERT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE);
561         ENTRY;
562
563         vvp_io_update_iov(env, vio, io);
564
565         if (io->u.ci_rw.crw_nonblock)
566                 ast_flags |= CEF_NONBLOCK;
567         if (io->ci_lock_no_expand)
568                 ast_flags |= CEF_LOCK_NO_EXPAND;
569         if (vio->vui_fd) {
570                 /* a group lock held means no more lockless I/O */
571                 if (vio->vui_fd->fd_flags & LL_FILE_GROUP_LOCKED)
572                         io->ci_dio_lock = 1;
573
574                 if (ll_file_nolock(vio->vui_fd->fd_file) ||
575                     (vio->vui_fd->fd_file->f_flags & O_DIRECT &&
576                      !io->ci_dio_lock))
577                         ast_flags |= CEF_NEVER;
578         }
579
580         result = vvp_mmap_locks(env, vio, io);
581         if (result == 0)
582                 result = vvp_io_one_lock(env, io, ast_flags, mode, start, end);
583
584         RETURN(result);
585 }
586
587 static int vvp_io_read_lock(const struct lu_env *env,
588                             const struct cl_io_slice *ios)
589 {
590         struct cl_io *io = ios->cis_io;
591         struct cl_io_rw_common *rd = &io->u.ci_rd.rd;
592         int result;
593
594         ENTRY;
595         result = vvp_io_rw_lock(env, io, CLM_READ, rd->crw_pos,
596                                 rd->crw_pos + rd->crw_count - 1);
597         RETURN(result);
598 }
599
600 static int vvp_io_fault_lock(const struct lu_env *env,
601                              const struct cl_io_slice *ios)
602 {
603         struct cl_io *io   = ios->cis_io;
604         struct vvp_io *vio = cl2vvp_io(env, ios);
605         /*
606          * XXX LDLM_FL_CBPENDING
607          */
608         return vvp_io_one_lock_index(env,
609                                      io, 0,
610                                      vvp_mode_from_vma(vio->u.fault.ft_vma),
611                                      io->u.ci_fault.ft_index,
612                                      io->u.ci_fault.ft_index);
613 }
614
615 static int vvp_io_write_lock(const struct lu_env *env,
616                              const struct cl_io_slice *ios)
617 {
618         struct cl_io *io = ios->cis_io;
619         loff_t start;
620         loff_t end;
621
622         if (io->u.ci_wr.wr_append) {
623                 start = 0;
624                 end   = OBD_OBJECT_EOF;
625         } else {
626                 start = io->u.ci_wr.wr.crw_pos;
627                 end   = start + io->u.ci_wr.wr.crw_count - 1;
628         }
629
630         RETURN(vvp_io_rw_lock(env, io, CLM_WRITE, start, end));
631 }
632
633 static int vvp_io_setattr_iter_init(const struct lu_env *env,
634                                     const struct cl_io_slice *ios)
635
636 {
637         return 0;
638 }
639
640 /**
641  * Implementation of cl_io_operations::cio_lock() method for CIT_SETATTR io.
642  *
643  * Handles "lockless io" mode when extent locking is done by server.
644  */
645 static int vvp_io_setattr_lock(const struct lu_env *env,
646                                const struct cl_io_slice *ios)
647 {
648         struct cl_io  *io  = ios->cis_io;
649         __u64 lock_start = 0;
650         __u64 lock_end = OBD_OBJECT_EOF;
651         __u32 enqflags = 0;
652
653         if (cl_io_is_trunc(io)) {
654                 struct inode *inode = vvp_object_inode(io->ci_obj);
655
656                 /* set enqueue flags to CEF_MUST in case of encrypted file,
657                  * to prevent lockless truncate
658                  */
659                 if (S_ISREG(inode->i_mode) && IS_ENCRYPTED(inode))
660                         enqflags = CEF_MUST;
661                 else if (io->u.ci_setattr.sa_attr.lvb_size == 0)
662                         enqflags = CEF_DISCARD_DATA;
663         } else if (cl_io_is_fallocate(io)) {
664                 lock_start = io->u.ci_setattr.sa_falloc_offset;
665                 lock_end = io->u.ci_setattr.sa_falloc_end - 1;
666         } else {
667                 unsigned int valid = io->u.ci_setattr.sa_avalid;
668
669                 if (!(valid & TIMES_SET_FLAGS))
670                         return 0;
671
672                 if ((!(valid & ATTR_MTIME) ||
673                      io->u.ci_setattr.sa_attr.lvb_mtime >=
674                      io->u.ci_setattr.sa_attr.lvb_ctime) &&
675                     (!(valid & ATTR_ATIME) ||
676                      io->u.ci_setattr.sa_attr.lvb_atime >=
677                      io->u.ci_setattr.sa_attr.lvb_ctime))
678                         return 0;
679         }
680
681         return vvp_io_one_lock(env, io, enqflags, CLM_WRITE,
682                                lock_start, lock_end);
683 }
684
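/* Apply a truncate to the VM/page-cache side: validate the new size, update
 * i_size under ll_inode_size_lock(), and discard page cache beyond it. */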
685 static int vvp_do_vmtruncate(struct inode *inode, size_t size)
686 {
687         int     result;
688
689         /*
690          * Only ll_inode_size_lock is taken at this level.
691          */
692         ll_inode_size_lock(inode);
693         result = inode_newsize_ok(inode, size);
694         if (result < 0) {
695                 ll_inode_size_unlock(inode);
696                 return result;
697         }
698         i_size_write(inode, size);
699
700         ll_truncate_pagecache(inode, size);
701         ll_inode_size_unlock(inode);
702         return result;
703 }
704
705 static int vvp_io_setattr_time(const struct lu_env *env,
706                                const struct cl_io_slice *ios)
707 {
708         struct cl_io       *io    = ios->cis_io;
709         struct cl_object   *obj   = io->ci_obj;
710         struct cl_attr     *attr  = vvp_env_thread_attr(env);
711         int result;
712         unsigned valid = CAT_CTIME;
713
714         cl_object_attr_lock(obj);
715         attr->cat_ctime = io->u.ci_setattr.sa_attr.lvb_ctime;
716         if (io->u.ci_setattr.sa_avalid & ATTR_ATIME_SET) {
717                 attr->cat_atime = io->u.ci_setattr.sa_attr.lvb_atime;
718                 valid |= CAT_ATIME;
719         }
720         if (io->u.ci_setattr.sa_avalid & ATTR_MTIME_SET) {
721                 attr->cat_mtime = io->u.ci_setattr.sa_attr.lvb_mtime;
722                 valid |= CAT_MTIME;
723         }
724         result = cl_object_attr_update(env, obj, attr, valid);
725         cl_object_attr_unlock(obj);
726
727         return result;
728 }
729
730 static int vvp_io_setattr_start(const struct lu_env *env,
731                                 const struct cl_io_slice *ios)
732 {
733         struct cl_io *io = ios->cis_io;
734         struct inode *inode = vvp_object_inode(io->ci_obj);
735         struct ll_inode_info *lli = ll_i2info(inode);
736         int mode = io->u.ci_setattr.sa_falloc_mode;
737
738         if (cl_io_is_trunc(io)) {
739                 trunc_sem_down_write(&lli->lli_trunc_sem);
740                 mutex_lock(&lli->lli_setattr_mutex);
741                 inode_dio_wait(inode);
742         } else if (cl_io_is_fallocate(io)) {
743                 loff_t size;
744
745                 trunc_sem_down_write(&lli->lli_trunc_sem);
746                 mutex_lock(&lli->lli_setattr_mutex);
747                 inode_dio_wait(inode);
748
749                 ll_merge_attr(env, inode);
750                 size = i_size_read(inode);
751                 if (io->u.ci_setattr.sa_falloc_end > size &&
752                     !(mode & FALLOC_FL_KEEP_SIZE)) {
753                         size = io->u.ci_setattr.sa_falloc_end;
754                         io->u.ci_setattr.sa_avalid |= ATTR_SIZE;
755                 }
756                 io->u.ci_setattr.sa_attr.lvb_size = size;
757         } else {
758                 mutex_lock(&lli->lli_setattr_mutex);
759         }
760
761         if (io->u.ci_setattr.sa_avalid & TIMES_SET_FLAGS)
762                 return vvp_io_setattr_time(env, ios);
763
764         return 0;
765 }
766
767 static void vvp_io_setattr_end(const struct lu_env *env,
768                                const struct cl_io_slice *ios)
769 {
770         struct cl_io *io = ios->cis_io;
771         struct inode *inode = vvp_object_inode(io->ci_obj);
772         struct ll_inode_info *lli = ll_i2info(inode);
773         loff_t size = io->u.ci_setattr.sa_attr.lvb_size;
774
775         if (cl_io_is_trunc(io)) {
776                 /* Truncate in-memory pages - they must be clean
777                  * because osc has already been notified to destroy the osc_extents. */
778                 vvp_do_vmtruncate(inode, size);
779                 mutex_unlock(&lli->lli_setattr_mutex);
780                 trunc_sem_up_write(&lli->lli_trunc_sem);
781         } else if (cl_io_is_fallocate(io)) {
782                 int mode = io->u.ci_setattr.sa_falloc_mode;
783
784                 if (!(mode & FALLOC_FL_KEEP_SIZE) &&
785                     size > i_size_read(inode)) {
786                         ll_inode_size_lock(inode);
787                         i_size_write(inode, size);
788                         ll_inode_size_unlock(inode);
789                 }
790                 inode->i_ctime = current_time(inode);
791                 mutex_unlock(&lli->lli_setattr_mutex);
792                 trunc_sem_up_write(&lli->lli_trunc_sem);
793         } else {
794                 mutex_unlock(&lli->lli_setattr_mutex);
795         }
796 }
797
798 static void vvp_io_setattr_fini(const struct lu_env *env,
799                                 const struct cl_io_slice *ios)
800 {
801         bool restore_needed = ios->cis_io->ci_restore_needed;
802         struct inode *inode = vvp_object_inode(ios->cis_obj);
803
804         vvp_io_fini(env, ios);
805
806         if (restore_needed && !ios->cis_io->ci_restore_needed) {
807                 /* restore finished, set data modified flag for HSM */
808                 set_bit(LLIF_DATA_MODIFIED, &ll_i2info(inode)->lli_flags);
809         }
810 }
811
812 static int vvp_io_read_start(const struct lu_env *env,
813                              const struct cl_io_slice *ios)
814 {
815         struct vvp_io *vio = cl2vvp_io(env, ios);
816         struct cl_io *io = ios->cis_io;
817         struct cl_object *obj = io->ci_obj;
818         struct inode *inode = vvp_object_inode(obj);
819         struct ll_inode_info *lli = ll_i2info(inode);
820         struct file *file = vio->vui_fd->fd_file;
821         loff_t pos = io->u.ci_rd.rd.crw_pos;
822         size_t cnt = io->u.ci_rd.rd.crw_count;
823         size_t tot = vio->vui_tot_count;
824         int exceed = 0;
825         int result;
826         struct iov_iter iter;
827         pgoff_t page_offset;
828
829         ENTRY;
830
831         CLOBINVRNT(env, obj, vvp_object_invariant(obj));
832
833         CDEBUG(D_VFSTRACE, "%s: read [%llu, %llu)\n",
834                 file_dentry(file)->d_name.name,
835                 pos, pos + cnt);
836
837         trunc_sem_down_read(&lli->lli_trunc_sem);
838
839         if (io->ci_async_readahead) {
840                 file_accessed(file);
841                 RETURN(0);
842         }
843
844         if (!can_populate_pages(env, io, inode))
845                 RETURN(0);
846
847         if (!(file->f_flags & O_DIRECT)) {
848                 result = cl_io_lru_reserve(env, io, pos, cnt);
849                 if (result)
850                         RETURN(result);
851         }
852
853         /* Unless this read is against a sparse file, the lock has already
854          * been acquired, so vvp_prep_size() is a no-op. */
855         result = vvp_prep_size(env, obj, io, pos, cnt, &exceed);
856         if (result != 0)
857                 RETURN(result);
858         else if (exceed != 0)
859                 GOTO(out, result);
860
861         LU_OBJECT_HEADER(D_INODE, env, &obj->co_lu,
862                          "Read ino %lu, %zu bytes, offset %lld, size %llu\n",
863                          inode->i_ino, cnt, pos, i_size_read(inode));
864
865         /* initialize read-ahead window once per syscall */
866         if (!vio->vui_ra_valid) {
867                 vio->vui_ra_valid = true;
868                 vio->vui_ra_start_idx = cl_index(obj, pos);
869                 vio->vui_ra_pages = 0;
870                 page_offset = pos & ~PAGE_MASK;
871                 if (page_offset) {
872                         vio->vui_ra_pages++;
873                         if (tot > PAGE_SIZE - page_offset)
874                                 tot -= (PAGE_SIZE - page_offset);
875                         else
876                                 tot = 0;
877                 }
878                 vio->vui_ra_pages += (tot + PAGE_SIZE - 1) >> PAGE_SHIFT;
879
880                 CDEBUG(D_READA, "tot %zu, ra_start %lu, ra_count %lu\n",
881                        vio->vui_tot_count, vio->vui_ra_start_idx,
882                        vio->vui_ra_pages);
883         }
884
885         /* BUG: 5972 */
886         file_accessed(file);
887         LASSERT(vio->vui_iocb->ki_pos == pos);
888         iter = *vio->vui_iter;
889         result = generic_file_read_iter(vio->vui_iocb, &iter);
890 out:
891         if (result >= 0) {
892                 if (result < cnt)
893                         io->ci_continue = 0;
894                 io->ci_nob += result;
895                 result = 0;
896         } else if (result == -EIOCBQUEUED) {
897                 io->ci_nob += vio->u.readwrite.vui_read;
898                 vio->vui_iocb->ki_pos = pos + vio->u.readwrite.vui_read;
899         }
900
901         return result;
902 }
903
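/* Synchronously submit the pages in @plist for write, clipping the first
 * page at @from and the last page at @to. Returns the number of bytes
 * written, or a negative error code if nothing was written. */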
904 static int vvp_io_commit_sync(const struct lu_env *env, struct cl_io *io,
905                               struct cl_page_list *plist, int from, int to)
906 {
907         struct cl_2queue *queue = &io->ci_queue;
908         struct cl_page *page;
909         unsigned int bytes = 0;
910         int rc = 0;
911         ENTRY;
912
913         if (plist->pl_nr == 0)
914                 RETURN(0);
915
916         if (from > 0 || to != PAGE_SIZE) {
917                 page = cl_page_list_first(plist);
918                 if (plist->pl_nr == 1) {
919                         cl_page_clip(env, page, from, to);
920                 } else {
921                         if (from > 0)
922                                 cl_page_clip(env, page, from, PAGE_SIZE);
923                         if (to != PAGE_SIZE) {
924                                 page = cl_page_list_last(plist);
925                                 cl_page_clip(env, page, 0, to);
926                         }
927                 }
928         }
929
930         cl_2queue_init(queue);
931         cl_page_list_splice(plist, &queue->c2_qin);
932         rc = cl_io_submit_sync(env, io, CRT_WRITE, queue, 0);
933
934         /* plist is not sorted any more */
935         cl_page_list_splice(&queue->c2_qin, plist);
936         cl_page_list_splice(&queue->c2_qout, plist);
937         cl_2queue_fini(env, queue);
938
939         if (rc == 0) {
940                 /* calculate bytes */
941                 bytes = plist->pl_nr << PAGE_SHIFT;
942                 bytes -= from + PAGE_SIZE - to;
943
944                 while (plist->pl_nr > 0) {
945                         page = cl_page_list_first(plist);
946                         cl_page_list_del(env, plist, page);
947
948                         cl_page_clip(env, page, 0, PAGE_SIZE);
949
950                         SetPageUptodate(cl_page_vmpage(page));
951                         cl_page_disown(env, io, page);
952
953                         /* held in ll_cl_init() */
954                         lu_ref_del(&page->cp_reference, "cl_io", io);
955                         cl_page_put(env, page);
956                 }
957         }
958
959         RETURN(bytes > 0 ? bytes : rc);
960 }
961
962 /*
963  * From kernel v4.19-rc5-248-g9b89a0355144 onward, use XArray.
964  * Prior kernels use a radix_tree for tags.
965  */
966 static inline void ll_page_tag_dirty(struct page *page,
967                                      struct address_space *mapping)
968 {
969 #ifndef HAVE_RADIX_TREE_TAG_SET
970         __xa_set_mark(&mapping->i_pages, page_index(page), PAGECACHE_TAG_DIRTY);
971 #else
972         radix_tree_tag_set(&mapping->page_tree, page_index(page),
973                            PAGECACHE_TAG_DIRTY);
974 #endif
975 }
976
977 /*
978  * Kernels 4.2 - 4.5 pass memcg argument to account_page_dirtied()
979  * Kernel v5.2-5678-gac1c3e4 no longer exports account_page_dirtied
980  */
981 static inline void ll_account_page_dirtied(struct page *page,
982                                            struct address_space *mapping)
983 {
984 #ifdef HAVE_ACCOUNT_PAGE_DIRTIED_3ARGS
985         struct mem_cgroup *memcg = mem_cgroup_begin_page_stat(page);
986
987         account_page_dirtied(page, mapping, memcg);
988         mem_cgroup_end_page_stat(memcg);
989 #elif defined(HAVE_ACCOUNT_PAGE_DIRTIED_EXPORT)
990         account_page_dirtied(page, mapping);
991 #else
992         vvp_account_page_dirtied(page, mapping);
993 #endif
994         ll_page_tag_dirty(page, mapping);
995 }
996
997 /* Taken from kernel set_page_dirty, __set_page_dirty_nobuffers
998  * Last change to this area: b93b016313b3ba8003c3b8bb71f569af91f19fc7
999  *
1000  * Current with Linus tip of tree (7/13/2019):
1001  * v5.2-rc4-224-ge01e060fe0
1002  *
1003  * Backwards compat for 3.x, 5.x kernels relating to memcg handling
1004  * & rename of radix tree to xarray.
1005  */
1006 void vvp_set_pagevec_dirty(struct pagevec *pvec)
1007 {
1008         struct page *page = pvec->pages[0];
1009         int count = pagevec_count(pvec);
1010         int i;
1011 #ifdef HAVE_KALLSYMS_LOOKUP_NAME
1012         struct address_space *mapping = page->mapping;
1013         unsigned long flags;
1014         unsigned long skip_pages = 0;
1015         int dirtied = 0;
1016 #endif
1017
1018         ENTRY;
1019
1020         BUILD_BUG_ON(PAGEVEC_SIZE > BITS_PER_LONG);
1021         LASSERTF(page->mapping,
1022                  "mapping must be set. page %p, page->private (cl_page) %p\n",
1023                  page, (void *) page->private);
1024
1025         /*
1026          * kernels without HAVE_KALLSYMS_LOOKUP_NAME also don't have
1027          * account_page_dirtied exported, and if we can't access that symbol,
1028          * we can't do page dirtying in batch (taking the xarray lock only once)
1029          * so we just fall back to a looped call to __set_page_dirty_nobuffers
1030          */
1031 #ifndef HAVE_ACCOUNT_PAGE_DIRTIED_EXPORT
1032         if (!vvp_account_page_dirtied) {
1033                 for (i = 0; i < count; i++)
1034                         __set_page_dirty_nobuffers(pvec->pages[i]);
1035                 EXIT;
1036         }
1037 #endif
1038
1039 #ifdef HAVE_KALLSYMS_LOOKUP_NAME
1040         for (i = 0; i < count; i++) {
1041                 page = pvec->pages[i];
1042
1043                 ClearPageReclaim(page);
1044
1045                 vvp_lock_page_memcg(page);
1046                 if (TestSetPageDirty(page)) {
1047                         /* page is already dirty, so no extra work is needed;
1048                          * set a flag for the i'th page to be skipped
1049                          */
1050                         vvp_unlock_page_memcg(page);
1051                         skip_pages |= (1 << i);
1052                 }
1053         }
1054
1055         ll_xa_lock_irqsave(&mapping->i_pages, flags);
1056
1057         /* Notes on differences with __set_page_dirty_nobuffers:
1058          * 1. We don't need to call page_mapping because we know this is a page
1059          * cache page.
1060          * 2. We have the pages locked, so there is no need for the careful
1061          * mapping/mapping2 dance.
1062          * 3. No mapping is impossible. (Race w/truncate mentioned in
1063          * dirty_nobuffers should be impossible because we hold the page lock.)
1064          * 4. All mappings are the same because i/o is only to one file.
1065          */
1066         for (i = 0; i < count; i++) {
1067                 page = pvec->pages[i];
1068                 /* if the i'th page was unlocked above, skip it here */
1069                 if ((skip_pages >> i) & 1)
1070                         continue;
1071
1072                 LASSERTF(page->mapping == mapping,
1073                          "all pages must have the same mapping.  page %p, mapping %p, first mapping %p\n",
1074                          page, page->mapping, mapping);
1075                 WARN_ON_ONCE(!PagePrivate(page) && !PageUptodate(page));
1076                 ll_account_page_dirtied(page, mapping);
1077                 dirtied++;
1078                 vvp_unlock_page_memcg(page);
1079         }
1080         ll_xa_unlock_irqrestore(&mapping->i_pages, flags);
1081
1082         CDEBUG(D_VFSTRACE, "mapping %p, count %d, dirtied %d\n", mapping,
1083                count, dirtied);
1084
1085         if (mapping->host && dirtied) {
1086                 /* !PageAnon && !swapper_space */
1087                 __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
1088         }
1089 #endif
1090         EXIT;
1091 }
1092
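/* Commit callback: mark the written vmpages uptodate, dirty them in one
 * batch, then drop the page references taken in ll_cl_init(). */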
1093 static void write_commit_callback(const struct lu_env *env, struct cl_io *io,
1094                                   struct pagevec *pvec)
1095 {
1096         int count = 0;
1097         int i = 0;
1098
1099         ENTRY;
1100
1101         count = pagevec_count(pvec);
1102         LASSERT(count > 0);
1103
1104         for (i = 0; i < count; i++) {
1105                 struct page *vmpage = pvec->pages[i];
1106                 SetPageUptodate(vmpage);
1107         }
1108
1109         vvp_set_pagevec_dirty(pvec);
1110
1111         for (i = 0; i < count; i++) {
1112                 struct page *vmpage = pvec->pages[i];
1113                 struct cl_page *page = (struct cl_page *) vmpage->private;
1114                 cl_page_disown(env, io, page);
1115                 lu_ref_del(&page->cp_reference, "cl_io", cl_io_top(io));
1116                 cl_page_put(env, page);
1117         }
1118
1119         EXIT;
1120 }
1121
1122 /* make sure the page list is contiguous */
1123 static bool page_list_sanity_check(struct cl_object *obj,
1124                                    struct cl_page_list *plist)
1125 {
1126         struct cl_page *page;
1127         pgoff_t index = CL_PAGE_EOF;
1128
1129         cl_page_list_for_each(page, plist) {
1130                 if (index == CL_PAGE_EOF) {
1131                         index = cl_page_index(page);
1132                         continue;
1133                 }
1134
1135                 ++index;
1136                 if (index == cl_page_index(page))
1137                         continue;
1138
1139                 return false;
1140         }
1141         return true;
1142 }
1143
1144 /* Return how many bytes have been queued or written */
1145 int vvp_io_write_commit(const struct lu_env *env, struct cl_io *io)
1146 {
1147         struct cl_object *obj = io->ci_obj;
1148         struct inode *inode = vvp_object_inode(obj);
1149         struct vvp_io *vio = vvp_env_io(env);
1150         struct cl_page_list *queue = &vio->u.readwrite.vui_queue;
1151         struct cl_page *page;
1152         int rc = 0;
1153         int bytes = 0;
1154         unsigned int npages = vio->u.readwrite.vui_queue.pl_nr;
1155         ENTRY;
1156
1157         if (npages == 0)
1158                 RETURN(0);
1159
1160         CDEBUG(D_VFSTRACE, "commit async pages: %d, from %d, to %d\n",
1161                 npages, vio->u.readwrite.vui_from, vio->u.readwrite.vui_to);
1162
1163         LASSERT(page_list_sanity_check(obj, queue));
1164
1165         /* submit IO with async write */
1166         rc = cl_io_commit_async(env, io, queue,
1167                                 vio->u.readwrite.vui_from,
1168                                 vio->u.readwrite.vui_to,
1169                                 write_commit_callback);
1170         npages -= queue->pl_nr; /* already committed pages */
1171         if (npages > 0) {
1172                 /* calculate how many bytes were written */
1173                 bytes = npages << PAGE_SHIFT;
1174
1175                 /* first page */
1176                 bytes -= vio->u.readwrite.vui_from;
1177                 if (queue->pl_nr == 0) /* last page */
1178                         bytes -= PAGE_SIZE - vio->u.readwrite.vui_to;
1179                 LASSERTF(bytes > 0, "bytes = %d, pages = %d\n", bytes, npages);
1180
1181                 vio->u.readwrite.vui_written += bytes;
1182
1183                 CDEBUG(D_VFSTRACE, "Committed %d pages %d bytes, tot: %ld\n",
1184                         npages, bytes, vio->u.readwrite.vui_written);
1185
1186                 /* the first page must have been written. */
1187                 vio->u.readwrite.vui_from = 0;
1188         }
1189         LASSERT(page_list_sanity_check(obj, queue));
1190         LASSERT(ergo(rc == 0, queue->pl_nr == 0));
1191
1192         /* out of quota, try sync write */
1193         if (rc == -EDQUOT && !cl_io_is_mkwrite(io)) {
1194                 struct ll_inode_info *lli = ll_i2info(inode);
1195
1196                 rc = vvp_io_commit_sync(env, io, queue,
1197                                         vio->u.readwrite.vui_from,
1198                                         vio->u.readwrite.vui_to);
1199                 if (rc > 0) {
1200                         vio->u.readwrite.vui_written += rc;
1201                         rc = 0;
1202                 }
1203                 if (lli->lli_clob != NULL)
1204                         lov_read_and_clear_async_rc(lli->lli_clob);
1205                 lli->lli_async_rc = 0;
1206         }
1207
1208         /* update inode size */
1209         ll_merge_attr(env, inode);
1210
1211         /* Any pages still in the queue failed to commit; discard them
1212          * unless they were dirtied before. */
1213         while (queue->pl_nr > 0) {
1214                 page = cl_page_list_first(queue);
1215                 cl_page_list_del(env, queue, page);
1216
1217                 if (!PageDirty(cl_page_vmpage(page)))
1218                         cl_page_discard(env, io, page);
1219
1220                 cl_page_disown(env, io, page);
1221
1222                 /* held in ll_cl_init() */
1223                 lu_ref_del(&page->cp_reference, "cl_io", io);
1224                 cl_page_put(env, page);
1225         }
1226         cl_page_list_fini(env, queue);
1227
1228         RETURN(rc);
1229 }
1230
1231 static int vvp_io_write_start(const struct lu_env *env,
1232                               const struct cl_io_slice *ios)
1233 {
1234         struct vvp_io           *vio   = cl2vvp_io(env, ios);
1235         struct cl_io            *io    = ios->cis_io;
1236         struct cl_object        *obj   = io->ci_obj;
1237         struct inode            *inode = vvp_object_inode(obj);
1238         struct ll_inode_info    *lli   = ll_i2info(inode);
1239         struct file             *file  = vio->vui_fd->fd_file;
1240         ssize_t                  result = 0;
1241         loff_t                   pos = io->u.ci_wr.wr.crw_pos;
1242         size_t                   cnt = io->u.ci_wr.wr.crw_count;
1243         bool                     lock_inode = !IS_NOSEC(inode);
1244         size_t nob = io->ci_nob;
1245         struct iov_iter iter;
1246         size_t written = 0;
1247
1248         ENTRY;
1249
1250         trunc_sem_down_read(&lli->lli_trunc_sem);
1251
1252         if (!can_populate_pages(env, io, inode))
1253                 RETURN(0);
1254
1255         if (cl_io_is_append(io)) {
1256                 /*
1257                  * PARALLEL IO: this has to be changed for parallel I/O doing
1258                  * out-of-order writes.
1259                  */
1260                 ll_merge_attr(env, inode);
1261                 pos = io->u.ci_wr.wr.crw_pos = i_size_read(inode);
1262                 vio->vui_iocb->ki_pos = pos;
1263         } else {
1264                 LASSERTF(vio->vui_iocb->ki_pos == pos,
1265                          "ki_pos %lld [%lld, %lld)\n",
1266                          vio->vui_iocb->ki_pos,
1267                          pos, pos + cnt);
1268         }
1269
1270         CDEBUG(D_VFSTRACE, "%s: write [%llu, %llu)\n",
1271                 file_dentry(file)->d_name.name,
1272                 pos, pos + cnt);
1273
1274         /* The maximum Lustre file size is variable, based on the OST maximum
1275          * object size and number of stripes.  This needs another check in
1276          * addition to the VFS checks earlier. */
1277         if (pos + cnt > ll_file_maxbytes(inode)) {
1278                 CDEBUG(D_INODE,
1279                        "%s: file %s ("DFID") offset %llu > maxbytes %llu\n",
1280                        ll_i2sbi(inode)->ll_fsname,
1281                        file_dentry(file)->d_name.name,
1282                        PFID(ll_inode2fid(inode)), pos + cnt,
1283                        ll_file_maxbytes(inode));
1284                 RETURN(-EFBIG);
1285         }
1286
1287         /* Tests to verify we take the i_mutex correctly */
1288         if (OBD_FAIL_CHECK(OBD_FAIL_LLITE_IMUTEX_SEC) && !lock_inode)
1289                 RETURN(-EINVAL);
1290
1291         if (OBD_FAIL_CHECK(OBD_FAIL_LLITE_IMUTEX_NOSEC) && lock_inode)
1292                 RETURN(-EINVAL);
1293
1294         if (!(file->f_flags & O_DIRECT)) {
1295                 result = cl_io_lru_reserve(env, io, pos, cnt);
1296                 if (result)
1297                         RETURN(result);
1298         }
1299
1300         if (vio->vui_iter == NULL) {
1301                 /* from a temp io in ll_cl_init(). */
1302                 result = 0;
1303         } else {
1304                 /*
1305                  * When using the locked AIO function (generic_file_aio_write())
1306                  * testing has shown the inode mutex to be a limiting factor
1307                  * with multi-threaded single shared file performance. To get
1308                  * around this, we now use the lockless version. To maintain
1309                  * consistency, proper locking to protect against writes,
1310                  * truncates, etc. is handled in the higher layers of Lustre.
1311                  */
1312                 lock_inode = !IS_NOSEC(inode);
1313                 iter = *vio->vui_iter;
1314
1315                 if (unlikely(lock_inode))
1316                         inode_lock(inode);
1317                 result = __generic_file_write_iter(vio->vui_iocb, &iter);
1318                 if (unlikely(lock_inode))
1319                         inode_unlock(inode);
1320
1321                 written = result;
1322                 if (result > 0)
1323 #ifdef HAVE_GENERIC_WRITE_SYNC_2ARGS
1324                         result = generic_write_sync(vio->vui_iocb, result);
1325 #else
1326                 {
1327                         ssize_t err;
1328
1329                         err = generic_write_sync(vio->vui_iocb->ki_filp, pos,
1330                                                  result);
1331                         if (err < 0 && result > 0)
1332                                 result = err;
1333                 }
1334 #endif
1335         }
1336
1337         if (result > 0) {
1338                 result = vvp_io_write_commit(env, io);
1339                 /* Simulate short commit */
1340                 if (CFS_FAULT_CHECK(OBD_FAIL_LLITE_SHORT_COMMIT)) {
1341                         vio->u.readwrite.vui_written >>= 1;
1342                         if (vio->u.readwrite.vui_written > 0)
1343                                 io->ci_need_restart = 1;
1344                 }
1345                 if (vio->u.readwrite.vui_written > 0) {
1346                         result = vio->u.readwrite.vui_written;
1347                         CDEBUG(D_VFSTRACE, "%s: write nob %zd, result: %zd\n",
1348                                 file_dentry(file)->d_name.name,
1349                                 io->ci_nob, result);
1350                         io->ci_nob += result;
1351                 } else {
1352                         io->ci_continue = 0;
1353                 }
1354         }
1355         if (vio->vui_iocb->ki_pos != (pos + io->ci_nob - nob)) {
1356                 CDEBUG(D_VFSTRACE,
1357                        "%s: write position mismatch: ki_pos %lld vs. pos %lld, written %zd, commit %zd: rc = %zd\n",
1358                        file_dentry(file)->d_name.name,
1359                        vio->vui_iocb->ki_pos, pos + io->ci_nob - nob,
1360                        written, io->ci_nob - nob, result);
1361                 /*
1362                  * Rewind ki_pos and vui_iter to where it has
1363                  * successfully committed.
1364                  */
1365                 vio->vui_iocb->ki_pos = pos + io->ci_nob - nob;
1366         }
1367         if (result > 0 || result == -EIOCBQUEUED) {
1368                 set_bit(LLIF_DATA_MODIFIED, &ll_i2info(inode)->lli_flags);
1369
1370                 if (result != -EIOCBQUEUED && result < cnt)
1371                         io->ci_continue = 0;
1372                 if (result > 0)
1373                         result = 0;
1374                 /* move forward */
1375                 if (result == -EIOCBQUEUED) {
1376                         io->ci_nob += vio->u.readwrite.vui_written;
1377                         vio->vui_iocb->ki_pos = pos +
1378                                         vio->u.readwrite.vui_written;
1379                 }
1380         }
1381
1382         RETURN(result);
1383 }
1384
1385 static void vvp_io_rw_end(const struct lu_env *env,
1386                           const struct cl_io_slice *ios)
1387 {
1388         struct inode            *inode = vvp_object_inode(ios->cis_obj);
1389         struct ll_inode_info    *lli = ll_i2info(inode);
1390
1391         trunc_sem_up_read(&lli->lli_trunc_sem);
1392 }
1393
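/* Complete a deferred detach: unlink the vmpage from its cl_page and drop
 * the references kept by cl_page_delete0(). */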
1394 static void detach_and_deref_page(struct cl_page *clp, struct page *vmpage)
1395 {
1396         if (!clp->cp_defer_detach)
1397                 return;
1398
1399         /**
1400          * cl_page_delete0() took a vmpage reference, but did not unlink the
1401          * vmpage from its cl_page.
1402          */
1403         clp->cp_defer_detach = 0;
1404         ClearPagePrivate(vmpage);
1405         vmpage->private = 0;
1406
1407         put_page(vmpage);
1408         refcount_dec(&clp->cp_ref);
1409 }
1410
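/**
 * Call the kernel's filemap_fault() and translate its VM_FAULT_* result
 * for the cl_io machinery.  On VM_FAULT_SIGBUS the fault may be retried
 * once a deferred vmpage/cl_page detach has been cleaned up (see the
 * retry: label below), and any extra cl_page reference taken by
 * ll_readpage() is dropped on the way out.
 */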
1411 static int vvp_io_kernel_fault(const struct lu_env *env,
1412                                struct vvp_fault_io *cfio)
1413 {
1414         struct vm_fault *vmf = cfio->ft_vmf;
1415         struct file *vmff = cfio->ft_vma->vm_file;
1416         struct address_space *mapping = vmff->f_mapping;
1417         struct inode *inode = mapping->host;
1418         struct page *vmpage = NULL;
1419         struct cl_page *clp = NULL;
1420         int rc = 0;
1421         ENTRY;
1422
1423         ll_inode_size_lock(inode);
1424 retry:
1425         cfio->ft_flags = ll_filemap_fault(cfio->ft_vma, vmf);
1426         cfio->ft_flags_valid = 1;
1427
1428         if (vmf->page) {
1429                 /* success, vmpage is locked */
1430                 LL_CDEBUG_PAGE(D_PAGE, vmf->page, "got addr %p type NOPAGE\n",
1431                                get_vmf_address(vmf));
1432                 if (unlikely(!(cfio->ft_flags & VM_FAULT_LOCKED))) {
1433                         lock_page(vmf->page);
1434                         cfio->ft_flags |= VM_FAULT_LOCKED;
1435                 }
1436
1437                 cfio->ft_vmpage = vmf->page;
1438
1439                 /**
1440                  * ll_filemap_fault()->ll_readpage() could have taken an
1441                  * extra cl_page reference, so look the cl_page up to check
1442                  * its cp_fault_ref and drop that reference later.
1443                  */
1444                 clp = cl_vmpage_page(vmf->page, NULL);
1445
1446                 GOTO(unlock, rc = 0);
1447         }
1448
1449         /* filemap_fault() fails, vmpage is not locked */
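        /*
         * Note: clp may still be set from an earlier pass through the
         * retry: label above; only look the cl_page up if we do not
         * already have it.
         */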
1450         if (clp == NULL) {
1451                 vmpage = find_get_page(mapping, vmf->pgoff);
1452                 if (vmpage) {
1453                         lock_page(vmpage);
1454                         clp = cl_vmpage_page(vmpage, NULL);
1455                         unlock_page(vmpage);
1456                 }
1457         }
1458
1459         if (cfio->ft_flags & VM_FAULT_SIGBUS) {
1460                 pgoff_t max_idx;
1461
1462                 /**
1463                  * ll_filemap_fault()->ll_readpage() could fill the vmpage
1464                  * correctly and unlock it, but memory pressure or truncate
1465                  * could then detach the cl_page from the vmpage; the kernel
1466                  * filemap_fault() will wait_on_page_locked(vmpage), find
1467                  * that the vmpage has had its uptodate bit cleared, and
1468                  * return VM_FAULT_SIGBUS.
1469                  *
1470                  * In this case, retry filemap_fault()->ll_readpage() to
1471                  * rebuild the cl_page and fill the vmpage with up-to-date data.
1472                  */
1473                 if (likely(vmpage)) {
1474                         bool need_retry = false;
1475
1476                         if (clp) {
1477                                 if (clp->cp_defer_detach) {
1478                                         detach_and_deref_page(clp, vmpage);
1479                                         /**
1480                                          * Check i_size to make sure the index
1481                                          * is not past EOF; don't call
1482                                          * filemap_fault() repeatedly, since it
1483                                          * returns VM_FAULT_SIGBUS without even
1484                                          * trying if vmf->pgoff is past EOF.
1485                                          */
1486                                         max_idx = DIV_ROUND_UP(
1487                                                 i_size_read(inode), PAGE_SIZE);
1488                                         if (vmf->pgoff < max_idx)
1489                                                 need_retry = true;
1490                                 }
1491                                 if (clp->cp_fault_ref) {
1492                                         clp->cp_fault_ref = 0;
1493                                         /* ref not released in ll_readpage() */
1494                                         cl_page_put(env, clp);
1495                                 }
1496                                 if (need_retry)
1497                                         goto retry;
1498                         }
1499                 }
1500
1501                 CDEBUG(D_PAGE, "got addr %p - SIGBUS\n", get_vmf_address(vmf));
1502                 GOTO(unlock, rc = -EFAULT);
1503         }
1504
1505         if (cfio->ft_flags & VM_FAULT_OOM) {
1506                 CDEBUG(D_PAGE, "got addr %p - OOM\n", get_vmf_address(vmf));
1507                 GOTO(unlock, rc = -ENOMEM);
1508         }
1509
1510         if (cfio->ft_flags & VM_FAULT_RETRY)
1511                 GOTO(unlock, rc = -EAGAIN);
1512
1513         CERROR("unknown error in page fault %d\n", cfio->ft_flags);
1514         rc = -EINVAL;
1515 unlock:
1516         ll_inode_size_unlock(inode);
1517         if (clp) {
1518                 if (clp->cp_defer_detach && vmpage)
1519                         detach_and_deref_page(clp, vmpage);
1520
1521                 /* additional cl_page ref has been taken in ll_readpage() */
1522                 if (clp->cp_fault_ref) {
1523                         clp->cp_fault_ref = 0;
1524                         /* ref not released in ll_readpage() */
1525                         cl_page_put(env, clp);
1526                 }
1527                 /* ref taken in this function */
1528                 cl_page_put(env, clp);
1529         }
1530         if (vmpage)
1531                 put_page(vmpage);
1532         return rc;
1533 }
1534
1535 static void mkwrite_commit_callback(const struct lu_env *env, struct cl_io *io,
1536                                     struct pagevec *pvec)
1537 {
1538         vvp_set_pagevec_dirty(pvec);
1539 }
1540
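/**
 * Get a locked vmpage for the faulting index: via vvp_io_kernel_fault()
 * for a plain fault, or from the caller for mkwrite.  Guards against
 * fault/truncate races under lli_trunc_sem, and for mkwrite commits the
 * page to cache (falling back to a sync write on -EDQUOT to verify a
 * real out-of-quota condition).  The vmpage is returned unlocked to
 * avoid deadlocks.
 */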
1541 static int vvp_io_fault_start(const struct lu_env *env,
1542                               const struct cl_io_slice *ios)
1543 {
1544         struct vvp_io           *vio   = cl2vvp_io(env, ios);
1545         struct cl_io            *io    = ios->cis_io;
1546         struct cl_object        *obj   = io->ci_obj;
1547         struct inode            *inode = vvp_object_inode(obj);
1548         struct ll_inode_info    *lli   = ll_i2info(inode);
1549         struct cl_fault_io      *fio   = &io->u.ci_fault;
1550         struct vvp_fault_io     *cfio  = &vio->u.fault;
1551         loff_t                   offset;
1552         int                      result = 0;
1553         struct page             *vmpage = NULL;
1554         struct cl_page          *page;
1555         loff_t                   size;
1556         pgoff_t                  last_index;
1557         ENTRY;
1558
1559         trunc_sem_down_read_nowait(&lli->lli_trunc_sem);
1560
1561         /* offset of the last byte on the page */
1562         offset = cl_offset(obj, fio->ft_index + 1) - 1;
1563         LASSERT(cl_index(obj, offset) == fio->ft_index);
1564         result = vvp_prep_size(env, obj, io, 0, offset + 1, NULL);
1565         if (result != 0)
1566                 RETURN(result);
1567
1568         /* must return locked page */
1569         if (fio->ft_mkwrite) {
1570                 LASSERT(cfio->ft_vmpage != NULL);
1571                 lock_page(cfio->ft_vmpage);
1572         } else {
1573                 result = vvp_io_kernel_fault(env, cfio);
1574                 if (result != 0)
1575                         RETURN(result);
1576         }
1577
1578         vmpage = cfio->ft_vmpage;
1579         LASSERT(PageLocked(vmpage));
1580
1581         if (OBD_FAIL_CHECK(OBD_FAIL_LLITE_FAULT_TRUNC_RACE))
1582                 generic_error_remove_page(vmpage->mapping, vmpage);
1583
1584         size = i_size_read(inode);
1585         /* Though we already hold a cl_lock upon this page, it can
1586          * still be truncated locally. */
1587         if (unlikely((vmpage->mapping != inode->i_mapping) ||
1588                      (page_offset(vmpage) > size))) {
1589                 CDEBUG(D_PAGE, "llite: fault and truncate race happened!\n");
1590
1591         /* Return +1 to stop cl_io_loop(); ll_fault() will catch it
1592          * and retry. */
1593                 GOTO(out, result = +1);
1594         }
1595
1596         last_index = cl_index(obj, size - 1);
1597
1598         if (fio->ft_mkwrite) {
1599                 /*
1600                  * Capture the size while holding the lli_trunc_sem taken
1601                  * above; we want to complete the mkwrite action while
1602                  * holding this lock, and we must make sure that we are
1603                  * not past the end of the file.
1604                  */
1605                 if (last_index < fio->ft_index) {
1606                         CDEBUG(D_PAGE,
1607                                 "llite: mkwrite and truncate race happened: "
1608                                 "%p: 0x%lx 0x%lx\n",
1609                                 vmpage->mapping, fio->ft_index, last_index);
1610                         /*
1611                          * We need to return if we are past the end of
1612                          * the file. This will propagate up the call
1613                          * stack to ll_page_mkwrite, where we will return
1614                          * VM_FAULT_NOPAGE. Any non-negative value
1615                          * returned here will be silently converted to 0.
1616                          * If vmpage->mapping is NULL, the error code
1617                          * would be converted back to ENODATA in
1618                          * ll_page_mkwrite0. Thus we return -ENODATA to
1619                          * handle both cases.
1620                          */
1621                         GOTO(out, result = -ENODATA);
1622                 }
1623         }
1624
1625         page = cl_page_find(env, obj, fio->ft_index, vmpage, CPT_CACHEABLE);
1626         if (IS_ERR(page))
1627                 GOTO(out, result = PTR_ERR(page));
1628
1629         /* If the page is going to be written, we should add it into the
1630          * cache earlier. */
1631         if (fio->ft_mkwrite) {
1632                 wait_on_page_writeback(vmpage);
1633                 if (!PageDirty(vmpage)) {
1634                         struct cl_page_list *plist = &vio->u.fault.ft_queue;
1635                         int to = PAGE_SIZE;
1636
1637                         /* vvp_page_assume() calls wait_on_page_writeback(). */
1638                         cl_page_assume(env, io, page);
1639
1640                         cl_page_list_init(plist);
1641                         cl_page_list_add(plist, page, true);
1642
1643                         /* size fixup */
1644                         if (last_index == cl_page_index(page))
1645                                 to = ((size - 1) & ~PAGE_MASK) + 1;
1646
1647                         /* Do not set the Dirty bit here, so that if IO is
1648                          * started before the page is really made dirty, we
1649                          * still have a chance to detect it. */
1650                         result = cl_io_commit_async(env, io, plist, 0, to,
1651                                                     mkwrite_commit_callback);
1652                         /* Got the over-quota flag; try a sync write to
1653                          * check whether we are indeed out of quota. */
1654                         if (result == -EDQUOT) {
1655                                 cl_page_get(page);
1656                                 result = vvp_io_commit_sync(env, io,
1657                                                             plist, 0, to);
1658                                 if (result >= 0) {
1659                                         io->ci_noquota = 1;
1660                                         cl_page_own(env, io, page);
1661                                         cl_page_list_add(plist, page, true);
1662                                         lu_ref_add(&page->cp_reference,
1663                                                    "cl_io", io);
1664                                         result = cl_io_commit_async(env, io,
1665                                                 plist, 0, to,
1666                                                 mkwrite_commit_callback);
1667                                         io->ci_noquota = 0;
1668                                 } else {
1669                                         cl_page_put(env, page);
1670                                 }
1671                         }
1672
1673                         LASSERT(cl_page_is_owned(page, io));
1674                         cl_page_list_fini(env, plist);
1675
1676                         vmpage = NULL;
1677                         if (result < 0) {
1678                                 cl_page_discard(env, io, page);
1679                                 cl_page_disown(env, io, page);
1680
1681                                 cl_page_put(env, page);
1682
1683                                 /* we're in big trouble, what can we do now? */
1684                                 if (result == -EDQUOT)
1685                                         result = -ENOSPC;
1686                                 GOTO(out, result);
1687                         } else {
1688                                 cl_page_disown(env, io, page);
1689                         }
1690                 }
1691         }
1692
1693         /*
1694          * The ft_index is only used in the case of
1695          * a mkwrite action. We need to check that
1696          * our assertion holds, since we should
1697          * have caught this case above.
1698          */
1699         LASSERT(!fio->ft_mkwrite || fio->ft_index <= last_index);
1700         if (fio->ft_index == last_index)
1701                 /*
1702                  * Last page is mapped partially.
1703                  */
1704                 fio->ft_nob = size - cl_offset(obj, fio->ft_index);
1705         else
1706                 fio->ft_nob = cl_page_size(obj);
1707
1708         lu_ref_add(&page->cp_reference, "fault", io);
1709         fio->ft_page = page;
1710         EXIT;
1711
1712 out:
1713         /* return unlocked vmpage to avoid deadlocking */
1714         if (vmpage != NULL)
1715                 unlock_page(vmpage);
1716
1717         cfio->ft_flags &= ~VM_FAULT_LOCKED;
1718
1719         return result;
1720 }
1721
1722 static void vvp_io_fault_end(const struct lu_env *env,
1723                              const struct cl_io_slice *ios)
1724 {
1725         struct inode            *inode = vvp_object_inode(ios->cis_obj);
1726         struct ll_inode_info    *lli   = ll_i2info(inode);
1727
1728         CLOBINVRNT(env, ios->cis_io->ci_obj,
1729                    vvp_object_invariant(ios->cis_io->ci_obj));
1730         trunc_sem_up_read(&lli->lli_trunc_sem);
1731 }
1732
1733 static int vvp_io_fsync_start(const struct lu_env *env,
1734                               const struct cl_io_slice *ios)
1735 {
1736         /* We should set the TOWRITE bit on each dirty page in the radix
1737          * tree to verify that pages have been written, but this is
1738          * difficult because of races. */
1739         return 0;
1740 }
1741
1742 static int vvp_io_read_ahead(const struct lu_env *env,
1743                              const struct cl_io_slice *ios,
1744                              pgoff_t start, struct cl_read_ahead *ra)
1745 {
1746         int result = 0;
1747         ENTRY;
1748
1749         if (ios->cis_io->ci_type == CIT_READ ||
1750             ios->cis_io->ci_type == CIT_FAULT) {
1751                 struct vvp_io *vio = cl2vvp_io(env, ios);
1752
1753                 if (unlikely(vio->vui_fd->fd_flags & LL_FILE_GROUP_LOCKED)) {
1754                         ra->cra_end_idx = CL_PAGE_EOF;
1755                         result = +1; /* no need to call down */
1756                 }
1757         }
1758
1759         RETURN(result);
1760 }
1761
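/*
 * For SEEK_HOLE/SEEK_DATA take a client read lock from ls_start out to
 * EOF, so that the hole/data state of the range being scanned stays
 * stable while the resulting offset is computed.
 */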
1762 static int vvp_io_lseek_lock(const struct lu_env *env,
1763                              const struct cl_io_slice *ios)
1764 {
1765         struct cl_io *io = ios->cis_io;
1766         __u64 lock_start = io->u.ci_lseek.ls_start;
1767         __u64 lock_end = OBD_OBJECT_EOF;
1768         __u32 enqflags = CEF_MUST; /* always take client lock */
1769
1770         return vvp_io_one_lock(env, io, enqflags, CLM_READ,
1771                                lock_start, lock_end);
1772 }
1773
1774 static int vvp_io_lseek_start(const struct lu_env *env,
1775                               const struct cl_io_slice *ios)
1776 {
1777         struct cl_io *io = ios->cis_io;
1778         struct inode *inode = vvp_object_inode(io->ci_obj);
1779         __u64 start = io->u.ci_lseek.ls_start;
1780
1781         inode_lock(inode);
1782         inode_dio_wait(inode);
1783
1784         /* At the moment we hold the DLM lock, so just update the
1785          * inode to learn the file size.
1786          */
1787         ll_merge_attr(env, inode);
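        /*
         * lseek(2): SEEK_HOLE and SEEK_DATA return -ENXIO when the
         * requested offset is at or past the end of the file.
         */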
1788         if (start >= i_size_read(inode)) {
1789                 io->u.ci_lseek.ls_result = -ENXIO;
1790                 return -ENXIO;
1791         }
1792         return 0;
1793 }
1794
1795 static void vvp_io_lseek_end(const struct lu_env *env,
1796                              const struct cl_io_slice *ios)
1797 {
1798         struct cl_io *io = ios->cis_io;
1799         struct inode *inode = vvp_object_inode(io->ci_obj);
1800
1801         if (io->u.ci_lseek.ls_result > i_size_read(inode))
1802                 io->u.ci_lseek.ls_result = -ENXIO;
1803
1804         inode_unlock(inode);
1805 }
1806
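/*
 * cl_io operations for the VVP layer, indexed by IO type; slots left
 * unset are skipped by the generic cl_io state machine.
 */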
1807 static const struct cl_io_operations vvp_io_ops = {
1808         .op = {
1809                 [CIT_READ] = {
1810                         .cio_fini       = vvp_io_fini,
1811                         .cio_iter_init  = vvp_io_read_iter_init,
1812                         .cio_lock       = vvp_io_read_lock,
1813                         .cio_start      = vvp_io_read_start,
1814                         .cio_end        = vvp_io_rw_end,
1815                         .cio_advance    = vvp_io_advance,
1816                 },
1817                 [CIT_WRITE] = {
1818                         .cio_fini      = vvp_io_fini,
1819                         .cio_iter_init = vvp_io_write_iter_init,
1820                         .cio_iter_fini = vvp_io_write_iter_fini,
1821                         .cio_lock      = vvp_io_write_lock,
1822                         .cio_start     = vvp_io_write_start,
1823                         .cio_end       = vvp_io_rw_end,
1824                         .cio_advance   = vvp_io_advance,
1825                 },
1826                 [CIT_SETATTR] = {
1827                         .cio_fini       = vvp_io_setattr_fini,
1828                         .cio_iter_init  = vvp_io_setattr_iter_init,
1829                         .cio_lock       = vvp_io_setattr_lock,
1830                         .cio_start      = vvp_io_setattr_start,
1831                         .cio_end        = vvp_io_setattr_end
1832                 },
1833                 [CIT_FAULT] = {
1834                         .cio_fini      = vvp_io_fault_fini,
1835                         .cio_iter_init = vvp_io_fault_iter_init,
1836                         .cio_lock      = vvp_io_fault_lock,
1837                         .cio_start     = vvp_io_fault_start,
1838                         .cio_end       = vvp_io_fault_end,
1839                 },
1840                 [CIT_FSYNC] = {
1841                         .cio_start      = vvp_io_fsync_start,
1842                         .cio_fini       = vvp_io_fini
1843                 },
1844                 [CIT_GLIMPSE] = {
1845                         .cio_fini       = vvp_io_fini
1846                 },
1847                 [CIT_MISC] = {
1848                         .cio_fini       = vvp_io_fini
1849                 },
1850                 [CIT_LADVISE] = {
1851                         .cio_fini       = vvp_io_fini
1852                 },
1853                 [CIT_LSEEK] = {
1854                         .cio_fini      = vvp_io_fini,
1855                         .cio_lock      = vvp_io_lseek_lock,
1856                         .cio_start     = vvp_io_lseek_start,
1857                         .cio_end       = vvp_io_lseek_end,
1858                 },
1859         },
1860         .cio_read_ahead = vvp_io_read_ahead
1861 };
1862
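/**
 * Initialize the VVP slice of \a io against object \a obj: attach
 * vvp_io_ops, record the total byte count for read/write, stash the
 * jobid in the inode, and refresh the layout unless the caller asked to
 * ignore it.  A sketch of the usual call chain (assuming the common
 * llite entry points):
 *
 *     ll_file_io_generic()
 *       cl_io_rw_init()
 *         cl_io_init() -> coo_io_init() -> vvp_io_init()
 */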
1863 int vvp_io_init(const struct lu_env *env, struct cl_object *obj,
1864                 struct cl_io *io)
1865 {
1866         struct vvp_io      *vio   = vvp_env_io(env);
1867         struct inode       *inode = vvp_object_inode(obj);
1868         int                 result;
1869
1870         CLOBINVRNT(env, obj, vvp_object_invariant(obj));
1871         ENTRY;
1872
1873         CDEBUG(D_VFSTRACE, DFID" ignore/verify layout %d/%d, layout version %d, "
1874                "restore needed %d\n",
1875                PFID(lu_object_fid(&obj->co_lu)),
1876                io->ci_ignore_layout, io->ci_verify_layout,
1877                vio->vui_layout_gen, io->ci_restore_needed);
1878
1879         CL_IO_SLICE_CLEAN(vio, vui_cl);
1880         cl_io_slice_add(io, &vio->vui_cl, obj, &vvp_io_ops);
1881         vio->vui_ra_valid = false;
1882         result = 0;
1883         if (io->ci_type == CIT_READ || io->ci_type == CIT_WRITE) {
1884                 size_t count;
1885                 struct ll_inode_info *lli = ll_i2info(inode);
1886
1887                 count = io->u.ci_rw.crw_count;
1888                 /* "If nbyte is 0, read() will return 0 and have no other
1889                  *  results."  -- Single Unix Spec */
1890                 if (count == 0)
1891                         result = 1;
1892                 else
1893                         vio->vui_tot_count = count;
1894
1895                 /* For read/write, we store the jobid in the inode, and
1896                  * it will be fetched by osc when building the RPC.
1897                  *
1898                  * It is not accurate if the file is shared by different
1899                  * jobs.
1900                  */
1901                 lustre_get_jobid(lli->lli_jobid, sizeof(lli->lli_jobid));
1902         } else if (io->ci_type == CIT_SETATTR) {
1903                 if (!cl_io_is_trunc(io))
1904                         io->ci_lockreq = CILR_MANDATORY;
1905         }
1906
1907         /* Enqueue layout lock and get layout version. We need to do this
1908          * even for operations that require opening the file, such as read
1909          * and write, because the layout lock might not be granted in IT_OPEN. */
1910         if (result == 0 && !io->ci_ignore_layout) {
1911                 result = ll_layout_refresh(inode, &vio->vui_layout_gen);
1912                 if (result == -ENOENT)
1913                         /* If the inode on the MDS has been removed, but the
1914                          * objects on the OSTs haven't been destroyed (async
1915                          * unlink), layout fetch will return -ENOENT; ignore
1916                          * this error and continue with the dirty flush. LU-3230. */
1917                         result = 0;
1918                 if (result < 0)
1919                         CERROR("%s: refresh file layout " DFID " error %d.\n",
1920                                ll_i2sbi(inode)->ll_fsname,
1921                                PFID(lu_object_fid(&obj->co_lu)), result);
1922         }
1923
1924         io->ci_result = result < 0 ? result : 0;
1925         RETURN(result);
1926 }