/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2016, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/llite/rw26.c
 *
 * Lustre Lite I/O page cache routines for the 2.5/2.6 kernel version
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/errno.h>
#include <linux/unistd.h>
#include <asm/uaccess.h>

#ifdef HAVE_MIGRATE_H
#include <linux/migrate.h>
#elif defined(HAVE_MIGRATE_MODE_H)
#include <linux/migrate_mode.h>
#endif
#include <linux/fs.h>
#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>

#define DEBUG_SUBSYSTEM S_LLITE

#include "llite_internal.h"
#include <lustre_compat.h>

/**
 * Implements the Linux VM address_space::invalidatepage() method. This method
 * is called when a page is truncated from a file, either as a result of an
 * explicit truncate, or when the inode is removed from memory (as a result of
 * a final iput(), umount, or memory-pressure-induced icache shrinking).
 *
 * [0, offset] bytes of the page remain valid (this is the case of a
 * non-page-aligned truncate). Lustre leaves the partially truncated page in
 * the cache, relying on struct inode::i_size to limit further accesses.
 */
static void ll_invalidatepage(struct page *vmpage,
#ifdef HAVE_INVALIDATE_RANGE
                                unsigned int offset, unsigned int length
#else
                                unsigned long offset
#endif
                             )
{
        struct inode     *inode;
        struct lu_env    *env;
        struct cl_page   *page;
        struct cl_object *obj;

        LASSERT(PageLocked(vmpage));
        LASSERT(!PageWriteback(vmpage));

        /*
         * It is safe not to check anything in invalidatepage/releasepage
         * below, because they run with the page locked and all our I/O
         * happens with the page locked too.
         */
#ifdef HAVE_INVALIDATE_RANGE
        if (offset == 0 && length == PAGE_SIZE) {
#else
        if (offset == 0) {
#endif
                /* See the comment in ll_releasepage() */
                env = cl_env_percpu_get();
                LASSERT(!IS_ERR(env));

                inode = vmpage->mapping->host;
                obj = ll_i2info(inode)->lli_clob;
                if (obj != NULL) {
                        page = cl_vmpage_page(vmpage, obj);
                        if (page != NULL) {
                                cl_page_delete(env, page);
                                cl_page_put(env, page);
                        }
                } else
                        LASSERT(vmpage->private == 0);

                cl_env_percpu_put(env);
        }
}

#ifdef HAVE_RELEASEPAGE_WITH_INT
#define RELEASEPAGE_ARG_TYPE int
#else
#define RELEASEPAGE_ARG_TYPE gfp_t
#endif
static int ll_releasepage(struct page *vmpage, RELEASEPAGE_ARG_TYPE gfp_mask)
{
        struct lu_env           *env;
        struct cl_object        *obj;
        struct cl_page          *page;
        struct address_space    *mapping;
        int result = 0;

        LASSERT(PageLocked(vmpage));
        if (PageWriteback(vmpage) || PageDirty(vmpage))
                return 0;

        mapping = vmpage->mapping;
        if (mapping == NULL)
                return 1;

        obj = ll_i2info(mapping->host)->lli_clob;
        if (obj == NULL)
                return 1;

        /* 1 for caller, 1 for cl_page and 1 for page cache */
        if (page_count(vmpage) > 3)
                return 0;

        page = cl_vmpage_page(vmpage, obj);
        if (page == NULL)
                return 1;

        env = cl_env_percpu_get();
        LASSERT(!IS_ERR(env));

        if (!cl_page_in_use(page)) {
                result = 1;
                cl_page_delete(env, page);
        }

        /* To use the percpu env array, the call path cannot be rescheduled;
         * otherwise the percpu array would be corrupted if ll_releasepage()
         * were called again on the same CPU.
         *
         * If this page holds the last reference on the cl_object, the
         * following call path may cause a reschedule:
         *   cl_page_put -> cl_page_free -> cl_object_put ->
         *     lu_object_put -> lu_object_free -> lov_delete_raid0.
         *
         * However, the kernel can't get rid of this inode until all pages
         * have been cleaned up. Since we hold the page lock here, it is safe
         * to assume we won't enter the object deletion path.
         */
        LASSERT(cl_object_refc(obj) > 1);
        cl_page_put(env, page);

        cl_env_percpu_put(env);
        return result;
}

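/* Upper bound on a single direct-I/O mapping request; this keeps the page
 * count arithmetic in ll_get_user_pages() below from overflowing. */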
#define MAX_DIRECTIO_SIZE (2 * 1024 * 1024 * 1024UL)

ssize_t ll_direct_rw_pages(const struct lu_env *env, struct cl_io *io,
                           int rw, struct inode *inode,
                           struct ll_dio_pages *pv)
{
        struct cl_page    *clp;
        struct cl_2queue  *queue;
        struct cl_object  *obj = io->ci_obj;
        int i;
        ssize_t rc = 0;
        loff_t file_offset  = pv->ldp_start_offset;
        size_t size         = pv->ldp_size;
        int page_count      = pv->ldp_nr;
        struct page **pages = pv->ldp_pages;
        size_t page_size    = cl_page_size(obj);
        bool do_io;
        int  io_pages       = 0;
        ENTRY;

        queue = &io->ci_queue;
        cl_2queue_init(queue);
        for (i = 0; i < page_count; i++) {
                if (pv->ldp_offsets)
                        file_offset = pv->ldp_offsets[i];

                LASSERT(!(file_offset & (page_size - 1)));
                clp = cl_page_find(env, obj, cl_index(obj, file_offset),
                                   pv->ldp_pages[i], CPT_TRANSIENT);
                if (IS_ERR(clp)) {
                        rc = PTR_ERR(clp);
                        break;
                }

                rc = cl_page_own(env, io, clp);
                if (rc) {
                        LASSERT(clp->cp_state == CPS_FREEING);
                        cl_page_put(env, clp);
                        break;
                }

                do_io = true;

                /* Check the page type: if the page is a cached host page,
                 * copy the data directly. */
                if (clp->cp_type == CPT_CACHEABLE) {
                        struct page *vmpage = cl_page_vmpage(clp);
                        struct page *src_page;
                        struct page *dst_page;
                        void       *src;
                        void       *dst;

                        src_page = (rw == WRITE) ? pages[i] : vmpage;
                        dst_page = (rw == WRITE) ? vmpage : pages[i];

                        src = ll_kmap_atomic(src_page, KM_USER0);
                        dst = ll_kmap_atomic(dst_page, KM_USER1);
                        memcpy(dst, src, min(page_size, size));
                        ll_kunmap_atomic(dst, KM_USER1);
                        ll_kunmap_atomic(src, KM_USER0);

                        /* make sure the page will be added to the transfer by
                         * cl_io_submit()->...->vvp_page_prep_write(). */
                        if (rw == WRITE)
                                set_page_dirty(vmpage);

                        if (rw == READ) {
                                /* Do not issue the page for read, since it
                                 * may re-read a readahead page whose uptodate
                                 * bit is NOT set. */
                                cl_page_disown(env, io, clp);
                                do_io = false;
                        }
                }

                if (likely(do_io)) {
                        cl_2queue_add(queue, clp);

                        /*
                         * Set page clip to tell the transfer formation engine
                         * that the page has to be sent even if it is beyond
                         * KMS.
                         */
                        cl_page_clip(env, clp, 0, min(size, page_size));

                        ++io_pages;
                }

                /* drop the reference count taken by cl_page_find */
                cl_page_put(env, clp);
                size -= page_size;
                file_offset += page_size;
        }

        if (rc == 0 && io_pages) {
                rc = cl_io_submit_sync(env, io,
                                       rw == READ ? CRT_READ : CRT_WRITE,
                                       queue, 0);
        }
        if (rc == 0)
                rc = pv->ldp_size;

        cl_2queue_discard(env, io, queue);
        cl_2queue_disown(env, io, queue);
        cl_2queue_fini(env, queue);
        RETURN(rc);
}
EXPORT_SYMBOL(ll_direct_rw_pages);

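/* Wrap one contiguous run of user pages in an ll_dio_pages descriptor and
 * hand it to ll_direct_rw_pages() for synchronous submission. */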
static ssize_t
ll_direct_IO_seg(const struct lu_env *env, struct cl_io *io, int rw,
                 struct inode *inode, size_t size, loff_t file_offset,
                 struct page **pages, int page_count)
{
        struct ll_dio_pages pvec = { .ldp_pages         = pages,
                                     .ldp_nr            = page_count,
                                     .ldp_size          = size,
                                     .ldp_offsets       = NULL,
                                     .ldp_start_offset  = file_offset
                                   };

        return ll_direct_rw_pages(env, io, rw, inode, &pvec);
}

/* ll_free_user_pages - tear down page struct array
 * @pages: array of page struct pointers underlying target buffer */
static void ll_free_user_pages(struct page **pages, int npages, int do_dirty)
{
        int i;

        for (i = 0; i < npages; i++) {
                if (pages[i] == NULL)
                        break;
                if (do_dirty)
                        set_page_dirty_lock(pages[i]);
                put_page(pages[i]);
        }

#if defined(HAVE_DIRECTIO_ITER) || defined(HAVE_IOV_ITER_RW)
        kvfree(pages);
#else
        OBD_FREE_LARGE(pages, npages * sizeof(*pages));
#endif
}

#ifdef KMALLOC_MAX_SIZE
#define MAX_MALLOC KMALLOC_MAX_SIZE
#else
#define MAX_MALLOC (128 * 1024)
#endif

/* This is the maximum size of a single O_DIRECT request, based on the
 * kmalloc limit.  We need to fit all of the brw_page structs, each one
 * representing PAGE_SIZE worth of user data, into a single buffer, and
 * then round this down to a multiple of the maximum RPC size.  For 4kB
 * PAGE_SIZE this is up to 22MB for a 128kB kmalloc and up to 682MB for a
 * 4MB kmalloc. */
#define MAX_DIO_SIZE ((MAX_MALLOC / sizeof(struct brw_page) * PAGE_SIZE) & \
                      ~(DT_MAX_BRW_SIZE - 1))
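/* Worked example (a sketch, assuming sizeof(struct brw_page) == 24 bytes
 * on a 64-bit build):
 *   128kB kmalloc: 131072 / 24 = 5461 pages * 4kB ~= 21.3MB ("up to 22MB")
 *   4MB kmalloc:  4194304 / 24 = 174762 pages * 4kB ~= 682MB
 * both before the final round-down to a DT_MAX_BRW_SIZE multiple. */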

#ifndef HAVE_IOV_ITER_RW
# define iov_iter_rw(iter)      rw
#endif

#if defined(HAVE_DIRECTIO_ITER) || defined(HAVE_IOV_ITER_RW)
static ssize_t
ll_direct_IO(
# ifndef HAVE_IOV_ITER_RW
             int rw,
# endif
             struct kiocb *iocb, struct iov_iter *iter,
             loff_t file_offset)
{
        struct ll_cl_context *lcc;
        const struct lu_env *env;
        struct cl_io *io;
        struct file *file = iocb->ki_filp;
        struct inode *inode = file->f_mapping->host;
        ssize_t count = iov_iter_count(iter);
        ssize_t tot_bytes = 0, result = 0;
        size_t size = MAX_DIO_SIZE;

        /* Check for EOF ourselves */
        if (iov_iter_rw(iter) == READ && file_offset >= i_size_read(inode))
                return 0;
        /* FIXME: io smaller than PAGE_SIZE is broken on ia64 ??? */
        if ((file_offset & ~PAGE_MASK) || (count & ~PAGE_MASK))
                return -EINVAL;

        CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p), size=%zd (max %lu), "
               "offset=%lld=%llx, pages %zd (max %lu)\n",
               PFID(ll_inode2fid(inode)), inode, count, MAX_DIO_SIZE,
               file_offset, file_offset, count >> PAGE_SHIFT,
               MAX_DIO_SIZE >> PAGE_SHIFT);

        /* Check that all user buffers are aligned as well */
        if (iov_iter_alignment(iter) & ~PAGE_MASK)
                return -EINVAL;

        lcc = ll_cl_find(file);
        if (lcc == NULL)
                RETURN(-EIO);

        env = lcc->lcc_env;
        LASSERT(!IS_ERR(env));
        io = lcc->lcc_io;
        LASSERT(io != NULL);

        /* 0. We need locking between buffered and direct access, and against
         *    races with size changes by concurrent truncates and writes.
         * 1. We need the inode mutex to operate on transient pages.
         */
        if (iov_iter_rw(iter) == READ)
                inode_lock(inode);

        while (iov_iter_count(iter)) {
                struct page **pages;
                size_t offs;

                count = min_t(size_t, iov_iter_count(iter), size);
                if (iov_iter_rw(iter) == READ) {
                        if (file_offset >= i_size_read(inode))
                                break;

                        if (file_offset + count > i_size_read(inode))
                                count = i_size_read(inode) - file_offset;
                }

                result = iov_iter_get_pages_alloc(iter, &pages, count, &offs);
                if (likely(result > 0)) {
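                        /* 'result' bytes of user memory, starting at offset
                         * 'offs' within the first page, span this many whole
                         * pages */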
                        int n = DIV_ROUND_UP(result + offs, PAGE_SIZE);

                        result = ll_direct_IO_seg(env, io, iov_iter_rw(iter),
                                                  inode, result, file_offset,
                                                  pages, n);
                        ll_free_user_pages(pages, n,
                                           iov_iter_rw(iter) == READ);
                }
                if (unlikely(result <= 0)) {
                        /* If we can't allocate a large enough buffer
                         * for the request, shrink it to a smaller
                         * PAGE_SIZE multiple and try again.
                         * We should always be able to kmalloc for a
                         * page's worth of page pointers = 4MB on i386. */
                        if (result == -ENOMEM &&
                            size > (PAGE_SIZE / sizeof(*pages)) *
                                    PAGE_SIZE) {
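                                /* halve the request size, rounding the
                                 * result up to a PAGE_SIZE multiple */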
                                size = ((((size / 2) - 1) |
                                        ~PAGE_MASK) + 1) & PAGE_MASK;
                                CDEBUG(D_VFSTRACE, "DIO size now %zu\n",
                                       size);
                                continue;
                        }

                        GOTO(out, result);
                }

                iov_iter_advance(iter, result);
                tot_bytes += result;
                file_offset += result;
        }
out:
        if (iov_iter_rw(iter) == READ)
                inode_unlock(inode);

        if (tot_bytes > 0) {
                struct vvp_io *vio = vvp_env_io(env);

                /* no commit async for direct IO */
                vio->u.write.vui_written += tot_bytes;
        }

        return tot_bytes ? : result;
}
#else /* !HAVE_DIRECTIO_ITER && !HAVE_IOV_ITER_RW */

static inline int ll_get_user_pages(int rw, unsigned long user_addr,
                                    size_t size, struct page ***pages,
                                    int *max_pages)
{
        int result = -ENOMEM;

        /* set an arbitrary limit to prevent arithmetic overflow */
        if (size > MAX_DIRECTIO_SIZE) {
                *pages = NULL;
                return -EFBIG;
        }

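        /* number of pages spanned by [user_addr, user_addr + size),
         * accounting for the sub-page offset of user_addr */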
        *max_pages = (user_addr + size + PAGE_SIZE - 1) >>
                      PAGE_SHIFT;
        *max_pages -= user_addr >> PAGE_SHIFT;

        OBD_ALLOC_LARGE(*pages, *max_pages * sizeof(**pages));
        if (*pages) {
                down_read(&current->mm->mmap_sem);
                result = get_user_pages(current, current->mm, user_addr,
                                        *max_pages, (rw == READ), 0, *pages,
                                        NULL);
                up_read(&current->mm->mmap_sem);
                if (unlikely(result <= 0))
                        OBD_FREE_LARGE(*pages, *max_pages * sizeof(**pages));
        }

        return result;
}

static ssize_t
ll_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
             loff_t file_offset, unsigned long nr_segs)
{
        struct ll_cl_context *lcc;
        const struct lu_env *env;
        struct cl_io *io;
        struct file *file = iocb->ki_filp;
        struct inode *inode = file->f_mapping->host;
        ssize_t count = iov_length(iov, nr_segs);
        ssize_t tot_bytes = 0, result = 0;
        unsigned long seg = 0;
        size_t size = MAX_DIO_SIZE;
        ENTRY;

        /* FIXME: io smaller than PAGE_SIZE is broken on ia64 ??? */
        if ((file_offset & ~PAGE_MASK) || (count & ~PAGE_MASK))
                RETURN(-EINVAL);

        CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p), size=%zd (max %lu), "
               "offset=%lld=%llx, pages %zd (max %lu)\n",
               PFID(ll_inode2fid(inode)), inode, count, MAX_DIO_SIZE,
               file_offset, file_offset, count >> PAGE_SHIFT,
               MAX_DIO_SIZE >> PAGE_SHIFT);

        /* Check that all user buffers are aligned as well */
        for (seg = 0; seg < nr_segs; seg++) {
                if (((unsigned long)iov[seg].iov_base & ~PAGE_MASK) ||
                    (iov[seg].iov_len & ~PAGE_MASK))
                        RETURN(-EINVAL);
        }

        lcc = ll_cl_find(file);
        if (lcc == NULL)
                RETURN(-EIO);

        env = lcc->lcc_env;
        LASSERT(!IS_ERR(env));
        io = lcc->lcc_io;
        LASSERT(io != NULL);

        for (seg = 0; seg < nr_segs; seg++) {
                size_t iov_left = iov[seg].iov_len;
                unsigned long user_addr = (unsigned long)iov[seg].iov_base;

                if (rw == READ) {
                        if (file_offset >= i_size_read(inode))
                                break;
                        if (file_offset + iov_left > i_size_read(inode))
                                iov_left = i_size_read(inode) - file_offset;
                }

                while (iov_left > 0) {
                        struct page **pages;
                        int page_count, max_pages = 0;
                        size_t bytes;

                        bytes = min(size, iov_left);
                        page_count = ll_get_user_pages(rw, user_addr, bytes,
                                                       &pages, &max_pages);
                        if (likely(page_count > 0)) {
                                if (unlikely(page_count < max_pages))
                                        bytes = page_count << PAGE_SHIFT;
                                result = ll_direct_IO_seg(env, io, rw, inode,
                                                          bytes, file_offset,
                                                          pages, page_count);
                                ll_free_user_pages(pages, max_pages,
                                                   rw == READ);
                        } else if (page_count == 0) {
                                GOTO(out, result = -EFAULT);
                        } else {
                                result = page_count;
                        }
                        if (unlikely(result <= 0)) {
                                /* If we can't allocate a large enough buffer
                                 * for the request, shrink it to a smaller
                                 * PAGE_SIZE multiple and try again.
                                 * We should always be able to kmalloc for a
                                 * page's worth of page pointers = 4MB on
                                 * i386. */
                                if (result == -ENOMEM &&
                                    size > (PAGE_SIZE / sizeof(*pages)) *
                                           PAGE_SIZE) {
                                        size = ((((size / 2) - 1) |
                                                 ~PAGE_MASK) + 1) &
                                                PAGE_MASK;
                                        CDEBUG(D_VFSTRACE, "DIO size now %zu\n",
                                               size);
                                        continue;
                                }

                                GOTO(out, result);
                        }

                        tot_bytes += result;
                        file_offset += result;
                        iov_left -= result;
                        user_addr += result;
                }
        }
out:
        if (tot_bytes > 0) {
                struct vvp_io *vio = vvp_env_io(env);

                /* no commit async for direct IO */
                vio->u.write.vui_written += tot_bytes;
        }

        RETURN(tot_bytes ? tot_bytes : result);
}
#endif /* HAVE_DIRECTIO_ITER || HAVE_IOV_ITER_RW */

/**
 * Prepare a partially written-to page for a write.
 */
static int ll_prepare_partial_page(const struct lu_env *env, struct cl_io *io,
                                   struct cl_page *pg)
{
        struct cl_attr *attr   = vvp_env_thread_attr(env);
        struct cl_object *obj  = io->ci_obj;
        struct vvp_page *vpg   = cl_object_page_slice(obj, pg);
        loff_t          offset = cl_offset(obj, vvp_index(vpg));
        int             result;

        cl_object_attr_lock(obj);
        result = cl_object_attr_get(env, obj, attr);
        cl_object_attr_unlock(obj);
        if (result == 0) {
                /*
                 * If we are writing to a new page, there is no need to read
                 * the old data.  The extent locking will have updated the
                 * KMS, and for our purposes here we can treat it like i_size.
                 */
                if (attr->cat_kms <= offset) {
                        char *kaddr = ll_kmap_atomic(vpg->vpg_page, KM_USER0);

                        memset(kaddr, 0, cl_page_size(obj));
                        ll_kunmap_atomic(kaddr, KM_USER0);
                } else if (vpg->vpg_defer_uptodate)
                        vpg->vpg_ra_used = 1;
                else
                        result = ll_page_sync_io(env, io, pg, CRT_READ);
        }
        return result;
}

static int ll_write_begin(struct file *file, struct address_space *mapping,
                          loff_t pos, unsigned len, unsigned flags,
                          struct page **pagep, void **fsdata)
{
        struct ll_cl_context *lcc;
        const struct lu_env  *env = NULL;
        struct cl_io   *io;
        struct cl_page *page = NULL;

        struct cl_object *clob = ll_i2info(mapping->host)->lli_clob;
        pgoff_t index = pos >> PAGE_SHIFT;
        struct page *vmpage = NULL;
        unsigned from = pos & (PAGE_SIZE - 1);
        unsigned to = from + len;
        int result = 0;
        ENTRY;

        CDEBUG(D_VFSTRACE, "Writing %lu of %d to %d bytes\n", index, from, len);

        lcc = ll_cl_find(file);
        if (lcc == NULL) {
                io = NULL;
                GOTO(out, result = -EIO);
        }

        env = lcc->lcc_env;
        io  = lcc->lcc_io;

        /* To avoid deadlock, try to lock the page first. */
        vmpage = grab_cache_page_nowait(mapping, index);

        if (unlikely(vmpage == NULL ||
                     PageDirty(vmpage) || PageWriteback(vmpage))) {
                struct vvp_io *vio = vvp_env_io(env);
                struct cl_page_list *plist = &vio->u.write.vui_queue;

                /* If the page is already in dirty cache, we have to commit
                 * the queued pages right now; otherwise, this may deadlock,
                 * because we would hold the page lock of a dirty page while
                 * requesting more grants. It is okay for the dirty page to
                 * be the first one in the commit page list, though. */
                if (vmpage != NULL && plist->pl_nr > 0) {
                        unlock_page(vmpage);
                        put_page(vmpage);
                        vmpage = NULL;
                }

                /* commit pages and then wait for page lock */
                result = vvp_io_write_commit(env, io);
                if (result < 0)
                        GOTO(out, result);

                if (vmpage == NULL) {
                        vmpage = grab_cache_page_write_begin(mapping, index,
                                                             flags);
                        if (vmpage == NULL)
                                GOTO(out, result = -ENOMEM);
                }
        }

        page = cl_page_find(env, clob, vmpage->index, vmpage, CPT_CACHEABLE);
        if (IS_ERR(page))
                GOTO(out, result = PTR_ERR(page));

        lcc->lcc_page = page;
        lu_ref_add(&page->cp_reference, "cl_io", io);

        cl_page_assume(env, io, page);
        if (!PageUptodate(vmpage)) {
                /*
                 * We're completely overwriting an existing page,
                 * so _don't_ set it up to date until commit_write
                 */
                if (from == 0 && to == PAGE_SIZE) {
                        CL_PAGE_HEADER(D_PAGE, env, page, "full page write\n");
                        POISON_PAGE(vmpage, 0x11);
                } else {
                        /* TODO: can be optimized at OSC layer to check if it
                         * is a lockless IO. In that case, it's not necessary
                         * to read the data. */
                        result = ll_prepare_partial_page(env, io, page);
                        if (result == 0)
                                SetPageUptodate(vmpage);
                }
        }
        if (result < 0)
                cl_page_unassume(env, io, page);
        EXIT;
out:
        if (result < 0) {
                if (vmpage != NULL) {
                        unlock_page(vmpage);
                        put_page(vmpage);
                }
                if (!IS_ERR_OR_NULL(page)) {
                        lu_ref_del(&page->cp_reference, "cl_io", io);
                        cl_page_put(env, page);
                }
                if (io)
                        io->ci_result = result;
        } else {
                *pagep = vmpage;
                *fsdata = lcc;
        }
        RETURN(result);
}

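/*
 * Queue the now written-to page for a later commit: pages accumulate on
 * vui_queue and are flushed by vvp_io_write_commit() once the queue holds
 * a full RPC, the page was already dirty, the file is O_SYNC/IS_SYNC, or
 * the page list would become discontiguous.
 */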
static int ll_write_end(struct file *file, struct address_space *mapping,
                        loff_t pos, unsigned len, unsigned copied,
                        struct page *vmpage, void *fsdata)
{
        struct ll_cl_context *lcc = fsdata;
        const struct lu_env *env;
        struct cl_io *io;
        struct vvp_io *vio;
        struct cl_page *page;
        unsigned from = pos & (PAGE_SIZE - 1);
        bool unplug = false;
        int result = 0;
        ENTRY;

        put_page(vmpage);

        LASSERT(lcc != NULL);
        env  = lcc->lcc_env;
        page = lcc->lcc_page;
        io   = lcc->lcc_io;
        vio  = vvp_env_io(env);

        LASSERT(cl_page_is_owned(page, io));
        if (copied > 0) {
                struct cl_page_list *plist = &vio->u.write.vui_queue;

                lcc->lcc_page = NULL; /* page will be queued */

                /* Add it to the write queue */
                cl_page_list_add(plist, page);
                if (plist->pl_nr == 1) /* first page */
                        vio->u.write.vui_from = from;
                else
                        LASSERT(from == 0);
                vio->u.write.vui_to = from + copied;

                /* Commit soon to avoid the deadlock in balance_dirty_pages(),
                 * where this dirty page may be written back by the same
                 * thread. */
                if (PageDirty(vmpage))
                        unplug = true;

                /* We may have one full RPC, commit it soon */
                if (plist->pl_nr >= PTLRPC_MAX_BRW_PAGES)
                        unplug = true;

                CL_PAGE_DEBUG(D_VFSTRACE, env, page,
                              "queued page: %d.\n", plist->pl_nr);
        } else {
                cl_page_disown(env, io, page);

                lcc->lcc_page = NULL;
                lu_ref_del(&page->cp_reference, "cl_io", io);
                cl_page_put(env, page);

                /* the page list is no longer contiguous, commit it now */
                unplug = true;
        }
        if (unplug ||
            file->f_flags & O_SYNC || IS_SYNC(file_inode(file)))
                result = vvp_io_write_commit(env, io);

        if (result < 0)
                io->ci_result = result;
        RETURN(result >= 0 ? copied : result);
}

#ifdef CONFIG_MIGRATION
static int ll_migratepage(struct address_space *mapping,
                          struct page *newpage, struct page *page
#ifdef HAVE_MIGRATEPAGE_4ARGS
                          , enum migrate_mode mode
#endif
        )
{
        /* Always fail page migration until we have a proper implementation */
        return -EIO;
}
#endif

const struct address_space_operations ll_aops = {
        .readpage       = ll_readpage,
        .direct_IO      = ll_direct_IO,
        .writepage      = ll_writepage,
        .writepages     = ll_writepages,
        .set_page_dirty = __set_page_dirty_nobuffers,
        .write_begin    = ll_write_begin,
        .write_end      = ll_write_end,
        .invalidatepage = ll_invalidatepage,
        .releasepage    = (void *)ll_releasepage,
#ifdef CONFIG_MIGRATION
        .migratepage    = ll_migratepage,
#endif
};