lustre/llite/llite_mmap.c
/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2016, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 */

#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/mm.h>

#define DEBUG_SUBSYSTEM S_LLITE

#include "llite_internal.h"
#include <lustre_compat.h>

static const struct vm_operations_struct ll_file_vm_ops;

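/**
 * Build an LDLM extent policy covering the file range that backs the user
 * address range [addr, addr + count) of @vma.  The start offset is rounded
 * down and the end offset rounded up to page boundaries, since extent locks
 * are taken on whole pages.
 *
 * Illustrative example (hypothetical values, 4 KiB pages): with
 * vm_pgoff = 4, addr - vm_start = 0x1800 and count = 1, the policy covers
 * file bytes [0x5000, 0x5fff].
 */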
void policy_from_vma(union ldlm_policy_data *policy, struct vm_area_struct *vma,
                     unsigned long addr, size_t count)
{
        policy->l_extent.start = ((addr - vma->vm_start) & PAGE_MASK) +
                                 (vma->vm_pgoff << PAGE_SHIFT);
        policy->l_extent.end = (policy->l_extent.start + count - 1) |
                               ~PAGE_MASK;
}

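/**
 * Find the first VMA within [addr, addr + count) of @mm that is a shared
 * mapping of a Lustre file (vm_ops == &ll_file_vm_ops), or return NULL if
 * there is none.  mm->mmap_sem must be held by the caller.
 */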
struct vm_area_struct *our_vma(struct mm_struct *mm, unsigned long addr,
                               size_t count)
{
        struct vm_area_struct *vma, *ret = NULL;
        ENTRY;

        /* mmap_sem must be held by the caller. */
        LASSERT(!down_write_trylock(&mm->mmap_sem));

        for (vma = find_vma(mm, addr);
             vma != NULL && vma->vm_start < (addr + count); vma = vma->vm_next) {
                if (vma->vm_ops && vma->vm_ops == &ll_file_vm_ops &&
                    vma->vm_flags & VM_SHARED) {
                        ret = vma;
                        break;
                }
        }
        RETURN(ret);
}

/**
 * API-independent part of page fault initialization.
 * \param env - corresponding lu_env for processing
 * \param vma - virtual memory area in which the page fault occurred
 * \param index - page index corresponding to the fault
 *
 * \return error codes from cl_io_init.
 */
static struct cl_io *
ll_fault_io_init(struct lu_env *env, struct vm_area_struct *vma, pgoff_t index)
{
        struct file            *file = vma->vm_file;
        struct inode           *inode = file_inode(file);
        struct cl_io           *io;
        struct cl_fault_io     *fio;
        int                     rc;
        ENTRY;

        if (ll_file_nolock(file))
                RETURN(ERR_PTR(-EOPNOTSUPP));

restart:
        io = vvp_env_thread_io(env);
        io->ci_obj = ll_i2info(inode)->lli_clob;
        LASSERT(io->ci_obj != NULL);

        fio = &io->u.ci_fault;
        fio->ft_index = index;
        fio->ft_executable = vma->vm_flags & VM_EXEC;

        CDEBUG(D_MMAP,
               DFID": vma=%p start=%#lx end=%#lx vm_flags=%#lx idx=%lu\n",
               PFID(&ll_i2info(inode)->lli_fid), vma, vma->vm_start,
               vma->vm_end, vma->vm_flags, fio->ft_index);

        if (vma->vm_flags & VM_SEQ_READ)
                io->ci_seq_read = 1;
        else if (vma->vm_flags & VM_RAND_READ)
                io->ci_rand_read = 1;

        rc = cl_io_init(env, io, CIT_FAULT, io->ci_obj);
        if (rc == 0) {
                struct vvp_io *vio = vvp_env_io(env);
                struct ll_file_data *fd = file->private_data;

                LASSERT(vio->vui_cl.cis_io == io);

                /* mmap lock must be MANDATORY because it has to cache
                 * pages. */
                io->ci_lockreq = CILR_MANDATORY;
                vio->vui_fd = fd;
        } else {
                LASSERT(rc < 0);
                cl_io_fini(env, io);
                if (io->ci_need_restart)
                        goto restart;

                io = ERR_PTR(rc);
        }

        RETURN(io);
}

/**
 * Shared implementation of the page_mkwrite() method (historically common
 * to the RHEL5 and RHEL6 code paths).  Runs a CIT_FAULT io with ft_mkwrite
 * set so that @vmpage becomes writable under a DLM lock.  Returns 0 with
 * the page locked on success, -EAGAIN (with *retry set) when the page raced
 * with writeback and the caller should retry, or a negative errno.
 */
static int ll_page_mkwrite0(struct vm_area_struct *vma, struct page *vmpage,
                            bool *retry)
{
        struct lu_env           *env;
        struct cl_io            *io;
        struct vvp_io           *vio;
        int                      result;
        __u16                    refcheck;
        sigset_t old, new;
        struct inode             *inode = NULL;
        struct ll_inode_info     *lli;
        ENTRY;

        LASSERT(vmpage != NULL);
        env = cl_env_get(&refcheck);
        if (IS_ERR(env))
                RETURN(PTR_ERR(env));

        io = ll_fault_io_init(env, vma, vmpage->index);
        if (IS_ERR(io))
                GOTO(out, result = PTR_ERR(io));

        result = io->ci_result;
        if (result < 0)
                GOTO(out_io, result);

        io->u.ci_fault.ft_mkwrite = 1;
        io->u.ci_fault.ft_writable = 1;

        vio = vvp_env_io(env);
        vio->u.fault.ft_vma    = vma;
        vio->u.fault.ft_vmpage = vmpage;

        siginitsetinv(&new, sigmask(SIGKILL) | sigmask(SIGTERM));
        sigprocmask(SIG_BLOCK, &new, &old);

        inode = vvp_object_inode(io->ci_obj);
        lli = ll_i2info(inode);

        result = cl_io_loop(env, io);

        sigprocmask(SIG_SETMASK, &old, NULL);

        if (result == 0) {
                lock_page(vmpage);
                if (vmpage->mapping == NULL) {
                        unlock_page(vmpage);

                        /* page was truncated and the lock was cancelled;
                         * return ENODATA so that VM_FAULT_NOPAGE will be
                         * returned to handle_mm_fault(). */
                        if (result == 0)
                                result = -ENODATA;
                } else if (!PageDirty(vmpage)) {
                        /* Race: the page was cleaned by ptlrpcd after it was
                         * unlocked, so it has to be added into the dirty
                         * cache again.  Otherwise this soon-to-be-dirty page
                         * won't consume any grants, and worse, if the page
                         * is being transferred it will break the RPC
                         * checksum.
                         */
                        unlock_page(vmpage);

                        CDEBUG(D_MMAP, "Race on page_mkwrite %p/%lu, page has "
                               "been written out, retry.\n",
                               vmpage, vmpage->index);

                        *retry = true;
                        result = -EAGAIN;
                }

                if (result == 0)
                        ll_file_set_flag(lli, LLIF_DATA_MODIFIED);
        }
        EXIT;

out_io:
        cl_io_fini(env, io);
out:
        cl_env_put(env, &refcheck);
        CDEBUG(D_MMAP, "%s mkwrite with %d\n", current->comm, result);
        LASSERT(ergo(result == 0, PageLocked(vmpage)));

        /* if the page has been unmapped, presumably due to lock reclaim for
         * concurrent usage, add some delay before retrying to prevent
         * entering a live-lock situation with competitors
         */
        if (result == -ENODATA && inode != NULL) {
                CDEBUG(D_MMAP, "delaying new page-fault for inode %p to "
                               "prevent live-lock\n", inode);
                msleep(10);
        }

        return result;
}

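/**
 * Translate a cl_io result code into the VM_FAULT_* code expected by the
 * kernel fault path (0 becomes VM_FAULT_LOCKED).
 */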
static inline int to_fault_error(int result)
{
        switch (result) {
        case 0:
                result = VM_FAULT_LOCKED;
                break;
        case -ENOMEM:
                result = VM_FAULT_OOM;
                break;
        default:
                result = VM_FAULT_SIGBUS;
                break;
        }
        return result;
}

/**
 * Lustre implementation of a vm_operations_struct::fault() method, called
 * by the VM to serve a page fault (both in kernel and user space).
 *
 * \param vma - virtual memory area the fault occurred in
 * \param vmf - structure describing the type and address of the fault
 *
 * \return allocated and filled _locked_ page for the address
 * \retval VM_FAULT_ERROR on general error
 * \retval NOPAGE_OOM when there is no memory to allocate a new page
 */
static vm_fault_t ll_fault0(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct lu_env           *env;
        struct cl_io            *io;
        struct vvp_io           *vio = NULL;
        struct page             *vmpage;
        int                      result = 0;
        int                      fault_ret = 0;
        __u16                    refcheck;
        ENTRY;

        env = cl_env_get(&refcheck);
        if (IS_ERR(env))
                RETURN(PTR_ERR(env));

        if (ll_sbi_has_fast_read(ll_i2sbi(file_inode(vma->vm_file)))) {
                /* do fast fault */
                bool has_retry = vmf->flags & FAULT_FLAG_RETRY_NOWAIT;

                /* To avoid loops, instruct downstream to not drop mmap_sem */
                vmf->flags |= FAULT_FLAG_RETRY_NOWAIT;
                ll_cl_add(vma->vm_file, env, NULL, LCC_MMAP);
                fault_ret = ll_filemap_fault(vma, vmf);
                ll_cl_remove(vma->vm_file, env);
                if (!has_retry)
                        vmf->flags &= ~FAULT_FLAG_RETRY_NOWAIT;

                /* - If there is no error, the page was found in cache and is
                 *   uptodate;
                 * - If VM_FAULT_RETRY is set, the page existed but failed to
                 *   lock.  We will try the slow path to avoid loops.
                 * - Otherwise, try a normal fault under a DLM lock. */
                if (!(fault_ret & VM_FAULT_RETRY) &&
                    !(fault_ret & VM_FAULT_ERROR))
                        GOTO(out, result = 0);

                fault_ret = 0;
        }

        io = ll_fault_io_init(env, vma, vmf->pgoff);
        if (IS_ERR(io))
                GOTO(out, result = PTR_ERR(io));

        result = io->ci_result;
        if (result == 0) {
                vio = vvp_env_io(env);
                vio->u.fault.ft_vma       = vma;
                vio->u.fault.ft_vmpage    = NULL;
                vio->u.fault.ft_vmf = vmf;
                vio->u.fault.ft_flags = 0;
                vio->u.fault.ft_flags_valid = 0;

                /* May call ll_readpage() */
                ll_cl_add(vma->vm_file, env, io, LCC_MMAP);

                result = cl_io_loop(env, io);

                ll_cl_remove(vma->vm_file, env);

                /* ft_flags are only valid if we reached
                 * the call to filemap_fault */
                if (vio->u.fault.ft_flags_valid)
                        fault_ret = vio->u.fault.ft_flags;

                vmpage = vio->u.fault.ft_vmpage;
                if (result != 0 && vmpage != NULL) {
                        put_page(vmpage);
                        vmf->page = NULL;
                }
        }
        cl_io_fini(env, io);

out:
        cl_env_put(env, &refcheck);
        if (result != 0 && !(fault_ret & VM_FAULT_RETRY))
                fault_ret |= to_fault_error(result);

        CDEBUG(D_MMAP, "%s fault %d/%d\n", current->comm, fault_ret, result);
        RETURN(fault_ret);
}

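/**
 * Entry point for read faults.  Gives PCC a chance to serve the fault
 * first, then runs ll_fault0() with all signals except SIGKILL and SIGTERM
 * blocked, retrying if the page is truncated while the fault is in flight.
 */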
#ifdef HAVE_VM_OPS_USE_VM_FAULT_ONLY
static vm_fault_t ll_fault(struct vm_fault *vmf)
{
        struct vm_area_struct *vma = vmf->vma;
#else
static vm_fault_t ll_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
#endif
        int count = 0;
        bool printed = false;
        bool cached;
        vm_fault_t result;
        ktime_t kstart = ktime_get();
        sigset_t old, new;

        result = pcc_fault(vma, vmf, &cached);
        if (cached)
                goto out;

        CDEBUG(D_MMAP, DFID": vma=%p start=%#lx end=%#lx vm_flags=%#lx\n",
               PFID(&ll_i2info(file_inode(vma->vm_file))->lli_fid),
               vma, vma->vm_start, vma->vm_end, vma->vm_flags);

        /* Only SIGKILL and SIGTERM are allowed for fault/nopage/mkwrite
         * so that the process can be killed by the admin but does not
         * segfault on other signals.
         */
        siginitsetinv(&new, sigmask(SIGKILL) | sigmask(SIGTERM));
        sigprocmask(SIG_BLOCK, &new, &old);

        /* make sure the offset is not a negative number */
        if (vmf->pgoff > (MAX_LFS_FILESIZE >> PAGE_SHIFT)) {
                /* restore the signal mask blocked above before returning */
                sigprocmask(SIG_SETMASK, &old, NULL);
                return VM_FAULT_SIGBUS;
        }

restart:
        result = ll_fault0(vma, vmf);
        if (vmf->page &&
            !(result & (VM_FAULT_RETRY | VM_FAULT_ERROR | VM_FAULT_LOCKED))) {
                struct page *vmpage = vmf->page;

                /* check if this page has been truncated */
                lock_page(vmpage);
                if (unlikely(vmpage->mapping == NULL)) { /* unlucky */
                        unlock_page(vmpage);
                        put_page(vmpage);
                        vmf->page = NULL;

                        if (!printed && ++count > 16) {
                                CWARN("the page is under heavy contention, maybe your app(%s) needs revising :-)\n",
                                      current->comm);
                                printed = true;
                        }

                        goto restart;
                }

                result |= VM_FAULT_LOCKED;
        }
        sigprocmask(SIG_SETMASK, &old, NULL);

out:
        if (vmf->page && result == VM_FAULT_LOCKED) {
                ll_rw_stats_tally(ll_i2sbi(file_inode(vma->vm_file)),
                                  current->pid, vma->vm_file->private_data,
                                  cl_offset(NULL, vmf->page->index), PAGE_SIZE,
                                  READ);
                ll_stats_ops_tally(ll_i2sbi(file_inode(vma->vm_file)),
                                   LPROC_LL_FAULT,
                                   ktime_us_delta(ktime_get(), kstart));
        }

        return result;
}

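/**
 * Entry point for the page_mkwrite() method.  Gives PCC a chance first,
 * then loops on ll_page_mkwrite0() until it no longer requests a retry and
 * maps the final errno to a VM_FAULT_* code.
 */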
#ifdef HAVE_VM_OPS_USE_VM_FAULT_ONLY
static vm_fault_t ll_page_mkwrite(struct vm_fault *vmf)
{
        struct vm_area_struct *vma = vmf->vma;
#else
static vm_fault_t ll_page_mkwrite(struct vm_area_struct *vma,
                                  struct vm_fault *vmf)
{
#endif
        int count = 0;
        bool printed = false;
        bool retry;
        bool cached;
        ktime_t kstart = ktime_get();
        vm_fault_t result;

        result = pcc_page_mkwrite(vma, vmf, &cached);
        if (cached)
                goto out;

        file_update_time(vma->vm_file);
        do {
                retry = false;
                result = ll_page_mkwrite0(vma, vmf->page, &retry);

                if (!printed && ++count > 16) {
                        const struct dentry *de = file_dentry(vma->vm_file);

                        CWARN("app(%s): the page %lu of file "DFID" is under heavy contention\n",
                              current->comm, vmf->pgoff,
                              PFID(ll_inode2fid(de->d_inode)));
                        printed = true;
                }
        } while (retry);

        switch (result) {
        case 0:
                LASSERT(PageLocked(vmf->page));
                result = VM_FAULT_LOCKED;
                break;
        case -ENODATA:
        case -EFAULT:
                result = VM_FAULT_NOPAGE;
                break;
        case -ENOMEM:
                result = VM_FAULT_OOM;
                break;
        case -EAGAIN:
                result = VM_FAULT_RETRY;
                break;
        default:
                result = VM_FAULT_SIGBUS;
                break;
        }

out:
        if (result == VM_FAULT_LOCKED) {
                ll_rw_stats_tally(ll_i2sbi(file_inode(vma->vm_file)),
                                  current->pid, vma->vm_file->private_data,
                                  cl_offset(NULL, vmf->page->index), PAGE_SIZE,
                                  WRITE);
                ll_stats_ops_tally(ll_i2sbi(file_inode(vma->vm_file)),
                                   LPROC_LL_MKWRITE,
                                   ktime_us_delta(ktime_get(), kstart));
        }

        return result;
}

/**
 * To avoid cancelling the locks that cover a mmapped region under lock
 * cache pressure, we track the number of mapped VMAs in
 * vvp_object::vob_mmap_cnt.
 */
static void ll_vm_open(struct vm_area_struct *vma)
{
        struct inode *inode    = file_inode(vma->vm_file);
        struct vvp_object *vob = cl_inode2vvp(inode);

        ENTRY;
        LASSERT(atomic_read(&vob->vob_mmap_cnt) >= 0);
        atomic_inc(&vob->vob_mmap_cnt);
        pcc_vm_open(vma);
        EXIT;
}

/**
 * Dual to ll_vm_open().
 */
static void ll_vm_close(struct vm_area_struct *vma)
{
        struct inode      *inode = file_inode(vma->vm_file);
        struct vvp_object *vob   = cl_inode2vvp(inode);

        ENTRY;
        atomic_dec(&vob->vob_mmap_cnt);
        LASSERT(atomic_read(&vob->vob_mmap_cnt) >= 0);
        pcc_vm_close(vma);
        EXIT;
}

/**
 * Unmap the byte range [first, last] of @mapping from all user page tables
 * so that subsequent accesses fault again and re-acquire DLM locks.
 * Returns 0 if the mapping was mapped anywhere, -ENOENT otherwise.
 */
int ll_teardown_mmaps(struct address_space *mapping, __u64 first, __u64 last)
{
        int rc = -ENOENT;
        ENTRY;

        LASSERTF(last > first, "last %llu first %llu\n", last, first);
        if (mapping_mapped(mapping)) {
                rc = 0;
                unmap_mapping_range(mapping, first + PAGE_SIZE - 1,
                                    last - first + 1, 0);
        }

        RETURN(rc);
}

static const struct vm_operations_struct ll_file_vm_ops = {
        .fault                  = ll_fault,
        .page_mkwrite           = ll_page_mkwrite,
        .open                   = ll_vm_open,
        .close                  = ll_vm_close,
};

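/**
 * Implementation of file_operations::mmap().  Rejects files opened without
 * locking, gives PCC a chance to handle the mapping, then installs
 * ll_file_vm_ops via generic_file_mmap() and glimpses the file size so the
 * inode's size and mtime are current.
 */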
int ll_file_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct inode *inode = file_inode(file);
        ktime_t kstart = ktime_get();
        bool cached;
        int rc;

        ENTRY;
        CDEBUG(D_VFSTRACE | D_MMAP,
               "VFS_Op: fid="DFID" vma=%p start=%#lx end=%#lx vm_flags=%#lx\n",
               PFID(&ll_i2info(inode)->lli_fid),
               vma, vma->vm_start, vma->vm_end, vma->vm_flags);

        if (ll_file_nolock(file))
                RETURN(-EOPNOTSUPP);

        rc = pcc_file_mmap(file, vma, &cached);
        if (cached && rc != 0)
                RETURN(rc);

        rc = generic_file_mmap(file, vma);
        if (rc == 0) {
                vma->vm_ops = &ll_file_vm_ops;
                vma->vm_ops->open(vma);
                /* update the inode's size and mtime */
                if (!cached)
                        rc = ll_glimpse_size(inode);
        }

        if (!rc)
                ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_MMAP,
                                   ktime_us_delta(ktime_get(), kstart));

        RETURN(rc);
}
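
/*
 * Illustrative userspace sketch (not part of this file; path and sizes are
 * hypothetical): the handlers above are exercised by an ordinary shared
 * mapping, e.g.
 *
 *	int fd = open("/mnt/lustre/f", O_RDWR);
 *	char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *	               MAP_SHARED, fd, 0);	// -> ll_file_mmap()
 *	char c = p[0];				// read fault -> ll_fault()
 *	p[0] = c + 1;				// write to a clean page -> ll_page_mkwrite()
 *	munmap(p, 4096);			// -> ll_vm_close()
 */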