LU-13182 llite: Avoid eternal retry loops with MAP_POPULATE
lustre/llite/llite_mmap.c
/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2016, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 */

#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/mm.h>

#define DEBUG_SUBSYSTEM S_LLITE

#include "llite_internal.h"
#include <lustre_compat.h>

static const struct vm_operations_struct ll_file_vm_ops;

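/*
 * Map a faulting address range in @vma onto the corresponding file extent,
 * expanded to page boundaries, for DLM lock matching.
 *
 * Worked example (4K pages, illustrative values): a fault at
 * vma->vm_start + 0x1234 with count = 1 in a vma with vm_pgoff = 2 gives
 * l_extent.start = 0x1000 + 0x2000 = 0x3000 and l_extent.end = 0x3fff.
 */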
void policy_from_vma(union ldlm_policy_data *policy, struct vm_area_struct *vma,
                     unsigned long addr, size_t count)
{
        policy->l_extent.start = ((addr - vma->vm_start) & PAGE_MASK) +
                                 (vma->vm_pgoff << PAGE_SHIFT);
        policy->l_extent.end = (policy->l_extent.start + count - 1) |
                               ~PAGE_MASK;
}

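/*
 * Return the first VMA within [addr, addr + count) that maps a Lustre file
 * MAP_SHARED, or NULL if there is none. The caller must already hold
 * mmap_sem.
 */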
struct vm_area_struct *our_vma(struct mm_struct *mm, unsigned long addr,
                               size_t count)
{
        struct vm_area_struct *vma, *ret = NULL;
        ENTRY;

        /* mmap_sem must have been held by caller. */
        LASSERT(!down_write_trylock(&mm->mmap_sem));

        for (vma = find_vma(mm, addr);
             vma != NULL && vma->vm_start < (addr + count); vma = vma->vm_next) {
                if (vma->vm_ops && vma->vm_ops == &ll_file_vm_ops &&
                    vma->vm_flags & VM_SHARED) {
                        ret = vma;
                        break;
                }
        }
        RETURN(ret);
}

/**
 * API-independent part of page fault initialization.
 * \param env - corresponding lu_env for processing
 * \param vma - virtual memory area in which the fault occurred
 * \param index - page index corresponding to the fault
 * \param ra_flags - vma readahead flags
 *
 * \return error codes from cl_io_init.
 */
static struct cl_io *
ll_fault_io_init(struct lu_env *env, struct vm_area_struct *vma,
                 pgoff_t index, unsigned long *ra_flags)
{
        struct file            *file = vma->vm_file;
        struct inode           *inode = file_inode(file);
        struct cl_io           *io;
        struct cl_fault_io     *fio;
        int                     rc;
        ENTRY;

        if (ll_file_nolock(file))
                RETURN(ERR_PTR(-EOPNOTSUPP));

restart:
        io = vvp_env_thread_io(env);
        io->ci_obj = ll_i2info(inode)->lli_clob;
        LASSERT(io->ci_obj != NULL);

        fio = &io->u.ci_fault;
        fio->ft_index      = index;
        fio->ft_executable = vma->vm_flags & VM_EXEC;

        /*
         * Disable VM_SEQ_READ and use VM_RAND_READ to make sure that
         * the kernel will not read other pages not covered by ldlm in
         * filemap_nopage. We do our readahead in ll_readpage.
         */
        if (ra_flags != NULL)
                *ra_flags = vma->vm_flags & (VM_RAND_READ|VM_SEQ_READ);
        vma->vm_flags &= ~VM_SEQ_READ;
        vma->vm_flags |= VM_RAND_READ;

        CDEBUG(D_MMAP, "vm_flags: %lx (%lu %d)\n", vma->vm_flags,
               fio->ft_index, fio->ft_executable);

        rc = cl_io_init(env, io, CIT_FAULT, io->ci_obj);
        if (rc == 0) {
                struct vvp_io *vio = vvp_env_io(env);
                struct ll_file_data *fd = file->private_data;

                LASSERT(vio->vui_cl.cis_io == io);

                /* mmap lock must be MANDATORY because it has to cache
                 * pages. */
                io->ci_lockreq = CILR_MANDATORY;
                vio->vui_fd = fd;
        } else {
                LASSERT(rc < 0);
                cl_io_fini(env, io);
                if (io->ci_need_restart)
                        goto restart;

                io = ERR_PTR(rc);
        }

        RETURN(io);
}

/* Shared code for the page_mkwrite method on RHEL5 and RHEL6 */
static int ll_page_mkwrite0(struct vm_area_struct *vma, struct page *vmpage,
                            bool *retry)
{
        struct lu_env           *env;
        struct cl_io            *io;
        struct vvp_io           *vio;
        int                      result;
        __u16                    refcheck;
        sigset_t old, new;
        struct inode             *inode = NULL;
        struct ll_inode_info     *lli;
        ENTRY;

        LASSERT(vmpage != NULL);
        env = cl_env_get(&refcheck);
        if (IS_ERR(env))
                RETURN(PTR_ERR(env));

        io = ll_fault_io_init(env, vma, vmpage->index, NULL);
        if (IS_ERR(io))
                GOTO(out, result = PTR_ERR(io));

        result = io->ci_result;
        if (result < 0)
                GOTO(out_io, result);

        io->u.ci_fault.ft_mkwrite = 1;
        io->u.ci_fault.ft_writable = 1;

        vio = vvp_env_io(env);
        vio->u.fault.ft_vma    = vma;
        vio->u.fault.ft_vmpage = vmpage;

        siginitsetinv(&new, sigmask(SIGKILL) | sigmask(SIGTERM));
        sigprocmask(SIG_BLOCK, &new, &old);

        inode = vvp_object_inode(io->ci_obj);
        lli = ll_i2info(inode);

        result = cl_io_loop(env, io);

        sigprocmask(SIG_SETMASK, &old, NULL);

        if (result == 0) {
                lock_page(vmpage);
                if (vmpage->mapping == NULL) {
                        unlock_page(vmpage);

                        /* page was truncated and lock was cancelled, return
                         * ENODATA so that VM_FAULT_NOPAGE will be returned
                         * to handle_mm_fault(). */
                        if (result == 0)
                                result = -ENODATA;
                } else if (!PageDirty(vmpage)) {
                        /* Race: the page has been cleaned by ptlrpcd after
                         * it was unlocked, so it has to be added into the
                         * dirty cache again; otherwise this soon-to-be-dirty
                         * page won't consume any grants and, even worse, if
                         * the page is being transferred it will break the
                         * RPC checksum.
                         */
                        unlock_page(vmpage);

                        CDEBUG(D_MMAP, "Race on page_mkwrite %p/%lu, page has "
                               "been written out, retry.\n",
                               vmpage, vmpage->index);

                        *retry = true;
                        result = -EAGAIN;
                }

                if (result == 0)
                        ll_file_set_flag(lli, LLIF_DATA_MODIFIED);
        }
        EXIT;

out_io:
        cl_io_fini(env, io);
out:
        cl_env_put(env, &refcheck);
        CDEBUG(D_MMAP, "%s mkwrite with %d\n", current->comm, result);
        LASSERT(ergo(result == 0, PageLocked(vmpage)));

        /* If the page has been unmapped, presumably due to lock reclaim for
         * concurrent usage, add some delay before retrying to prevent
         * entering a live-lock situation with competitors.
         */
        if (result == -ENODATA && inode != NULL) {
                CDEBUG(D_MMAP, "delaying new page-fault for inode %p to "
                               "prevent live-lock\n", inode);
                msleep(10);
        }

        return result;
}

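/* Translate a cl_io result into a vm_fault_t code for the VM. */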
static inline int to_fault_error(int result)
{
        switch (result) {
        case 0:
                result = VM_FAULT_LOCKED;
                break;
        case -ENOMEM:
                result = VM_FAULT_OOM;
                break;
        default:
                result = VM_FAULT_SIGBUS;
                break;
        }
        return result;
}

/**
 * Lustre implementation of a vm_operations_struct::fault() method, called by
 * the VM to serve page faults (both in kernel and user space).
 *
 * \param vma - virtual memory area related to the page fault
 * \param vmf - structure describing the fault type and address
 *
 * \return allocated and filled-in _locked_ page for the address
 * \retval VM_FAULT_ERROR on general error
 * \retval NOPAGE_OOM if there is no memory to allocate a new page
 */
static vm_fault_t ll_fault0(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct lu_env           *env;
        struct cl_io            *io;
        struct vvp_io           *vio = NULL;
        struct page             *vmpage;
        unsigned long            ra_flags;
        int                      result = 0;
        int                      fault_ret = 0;
        __u16                    refcheck;
        ENTRY;

        env = cl_env_get(&refcheck);
        if (IS_ERR(env))
                RETURN(PTR_ERR(env));

        if (ll_sbi_has_fast_read(ll_i2sbi(file_inode(vma->vm_file)))) {
                /* do fast fault */
                bool has_retry = vmf->flags & FAULT_FLAG_RETRY_NOWAIT;

                /* To avoid loops, instruct downstream to not drop mmap_sem */
                vmf->flags |= FAULT_FLAG_RETRY_NOWAIT;
                ll_cl_add(vma->vm_file, env, NULL, LCC_MMAP);
                fault_ret = ll_filemap_fault(vma, vmf);
                ll_cl_remove(vma->vm_file, env);
                if (!has_retry)
                        vmf->flags &= ~FAULT_FLAG_RETRY_NOWAIT;

                /* - If there is no error, then the page was found in cache
                 *   and is uptodate;
                 * - If VM_FAULT_RETRY is set, the page existed but failed to
                 *   lock. We will take the slow path to avoid loops;
                 * - Otherwise, do a normal fault under a DLM lock. */
                if (!(fault_ret & VM_FAULT_RETRY) &&
                    !(fault_ret & VM_FAULT_ERROR))
                        GOTO(out, result = 0);

                fault_ret = 0;
        }

        io = ll_fault_io_init(env, vma, vmf->pgoff, &ra_flags);
        if (IS_ERR(io))
                GOTO(out, result = PTR_ERR(io));

        result = io->ci_result;
        if (result == 0) {
                vio = vvp_env_io(env);
                vio->u.fault.ft_vma       = vma;
                vio->u.fault.ft_vmpage    = NULL;
                vio->u.fault.ft_vmf = vmf;
                vio->u.fault.ft_flags = 0;
                vio->u.fault.ft_flags_valid = 0;

                /* May call ll_readpage() */
                ll_cl_add(vma->vm_file, env, io, LCC_MMAP);

                result = cl_io_loop(env, io);

                ll_cl_remove(vma->vm_file, env);

                /* ft_flags are only valid if we reached
                 * the call to filemap_fault */
                if (vio->u.fault.ft_flags_valid)
                        fault_ret = vio->u.fault.ft_flags;

                vmpage = vio->u.fault.ft_vmpage;
                if (result != 0 && vmpage != NULL) {
                        put_page(vmpage);
                        vmf->page = NULL;
                }
        }
        cl_io_fini(env, io);

        vma->vm_flags |= ra_flags;

out:
        cl_env_put(env, &refcheck);
        if (result != 0 && !(fault_ret & VM_FAULT_RETRY))
                fault_ret |= to_fault_error(result);

        CDEBUG(D_MMAP, "%s fault %d/%d\n", current->comm, fault_ret, result);
        RETURN(fault_ret);
}

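/*
 * The vm_operations_struct::fault() entry point: block all signals except
 * SIGKILL and SIGTERM, call ll_fault0(), and retry when the returned page
 * was truncated from under us before we could lock it.
 */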
#ifdef HAVE_VM_OPS_USE_VM_FAULT_ONLY
static vm_fault_t ll_fault(struct vm_fault *vmf)
{
        struct vm_area_struct *vma = vmf->vma;
#else
static vm_fault_t ll_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
#endif
        int count = 0;
        bool printed = false;
        bool cached;
        vm_fault_t result;
        ktime_t kstart = ktime_get();
        sigset_t old, new;

        result = pcc_fault(vma, vmf, &cached);
        if (cached)
                goto out;

        /* make sure offset is not a negative number */
        if (vmf->pgoff > (MAX_LFS_FILESIZE >> PAGE_SHIFT))
                return VM_FAULT_SIGBUS;

        /* Only SIGKILL and SIGTERM are allowed for fault/nopage/mkwrite,
         * so that the fault can be killed by the admin but does not cause
         * a segfault from other signals.
         */
        siginitsetinv(&new, sigmask(SIGKILL) | sigmask(SIGTERM));
        sigprocmask(SIG_BLOCK, &new, &old);

restart:
        result = ll_fault0(vma, vmf);
        if (vmf->page &&
            !(result & (VM_FAULT_RETRY | VM_FAULT_ERROR | VM_FAULT_LOCKED))) {
                struct page *vmpage = vmf->page;

                /* check if this page has been truncated */
                lock_page(vmpage);
                if (unlikely(vmpage->mapping == NULL)) { /* unlucky */
                        unlock_page(vmpage);
                        put_page(vmpage);
                        vmf->page = NULL;

                        if (!printed && ++count > 16) {
                                CWARN("the page is under heavy contention, maybe your app(%s) needs revising :-)\n",
                                      current->comm);
                                printed = true;
                        }

                        goto restart;
                }

                result |= VM_FAULT_LOCKED;
        }
        sigprocmask(SIG_SETMASK, &old, NULL);

out:
        if (vmf->page && result == VM_FAULT_LOCKED) {
                ll_rw_stats_tally(ll_i2sbi(file_inode(vma->vm_file)),
                                  current->pid, vma->vm_file->private_data,
                                  cl_offset(NULL, vmf->page->index), PAGE_SIZE,
                                  READ);
                ll_stats_ops_tally(ll_i2sbi(file_inode(vma->vm_file)),
                                   LPROC_LL_FAULT,
                                   ktime_us_delta(ktime_get(), kstart));
        }

        return result;
}

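/*
 * The vm_operations_struct::page_mkwrite() entry point: make a faulted-in
 * page writable, retrying ll_page_mkwrite0() for as long as the page keeps
 * being cleaned from under us (see the -EAGAIN case there).
 */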
#ifdef HAVE_VM_OPS_USE_VM_FAULT_ONLY
static vm_fault_t ll_page_mkwrite(struct vm_fault *vmf)
{
        struct vm_area_struct *vma = vmf->vma;
#else
static vm_fault_t ll_page_mkwrite(struct vm_area_struct *vma,
                                  struct vm_fault *vmf)
{
#endif
        int count = 0;
        bool printed = false;
        bool retry;
        bool cached;
        ktime_t kstart = ktime_get();
        vm_fault_t result;

        result = pcc_page_mkwrite(vma, vmf, &cached);
        if (cached)
                goto out;

        file_update_time(vma->vm_file);
        do {
                retry = false;
                result = ll_page_mkwrite0(vma, vmf->page, &retry);

                if (!printed && ++count > 16) {
                        const struct dentry *de = file_dentry(vma->vm_file);

                        CWARN("app(%s): the page %lu of file "DFID" is under heavy contention\n",
                              current->comm, vmf->pgoff,
                              PFID(ll_inode2fid(de->d_inode)));
                        printed = true;
                }
        } while (retry);

        switch (result) {
        case 0:
                LASSERT(PageLocked(vmf->page));
                result = VM_FAULT_LOCKED;
                break;
        case -ENODATA:
        case -EFAULT:
                result = VM_FAULT_NOPAGE;
                break;
        case -ENOMEM:
                result = VM_FAULT_OOM;
                break;
        case -EAGAIN:
                result = VM_FAULT_RETRY;
                break;
        default:
                result = VM_FAULT_SIGBUS;
                break;
        }

out:
        if (result == VM_FAULT_LOCKED) {
                ll_rw_stats_tally(ll_i2sbi(file_inode(vma->vm_file)),
                                  current->pid, vma->vm_file->private_data,
                                  cl_offset(NULL, vmf->page->index), PAGE_SIZE,
                                  WRITE);
                ll_stats_ops_tally(ll_i2sbi(file_inode(vma->vm_file)),
                                   LPROC_LL_MKWRITE,
                                   ktime_us_delta(ktime_get(), kstart));
        }

        return result;
}

/**
 * To avoid cancelling the locks that cover a mmapped region under lock
 * cache pressure, we track the number of mapped vmas in
 * vvp_object::vob_mmap_cnt.
 */
static void ll_vm_open(struct vm_area_struct *vma)
{
        struct inode *inode    = file_inode(vma->vm_file);
        struct vvp_object *vob = cl_inode2vvp(inode);

        ENTRY;
        LASSERT(atomic_read(&vob->vob_mmap_cnt) >= 0);
        atomic_inc(&vob->vob_mmap_cnt);
        pcc_vm_open(vma);
        EXIT;
}

/**
 * Dual to ll_vm_open().
 */
static void ll_vm_close(struct vm_area_struct *vma)
{
        struct inode      *inode = file_inode(vma->vm_file);
        struct vvp_object *vob   = cl_inode2vvp(inode);

        ENTRY;
        atomic_dec(&vob->vob_mmap_cnt);
        LASSERT(atomic_read(&vob->vob_mmap_cnt) >= 0);
        pcc_vm_close(vma);
        EXIT;
}

/* XXX put nice comment here.  talk about __free_pte -> dirty pages and
 * nopage's reference passing to the pte */
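/*
 * Unmap the byte range [first, last] of @mapping from all user address
 * spaces, e.g. after the covering DLM lock has been cancelled. Returns 0
 * if the mapping was mapped, and -ENOENT otherwise.
 */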
int ll_teardown_mmaps(struct address_space *mapping, __u64 first, __u64 last)
{
        int rc = -ENOENT;
        ENTRY;

        LASSERTF(last > first, "last %llu first %llu\n", last, first);
        if (mapping_mapped(mapping)) {
                rc = 0;
                unmap_mapping_range(mapping, first + PAGE_SIZE - 1,
                                    last - first + 1, 0);
        }

        RETURN(rc);
}

static const struct vm_operations_struct ll_file_vm_ops = {
        .fault                  = ll_fault,
        .page_mkwrite           = ll_page_mkwrite,
        .open                   = ll_vm_open,
        .close                  = ll_vm_close,
};

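/*
 * Entry point for mmap(2) on a Lustre file. Illustrative userspace usage
 * (paths and sizes are assumptions for documentation, not part of this
 * file); faults on the resulting mapping are served by ll_fault() and
 * ll_page_mkwrite() above:
 *
 *      int fd = open("/mnt/lustre/file", O_RDWR);
 *      char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *                     MAP_SHARED | MAP_POPULATE, fd, 0);
 *      p[0] = 'x';     // first write triggers ll_page_mkwrite()
 */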
int ll_file_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct inode *inode = file_inode(file);
        ktime_t kstart = ktime_get();
        bool cached;
        int rc;

        ENTRY;

        if (ll_file_nolock(file))
                RETURN(-EOPNOTSUPP);

        rc = pcc_file_mmap(file, vma, &cached);
        if (cached && rc != 0)
                RETURN(rc);

        rc = generic_file_mmap(file, vma);
        if (rc == 0) {
                vma->vm_ops = &ll_file_vm_ops;
                vma->vm_ops->open(vma);
                /* update the inode's size and mtime */
                if (!cached)
                        rc = ll_glimpse_size(inode);
        }

        if (!rc)
                ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_MMAP,
                                   ktime_us_delta(ktime_get(), kstart));

        RETURN(rc);
}