LU-14677 llite: move env contexts to ll_inode_info level
[fs/lustre-release.git] lustre/llite/llite_mmap.c
/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2016, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 */

#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/file.h>

#define DEBUG_SUBSYSTEM S_LLITE

#include "llite_internal.h"
#include <lustre_compat.h>

static const struct vm_operations_struct ll_file_vm_ops;

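/*
 * Convert a faulting range inside a vma into an LDLM extent policy in
 * file-offset terms: the start is the page-aligned file offset backing
 * 'addr' (vma offset plus vm_pgoff), and the end is rounded up to the
 * last byte of that range's final page.  For example, with vm_pgoff = 0,
 * addr = vma->vm_start and count = 1, the extent is [0, PAGE_SIZE - 1].
 */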
void policy_from_vma(union ldlm_policy_data *policy, struct vm_area_struct *vma,
                     unsigned long addr, size_t count)
{
        policy->l_extent.start = ((addr - vma->vm_start) & PAGE_MASK) +
                                 (vma->vm_pgoff << PAGE_SHIFT);
        policy->l_extent.end = (policy->l_extent.start + count - 1) |
                               ~PAGE_MASK;
}

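/*
 * Find the first vma in [addr, addr + count) that is a shared mapping of
 * a Lustre file, i.e. one that uses ll_file_vm_ops.  The caller must
 * already hold mmap_lock; the LASSERT below checks this by verifying
 * that the write lock cannot be taken.
 */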
struct vm_area_struct *our_vma(struct mm_struct *mm, unsigned long addr,
                               size_t count)
{
        struct vm_area_struct *vma, *ret = NULL;
        ENTRY;

        /* mmap_lock must have been held by caller. */
        LASSERT(!mmap_write_trylock(mm));

        for (vma = find_vma(mm, addr);
             vma != NULL && vma->vm_start < (addr + count);
             vma = vma->vm_next) {
                if (vma->vm_ops && vma->vm_ops == &ll_file_vm_ops &&
                    vma->vm_flags & VM_SHARED) {
                        ret = vma;
                        break;
                }
        }
        RETURN(ret);
}

/**
 * API-independent part of page fault initialization.
 * \param env - corresponding lu_env for processing
 * \param vma - virtual memory area the page fault is addressed to
 * \param index - page index corresponding to the fault
 * \param mkwrite - whether it is an mmap write
 *
 * \return error codes from cl_io_init.
 */
static struct cl_io *
ll_fault_io_init(struct lu_env *env, struct vm_area_struct *vma,
                pgoff_t index, bool mkwrite)
{
        struct file            *file = vma->vm_file;
        struct inode           *inode = file_inode(file);
        struct cl_io           *io;
        struct cl_fault_io     *fio;
        int                     rc;
        ENTRY;

        if (ll_file_nolock(file))
                RETURN(ERR_PTR(-EOPNOTSUPP));

restart:
        io = vvp_env_thread_io(env);
        io->ci_obj = ll_i2info(inode)->lli_clob;
        LASSERT(io->ci_obj != NULL);

        fio = &io->u.ci_fault;
        fio->ft_index = index;
        fio->ft_executable = vma->vm_flags & VM_EXEC;

        if (mkwrite) {
                fio->ft_mkwrite = 1;
                fio->ft_writable = 1;
        }

        CDEBUG(D_MMAP,
               DFID": vma=%p start=%#lx end=%#lx vm_flags=%#lx idx=%lu\n",
               PFID(&ll_i2info(inode)->lli_fid), vma, vma->vm_start,
               vma->vm_end, vma->vm_flags, fio->ft_index);

        if (vma->vm_flags & VM_SEQ_READ)
                io->ci_seq_read = 1;
        else if (vma->vm_flags & VM_RAND_READ)
                io->ci_rand_read = 1;

        rc = cl_io_init(env, io, CIT_FAULT, io->ci_obj);
        if (rc == 0) {
                struct vvp_io *vio = vvp_env_io(env);
                struct ll_file_data *fd = file->private_data;

                LASSERT(vio->vui_cl.cis_io == io);

                /* The mmap lock must be MANDATORY because the io
                 * has to cache pages. */
                io->ci_lockreq = CILR_MANDATORY;
                vio->vui_fd = fd;
        } else {
                cl_io_fini(env, io);
                if (io->ci_need_restart)
                        goto restart;

                io = ERR_PTR(rc);
        }

        RETURN(io);
}

/* page_mkwrite code shared between rhel5 and rhel6 */
static int ll_page_mkwrite0(struct vm_area_struct *vma, struct page *vmpage,
                            bool *retry)
{
        struct lu_env           *env;
        struct cl_io            *io;
        struct vvp_io           *vio;
        int                      result;
        __u16                    refcheck;
        sigset_t old, new;
        struct inode             *inode = NULL;
        struct ll_inode_info     *lli;
        ENTRY;

        LASSERT(vmpage != NULL);
        env = cl_env_get(&refcheck);
        if (IS_ERR(env))
                RETURN(PTR_ERR(env));

        io = ll_fault_io_init(env, vma, vmpage->index, true);
        if (IS_ERR(io))
                GOTO(out, result = PTR_ERR(io));

        result = io->ci_result;
        if (result < 0)
                GOTO(out_io, result);

        vio = vvp_env_io(env);
        vio->u.fault.ft_vma    = vma;
        vio->u.fault.ft_vmpage = vmpage;

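        /* Block everything except SIGKILL and SIGTERM around the io loop,
         * mirroring ll_fault(): the operation stays killable by an admin,
         * but transient signals must not turn into a segfault. */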
        siginitsetinv(&new, sigmask(SIGKILL) | sigmask(SIGTERM));
        sigprocmask(SIG_BLOCK, &new, &old);

        inode = vvp_object_inode(io->ci_obj);
        lli = ll_i2info(inode);

        result = cl_io_loop(env, io);

        sigprocmask(SIG_SETMASK, &old, NULL);

        if (result == 0) {
                lock_page(vmpage);
                if (vmpage->mapping == NULL) {
                        unlock_page(vmpage);

                        /* page was truncated and lock was cancelled, return
                         * ENODATA so that VM_FAULT_NOPAGE will be returned
                         * to handle_mm_fault(). */
                        if (result == 0)
                                result = -ENODATA;
                } else if (!PageDirty(vmpage)) {
                        /* Race: the page was cleaned by ptlrpcd after it was
                         * unlocked, so it has to be added to the dirty cache
                         * again; otherwise this soon-to-be-dirty page won't
                         * consume any grants, and worse, if the page is being
                         * transferred it will break the RPC checksum.
                         */
                        unlock_page(vmpage);

                        CDEBUG(D_MMAP, "Race on page_mkwrite %p/%lu, page has "
                               "been written out, retry.\n",
                               vmpage, vmpage->index);

                        *retry = true;
                        result = -EAGAIN;
                }

                if (result == 0)
                        set_bit(LLIF_DATA_MODIFIED, &lli->lli_flags);
        }
        EXIT;

out_io:
        cl_io_fini(env, io);
out:
        cl_env_put(env, &refcheck);
        CDEBUG(D_MMAP, "%s mkwrite with %d\n", current->comm, result);
        LASSERT(ergo(result == 0, PageLocked(vmpage)));

        /* if page has been unmapped, presumably due to lock reclaim for
         * concurrent usage, add some delay before retrying to prevent
         * entering live-lock situation with competitors
         */
        if (result == -ENODATA && inode != NULL) {
                CDEBUG(D_MMAP, "delaying new page-fault for inode %p to "
                               "prevent live-lock\n", inode);
                msleep(10);
        }

        return result;
}

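/*
 * Map a cl_io result code to the vm_fault_t value expected by the VM:
 * success means the page is returned locked, -ENOMEM becomes
 * VM_FAULT_OOM, and anything else becomes VM_FAULT_SIGBUS.
 */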
static inline int to_fault_error(int result)
{
        switch (result) {
        case 0:
                result = VM_FAULT_LOCKED;
                break;
        case -ENOMEM:
                result = VM_FAULT_OOM;
                break;
        default:
                result = VM_FAULT_SIGBUS;
                break;
        }
        return result;
}

/**
 * Lustre implementation of the vm_operations_struct::fault() method, called
 * by the VM to serve a page fault (in both kernel and user space).
 *
 * \param vma - virtual memory area the page fault belongs to
 * \param vmf - structure describing the fault type and address
 *
 * \return allocated and filled _locked_ page for the address
 * \retval VM_FAULT_ERROR on general error
 * \retval NOPAGE_OOM if there is no memory to allocate a new page
 */
static vm_fault_t ll_fault0(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct inode *inode = file_inode(vma->vm_file);
        struct lu_env           *env;
        struct cl_io            *io;
        struct vvp_io           *vio = NULL;
        struct page             *vmpage;
        int                      result = 0;
        int                      fault_ret = 0;
        __u16                    refcheck;
        ENTRY;

        env = cl_env_get(&refcheck);
        if (IS_ERR(env))
                RETURN(PTR_ERR(env));

        if (ll_sbi_has_fast_read(ll_i2sbi(inode))) {
                /* do fast fault */
                bool has_retry = vmf->flags & FAULT_FLAG_RETRY_NOWAIT;

                /* To avoid loops, instruct downstream to not drop mmap_lock */
                vmf->flags |= FAULT_FLAG_RETRY_NOWAIT;
                ll_cl_add(inode, env, NULL, LCC_MMAP);
                fault_ret = ll_filemap_fault(vma, vmf);
                ll_cl_remove(inode, env);
                if (!has_retry)
                        vmf->flags &= ~FAULT_FLAG_RETRY_NOWAIT;

                /* - If there is no error, the page was found in cache and
                 *   is uptodate;
                 * - If VM_FAULT_RETRY is set, the page existed but failed to
                 *   lock.  Try the slow path to avoid loops;
                 * - Otherwise, do a normal fault under a DLM lock. */
                if (!(fault_ret & VM_FAULT_RETRY) &&
                    !(fault_ret & VM_FAULT_ERROR))
                        GOTO(out, result = 0);

                fault_ret = 0;
        }

        io = ll_fault_io_init(env, vma, vmf->pgoff, false);
        if (IS_ERR(io))
                GOTO(out, result = PTR_ERR(io));

        result = io->ci_result;
        if (result == 0) {
                vio = vvp_env_io(env);
                vio->u.fault.ft_vma       = vma;
                vio->u.fault.ft_vmpage    = NULL;
                vio->u.fault.ft_vmf = vmf;
                vio->u.fault.ft_flags = 0;
                vio->u.fault.ft_flags_valid = 0;

                /* May call ll_readpage() */
                ll_cl_add(inode, env, io, LCC_MMAP);

                result = cl_io_loop(env, io);

                ll_cl_remove(inode, env);

                /* ft_flags are only valid if we reached
                 * the call to filemap_fault */
                if (vio->u.fault.ft_flags_valid)
                        fault_ret = vio->u.fault.ft_flags;

                vmpage = vio->u.fault.ft_vmpage;
                if (result != 0 && vmpage != NULL) {
                        put_page(vmpage);
                        vmf->page = NULL;
                }
        }
        cl_io_fini(env, io);

out:
        cl_env_put(env, &refcheck);
        if (result != 0 && !(fault_ret & VM_FAULT_RETRY))
                fault_ret |= to_fault_error(result);

        CDEBUG(D_MMAP, "%s fault %d/%d\n", current->comm, fault_ret, result);
        RETURN(fault_ret);
}

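/*
 * Entry point for read faults.  Wraps ll_fault0() with the pieces that do
 * not depend on the cl_io machinery: the PCC (Persistent Client Cache)
 * short circuit, blocking all signals except SIGKILL and SIGTERM,
 * retrying when the page was truncated while the fault was in flight,
 * and stats accounting.
 */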
#ifdef HAVE_VM_OPS_USE_VM_FAULT_ONLY
static vm_fault_t ll_fault(struct vm_fault *vmf)
{
        struct vm_area_struct *vma = vmf->vma;
#else
static vm_fault_t ll_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
#endif
        int count = 0;
        bool printed = false;
        bool cached;
        vm_fault_t result;
        ktime_t kstart = ktime_get();
        sigset_t old, new;

        result = pcc_fault(vma, vmf, &cached);
        if (cached)
                goto out;

        CDEBUG(D_MMAP, DFID": vma=%p start=%#lx end=%#lx vm_flags=%#lx\n",
               PFID(&ll_i2info(file_inode(vma->vm_file))->lli_fid),
               vma, vma->vm_start, vma->vm_end, vma->vm_flags);

        /* make sure offset is not a negative number; check this before
         * blocking signals so the early return does not leak the
         * modified signal mask */
        if (vmf->pgoff > (MAX_LFS_FILESIZE >> PAGE_SHIFT))
                return VM_FAULT_SIGBUS;

        /* Only SIGKILL and SIGTERM are allowed for fault/nopage/mkwrite,
         * so that the fault can be killed by an admin but does not
         * segfault on other signals.
         */
        siginitsetinv(&new, sigmask(SIGKILL) | sigmask(SIGTERM));
        sigprocmask(SIG_BLOCK, &new, &old);

restart:
        result = ll_fault0(vma, vmf);
        if (vmf->page &&
            !(result & (VM_FAULT_RETRY | VM_FAULT_ERROR | VM_FAULT_LOCKED))) {
                struct page *vmpage = vmf->page;

                /* check if this page has been truncated */
                lock_page(vmpage);
                if (unlikely(vmpage->mapping == NULL)) { /* unlucky */
                        unlock_page(vmpage);
                        put_page(vmpage);
                        vmf->page = NULL;

                        if (!printed && ++count > 16) {
                                CWARN("the page is under heavy contention, maybe your app(%s) needs revising :-)\n",
                                      current->comm);
                                printed = true;
                        }

                        goto restart;
                }

                result |= VM_FAULT_LOCKED;
        }
        sigprocmask(SIG_SETMASK, &old, NULL);

out:
        if (vmf->page && result == VM_FAULT_LOCKED) {
                ll_rw_stats_tally(ll_i2sbi(file_inode(vma->vm_file)),
                                  current->pid, vma->vm_file->private_data,
                                  cl_offset(NULL, vmf->page->index), PAGE_SIZE,
                                  READ);
                ll_stats_ops_tally(ll_i2sbi(file_inode(vma->vm_file)),
                                   LPROC_LL_FAULT,
                                   ktime_us_delta(ktime_get(), kstart));
        }

        return result;
}

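/*
 * Entry point for write faults on already-mapped pages.  Checks PCC first,
 * then loops on ll_page_mkwrite0() while it asks for a retry (e.g. when a
 * clean page raced with writeback), warning once if the page stays under
 * heavy contention, and finally maps the errno to a vm_fault_t code.
 */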
#ifdef HAVE_VM_OPS_USE_VM_FAULT_ONLY
static vm_fault_t ll_page_mkwrite(struct vm_fault *vmf)
{
        struct vm_area_struct *vma = vmf->vma;
#else
static vm_fault_t ll_page_mkwrite(struct vm_area_struct *vma,
                                  struct vm_fault *vmf)
{
#endif
        int count = 0;
        bool printed = false;
        bool retry;
        bool cached;
        ktime_t kstart = ktime_get();
        vm_fault_t result;

        result = pcc_page_mkwrite(vma, vmf, &cached);
        if (cached)
                goto out;

        file_update_time(vma->vm_file);
        do {
                retry = false;
                result = ll_page_mkwrite0(vma, vmf->page, &retry);

                if (!printed && ++count > 16) {
                        const struct dentry *de = file_dentry(vma->vm_file);

                        CWARN("app(%s): the page %lu of file "DFID" is under heavy contention\n",
                              current->comm, vmf->pgoff,
                              PFID(ll_inode2fid(de->d_inode)));
                        printed = true;
                }
        } while (retry);

        switch (result) {
        case 0:
                LASSERT(PageLocked(vmf->page));
                result = VM_FAULT_LOCKED;
                break;
        case -ENODATA:
        case -EFAULT:
                result = VM_FAULT_NOPAGE;
                break;
        case -ENOMEM:
                result = VM_FAULT_OOM;
                break;
        case -EAGAIN:
                result = VM_FAULT_RETRY;
                break;
        default:
                result = VM_FAULT_SIGBUS;
                break;
        }

out:
        if (result == VM_FAULT_LOCKED) {
                ll_rw_stats_tally(ll_i2sbi(file_inode(vma->vm_file)),
                                  current->pid, vma->vm_file->private_data,
                                  cl_offset(NULL, vmf->page->index), PAGE_SIZE,
                                  WRITE);
                ll_stats_ops_tally(ll_i2sbi(file_inode(vma->vm_file)),
                                   LPROC_LL_MKWRITE,
                                   ktime_us_delta(ktime_get(), kstart));
        }

        return result;
}

/**
 * To avoid cancelling the locks that cover a mmapped region under lock
 * cache pressure, we track the number of mapped vmas in
 * vvp_object::vob_mmap_cnt.
 */
static void ll_vm_open(struct vm_area_struct *vma)
{
        struct inode *inode    = file_inode(vma->vm_file);
        struct vvp_object *vob = cl_inode2vvp(inode);

        ENTRY;
        LASSERT(atomic_read(&vob->vob_mmap_cnt) >= 0);
        atomic_inc(&vob->vob_mmap_cnt);
        pcc_vm_open(vma);
        EXIT;
}

/**
 * Dual to ll_vm_open().
 */
static void ll_vm_close(struct vm_area_struct *vma)
{
        struct inode      *inode = file_inode(vma->vm_file);
        struct vvp_object *vob   = cl_inode2vvp(inode);

        ENTRY;
        atomic_dec(&vob->vob_mmap_cnt);
        LASSERT(atomic_read(&vob->vob_mmap_cnt) >= 0);
        pcc_vm_close(vma);
        EXIT;
}

static const struct vm_operations_struct ll_file_vm_ops = {
        .fault                  = ll_fault,
        .page_mkwrite           = ll_page_mkwrite,
        .open                   = ll_vm_open,
        .close                  = ll_vm_close,
};

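/*
 * The file_operations::mmap() method.  Rejects mappings on "nolock" files,
 * gives PCC a chance to handle the mapping, then falls back to
 * generic_file_mmap() and installs ll_file_vm_ops, calling ll_vm_open()
 * by hand since the VM only invokes ->open() when an existing vma is
 * duplicated, not on the initial mmap.
 */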
int ll_file_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct inode *inode = file_inode(file);
        ktime_t kstart = ktime_get();
        bool cached;
        int rc;

        ENTRY;
        CDEBUG(D_VFSTRACE | D_MMAP,
               "VFS_Op: fid="DFID" vma=%p start=%#lx end=%#lx vm_flags=%#lx\n",
               PFID(&ll_i2info(inode)->lli_fid),
               vma, vma->vm_start, vma->vm_end, vma->vm_flags);

        if (ll_file_nolock(file))
                RETURN(-EOPNOTSUPP);

        rc = pcc_file_mmap(file, vma, &cached);
        if (cached && rc != 0)
                RETURN(rc);

        rc = generic_file_mmap(file, vma);
        if (rc == 0) {
                vma->vm_ops = &ll_file_vm_ops;
                vma->vm_ops->open(vma);
                /* update the inode's size and mtime */
                if (!cached)
                        rc = ll_glimpse_size(inode);
        }

        if (!rc)
                ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_MMAP,
                                   ktime_us_delta(ktime_get(), kstart));

        RETURN(rc);
}