LU-13199 lustre: remove cl_{offset,index,page_size} helpers
lustre/llite/llite_mmap.c
/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2016, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 */

#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/file.h>

#define DEBUG_SUBSYSTEM S_LLITE

#include "llite_internal.h"
#include <lustre_compat.h>

static const struct vm_operations_struct ll_file_vm_ops;

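/*
 * Convert a faulting user address range inside @vma into the corresponding
 * byte extent of the backing file: the start is the page-aligned file offset
 * of @addr, and the end is rounded up to the last byte of the page covering
 * @addr + @count - 1, so the extent always spans whole pages.
 *
 * Illustrative example (assuming 4KB pages): for a mapping with
 * vm_pgoff == 2, addr == vm_start + 0x1234 and count == 0x100, the extent
 * becomes [0x3000, 0x3fff].
 */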
void policy_from_vma(union ldlm_policy_data *policy, struct vm_area_struct *vma,
                     unsigned long addr, size_t count)
{
        policy->l_extent.start = ((addr - vma->vm_start) & PAGE_MASK) +
                                 (vma->vm_pgoff << PAGE_SHIFT);
        policy->l_extent.end = (policy->l_extent.start + count - 1) |
                               ~PAGE_MASK;
}

/*
 * Linux commit v6.0-rc3-225-gf39af05949a4
 * mm: add VMA iterator
 */
#ifndef VMA_ITERATOR
#define vma_iterator vm_area_struct *
#define vma_iter_init(vmip, mm, addr) *(vmip) = find_vma(mm, addr)
#define for_each_vma(vmi, vma) \
        for (vma = vmi; vma != NULL; vma = vma->vm_next)
#endif

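/*
 * Walk the VMAs of @mm starting at @addr and return the first one that
 * overlaps [addr, addr + count), is backed by a Lustre file (its vm_ops is
 * ll_file_vm_ops) and is mapped MAP_SHARED; return NULL otherwise.  The
 * caller must already hold mmap_lock.
 */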
struct vm_area_struct *our_vma(struct mm_struct *mm, unsigned long addr,
                               size_t count)
{
        struct vm_area_struct *vma, *ret = NULL;
        struct vma_iterator vmi;

        ENTRY;

        /* mmap_lock must have been held by caller. */
        LASSERT(!mmap_write_trylock(mm));

        vma_iter_init(&vmi, mm, addr);
        for_each_vma(vmi, vma) {
                /* stop once we are past the requested range */
                if (vma->vm_start >= (addr + count))
                        break;
                if (vma->vm_ops && vma->vm_ops == &ll_file_vm_ops &&
                    vma->vm_flags & VM_SHARED) {
                        ret = vma;
                        break;
                }
        }
        RETURN(ret);
}

/**
 * API-independent part of page fault initialization.
 * \param env - corresponding lu_env for this processing
 * \param vma - virtual memory area the page fault is addressed to
 * \param index - page index corresponding to the fault
 * \param mkwrite - whether this is an mmap write
 *
 * \return error codes from cl_io_init.
 */
static struct cl_io *
ll_fault_io_init(struct lu_env *env, struct vm_area_struct *vma,
                pgoff_t index, bool mkwrite)
{
        struct file            *file = vma->vm_file;
        struct inode           *inode = file_inode(file);
        struct cl_io           *io;
        struct cl_fault_io     *fio;
        int                     rc;
        ENTRY;

        if (ll_file_nolock(file))
                RETURN(ERR_PTR(-EOPNOTSUPP));

restart:
        io = vvp_env_thread_io(env);
        io->ci_obj = ll_i2info(inode)->lli_clob;
        LASSERT(io->ci_obj != NULL);

        fio = &io->u.ci_fault;
        fio->ft_index = index;
        fio->ft_executable = vma->vm_flags & VM_EXEC;

        if (mkwrite) {
                fio->ft_mkwrite = 1;
                fio->ft_writable = 1;
        }

        CDEBUG(D_MMAP,
               DFID": vma=%p start=%#lx end=%#lx vm_flags=%#lx idx=%lu\n",
               PFID(&ll_i2info(inode)->lli_fid), vma, vma->vm_start,
               vma->vm_end, vma->vm_flags, fio->ft_index);

        if (vma->vm_flags & VM_SEQ_READ)
                io->ci_seq_read = 1;
        else if (vma->vm_flags & VM_RAND_READ)
                io->ci_rand_read = 1;

        rc = cl_io_init(env, io, CIT_FAULT, io->ci_obj);
        if (rc == 0) {
                struct vvp_io *vio = vvp_env_io(env);
                struct ll_file_data *fd = file->private_data;

                LASSERT(vio->vui_cl.cis_io == io);

                /* mmap lock must be MANDATORY because the IO needs to
                 * cache pages. */
                io->ci_lockreq = CILR_MANDATORY;
                vio->vui_fd = fd;
        } else {
                cl_io_fini(env, io);
                if (io->ci_need_restart)
                        goto restart;

                io = ERR_PTR(rc);
        }

        RETURN(io);
}

/* Shared page_mkwrite code (originally common to the RHEL5 and RHEL6 paths) */
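/*
 * Runs a CIT_FAULT cl_io with ft_mkwrite set to make @vmpage writable.
 * On success the page is returned locked and LLIF_DATA_MODIFIED is set on
 * the inode; -EAGAIN (with *retry set) asks the caller to try again after
 * the page was written out under it, and -ENODATA reports that the page
 * was truncated while the fault was in flight.
 */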
static int ll_page_mkwrite0(struct vm_area_struct *vma, struct page *vmpage,
                            bool *retry)
{
        struct lu_env           *env;
        struct cl_io            *io;
        struct vvp_io           *vio;
        int                      result;
        __u16                    refcheck;
        sigset_t old, new;
        struct inode             *inode = NULL;
        struct ll_inode_info     *lli;
        ENTRY;

        LASSERT(vmpage != NULL);
        env = cl_env_get(&refcheck);
        if (IS_ERR(env))
                RETURN(PTR_ERR(env));

        io = ll_fault_io_init(env, vma, vmpage->index, true);
        if (IS_ERR(io))
                GOTO(out, result = PTR_ERR(io));

        result = io->ci_result;
        if (result < 0)
                GOTO(out_io, result);

        vio = vvp_env_io(env);
        vio->u.fault.ft_vma    = vma;
        vio->u.fault.ft_vmpage = vmpage;

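        /* As in ll_fault(), allow only SIGKILL and SIGTERM to interrupt
         * the IO; everything else stays blocked until it completes. */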
        siginitsetinv(&new, sigmask(SIGKILL) | sigmask(SIGTERM));
        sigprocmask(SIG_BLOCK, &new, &old);

        inode = vvp_object_inode(io->ci_obj);
        lli = ll_i2info(inode);

        result = cl_io_loop(env, io);

        sigprocmask(SIG_SETMASK, &old, NULL);

        if (result == 0) {
                lock_page(vmpage);
                if (vmpage->mapping == NULL) {
                        unlock_page(vmpage);

                        /* page was truncated and lock was cancelled, return
                         * ENODATA so that VM_FAULT_NOPAGE will be returned
                         * to handle_mm_fault(). */
                        if (result == 0)
                                result = -ENODATA;
                } else if (!PageDirty(vmpage)) {
                        /* race, the page has been cleaned by ptlrpcd after
                         * it was unlocked, it has to be added into dirty
                         * cache again otherwise this soon-to-dirty page won't
                         * consume any grants, even worse if this page is being
                         * transferred because it will break RPC checksum.
                         */
                        unlock_page(vmpage);

                        CDEBUG(D_MMAP, "Race on page_mkwrite %p/%lu, page has "
                               "been written out, retry.\n",
                               vmpage, vmpage->index);

                        *retry = true;
                        result = -EAGAIN;
                }

                if (result == 0)
                        set_bit(LLIF_DATA_MODIFIED, &lli->lli_flags);
        }
        EXIT;

out_io:
        cl_io_fini(env, io);
out:
        cl_env_put(env, &refcheck);
        CDEBUG(D_MMAP, "%s mkwrite with %d\n", current->comm, result);
        LASSERT(ergo(result == 0, PageLocked(vmpage)));

        /* if page has been unmapped, presumably due to lock reclaim for
         * concurrent usage, add some delay before retrying to prevent
         * entering live-lock situation with competitors
         */
        if (result == -ENODATA && inode != NULL) {
                CDEBUG(D_MMAP, "delaying new page-fault for inode %p to "
                               "prevent live-lock\n", inode);
                msleep(10);
        }

        return result;
}

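/* Translate a cl_io/VFS error code into a VM_FAULT_* code. */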
static inline int to_fault_error(int result)
{
        switch (result) {
        case 0:
                result = VM_FAULT_LOCKED;
                break;
        case -ENOMEM:
                result = VM_FAULT_OOM;
                break;
        default:
                result = VM_FAULT_SIGBUS;
                break;
        }
        return result;
}

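/*
 * Wrapper around the kernel fault path (__ll_filemap_fault()) that retries
 * when a page was invalidated on this inode while the fault was running, so
 * a transient SIGBUS is not delivered to the application.
 */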
int ll_filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct inode *inode = file_inode(vma->vm_file);
        int ret;
        unsigned int seq;

        /* this seqlock lets us notice if a page has been deleted on this inode
         * during the fault process, allowing us to catch an erroneous SIGBUS
         * See LU-16160
         */
        do {
                seq = read_seqbegin(&ll_i2info(inode)->lli_page_inv_lock);
                ret = __ll_filemap_fault(vma, vmf);
        } while (read_seqretry(&ll_i2info(inode)->lli_page_inv_lock, seq) &&
                 (ret & VM_FAULT_SIGBUS));

        return ret;
}

/**
 * Lustre implementation of the vm_operations_struct::fault() method, called
 * by the VM to serve a page fault (both in kernel and in user space).
 *
 * \param vma - virtual memory area the page fault relates to
 * \param vmf - structure describing the type and address of the fault
 *
 * \return the allocated and filled-in _locked_ page for the address
 * \retval VM_FAULT_ERROR on general error
 * \retval NOPAGE_OOM if out of memory when allocating a new page
 */
static vm_fault_t ll_fault0(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct inode *inode = file_inode(vma->vm_file);
        struct lu_env           *env;
        struct cl_io            *io;
        struct vvp_io           *vio = NULL;
        struct page             *vmpage;
        int                      result = 0;
        int                      fault_ret = 0;
        __u16                    refcheck;
        ENTRY;

        env = cl_env_get(&refcheck);
        if (IS_ERR(env))
                RETURN(PTR_ERR(env));

        if (ll_sbi_has_fast_read(ll_i2sbi(inode))) {
                /* do fast fault */
                bool allow_retry = vmf->flags & FAULT_FLAG_ALLOW_RETRY;
                bool has_retry = vmf->flags & FAULT_FLAG_RETRY_NOWAIT;

                /* To avoid loops, instruct downstream to not drop mmap_sem.
                 * FAULT_FLAG_ALLOW_RETRY is only needed prior to Linux 5.1
                 * (6b4c9f4469819); from then on FAULT_FLAG_RETRY_NOWAIT is
                 * enough to not drop mmap_sem when failing to lock the page.
                 */
                vmf->flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_RETRY_NOWAIT;
                ll_cl_add(inode, env, NULL, LCC_MMAP);
                fault_ret = ll_filemap_fault(vma, vmf);
                ll_cl_remove(inode, env);
                if (!has_retry)
                        vmf->flags &= ~FAULT_FLAG_RETRY_NOWAIT;
                if (!allow_retry)
                        vmf->flags &= ~FAULT_FLAG_ALLOW_RETRY;

                /* - If there is no error, then the page was found in cache and
                 *   uptodate;
                 * - If VM_FAULT_RETRY is set, the page existed but failed to
                 *   lock. We will try slow path to avoid loops.
                 * - Otherwise, it should try normal fault under DLM lock. */
                if (!(fault_ret & VM_FAULT_RETRY) &&
                    !(fault_ret & VM_FAULT_ERROR))
                        GOTO(out, result = 0);

                fault_ret = 0;
        }

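        /* Fast path was skipped or could not serve the fault: run the full
         * CIT_FAULT cl_io, which takes the DLM lock covering this page. */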
        io = ll_fault_io_init(env, vma, vmf->pgoff, false);
        if (IS_ERR(io))
                GOTO(out, result = PTR_ERR(io));

        result = io->ci_result;
        if (result == 0) {
                vio = vvp_env_io(env);
                vio->u.fault.ft_vma       = vma;
                vio->u.fault.ft_vmpage    = NULL;
                vio->u.fault.ft_vmf = vmf;
                vio->u.fault.ft_flags = 0;
                vio->u.fault.ft_flags_valid = 0;

                /* May call ll_readpage() */
                ll_cl_add(inode, env, io, LCC_MMAP);

                result = cl_io_loop(env, io);

                ll_cl_remove(inode, env);

                /* ft_flags are only valid if we reached
                 * the call to filemap_fault */
                if (vio->u.fault.ft_flags_valid)
                        fault_ret = vio->u.fault.ft_flags;

                vmpage = vio->u.fault.ft_vmpage;
                if (result != 0 && vmpage != NULL) {
                        put_page(vmpage);
                        vmf->page = NULL;
                }
        }
        cl_io_fini(env, io);

out:
        cl_env_put(env, &refcheck);
        if (result != 0 && !(fault_ret & VM_FAULT_RETRY))
                fault_ret |= to_fault_error(result);

        CDEBUG(D_MMAP, "%s fault %d/%d\n", current->comm, fault_ret, result);
        RETURN(fault_ret);
}

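/*
 * Entry point for ->fault: depending on the kernel, it is passed either a
 * bare vm_fault (with the vma embedded) or a separate vma, hence the
 * HAVE_VM_OPS_USE_VM_FAULT_ONLY compat ifdef.  PCC gets the first chance to
 * serve the fault; otherwise ll_fault0() is retried until the page survives
 * truncate/invalidation races.
 */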
#ifdef HAVE_VM_OPS_USE_VM_FAULT_ONLY
static vm_fault_t ll_fault(struct vm_fault *vmf)
{
        struct vm_area_struct *vma = vmf->vma;
#else
static vm_fault_t ll_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
#endif
        int count = 0;
        bool printed = false;
        bool cached;
        vm_fault_t result;
        ktime_t kstart = ktime_get();
        sigset_t old, new;

        result = pcc_fault(vma, vmf, &cached);
        if (cached)
                goto out;

        CDEBUG(D_MMAP|D_IOTRACE,
               DFID": vma=%p start=%#lx end=%#lx vm_flags=%#lx idx=%lu\n",
               PFID(&ll_i2info(file_inode(vma->vm_file))->lli_fid),
               vma, vma->vm_start, vma->vm_end, vma->vm_flags, vmf->pgoff);

        /* make sure offset is not a negative number */
        if (vmf->pgoff > (MAX_LFS_FILESIZE >> PAGE_SHIFT))
                return VM_FAULT_SIGBUS;

        /* Only SIGKILL and SIGTERM are allowed for fault/nopage/mkwrite,
         * so that the operation can be killed by the admin but other
         * signals cannot cause a segfault.  Check the offset before
         * blocking signals so the early return does not leave the
         * modified signal mask in place.
         */
        siginitsetinv(&new, sigmask(SIGKILL) | sigmask(SIGTERM));
        sigprocmask(SIG_BLOCK, &new, &old);

restart:
        result = ll_fault0(vma, vmf);
        if (vmf->page &&
            !(result & (VM_FAULT_RETRY | VM_FAULT_ERROR | VM_FAULT_LOCKED))) {
                struct page *vmpage = vmf->page;

                /* lock the page, then check if this page has been truncated
                 * or deleted from Lustre and retry if so
                 */
                lock_page(vmpage);
                if (unlikely(vmpage->mapping == NULL) ||
                    vmpage->private == 0) { /* unlucky */
                        unlock_page(vmpage);
                        put_page(vmpage);
                        vmf->page = NULL;

                        if (!printed && ++count > 16) {
                                struct inode *inode = file_inode(vma->vm_file);

                                CWARN("%s: FID "DFID" under heavy mmap contention by '%s', consider revising IO pattern\n",
                                      ll_i2sbi(inode)->ll_fsname,
                                      PFID(&ll_i2info(inode)->lli_fid),
                                      current->comm);
                                printed = true;
                        }

                        goto restart;
                }

                result |= VM_FAULT_LOCKED;
        }
        sigprocmask(SIG_SETMASK, &old, NULL);

out:
        if (vmf->page && result == VM_FAULT_LOCKED) {
                ll_rw_stats_tally(ll_i2sbi(file_inode(vma->vm_file)),
                                  current->pid, vma->vm_file->private_data,
                                  vmf->page->index << PAGE_SHIFT, PAGE_SIZE,
                                  READ);
                ll_stats_ops_tally(ll_i2sbi(file_inode(vma->vm_file)),
                                   LPROC_LL_FAULT,
                                   ktime_us_delta(ktime_get(), kstart));
        }

        CDEBUG(D_IOTRACE,
               "COMPLETED: "DFID": vma=%p start=%#lx end=%#lx vm_flags=%#lx idx=%lu\n",
               PFID(&ll_i2info(file_inode(vma->vm_file))->lli_fid),
               vma, vma->vm_start, vma->vm_end, vma->vm_flags, vmf->pgoff);

        return result;
}

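/*
 * Entry point for ->page_mkwrite: let PCC try first, then retry
 * ll_page_mkwrite0() while it reports that the page was cleaned under us,
 * and finally map the outcome to a VM_FAULT_* code.
 */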
#ifdef HAVE_VM_OPS_USE_VM_FAULT_ONLY
static vm_fault_t ll_page_mkwrite(struct vm_fault *vmf)
{
        struct vm_area_struct *vma = vmf->vma;
#else
static vm_fault_t ll_page_mkwrite(struct vm_area_struct *vma,
                                  struct vm_fault *vmf)
{
#endif
        int count = 0;
        bool printed = false;
        bool retry;
        bool cached;
        ktime_t kstart = ktime_get();
        vm_fault_t result;

        CDEBUG(D_MMAP|D_IOTRACE,
               DFID": vma=%p start=%#lx end=%#lx vm_flags=%#lx idx=%lu\n",
               PFID(&ll_i2info(file_inode(vma->vm_file))->lli_fid),
               vma, vma->vm_start, vma->vm_end, vma->vm_flags,
               vmf->page->index);

        result = pcc_page_mkwrite(vma, vmf, &cached);
        if (cached)
                goto out;

        file_update_time(vma->vm_file);
        do {
                retry = false;
                result = ll_page_mkwrite0(vma, vmf->page, &retry);

                if (!printed && ++count > 16) {
                        const struct dentry *de = file_dentry(vma->vm_file);

                        CWARN("app(%s): the page %lu of file "DFID" is under heavy contention\n",
                              current->comm, vmf->pgoff,
                              PFID(ll_inode2fid(de->d_inode)));
                        printed = true;
                }
        } while (retry);

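        /* Translate the ll_page_mkwrite0() result: 0 means the page is
         * returned locked; -ENODATA/-EFAULT mean the page is gone and the
         * VM should re-fault (VM_FAULT_NOPAGE); -EAGAIN asks the VM to
         * retry the whole fault. */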
        switch (result) {
        case 0:
                LASSERT(PageLocked(vmf->page));
                result = VM_FAULT_LOCKED;
                break;
        case -ENODATA:
        case -EFAULT:
                result = VM_FAULT_NOPAGE;
                break;
        case -ENOMEM:
                result = VM_FAULT_OOM;
                break;
        case -EAGAIN:
                result = VM_FAULT_RETRY;
                break;
        default:
                result = VM_FAULT_SIGBUS;
                break;
        }

out:
        if (result == VM_FAULT_LOCKED) {
                ll_rw_stats_tally(ll_i2sbi(file_inode(vma->vm_file)),
                                  current->pid, vma->vm_file->private_data,
                                  vmf->page->index << PAGE_SHIFT, PAGE_SIZE,
                                  WRITE);
                ll_stats_ops_tally(ll_i2sbi(file_inode(vma->vm_file)),
                                   LPROC_LL_MKWRITE,
                                   ktime_us_delta(ktime_get(), kstart));
        }

        CDEBUG(D_IOTRACE,
               "COMPLETED: "DFID": vma=%p start=%#lx end=%#lx vm_flags=%#lx idx=%lu\n",
               PFID(&ll_i2info(file_inode(vma->vm_file))->lli_fid),
               vma, vma->vm_start, vma->vm_end, vma->vm_flags,
               vmf->page->index);
        return result;
}

/**
 * To avoid cancelling the locks covering the mmapped region under lock cache
 * pressure, we track the mapped vma count in vvp_object::vob_mmap_cnt.
 */
static void ll_vm_open(struct vm_area_struct *vma)
{
        struct inode *inode    = file_inode(vma->vm_file);
        struct vvp_object *vob = cl_inode2vvp(inode);

        ENTRY;
        LASSERT(atomic_read(&vob->vob_mmap_cnt) >= 0);
        atomic_inc(&vob->vob_mmap_cnt);
        pcc_vm_open(vma);
        EXIT;
}

/**
 * Dual to ll_vm_open().
 */
static void ll_vm_close(struct vm_area_struct *vma)
{
        struct inode      *inode = file_inode(vma->vm_file);
        struct vvp_object *vob   = cl_inode2vvp(inode);

        ENTRY;
        atomic_dec(&vob->vob_mmap_cnt);
        LASSERT(atomic_read(&vob->vob_mmap_cnt) >= 0);
        pcc_vm_close(vma);
        EXIT;
}

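/* VM operations installed on Lustre file mappings by ll_file_mmap(). */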
static const struct vm_operations_struct ll_file_vm_ops = {
        .fault                  = ll_fault,
        .page_mkwrite           = ll_page_mkwrite,
        .open                   = ll_vm_open,
        .close                  = ll_vm_close,
};

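/*
 * The ->mmap file operation: hand the mapping to PCC first, then fall back
 * to generic_file_mmap() and install ll_file_vm_ops.  ->open() is called
 * explicitly here because the VM only invokes it when an existing mapping
 * is duplicated or split, not for the initial mmap().
 */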
int ll_file_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct inode *inode = file_inode(file);
        ktime_t kstart = ktime_get();
        bool cached;
        int rc;

        ENTRY;
        CDEBUG(D_VFSTRACE | D_MMAP,
               "VFS_Op: fid="DFID" vma=%p start=%#lx end=%#lx vm_flags=%#lx\n",
               PFID(&ll_i2info(inode)->lli_fid),
               vma, vma->vm_start, vma->vm_end, vma->vm_flags);

        if (ll_file_nolock(file))
                RETURN(-EOPNOTSUPP);

        rc = pcc_file_mmap(file, vma, &cached);
        if (cached && rc != 0)
                RETURN(rc);

        rc = generic_file_mmap(file, vma);
        if (rc == 0) {
                vma->vm_ops = &ll_file_vm_ops;
                vma->vm_ops->open(vma);
                /* update the inode's size and mtime */
                if (!cached)
                        rc = ll_glimpse_size(inode);
        }

        if (!rc)
                ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_MMAP,
                                   ktime_us_delta(ktime_get(), kstart));

        RETURN(rc);
}