1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.gnu.org/licenses/gpl-2.0.html
19  *
20  * GPL HEADER END
21  */
22 /*
23  * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
24  * Use is subject to license terms.
25  *
26  * Copyright (c) 2011, 2017, Intel Corporation.
27  */
28 /*
29  * This file is part of Lustre, http://www.lustre.org/
30  *
31  * lustre/llite/rw.c
32  *
33  * Lustre Lite I/O page cache routines shared by different kernel revs
34  */
35
36 #include <linux/kernel.h>
37 #include <linux/mm.h>
38 #include <linux/string.h>
39 #include <linux/stat.h>
40 #include <linux/errno.h>
41 #include <linux/unistd.h>
42 #include <linux/writeback.h>
43 #include <asm/uaccess.h>
44
45 #include <linux/fs.h>
46 #include <linux/file.h>
47 #include <linux/stat.h>
48 #include <asm/uaccess.h>
49 #include <linux/mm.h>
50 #include <linux/pagemap.h>
51 /* current_is_kswapd() */
52 #include <linux/swap.h>
53 #include <linux/task_io_accounting_ops.h>
54
55 #define DEBUG_SUBSYSTEM S_LLITE
56
57 #include <obd_cksum.h>
58 #include "llite_internal.h"
59 #include <lustre_compat.h>
60
61 static void ll_ra_stats_inc_sbi(struct ll_sb_info *sbi, enum ra_stat which);
62
63 /**
64  * Get readahead pages from the filesystem readahead pool of the client for a
65  * thread.
66  *
67  * \param sbi superblock for filesystem readahead state ll_ra_info
68  * \param ria per-thread readahead state
69  * \param pages number of pages requested for readahead for the thread.
70  *
71  * WARNING: This algorithm is used to reduce contention on sbi->ll_lock.
72  * It should work well if ra_max_pages is much greater than the single
73  * file's read-ahead window, and there are not too many threads contending
74  * for these readahead pages.
75  *
76  * TODO: There may be a 'global sync problem' if many threads are trying
77  * to get an ra budget that is larger than the remaining readahead pages
78  * and reach here at exactly the same time. They will compute \a ret to
79  * consume the remaining pages, but will fail at atomic_add_return() and
80  * get a zero ra window, although there is still ra space remaining. - Jay */
81
82 static unsigned long ll_ra_count_get(struct ll_sb_info *sbi,
83                                      struct ra_io_arg *ria,
84                                      unsigned long pages,
85                                      unsigned long pages_min)
86 {
87         struct ll_ra_info *ra = &sbi->ll_ra_info;
88         long ret;
89
90         ENTRY;
91
92         WARN_ON_ONCE(pages_min > pages);
93         /*
94          * Don't try readahead aggressively if we are limited in
95          * LRU pages; otherwise, it could cause a deadlock.
96          */
97         pages = min(sbi->ll_cache->ccc_lru_max >> 2, pages);
98         /*
99          * If this happens, we reserve more pages than needed,
100          * which will make us leak @ra_cur_pages, because
101          * ll_ra_count_put() actually frees @pages.
102          */
103         if (unlikely(pages_min > pages))
104                 pages_min = pages;
105
106         /*
107          * If read-ahead pages left are less than 1M, do not do read-ahead,
108          * otherwise it will form small read RPCs (< 1M), which hurt server
109          * performance a lot.
110          */
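        /*
         * Illustrative example (assuming 4 KiB pages and 1 MiB RPCs, i.e.
         * PTLRPC_MAX_BRW_PAGES == 256): with ra_max_pages == 2048 and
         * ra_cur_pages == 1900 only 148 pages remain, which is less than
         * one full RPC, so a request for 256 pages (with pages_min == 0)
         * gets a zero read-ahead window here.
         */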
111         ret = min(ra->ra_max_pages - atomic_read(&ra->ra_cur_pages),
112                   pages);
113         if (ret < 0 || ret < min_t(long, PTLRPC_MAX_BRW_PAGES, pages))
114                 GOTO(out, ret = 0);
115
116         if (atomic_add_return(ret, &ra->ra_cur_pages) > ra->ra_max_pages) {
117                 atomic_sub(ret, &ra->ra_cur_pages);
118                 ret = 0;
119         }
120
121 out:
122         if (ret < pages_min) {
123                 /* override ra limit for maximum performance */
124                 atomic_add(pages_min - ret, &ra->ra_cur_pages);
125                 ret = pages_min;
126         }
127         RETURN(ret);
128 }
129
130 void ll_ra_count_put(struct ll_sb_info *sbi, unsigned long pages)
131 {
132         struct ll_ra_info *ra = &sbi->ll_ra_info;
133         atomic_sub(pages, &ra->ra_cur_pages);
134 }
135
136 static void ll_ra_stats_inc_sbi(struct ll_sb_info *sbi, enum ra_stat which)
137 {
138         LASSERTF(which < _NR_RA_STAT, "which: %u\n", which);
139         lprocfs_counter_incr(sbi->ll_ra_stats, which);
140 }
141
142 static inline bool ll_readahead_enabled(struct ll_sb_info *sbi)
143 {
144         return sbi->ll_ra_info.ra_max_pages_per_file > 0 &&
145                 sbi->ll_ra_info.ra_max_pages > 0;
146 }
147
148 void ll_ra_stats_inc(struct inode *inode, enum ra_stat which)
149 {
150         struct ll_sb_info *sbi = ll_i2sbi(inode);
151         ll_ra_stats_inc_sbi(sbi, which);
152 }
153
154 #define RAS_CDEBUG(ras) \
155         CDEBUG(D_READA,                                                      \
156                "lre %llu cr %lu cb %llu wsi %lu wp %lu nra %lu rpc %lu "     \
157                "r %lu csr %lu so %llu sb %llu sl %llu lr %lu\n",             \
158                ras->ras_last_read_end_bytes, ras->ras_consecutive_requests,  \
159                ras->ras_consecutive_bytes, ras->ras_window_start_idx,        \
160                ras->ras_window_pages, ras->ras_next_readahead_idx,           \
161                ras->ras_rpc_pages, ras->ras_requests,                        \
162                ras->ras_consecutive_stride_requests, ras->ras_stride_offset, \
163                ras->ras_stride_bytes, ras->ras_stride_length,                \
164                ras->ras_async_last_readpage_idx)
165
166 static bool pos_in_window(loff_t pos, loff_t point,
167                           unsigned long before, unsigned long after)
168 {
169         loff_t start = point - before;
170         loff_t end = point + after;
171
172         if (start > point)
173                 start = 0;
174         if (end < point)
175                 end = ~0;
176
177         return start <= pos && pos <= end;
178 }
179
180 enum ll_ra_page_hint {
181         MAYNEED = 0, /* this page possibly accessed soon */
182         WILLNEED /* this page is guaranteed to be needed */
183 };
184
185 /**
186  * Initiates read-ahead of a page with given index.
187  *
188  * \retval +ve: page was already uptodate so it will be skipped
189  *              from being added;
190  * \retval -ve: page wasn't added to \a queue due to an error;
191  * \retval   0: page was added into \a queue for read ahead.
192  */
193 static int ll_read_ahead_page(const struct lu_env *env, struct cl_io *io,
194                               struct cl_page_list *queue, pgoff_t index,
195                               enum ll_ra_page_hint hint)
196 {
197         struct cl_object *clob  = io->ci_obj;
198         struct inode     *inode = vvp_object_inode(clob);
199         struct page      *vmpage = NULL;
200         struct cl_page   *page;
201         struct vvp_page  *vpg;
202         enum ra_stat      which = _NR_RA_STAT; /* keep gcc happy */
203         int               rc    = 0;
204         const char       *msg   = NULL;
205
206         ENTRY;
207
208         switch (hint) {
209         case MAYNEED:
210                 vmpage = grab_cache_page_nowait(inode->i_mapping, index);
211                 if (vmpage == NULL) {
212                         which = RA_STAT_FAILED_GRAB_PAGE;
213                         msg   = "g_c_p_n failed";
214                         GOTO(out, rc = -EBUSY);
215                 }
216                 break;
217         case WILLNEED:
218                 vmpage = find_or_create_page(inode->i_mapping, index,
219                                              GFP_NOFS);
220                 if (vmpage == NULL)
221                         GOTO(out, rc = -ENOMEM);
222                 break;
223         default:
224                 /* should not come here */
225                 GOTO(out, rc = -EINVAL);
226         }
227
228         /* Check if vmpage was truncated or reclaimed */
229         if (vmpage->mapping != inode->i_mapping) {
230                 which = RA_STAT_WRONG_GRAB_PAGE;
231                 msg   = "g_c_p_n returned invalid page";
232                 GOTO(out, rc = -EBUSY);
233         }
234
235         page = cl_page_find(env, clob, vmpage->index, vmpage, CPT_CACHEABLE);
236         if (IS_ERR(page)) {
237                 which = RA_STAT_FAILED_GRAB_PAGE;
238                 msg   = "cl_page_find failed";
239                 GOTO(out, rc = PTR_ERR(page));
240         }
241
242         lu_ref_add(&page->cp_reference, "ra", current);
243         cl_page_assume(env, io, page);
244         vpg = cl2vvp_page(cl_object_page_slice(clob, page));
245         if (!vpg->vpg_defer_uptodate && !PageUptodate(vmpage)) {
246                 if (hint == MAYNEED) {
247                         vpg->vpg_defer_uptodate = 1;
248                         vpg->vpg_ra_used = 0;
249                 }
250                 cl_page_list_add(queue, page, true);
251         } else {
252                 /* skip completed pages */
253                 cl_page_unassume(env, io, page);
254                 /* This page is already uptodate, returning a positive number
255                  * to tell the callers about this */
256                 rc = 1;
257         }
258
259         lu_ref_del(&page->cp_reference, "ra", current);
260         cl_page_put(env, page);
261
262 out:
263         if (vmpage != NULL) {
264                 if (rc != 0)
265                         unlock_page(vmpage);
266                 put_page(vmpage);
267         }
268         if (msg != NULL && hint == MAYNEED) {
269                 ll_ra_stats_inc(inode, which);
270                 CDEBUG(D_READA, "%s\n", msg);
271
272         }
273
274         RETURN(rc);
275 }
276
277 #define RIA_DEBUG(ria)                                                  \
278         CDEBUG(D_READA, "rs %lu re %lu ro %llu rl %llu rb %llu\n",      \
279                ria->ria_start_idx, ria->ria_end_idx, ria->ria_stoff,    \
280                ria->ria_length, ria->ria_bytes)
281
282 static inline int stride_io_mode(struct ll_readahead_state *ras)
283 {
284         return ras->ras_consecutive_stride_requests > 1;
285 }
286
287 /* The function calculates how many bytes will be read in
288  * [off, off + length], in such a stride IO area, where
289  * stride_offset = st_off, stride_length = st_len,
290  * stride_bytes = st_bytes
291  *
292  *   |------------------|*****|------------------|*****|------------|*****|....
293  * st_off
294  *   |--- st_bytes     ---|
295  *   |-----     st_len   -----|
296  *
297  *              How many bytes it should read in such pattern
298  *              |-------------------------------------------------------------|
299  *              off
300  *              |<------                  length                      ------->|
301  *
302  *          =   |<----->|  +  |-------------------------------------| +   |---|
303  *             start_left                 st_bytes * i                 end_left
304  */
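/*
 * For example, with st_off = 0, st_len = 1 MiB and st_bytes = 64 KiB,
 * a read of off = 0, length = 4 MiB covers four full stride periods and
 * therefore returns 4 * 64 KiB = 256 KiB of actual data bytes.
 */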
305 static loff_t stride_byte_count(loff_t st_off, loff_t st_len, loff_t st_bytes,
306                                 loff_t off, loff_t length)
307 {
308         u64 start = off > st_off ? off - st_off : 0;
309         u64 end = off + length > st_off ? off + length - st_off : 0;
310         u64 start_left;
311         u64 end_left;
312         u64 bytes_count;
313
314         if (st_len == 0 || length == 0 || end == 0)
315                 return length;
316
317         start = div64_u64_rem(start, st_len, &start_left);
318         if (start_left < st_bytes)
319                 start_left = st_bytes - start_left;
320         else
321                 start_left = 0;
322
323         end = div64_u64_rem(end, st_len, &end_left);
324         if (end_left > st_bytes)
325                 end_left = st_bytes;
326
327         CDEBUG(D_READA, "start %llu, end %llu start_left %llu end_left %llu\n",
328                start, end, start_left, end_left);
329
330         if (start == end)
331                 bytes_count = end_left - (st_bytes - start_left);
332         else
333                 bytes_count = start_left +
334                         st_bytes * (end - start - 1) + end_left;
335
336         CDEBUG(D_READA,
337                "st_off %llu, st_len %llu st_bytes %llu off %llu length %llu bytescount %llu\n",
338                st_off, st_len, st_bytes, off, length, bytes_count);
339
340         return bytes_count;
341 }
342
343 static unsigned long ria_page_count(struct ra_io_arg *ria)
344 {
345         loff_t length_bytes = ria->ria_end_idx >= ria->ria_start_idx ?
346                 (loff_t)(ria->ria_end_idx -
347                          ria->ria_start_idx + 1) << PAGE_SHIFT : 0;
348         loff_t bytes_count;
349
350         if (ria->ria_length > ria->ria_bytes && ria->ria_bytes &&
351             (ria->ria_length & ~PAGE_MASK || ria->ria_bytes & ~PAGE_MASK ||
352              ria->ria_stoff & ~PAGE_MASK)) {
353                 /* Over-estimate un-aligned page stride read */
354                 unsigned long pg_count = ((ria->ria_bytes +
355                                            PAGE_SIZE - 1) >> PAGE_SHIFT) + 1;
356                 pg_count *= length_bytes / ria->ria_length + 1;
357
358                 return pg_count;
359         }
360         bytes_count = stride_byte_count(ria->ria_stoff, ria->ria_length,
361                                         ria->ria_bytes,
362                                         (loff_t)ria->ria_start_idx<<PAGE_SHIFT,
363                                         length_bytes);
364         return (bytes_count + PAGE_SIZE - 1) >> PAGE_SHIFT;
365 }
366
367 static pgoff_t ras_align(struct ll_readahead_state *ras, pgoff_t index)
368 {
369         unsigned opt_size = min(ras->ras_window_pages, ras->ras_rpc_pages);
370
371         if (opt_size == 0)
372                 opt_size = 1;
373         return index - (index % opt_size);
374 }
375
376 /* Check whether the index is in the defined ra-window */
377 static bool ras_inside_ra_window(pgoff_t idx, struct ra_io_arg *ria)
378 {
379         loff_t pos = (loff_t)idx << PAGE_SHIFT;
380
381         /* If ria_length == ria_bytes, it means non-stride I/O mode;
382          * idx should always be inside the read-ahead window in this case.
383          * For stride I/O mode, just check whether the idx is inside
384          * the ria_bytes.
385          */
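        /*
         * As an illustration (assuming 4 KiB pages), with ria_stoff = 0,
         * ria_length = 1 MiB and ria_bytes = 64 KiB, page index 16
         * (offset 64 KiB) falls in the stride gap and is outside the
         * window, while index 256 (offset 1 MiB) starts the next data
         * chunk and is inside it.
         */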
386         if (ria->ria_length == 0 || ria->ria_length == ria->ria_bytes)
387                 return true;
388
389         if (pos >= ria->ria_stoff) {
390                 u64 offset;
391
392                 div64_u64_rem(pos - ria->ria_stoff, ria->ria_length, &offset);
393
394                 if (offset < ria->ria_bytes ||
395                     (ria->ria_length - offset) < PAGE_SIZE)
396                         return true;
397         } else if (pos + PAGE_SIZE > ria->ria_stoff) {
398                 return true;
399         }
400
401         return false;
402 }
403
404 static unsigned long
405 ll_read_ahead_pages(const struct lu_env *env, struct cl_io *io,
406                     struct cl_page_list *queue, struct ll_readahead_state *ras,
407                     struct ra_io_arg *ria, pgoff_t *ra_end, pgoff_t skip_index)
408 {
409         struct cl_read_ahead ra = { 0 };
410         /* busy page count is per stride */
411         int rc = 0, count = 0, busy_page_count = 0;
412         pgoff_t page_idx;
413
414         LASSERT(ria != NULL);
415         RIA_DEBUG(ria);
416
417         for (page_idx = ria->ria_start_idx;
418              page_idx <= ria->ria_end_idx && ria->ria_reserved > 0;
419              page_idx++) {
420                 if (skip_index && page_idx == skip_index)
421                         continue;
422                 if (ras_inside_ra_window(page_idx, ria)) {
423                         if (ra.cra_end_idx == 0 || ra.cra_end_idx < page_idx) {
424                                 pgoff_t end_idx;
425
426                                 /*
427                                  * Do not shrink ria_end_idx in any case until
428                                  * the minimum end of the current read is covered.
429                                  *
430                                  * Do not extend the read lock across stripes if
431                                  * lock contention is detected.
432                                  */
433                                 if (ra.cra_contention &&
434                                     page_idx > ria->ria_end_idx_min) {
435                                         ria->ria_end_idx = *ra_end;
436                                         break;
437                                 }
438
439                                 cl_read_ahead_release(env, &ra);
440
441                                 rc = cl_io_read_ahead(env, io, page_idx, &ra);
442                                 if (rc < 0)
443                                         break;
444
445                                  /*
446                                   * Only shrink ria_end_idx if the matched
447                                   * LDLM lock doesn't cover more.
448                                   */
449                                 if (page_idx > ra.cra_end_idx) {
450                                         ria->ria_end_idx = ra.cra_end_idx;
451                                         break;
452                                 }
453
454                                 CDEBUG(D_READA, "idx: %lu, ra: %lu, rpc: %lu\n",
455                                        page_idx, ra.cra_end_idx,
456                                        ra.cra_rpc_pages);
457                                 LASSERTF(ra.cra_end_idx >= page_idx,
458                                          "object: %p, indices %lu / %lu\n",
459                                          io->ci_obj, ra.cra_end_idx, page_idx);
460                                 /* update read ahead RPC size.
461                                  * NB: it's racy but doesn't matter */
462                                 if (ras->ras_rpc_pages != ra.cra_rpc_pages &&
463                                     ra.cra_rpc_pages > 0)
464                                         ras->ras_rpc_pages = ra.cra_rpc_pages;
465                                 if (!skip_index) {
466                                         /* trim it to align with optimal RPC size */
467                                         end_idx = ras_align(ras, ria->ria_end_idx + 1);
468                                         if (end_idx > 0 && !ria->ria_eof)
469                                                 ria->ria_end_idx = end_idx - 1;
470                                 }
471                                 if (ria->ria_end_idx < ria->ria_end_idx_min)
472                                         ria->ria_end_idx = ria->ria_end_idx_min;
473                         }
474                         if (page_idx > ria->ria_end_idx)
475                                 break;
476
477                         /* If the page is inside the read-ahead window */
478                         rc = ll_read_ahead_page(env, io, queue, page_idx,
479                                                 MAYNEED);
480                         if (rc < 0 && rc != -EBUSY)
481                                 break;
482                         if (rc == -EBUSY) {
483                                 busy_page_count++;
484                                 CDEBUG(D_READA,
485                                        "skip busy page: %lu\n", page_idx);
486                                 /* For page unaligned readahead the first and
487                                  * last pages of each region can be read by
488                                  * another reader on the same node, and so
489                                  * may be busy. So only stop for > 2 busy
490                                  * pages. */
491                                 if (busy_page_count > 2)
492                                         break;
493                         }
494
495                         *ra_end = page_idx;
496                         /* Only subtract from reserve & count the page if we
497                          * really did readahead on that page. */
498                         if (rc == 0) {
499                                 ria->ria_reserved--;
500                                 count++;
501                         }
502                 } else if (stride_io_mode(ras)) {
503                         /* If it is not in the read-ahead window, and we are in
504                          * stride I/O mode, then check whether it should skip
505                          * the stride gap.
506                          */
507                         loff_t pos = (loff_t)page_idx << PAGE_SHIFT;
508                         u64 offset;
509
510                         div64_u64_rem(pos - ria->ria_stoff, ria->ria_length,
511                                       &offset);
512                         if (offset >= ria->ria_bytes) {
513                                 pos += (ria->ria_length - offset);
514                                 if ((pos >> PAGE_SHIFT) >= page_idx + 1)
515                                         page_idx = (pos >> PAGE_SHIFT) - 1;
516                                 busy_page_count = 0;
517                                 CDEBUG(D_READA,
518                                        "Stride: jump %llu pages to %lu\n",
519                                        ria->ria_length - offset, page_idx);
520                                 continue;
521                         }
522                 }
523         }
524
525         cl_read_ahead_release(env, &ra);
526
527         return count;
528 }
529
530 static void ll_readahead_work_free(struct ll_readahead_work *work)
531 {
532         fput(work->lrw_file);
533         OBD_FREE_PTR(work);
534 }
535
536 static void ll_readahead_handle_work(struct work_struct *wq);
537 static void ll_readahead_work_add(struct inode *inode,
538                                   struct ll_readahead_work *work)
539 {
540         INIT_WORK(&work->lrw_readahead_work, ll_readahead_handle_work);
541         queue_work(ll_i2sbi(inode)->ll_ra_info.ll_readahead_wq,
542                    &work->lrw_readahead_work);
543 }
544
545 static int ll_readahead_file_kms(const struct lu_env *env,
546                                 struct cl_io *io, __u64 *kms)
547 {
548         struct cl_object *clob;
549         struct inode *inode;
550         struct cl_attr *attr = vvp_env_thread_attr(env);
551         int ret;
552
553         clob = io->ci_obj;
554         inode = vvp_object_inode(clob);
555
556         cl_object_attr_lock(clob);
557         ret = cl_object_attr_get(env, clob, attr);
558         cl_object_attr_unlock(clob);
559
560         if (ret != 0)
561                 RETURN(ret);
562
563         *kms = attr->cat_kms;
564         return 0;
565 }
566
567 static void ll_readahead_handle_work(struct work_struct *wq)
568 {
569         struct ll_readahead_work *work;
570         struct lu_env *env;
571         __u16 refcheck;
572         struct ra_io_arg *ria;
573         struct inode *inode;
574         struct ll_file_data *fd;
575         struct ll_readahead_state *ras;
576         struct cl_io *io;
577         struct cl_2queue *queue;
578         pgoff_t ra_end_idx = 0;
579         unsigned long pages, pages_min = 0;
580         struct file *file;
581         __u64 kms;
582         int rc;
583         pgoff_t eof_index;
584         struct ll_sb_info *sbi;
585
586         work = container_of(wq, struct ll_readahead_work,
587                             lrw_readahead_work);
588         fd = work->lrw_file->private_data;
589         ras = &fd->fd_ras;
590         file = work->lrw_file;
591         inode = file_inode(file);
592         sbi = ll_i2sbi(inode);
593
594         env = cl_env_alloc(&refcheck, LCT_NOREF);
595         if (IS_ERR(env))
596                 GOTO(out_free_work, rc = PTR_ERR(env));
597
598         io = vvp_env_thread_io(env);
599         ll_io_init(io, file, CIT_READ, NULL);
600
601         rc = ll_readahead_file_kms(env, io, &kms);
602         if (rc != 0)
603                 GOTO(out_put_env, rc);
604
605         if (kms == 0) {
606                 ll_ra_stats_inc(inode, RA_STAT_ZERO_LEN);
607                 GOTO(out_put_env, rc = 0);
608         }
609
610         ria = &ll_env_info(env)->lti_ria;
611         memset(ria, 0, sizeof(*ria));
612
613         ria->ria_start_idx = work->lrw_start_idx;
614         /* Truncate RA window to end of file */
615         eof_index = (pgoff_t)(kms - 1) >> PAGE_SHIFT;
616         if (eof_index <= work->lrw_end_idx) {
617                 work->lrw_end_idx = eof_index;
618                 ria->ria_eof = true;
619         }
620         if (work->lrw_end_idx <= work->lrw_start_idx)
621                 GOTO(out_put_env, rc = 0);
622
623         ria->ria_end_idx = work->lrw_end_idx;
624         pages = ria->ria_end_idx - ria->ria_start_idx + 1;
625         ria->ria_reserved = ll_ra_count_get(sbi, ria,
626                                             ria_page_count(ria), pages_min);
627
628         CDEBUG(D_READA,
629                "async reserved pages: %lu/%lu/%lu, ra_cur %d, ra_max %lu\n",
630                ria->ria_reserved, pages, pages_min,
631                atomic_read(&ll_i2sbi(inode)->ll_ra_info.ra_cur_pages),
632                ll_i2sbi(inode)->ll_ra_info.ra_max_pages);
633
634         if (ria->ria_reserved < pages) {
635                 ll_ra_stats_inc(inode, RA_STAT_MAX_IN_FLIGHT);
636                 if (PAGES_TO_MiB(ria->ria_reserved) < 1) {
637                         ll_ra_count_put(ll_i2sbi(inode), ria->ria_reserved);
638                         GOTO(out_put_env, rc = 0);
639                 }
640         }
641
642         rc = cl_io_rw_init(env, io, CIT_READ, ria->ria_start_idx, pages);
643         if (rc)
644                 GOTO(out_put_env, rc);
645
646         /* overwrite jobid initialized in vvp_io_init() */
647         if (strncmp(ll_i2info(inode)->lli_jobid, work->lrw_jobid,
648                     sizeof(work->lrw_jobid)))
649                 memcpy(ll_i2info(inode)->lli_jobid, work->lrw_jobid,
650                        sizeof(work->lrw_jobid));
651
652         vvp_env_io(env)->vui_fd = fd;
653         io->ci_state = CIS_LOCKED;
654         io->ci_async_readahead = true;
655         rc = cl_io_start(env, io);
656         if (rc)
657                 GOTO(out_io_fini, rc);
658
659         queue = &io->ci_queue;
660         cl_2queue_init(queue);
661
662         rc = ll_read_ahead_pages(env, io, &queue->c2_qin, ras, ria,
663                                  &ra_end_idx, 0);
664         if (ria->ria_reserved != 0)
665                 ll_ra_count_put(ll_i2sbi(inode), ria->ria_reserved);
666         if (queue->c2_qin.pl_nr > 0) {
667                 int count = queue->c2_qin.pl_nr;
668
669                 rc = cl_io_submit_rw(env, io, CRT_READ, queue);
670                 if (rc == 0)
671                         task_io_account_read(PAGE_SIZE * count);
672         }
673         if (ria->ria_end_idx == ra_end_idx && ra_end_idx == (kms >> PAGE_SHIFT))
674                 ll_ra_stats_inc(inode, RA_STAT_EOF);
675
676         if (ra_end_idx != ria->ria_end_idx)
677                 ll_ra_stats_inc(inode, RA_STAT_FAILED_REACH_END);
678
679         /* TODO: discard all pages until page reinit route is implemented */
680         cl_page_list_discard(env, io, &queue->c2_qin);
681
682         /* Unlock unsent read pages in case of error. */
683         cl_page_list_disown(env, io, &queue->c2_qin);
684
685         cl_2queue_fini(env, queue);
686 out_io_fini:
687         cl_io_end(env, io);
688         cl_io_fini(env, io);
689 out_put_env:
690         cl_env_put(env, &refcheck);
691 out_free_work:
692         if (ra_end_idx > 0)
693                 ll_ra_stats_inc_sbi(ll_i2sbi(inode), RA_STAT_ASYNC);
694         atomic_dec(&sbi->ll_ra_info.ra_async_inflight);
695         ll_readahead_work_free(work);
696 }
697
698 static int ll_readahead(const struct lu_env *env, struct cl_io *io,
699                         struct cl_page_list *queue,
700                         struct ll_readahead_state *ras, bool hit,
701                         struct file *file, pgoff_t skip_index)
702 {
703         struct vvp_io *vio = vvp_env_io(env);
704         struct ll_thread_info *lti = ll_env_info(env);
705         unsigned long pages, pages_min = 0;
706         pgoff_t ra_end_idx = 0, start_idx = 0, end_idx = 0;
707         struct inode *inode;
708         struct ra_io_arg *ria = &lti->lti_ria;
709         struct cl_object *clob;
710         int ret = 0;
711         __u64 kms;
712         struct ll_sb_info *sbi;
713         struct ll_ra_info *ra;
714
715         ENTRY;
716
717
718
719         clob = io->ci_obj;
720         inode = vvp_object_inode(clob);
721         sbi = ll_i2sbi(inode);
722         ra = &sbi->ll_ra_info;
723
724         /*
725          * In case we have a limited max_cached_mb, readahead
726          * should be stopped if it has run out of all LRU slots.
727          */
728         if (atomic_read(&ra->ra_cur_pages) >= sbi->ll_cache->ccc_lru_max) {
729                 ll_ra_stats_inc(inode, RA_STAT_MAX_IN_FLIGHT);
730                 RETURN(0);
731         }
732
733         memset(ria, 0, sizeof(*ria));
734         ret = ll_readahead_file_kms(env, io, &kms);
735         if (ret != 0)
736                 RETURN(ret);
737
738         if (kms == 0) {
739                 ll_ra_stats_inc(inode, RA_STAT_ZERO_LEN);
740                 RETURN(0);
741         }
742
743         spin_lock(&ras->ras_lock);
744
745         /*
746          * Note: another thread might roll back ras_next_readahead_idx
747          * if it cannot get the full size of prepared pages; see the
748          * end of this function. For stride read-ahead, make sure the
749          * offset is not less than ras_stride_offset, so that stride
750          * read-ahead can work correctly.
751          */
752         if (stride_io_mode(ras))
753                 start_idx = max_t(pgoff_t, ras->ras_next_readahead_idx,
754                                   ras->ras_stride_offset >> PAGE_SHIFT);
755         else
756                 start_idx = ras->ras_next_readahead_idx;
757
758         if (ras->ras_window_pages > 0)
759                 end_idx = ras->ras_window_start_idx + ras->ras_window_pages - 1;
760
761         if (skip_index)
762                 end_idx = start_idx + ras->ras_window_pages - 1;
763
764         /* Enlarge the RA window to encompass the full read */
765         if (vio->vui_ra_valid &&
766             end_idx < vio->vui_ra_start_idx + vio->vui_ra_pages - 1)
767                 end_idx = vio->vui_ra_start_idx + vio->vui_ra_pages - 1;
768
769         if (end_idx != 0) {
770                 pgoff_t eof_index;
771
772                 /* Truncate RA window to end of file */
773                 eof_index = (pgoff_t)((kms - 1) >> PAGE_SHIFT);
774                 if (eof_index <= end_idx) {
775                         end_idx = eof_index;
776                         ria->ria_eof = true;
777                 }
778         }
779         ria->ria_start_idx = start_idx;
780         ria->ria_end_idx = end_idx;
781         /* If stride I/O mode is detected, get the stride window */
782         if (stride_io_mode(ras)) {
783                 ria->ria_stoff = ras->ras_stride_offset;
784                 ria->ria_length = ras->ras_stride_length;
785                 ria->ria_bytes = ras->ras_stride_bytes;
786         }
787         spin_unlock(&ras->ras_lock);
788
789         if (end_idx == 0) {
790                 ll_ra_stats_inc(inode, RA_STAT_ZERO_WINDOW);
791                 RETURN(0);
792         }
793         pages = ria_page_count(ria);
794         if (pages == 0) {
795                 ll_ra_stats_inc(inode, RA_STAT_ZERO_WINDOW);
796                 RETURN(0);
797         }
798
799         RAS_CDEBUG(ras);
800         CDEBUG(D_READA, DFID": ria: %lu/%lu, bead: %lu/%lu, hit: %d\n",
801                PFID(lu_object_fid(&clob->co_lu)),
802                ria->ria_start_idx, ria->ria_end_idx,
803                vio->vui_ra_valid ? vio->vui_ra_start_idx : 0,
804                vio->vui_ra_valid ? vio->vui_ra_pages : 0,
805                hit);
806
807         /* at least extend the readahead window to cover the current read */
808         if (!hit && vio->vui_ra_valid &&
809             vio->vui_ra_start_idx + vio->vui_ra_pages > ria->ria_start_idx) {
810                 ria->ria_end_idx_min =
811                         vio->vui_ra_start_idx + vio->vui_ra_pages - 1;
812                 pages_min = vio->vui_ra_start_idx + vio->vui_ra_pages -
813                                 ria->ria_start_idx;
814                 /*
815                  * For performance reasons, exceeding @ra_max_pages
816                  * is allowed, but this should be limited by the RPC
817                  * size in case a large block size read is issued. Trim
818                  * to the RPC boundary.
819                  */
820                 pages_min = min(pages_min, ras->ras_rpc_pages -
821                                 (ria->ria_start_idx % ras->ras_rpc_pages));
822         }
823
824         /* don't over-reserve for mmap range read */
825         if (skip_index)
826                 pages_min = 0;
827         if (pages_min > pages)
828                 pages = pages_min;
829         ria->ria_reserved = ll_ra_count_get(ll_i2sbi(inode), ria, pages,
830                                             pages_min);
831         if (ria->ria_reserved < pages)
832                 ll_ra_stats_inc(inode, RA_STAT_MAX_IN_FLIGHT);
833
834         CDEBUG(D_READA, "reserved pages: %lu/%lu/%lu, ra_cur %d, ra_max %lu\n",
835                ria->ria_reserved, pages, pages_min,
836                atomic_read(&ll_i2sbi(inode)->ll_ra_info.ra_cur_pages),
837                ll_i2sbi(inode)->ll_ra_info.ra_max_pages);
838
839         ret = ll_read_ahead_pages(env, io, queue, ras, ria, &ra_end_idx,
840                                   skip_index);
841         if (ria->ria_reserved != 0)
842                 ll_ra_count_put(ll_i2sbi(inode), ria->ria_reserved);
843
844         if (ra_end_idx == end_idx && ra_end_idx == (kms >> PAGE_SHIFT))
845                 ll_ra_stats_inc(inode, RA_STAT_EOF);
846
847         CDEBUG(D_READA,
848                "ra_end_idx = %lu end_idx = %lu stride end = %lu pages = %d\n",
849                ra_end_idx, end_idx, ria->ria_end_idx, ret);
850
851         if (ra_end_idx != end_idx)
852                 ll_ra_stats_inc(inode, RA_STAT_FAILED_REACH_END);
853         if (ra_end_idx > 0) {
854                 /* update the ras so that the next read-ahead tries from
855                  * where we left off. */
856                 spin_lock(&ras->ras_lock);
857                 ras->ras_next_readahead_idx = ra_end_idx + 1;
858                 spin_unlock(&ras->ras_lock);
859                 RAS_CDEBUG(ras);
860         }
861
862         RETURN(ret);
863 }
864
865 static int ll_readpages(const struct lu_env *env, struct cl_io *io,
866                         struct cl_page_list *queue,
867                         pgoff_t start, pgoff_t end)
868 {
869         int ret = 0;
870         __u64 kms;
871         pgoff_t page_idx;
872         int count = 0;
873
874         ENTRY;
875
876         ret = ll_readahead_file_kms(env, io, &kms);
877         if (ret != 0)
878                 RETURN(ret);
879
880         if (kms == 0)
881                 RETURN(0);
882
883         if (end != 0) {
884                 unsigned long end_index;
885
886                 end_index = (unsigned long)((kms - 1) >> PAGE_SHIFT);
887                 if (end_index <= end)
888                         end = end_index;
889         }
890
891         for (page_idx = start; page_idx <= end; page_idx++) {
892                 ret = ll_read_ahead_page(env, io, queue, page_idx,
893                                         WILLNEED);
894                 if (ret < 0)
895                         break;
896                 else if (ret == 0) /* ret 1 is already uptodate */
897                         count++;
898         }
899
900         RETURN(count > 0 ? count : ret);
901 }
902
903 static void ras_set_start(struct ll_readahead_state *ras, pgoff_t index)
904 {
905         ras->ras_window_start_idx = ras_align(ras, index);
906 }
907
908 /* called with the ras_lock held or from places where it doesn't matter */
909 static void ras_reset(struct ll_readahead_state *ras, pgoff_t index)
910 {
911         ras->ras_consecutive_requests = 0;
912         ras->ras_consecutive_bytes = 0;
913         ras->ras_window_pages = 0;
914         ras_set_start(ras, index);
915         ras->ras_next_readahead_idx = max(ras->ras_window_start_idx, index + 1);
916
917         RAS_CDEBUG(ras);
918 }
919
920 /* called with the ras_lock held or from places where it doesn't matter */
921 static void ras_stride_reset(struct ll_readahead_state *ras)
922 {
923         ras->ras_consecutive_stride_requests = 0;
924         ras->ras_stride_length = 0;
925         ras->ras_stride_bytes = 0;
926         RAS_CDEBUG(ras);
927 }
928
929 void ll_readahead_init(struct inode *inode, struct ll_readahead_state *ras)
930 {
931         spin_lock_init(&ras->ras_lock);
932         ras->ras_rpc_pages = PTLRPC_MAX_BRW_PAGES;
933         ras_reset(ras, 0);
934         ras->ras_last_read_end_bytes = 0;
935         ras->ras_requests = 0;
936         ras->ras_range_min_start_idx = 0;
937         ras->ras_range_max_end_idx = 0;
938         ras->ras_range_requests = 0;
939         ras->ras_last_range_pages = 0;
940 }
941
942 /*
943  * Check whether the read request is in the stride window.
944  * If it is in the stride window, return true, otherwise return false.
945  */
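/*
 * For instance, with ras_stride_length = 1 MiB, ras_stride_bytes = 64 KiB
 * and ras_last_read_end_bytes = 65535 (the first 64 KiB chunk was just
 * read, so ras_consecutive_bytes == 64 KiB), a 64 KiB read at pos = 1 MiB
 * leaves a gap of exactly stride_length - stride_bytes and is therefore
 * inside the stride window.
 */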
946 static bool read_in_stride_window(struct ll_readahead_state *ras,
947                                   loff_t pos, loff_t count)
948 {
949         loff_t stride_gap;
950
951         if (ras->ras_stride_length == 0 || ras->ras_stride_bytes == 0 ||
952             ras->ras_stride_bytes == ras->ras_stride_length)
953                 return false;
954
955         stride_gap = pos - ras->ras_last_read_end_bytes - 1;
956
957         /* If it is a contiguous read */
958         if (stride_gap == 0)
959                 return ras->ras_consecutive_bytes + count <=
960                         ras->ras_stride_bytes;
961
962         /* Otherwise check the stride by itself */
963         return (ras->ras_stride_length - ras->ras_stride_bytes) == stride_gap &&
964                 ras->ras_consecutive_bytes == ras->ras_stride_bytes &&
965                 count <= ras->ras_stride_bytes;
966 }
967
968 static void ras_init_stride_detector(struct ll_readahead_state *ras,
969                                      loff_t pos, loff_t count)
970 {
971         loff_t stride_gap = pos - ras->ras_last_read_end_bytes - 1;
972
973         LASSERT(ras->ras_consecutive_stride_requests == 0);
974
975         if (pos <= ras->ras_last_read_end_bytes) {
976                 /* Reset stride window for forward read */
977                 ras_stride_reset(ras);
978                 return;
979         }
980
981         ras->ras_stride_bytes = ras->ras_consecutive_bytes;
982         ras->ras_stride_length = stride_gap + ras->ras_consecutive_bytes;
983         ras->ras_consecutive_stride_requests++;
984         ras->ras_stride_offset = pos;
985
986         RAS_CDEBUG(ras);
987 }
988
989 static unsigned long
990 stride_page_count(struct ll_readahead_state *ras, loff_t len)
991 {
992         loff_t bytes_count =
993                 stride_byte_count(ras->ras_stride_offset,
994                                   ras->ras_stride_length, ras->ras_stride_bytes,
995                                   ras->ras_window_start_idx << PAGE_SHIFT, len);
996
997         return (bytes_count + PAGE_SIZE - 1) >> PAGE_SHIFT;
998 }
999
1000 /* Stride read-ahead window will be increased by inc_bytes according to
1001  * the stride I/O pattern */
1002 static void ras_stride_increase_window(struct ll_readahead_state *ras,
1003                                        struct ll_ra_info *ra, loff_t inc_bytes)
1004 {
1005         loff_t window_bytes, stride_bytes;
1006         u64 left_bytes;
1007         u64 step;
1008         loff_t end;
1009
1010         /* temporarily store in page units to reduce LASSERT() cost below */
1011         end = ras->ras_window_start_idx + ras->ras_window_pages;
1012
1013         LASSERT(ras->ras_stride_length > 0);
1014         LASSERTF(end >= (ras->ras_stride_offset >> PAGE_SHIFT),
1015                  "window_start_idx %lu, window_pages %lu stride_offset %llu\n",
1016                  ras->ras_window_start_idx, ras->ras_window_pages,
1017                  ras->ras_stride_offset);
1018
1019         end <<= PAGE_SHIFT;
1020         if (end <= ras->ras_stride_offset)
1021                 stride_bytes = 0;
1022         else
1023                 stride_bytes = end - ras->ras_stride_offset;
1024
1025         div64_u64_rem(stride_bytes, ras->ras_stride_length, &left_bytes);
1026         window_bytes = (ras->ras_window_pages << PAGE_SHIFT);
1027         if (left_bytes < ras->ras_stride_bytes) {
1028                 if (ras->ras_stride_bytes - left_bytes >= inc_bytes) {
1029                         window_bytes += inc_bytes;
1030                         goto out;
1031                 } else {
1032                         window_bytes += (ras->ras_stride_bytes - left_bytes);
1033                         inc_bytes -= (ras->ras_stride_bytes - left_bytes);
1034                 }
1035         } else {
1036                 window_bytes += (ras->ras_stride_length - left_bytes);
1037         }
1038
1039         LASSERT(ras->ras_stride_bytes != 0);
1040
1041         step = div64_u64_rem(inc_bytes, ras->ras_stride_bytes, &left_bytes);
1042
1043         window_bytes += step * ras->ras_stride_length + left_bytes;
1044         LASSERT(window_bytes > 0);
1045
1046 out:
1047         if (stride_page_count(ras, window_bytes) <=
1048             ra->ra_max_pages_per_file || ras->ras_window_pages == 0)
1049                 ras->ras_window_pages = (window_bytes >> PAGE_SHIFT);
1050
1051         LASSERT(ras->ras_window_pages > 0);
1052
1053         RAS_CDEBUG(ras);
1054 }
1055
1056 static void ras_increase_window(struct inode *inode,
1057                                 struct ll_readahead_state *ras,
1058                                 struct ll_ra_info *ra)
1059 {
1060         /* The stretch of ra-window should be aligned with max rpc_size
1061          * but the current clio architecture does not support retrieving such
1062          * information from the lower layer. FIXME later
1063          */
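        /*
         * In the non-stride case the window simply grows by one RPC worth
         * of pages each time, e.g. from 256 to 512 pages with 1 MiB RPCs
         * and 4 KiB pages, capped at ra_max_pages_per_file and aligned to
         * the RPC size.
         */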
1064         if (stride_io_mode(ras)) {
1065                 ras_stride_increase_window(ras, ra,
1066                                       (loff_t)ras->ras_rpc_pages << PAGE_SHIFT);
1067         } else {
1068                 pgoff_t window_pages;
1069
1070                 window_pages = min(ras->ras_window_pages + ras->ras_rpc_pages,
1071                                    ra->ra_max_pages_per_file);
1072                 if (window_pages < ras->ras_rpc_pages)
1073                         ras->ras_window_pages = window_pages;
1074                 else
1075                         ras->ras_window_pages = ras_align(ras, window_pages);
1076         }
1077 }
1078
1079 /**
1080  * Seeks within 8 pages are considered sequential reads for now.
1081  */
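/*
 * For example (with 4 KiB pages), after a read that ended at 1 MiB, any
 * read starting within 32 KiB before or after that position is still
 * treated as sequential.
 */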
1082 static inline bool is_loose_seq_read(struct ll_readahead_state *ras, loff_t pos)
1083 {
1084         return pos_in_window(pos, ras->ras_last_read_end_bytes,
1085                              8UL << PAGE_SHIFT, 8UL << PAGE_SHIFT);
1086 }
1087
1088 static inline bool is_loose_mmap_read(struct ll_sb_info *sbi,
1089                                       struct ll_readahead_state *ras,
1090                                       unsigned long pos)
1091 {
1092         unsigned long range_pages = sbi->ll_ra_info.ra_range_pages;
1093
1094         return pos_in_window(pos, ras->ras_last_read_end_bytes,
1095                              range_pages << PAGE_SHIFT,
1096                              range_pages << PAGE_SHIFT);
1097 }
1098
1099 /**
1100  * We have observed slow mmap read performance for some
1101  * applications. The problem arises if the access pattern is neither
1102  * sequential nor strided, but still adjacent within a
1103  * small range before seeking to a random position.
1104  *
1105  * So the pattern could be something like this:
1106  *
1107  * [1M data] [hole] [0.5M data] [hole] [0.7M data] [1M data]
1108  *
1109  *
1110  * Every time an application reads mmap data, it may not only
1111  * read a single 4KB page, but also a cluster of nearby pages in
1112  * a range (e.g. 1MB) of the first page after a cache miss.
1113  *
1114  * The readahead engine is modified to track the range size of
1115  * a cluster of mmap reads, so that after a seek and/or cache miss,
1116  * the range size is used to efficiently prefetch multiple pages
1117  * in a single RPC rather than many small RPCs.
1118  */
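/*
 * A minimal sketch of the idea: if, after a miss, an application touches
 * a cluster of roughly 256 pages in quick succession,
 * ras_range_min_start_idx and ras_range_max_end_idx record that cluster
 * size, and on a later miss a window of about that many pages is read
 * around the new faulting index instead of a single page.
 */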
1119 static void ras_detect_cluster_range(struct ll_readahead_state *ras,
1120                                      struct ll_sb_info *sbi,
1121                                      unsigned long pos, unsigned long count)
1122 {
1123         pgoff_t last_pages, pages;
1124         pgoff_t end_idx = (pos + count - 1) >> PAGE_SHIFT;
1125
1126         last_pages = ras->ras_range_max_end_idx -
1127                         ras->ras_range_min_start_idx + 1;
1128         /* First time coming here */
1129         if (!ras->ras_range_max_end_idx)
1130                 goto out;
1131
1132         /* Random or Stride read */
1133         if (!is_loose_mmap_read(sbi, ras, pos))
1134                 goto out;
1135
1136         ras->ras_range_requests++;
1137         if (ras->ras_range_max_end_idx < end_idx)
1138                 ras->ras_range_max_end_idx = end_idx;
1139
1140         if (ras->ras_range_min_start_idx > (pos >> PAGE_SHIFT))
1141                 ras->ras_range_min_start_idx = pos >> PAGE_SHIFT;
1142
1143         /* Out of range, consider it as random or stride */
1144         pages = ras->ras_range_max_end_idx -
1145                         ras->ras_range_min_start_idx + 1;
1146         if (pages <= sbi->ll_ra_info.ra_range_pages)
1147                 return;
1148 out:
1149         ras->ras_last_range_pages = last_pages;
1150         ras->ras_range_requests = 0;
1151         ras->ras_range_min_start_idx = pos >> PAGE_SHIFT;
1152         ras->ras_range_max_end_idx = end_idx;
1153 }
1154
1155 static void ras_detect_read_pattern(struct ll_readahead_state *ras,
1156                                     struct ll_sb_info *sbi,
1157                                     loff_t pos, size_t count, bool mmap)
1158 {
1159         bool stride_detect = false;
1160         pgoff_t index = pos >> PAGE_SHIFT;
1161
1162         /*
1163          * Reset the read-ahead window in two cases. First, when the app seeks
1164          * or reads to some other part of the file. Second, if we get a
1165          * read-ahead miss that we think we've previously issued. This can
1166          * be a symptom of there being so many read-ahead pages that the VM
1167          * is reclaiming them before we get to them.
1168          */
1169         if (!is_loose_seq_read(ras, pos)) {
1170                 /* Check whether it is in stride I/O mode */
1171                 if (!read_in_stride_window(ras, pos, count)) {
1172                         if (ras->ras_consecutive_stride_requests == 0)
1173                                 ras_init_stride_detector(ras, pos, count);
1174                         else
1175                                 ras_stride_reset(ras);
1176                         ras->ras_consecutive_bytes = 0;
1177                         ras_reset(ras, index);
1178                 } else {
1179                         ras->ras_consecutive_bytes = 0;
1180                         ras->ras_consecutive_requests = 0;
1181                         if (++ras->ras_consecutive_stride_requests > 1)
1182                                 stride_detect = true;
1183                         RAS_CDEBUG(ras);
1184                 }
1185                 ll_ra_stats_inc_sbi(sbi, RA_STAT_DISTANT_READPAGE);
1186         } else if (stride_io_mode(ras)) {
1187                 /*
1188                  * If this is a contiguous read but we are currently in
1189                  * stride I/O mode, check whether the stride step is still
1190                  * valid; if invalid, reset the stride ra window to
1191                  * zero.
1192                  */
1193                 if (!read_in_stride_window(ras, pos, count)) {
1194                         ras_stride_reset(ras);
1195                         ras->ras_window_pages = 0;
1196                         ras->ras_next_readahead_idx = index;
1197                 }
1198         }
1199
1200         ras->ras_consecutive_bytes += count;
1201         if (mmap) {
1202                 pgoff_t idx = ras->ras_consecutive_bytes >> PAGE_SHIFT;
1203                 unsigned long ra_range_pages =
1204                                 max_t(unsigned long, RA_MIN_MMAP_RANGE_PAGES,
1205                                       sbi->ll_ra_info.ra_range_pages);
1206
1207                 if ((idx >= ra_range_pages &&
1208                      idx % ra_range_pages == 0) || stride_detect)
1209                         ras->ras_need_increase_window = true;
1210         } else if ((ras->ras_consecutive_requests > 1 || stride_detect)) {
1211                 ras->ras_need_increase_window = true;
1212         }
1213
1214         ras->ras_last_read_end_bytes = pos + count - 1;
1215 }
1216
1217 void ll_ras_enter(struct file *f, loff_t pos, size_t count)
1218 {
1219         struct ll_file_data *fd = f->private_data;
1220         struct ll_readahead_state *ras = &fd->fd_ras;
1221         struct inode *inode = file_inode(f);
1222         unsigned long index = pos >> PAGE_SHIFT;
1223         struct ll_sb_info *sbi = ll_i2sbi(inode);
1224
1225         spin_lock(&ras->ras_lock);
1226         ras->ras_requests++;
1227         ras->ras_consecutive_requests++;
1228         ras->ras_need_increase_window = false;
1229         ras->ras_no_miss_check = false;
1230         /*
1231          * On the second access to a file smaller than the tunable
1232          * ra_max_read_ahead_whole_pages, trigger RA on all pages in the
1233          * file up to ra_max_pages_per_file.  This is simply a best effort
1234          * and only occurs once per open file. Normal RA behavior is
1235          * reverted to for subsequent IO.
1236          */
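        /*
         * For example, if ra_max_read_ahead_whole_pages corresponds to
         * 8 MiB (and ra_max_pages_per_file is at least as large), the
         * second read of a 5 MiB file widens the window to cover the
         * entire file in one pass rather than growing it incrementally.
         */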
1237         if (ras->ras_requests >= 2) {
1238                 __u64 kms_pages;
1239                 struct ll_ra_info *ra = &sbi->ll_ra_info;
1240
1241                 kms_pages = (i_size_read(inode) + PAGE_SIZE - 1) >>
1242                             PAGE_SHIFT;
1243
1244                 CDEBUG(D_READA, "kmsp %llu mwp %lu mp %lu\n", kms_pages,
1245                        ra->ra_max_read_ahead_whole_pages,
1246                        ra->ra_max_pages_per_file);
1247
1248                 if (kms_pages &&
1249                     kms_pages <= ra->ra_max_read_ahead_whole_pages) {
1250                         ras->ras_window_start_idx = 0;
1251                         ras->ras_next_readahead_idx = index + 1;
1252                         ras->ras_window_pages = min(ra->ra_max_pages_per_file,
1253                                             ra->ra_max_read_ahead_whole_pages);
1254                         ras->ras_no_miss_check = true;
1255                         GOTO(out_unlock, 0);
1256                 }
1257         }
1258         ras_detect_read_pattern(ras, sbi, pos, count, false);
1259 out_unlock:
1260         spin_unlock(&ras->ras_lock);
1261 }
1262
1263 static bool index_in_stride_window(struct ll_readahead_state *ras,
1264                                    pgoff_t index)
1265 {
1266         loff_t pos = (loff_t)index << PAGE_SHIFT;
1267
1268         if (ras->ras_stride_length == 0 || ras->ras_stride_bytes == 0 ||
1269             ras->ras_stride_bytes == ras->ras_stride_length)
1270                 return false;
1271
1272         if (pos >= ras->ras_stride_offset) {
1273                 u64 offset;
1274
1275                 div64_u64_rem(pos - ras->ras_stride_offset,
1276                               ras->ras_stride_length, &offset);
1277                 if (offset < ras->ras_stride_bytes ||
1278                     ras->ras_stride_length - offset < PAGE_SIZE)
1279                         return true;
1280         } else if (ras->ras_stride_offset - pos < PAGE_SIZE) {
1281                 return true;
1282         }
1283
1284         return false;
1285 }
1286
1287 /*
1288  * ll_ras_enter() is used to detect read pattern according to pos and count.
1289  *
1290  * ras_update() is used to detect cache miss and
1291  * reset window or increase window accordingly
1292  */
1293 static void ras_update(struct ll_sb_info *sbi, struct inode *inode,
1294                        struct ll_readahead_state *ras, pgoff_t index,
1295                        enum ras_update_flags flags, struct cl_io *io)
1296 {
1297         struct ll_ra_info *ra = &sbi->ll_ra_info;
1298         bool hit = flags & LL_RAS_HIT;
1299
1300         ENTRY;
1301         spin_lock(&ras->ras_lock);
1302
1303         if (!hit)
1304                 CDEBUG(D_READA, DFID " pages at %lu miss.\n",
1305                        PFID(ll_inode2fid(inode)), index);
1306         ll_ra_stats_inc_sbi(sbi, hit ? RA_STAT_HIT : RA_STAT_MISS);
1307
1308         /*
1309          * The readahead window has been expanded to cover the whole
1310          * file size, so we don't care whether an ra miss happens or not,
1311          * because we will read the whole file into the page cache even if
1312          * some pages are missed.
1313          */
1314         if (ras->ras_no_miss_check)
1315                 GOTO(out_unlock, 0);
1316
1317         if (io && io->ci_rand_read)
1318                 GOTO(out_unlock, 0);
1319
1320         if (io && io->ci_seq_read) {
1321                 if (!hit) {
1322                         /* to avoid many small read RPCs here */
1323                         ras->ras_window_pages = sbi->ll_ra_info.ra_range_pages;
1324                         ll_ra_stats_inc_sbi(sbi, RA_STAT_MMAP_RANGE_READ);
1325                 }
1326                 goto skip;
1327         }
1328
1329         if (flags & LL_RAS_MMAP) {
1330                 unsigned long ra_pages;
1331
1332                 ras_detect_cluster_range(ras, sbi, index << PAGE_SHIFT,
1333                                          PAGE_SIZE);
1334                 ras_detect_read_pattern(ras, sbi, (loff_t)index << PAGE_SHIFT,
1335                                         PAGE_SIZE, true);
1336
1337                 /* we did not detect anything but we could prefetch */
1338                 if (!ras->ras_need_increase_window &&
1339                     ras->ras_window_pages <= sbi->ll_ra_info.ra_range_pages &&
1340                     ras->ras_range_requests >= 2) {
1341                         if (!hit) {
1342                                 ra_pages = max_t(unsigned long,
1343                                         RA_MIN_MMAP_RANGE_PAGES,
1344                                         ras->ras_last_range_pages);
1345                                 if (index < ra_pages / 2)
1346                                         index = 0;
1347                                 else
1348                                         index -= ra_pages / 2;
1349                                 ras->ras_window_pages = ra_pages;
1350                                 ll_ra_stats_inc_sbi(sbi,
1351                                         RA_STAT_MMAP_RANGE_READ);
1352                         } else {
1353                                 ras->ras_window_pages = 0;
1354                         }
1355                         goto skip;
1356                 }
1357         }
1358
1359         if (!hit && ras->ras_window_pages &&
1360             index < ras->ras_next_readahead_idx &&
1361             pos_in_window(index, ras->ras_window_start_idx, 0,
1362                           ras->ras_window_pages)) {
1363                 ll_ra_stats_inc_sbi(sbi, RA_STAT_MISS_IN_WINDOW);
1364                 ras->ras_need_increase_window = false;
1365
1366                 if (index_in_stride_window(ras, index) &&
1367                     stride_io_mode(ras)) {
1368                         /*
1369                          * if (index != ras->ras_last_readpage + 1)
1370                          *      ras->ras_consecutive_pages = 0;
1371                          */
1372                         ras_reset(ras, index);
1373
1374                         /*
1375                          * If stride-RA hits a cache miss, the stride
1376                          * detector is not reset, to avoid the overhead
1377                          * of re-detecting the read-ahead mode, but only
1378                          * on the condition that the stride window still
1379                          * intersects the normal sequential read-ahead
1380                          * window.
1381                          */
1382                         if (ras->ras_window_start_idx < ras->ras_stride_offset)
1383                                 ras_stride_reset(ras);
1384                         RAS_CDEBUG(ras);
1385                 } else {
1386                         /*
1387                          * Reset both stride window and normal RA
1388                          * window.
1389                          */
1390                         ras_reset(ras, index);
1391                         /* ras->ras_consecutive_pages++; */
1392                         ras->ras_consecutive_bytes = 0;
1393                         ras_stride_reset(ras);
1394                         GOTO(out_unlock, 0);
1395                 }
1396         }
1397
1398 skip:
1399         ras_set_start(ras, index);
1400
1401         if (stride_io_mode(ras)) {
1402                 /* Since stride readahead is sensitive to the read-ahead
1403                  * offset, we use the original offset here instead of
1404                  * ras_window_start_idx, which is RPC aligned.
1405                  */
1406                 ras->ras_next_readahead_idx = max(index + 1,
1407                                                   ras->ras_next_readahead_idx);
1408                 ras->ras_window_start_idx =
1409                                 max_t(pgoff_t, ras->ras_window_start_idx,
1410                                       ras->ras_stride_offset >> PAGE_SHIFT);
1411         } else {
1412                 if (ras->ras_next_readahead_idx < ras->ras_window_start_idx)
1413                         ras->ras_next_readahead_idx = ras->ras_window_start_idx;
1414                 if (!hit)
1415                         ras->ras_next_readahead_idx = index + 1;
1416         }
1417
1418         if (ras->ras_need_increase_window) {
1419                 ras_increase_window(inode, ras, ra);
1420                 ras->ras_need_increase_window = false;
1421         }
1422
1423         EXIT;
1424 out_unlock:
1425         spin_unlock(&ras->ras_lock);
1426 }
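/*
 * Illustrative summary of the window bookkeeping in ras_update() (a reading
 * of the code above, not an authoritative design note): in non-stride mode a
 * cache miss pulls ras_next_readahead_idx back to index + 1 so readahead
 * restarts right after the faulting page, while a hit leaves it wherever
 * ll_readahead() last advanced it (bounded below by ras_window_start_idx);
 * ras_increase_window() only runs when an earlier detection pass set
 * ras_need_increase_window.
 */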
1427
1428 int ll_writepage(struct page *vmpage, struct writeback_control *wbc)
1429 {
1430         struct inode           *inode = vmpage->mapping->host;
1431         struct ll_inode_info   *lli   = ll_i2info(inode);
1432         struct lu_env          *env;
1433         struct cl_io           *io;
1434         struct cl_page         *page;
1435         struct cl_object       *clob;
1436         bool redirtied = false;
1437         bool unlocked = false;
1438         int result;
1439         __u16 refcheck;
1440         ENTRY;
1441
1442         LASSERT(PageLocked(vmpage));
1443         LASSERT(!PageWriteback(vmpage));
1444
1445         LASSERT(ll_i2dtexp(inode) != NULL);
1446
1447         env = cl_env_get(&refcheck);
1448         if (IS_ERR(env))
1449                 GOTO(out, result = PTR_ERR(env));
1450
1451         clob  = ll_i2info(inode)->lli_clob;
1452         LASSERT(clob != NULL);
1453
1454         io = vvp_env_thread_io(env);
1455         io->ci_obj = clob;
1456         io->ci_ignore_layout = 1;
1457         result = cl_io_init(env, io, CIT_MISC, clob);
1458         if (result == 0) {
1459                 page = cl_page_find(env, clob, vmpage->index,
1460                                     vmpage, CPT_CACHEABLE);
1461                 if (!IS_ERR(page)) {
1462                         lu_ref_add(&page->cp_reference, "writepage",
1463                                    current);
1464                         cl_page_assume(env, io, page);
1465                         result = cl_page_flush(env, io, page);
1466                         if (result != 0) {
1467                                 /*
1468                                  * Re-dirty the page on error so the write is
1469                                  * retried, but not when the IO has actually
1470                                  * occurred and completed with an error.
1471                                  */
1472                                 if (!PageError(vmpage)) {
1473                                         redirty_page_for_writepage(wbc, vmpage);
1474                                         result = 0;
1475                                         redirtied = true;
1476                                 }
1477                         }
1478                         cl_page_disown(env, io, page);
1479                         unlocked = true;
1480                         lu_ref_del(&page->cp_reference,
1481                                    "writepage", current);
1482                         cl_page_put(env, page);
1483                 } else {
1484                         result = PTR_ERR(page);
1485                 }
1486         }
1487         cl_io_fini(env, io);
1488
1489         if (redirtied && wbc->sync_mode == WB_SYNC_ALL) {
1490                 loff_t offset = cl_offset(clob, vmpage->index);
1491
1492                 /* Flushing the page failed because the extent is being
1493                  * written out. Wait for the write of the extent to finish,
1494                  * to avoid breaking the kernel, which assumes ->writepage
1495                  * marks PageWriteback or cleans the page. */
1496                 result = cl_sync_file_range(inode, offset,
1497                                             offset + PAGE_SIZE - 1,
1498                                             CL_FSYNC_LOCAL, 1);
1499                 if (result > 0) {
1500                         /* We may actually have written more than one page;
1501                          * subtract the extra pages, but not this page,
1502                          * because the caller will count it. */
1503                         wbc->nr_to_write -= result - 1;
1504                         result = 0;
1505                 }
1506         }
1507
1508         cl_env_put(env, &refcheck);
1509         GOTO(out, result);
1510
1511 out:
1512         if (result < 0) {
1513                 if (!lli->lli_async_rc)
1514                         lli->lli_async_rc = result;
1515                 SetPageError(vmpage);
1516                 if (!unlocked)
1517                         unlock_page(vmpage);
1518         }
1519         return result;
1520 }
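/*
 * Note on the error path above: the first asynchronous write error is
 * latched into lli_async_rc and the vmpage gets SetPageError(), so the
 * failure is remembered and can still be surfaced by a later fsync() or
 * close() on the file even though ->writepage itself has already returned.
 */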
1521
1522 int ll_writepages(struct address_space *mapping, struct writeback_control *wbc)
1523 {
1524         struct inode *inode = mapping->host;
1525         loff_t start;
1526         loff_t end;
1527         enum cl_fsync_mode mode;
1528         int range_whole = 0;
1529         int result;
1530         ENTRY;
1531
1532         if (wbc->range_cyclic) {
1533                 start = (loff_t)mapping->writeback_index << PAGE_SHIFT;
1534                 end = OBD_OBJECT_EOF;
1535         } else {
1536                 start = wbc->range_start;
1537                 end = wbc->range_end;
1538                 if (end == LLONG_MAX) {
1539                         end = OBD_OBJECT_EOF;
1540                         range_whole = start == 0;
1541                 }
1542         }
1543
1544         mode = CL_FSYNC_NONE;
1545         if (wbc->sync_mode == WB_SYNC_ALL)
1546                 mode = CL_FSYNC_LOCAL;
1547
1548         if (ll_i2info(inode)->lli_clob == NULL)
1549                 RETURN(0);
1550
1551         /* For direct IO, writepages() would be called to evict cached pages
1552          * inside the IO context of the write, which would cause a deadlock
1553          * at layout_conf since it waits for active IOs to complete. */
1554         result = cl_sync_file_range(inode, start, end, mode, 1);
1555         if (result > 0) {
1556                 wbc->nr_to_write -= result;
1557                 result = 0;
1558         }
1559
1560         if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0)) {
1561                 if (end == OBD_OBJECT_EOF)
1562                         mapping->writeback_index = 0;
1563                 else
1564                         mapping->writeback_index = (end >> PAGE_SHIFT) + 1;
1565         }
1566         RETURN(result);
1567 }
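/*
 * Note on the range handling above: a cyclic pass, or a whole-file pass that
 * still has nr_to_write budget left, rewinds mapping->writeback_index to 0
 * once the range reached OBD_OBJECT_EOF; otherwise writeback_index is left
 * just past the last page that was asked to be written.
 */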
1568
1569 struct ll_cl_context *ll_cl_find(struct inode *inode)
1570 {
1571         struct ll_inode_info *lli = ll_i2info(inode);
1572         struct ll_cl_context *lcc;
1573         struct ll_cl_context *found = NULL;
1574
1575         read_lock(&lli->lli_lock);
1576         list_for_each_entry(lcc, &lli->lli_lccs, lcc_list) {
1577                 if (lcc->lcc_cookie == current) {
1578                         found = lcc;
1579                         break;
1580                 }
1581         }
1582         read_unlock(&lli->lli_lock);
1583
1584         return found;
1585 }
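/*
 * The lookup above is keyed on the current task (lcc_cookie == current), so
 * concurrent readers of the same inode each find only the context they
 * registered for themselves with ll_cl_add() below.
 */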
1586
1587 void ll_cl_add(struct inode *inode, const struct lu_env *env, struct cl_io *io,
1588                enum lcc_type type)
1589 {
1590         struct ll_inode_info *lli = ll_i2info(inode);
1591         struct ll_cl_context *lcc = &ll_env_info(env)->lti_io_ctx;
1592
1593         memset(lcc, 0, sizeof(*lcc));
1594         INIT_LIST_HEAD(&lcc->lcc_list);
1595         lcc->lcc_cookie = current;
1596         lcc->lcc_env = env;
1597         lcc->lcc_io = io;
1598         lcc->lcc_type = type;
1599
1600         write_lock(&lli->lli_lock);
1601         list_add(&lcc->lcc_list, &lli->lli_lccs);
1602         write_unlock(&lli->lli_lock);
1603 }
1604
1605 void ll_cl_remove(struct inode *inode, const struct lu_env *env)
1606 {
1607         struct ll_inode_info *lli = ll_i2info(inode);
1608         struct ll_cl_context *lcc = &ll_env_info(env)->lti_io_ctx;
1609
1610         write_lock(&lli->lli_lock);
1611         list_del_init(&lcc->lcc_list);
1612         write_unlock(&lli->lli_lock);
1613 }
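/*
 * Typical pairing of the helpers above (an illustrative sketch, not lifted
 * from a specific caller):
 *
 *      ll_cl_add(inode, env, io, LCC_RW);
 *      ... call into the generic read path, which may end up in
 *          ll_readpage() for pages that are not cached ...
 *      ll_cl_remove(inode, env);
 *
 * so that ll_readpage(), running underneath the generic path, can recover
 * the env/io pair for the current task through ll_cl_find().
 */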
1614
1615 int ll_io_read_page(const struct lu_env *env, struct cl_io *io,
1616                            struct cl_page *page, struct file *file)
1617 {
1618         struct inode              *inode  = vvp_object_inode(page->cp_obj);
1619         struct ll_sb_info         *sbi    = ll_i2sbi(inode);
1620         struct ll_file_data       *fd     = NULL;
1621         struct ll_readahead_state *ras    = NULL;
1622         struct cl_2queue          *queue  = &io->ci_queue;
1623         struct cl_sync_io         *anchor = NULL;
1624         struct vvp_page           *vpg;
1625         int                        rc = 0, rc2 = 0;
1626         bool                       uptodate;
1627         pgoff_t io_start_index;
1628         pgoff_t io_end_index;
1629         ENTRY;
1630
1631         if (file) {
1632                 fd = file->private_data;
1633                 ras = &fd->fd_ras;
1634         }
1635
1636         vpg = cl2vvp_page(cl_object_page_slice(page->cp_obj, page));
1637         uptodate = vpg->vpg_defer_uptodate;
1638
1639         if (ll_readahead_enabled(sbi) && !vpg->vpg_ra_updated && ras) {
1640                 struct vvp_io *vio = vvp_env_io(env);
1641                 enum ras_update_flags flags = 0;
1642
1643                 if (uptodate)
1644                         flags |= LL_RAS_HIT;
1645                 if (!vio->vui_ra_valid)
1646                         flags |= LL_RAS_MMAP;
1647                 ras_update(sbi, inode, ras, vvp_index(vpg), flags, io);
1648         }
1649
1650         cl_2queue_init(queue);
1651         if (uptodate) {
1652                 vpg->vpg_ra_used = 1;
1653                 cl_page_export(env, page, 1);
1654                 cl_page_disown(env, io, page);
1655         } else {
1656                 anchor = &vvp_env_info(env)->vti_anchor;
1657                 cl_sync_io_init(anchor, 1);
1658                 page->cp_sync_io = anchor;
1659
1660                 cl_2queue_add(queue, page, true);
1661         }
1662
1663         io_start_index = cl_index(io->ci_obj, io->u.ci_rw.crw_pos);
1664         io_end_index = cl_index(io->ci_obj, io->u.ci_rw.crw_pos +
1665                                 io->u.ci_rw.crw_count - 1);
1666         if (ll_readahead_enabled(sbi) && ras && !io->ci_rand_read) {
1667                 pgoff_t skip_index = 0;
1668
1669                 if (ras->ras_next_readahead_idx < vvp_index(vpg))
1670                         skip_index = vvp_index(vpg);
1671                 rc2 = ll_readahead(env, io, &queue->c2_qin, ras,
1672                                    uptodate, file, skip_index);
1673                 CDEBUG(D_READA, DFID " %d pages read ahead at %lu\n",
1674                        PFID(ll_inode2fid(inode)), rc2, vvp_index(vpg));
1675         } else if (vvp_index(vpg) == io_start_index &&
1676                    io_end_index - io_start_index > 0) {
1677                 rc2 = ll_readpages(env, io, &queue->c2_qin, io_start_index + 1,
1678                                    io_end_index);
1679                 CDEBUG(D_READA, DFID " %d pages read at %lu\n",
1680                        PFID(ll_inode2fid(inode)), rc2, vvp_index(vpg));
1681         }
1682
1683         if (queue->c2_qin.pl_nr > 0) {
1684                 int count = queue->c2_qin.pl_nr;
1685                 rc = cl_io_submit_rw(env, io, CRT_READ, queue);
1686                 if (rc == 0)
1687                         task_io_account_read(PAGE_SIZE * count);
1688         }
1689
1690
1691         if (anchor != NULL && !cl_page_is_owned(page, io)) { /* have sent */
1692                 rc = cl_sync_io_wait(env, anchor, 0);
1693
1694                 cl_page_assume(env, io, page);
1695                 cl_page_list_del(env, &queue->c2_qout, page);
1696
1697                 if (!PageUptodate(cl_page_vmpage(page))) {
1698                         /* Failed to read a mirror, discard this page so that
1699                          * new page can be created with new mirror.
1700                          *
1701                          * TODO: this is not needed after page reinit
1702                          * route is implemented */
1703                         cl_page_discard(env, io, page);
1704                 }
1705                 cl_page_disown(env, io, page);
1706         }
1707
1708         /* TODO: discard all pages until page reinit route is implemented */
1709         cl_page_list_discard(env, io, &queue->c2_qin);
1710
1711         /* Unlock unsent read pages in case of error. */
1712         cl_page_list_disown(env, io, &queue->c2_qin);
1713
1714         cl_2queue_fini(env, queue);
1715
1716         RETURN(rc);
1717 }
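/*
 * Note on the anchor handling above: cl_sync_io_wait() only runs when the
 * target page was actually handed to the RPC engine (ownership left this
 * IO), so an uptodate page that was exported directly never blocks here,
 * and a page whose read failed is discarded so that a retry can set up a
 * fresh page, e.g. against another mirror.
 */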
1718
1719 /*
1720  * Possible return values:
1721  * 0 no async readahead triggered and fast read could not be used.
1722  * 1 no async readahead, but fast read could be used.
1723  * 2 async readahead triggered and fast read could be used too.
1724  * < 0 on error.
1725  */
1726 static int kickoff_async_readahead(struct file *file, unsigned long pages)
1727 {
1728         struct ll_readahead_work *lrw;
1729         struct inode *inode = file_inode(file);
1730         struct ll_sb_info *sbi = ll_i2sbi(inode);
1731         struct ll_file_data *fd = file->private_data;
1732         struct ll_readahead_state *ras = &fd->fd_ras;
1733         struct ll_ra_info *ra = &sbi->ll_ra_info;
1734         unsigned long throttle;
1735         pgoff_t start_idx = ras_align(ras, ras->ras_next_readahead_idx);
1736         pgoff_t end_idx = start_idx + pages - 1;
1737
1738         /**
1739          * In case we have a limited max_cached_mb, readahead
1740          * should be stopped if it has run out of all LRU slots.
1741          */
1742         if (atomic_read(&ra->ra_cur_pages) >= sbi->ll_cache->ccc_lru_max) {
1743                 ll_ra_stats_inc(inode, RA_STAT_MAX_IN_FLIGHT);
1744                 return 0;
1745         }
1746
1747         throttle = min(ra->ra_async_pages_per_file_threshold,
1748                        ra->ra_max_pages_per_file);
1749         /*
1750          * If this is strided i/o or the window is smaller than the
1751          * throttle limit, we do not do async readahead. Otherwise,
1752          * we do async readahead, allowing the user thread to do fast i/o.
1753          */
1754         if (stride_io_mode(ras) || !throttle ||
1755             ras->ras_window_pages < throttle ||
1756             atomic_read(&ra->ra_async_inflight) > ra->ra_async_max_active)
1757                 return 0;
1758
1759         if ((atomic_read(&ra->ra_cur_pages) + pages) > ra->ra_max_pages)
1760                 return 0;
1761
1762         if (ras->ras_async_last_readpage_idx == start_idx)
1763                 return 1;
1764
1765         /* ll_readahead_work_free() will free it */
1766         OBD_ALLOC_PTR(lrw);
1767         if (lrw) {
1768                 atomic_inc(&sbi->ll_ra_info.ra_async_inflight);
1769                 lrw->lrw_file = get_file(file);
1770                 lrw->lrw_start_idx = start_idx;
1771                 lrw->lrw_end_idx = end_idx;
1772                 spin_lock(&ras->ras_lock);
1773                 ras->ras_next_readahead_idx = end_idx + 1;
1774                 ras->ras_async_last_readpage_idx = start_idx;
1775                 spin_unlock(&ras->ras_lock);
1776                 memcpy(lrw->lrw_jobid, ll_i2info(inode)->lli_jobid,
1777                        sizeof(lrw->lrw_jobid));
1778                 ll_readahead_work_add(inode, lrw);
1779         } else {
1780                 return -ENOMEM;
1781         }
1782
1783         return 2;
1784 }
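/*
 * Illustrative handling of the return codes above: ll_use_fast_io() below
 * only tests for "> 0", so both "fast read only" (1) and "async readahead
 * kicked off" (2) permit fast IO, while 0 and -ENOMEM send the caller down
 * the regular cl_io read path.
 */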
1785
1786 /*
1787  * Check if we can issue a readahead RPC; if that is
1788  * the case, we can't do fast IO because we will need
1789  * a cl_io to issue the RPC.
1790  */
1791 static bool ll_use_fast_io(struct file *file,
1792                            struct ll_readahead_state *ras, pgoff_t index)
1793 {
1794         unsigned long fast_read_pages =
1795                 max(RA_REMAIN_WINDOW_MIN, ras->ras_rpc_pages);
1796         loff_t skip_pages;
1797         loff_t stride_bytes = ras->ras_stride_bytes;
1798
1799         if (stride_io_mode(ras) && stride_bytes) {
1800                 skip_pages = (ras->ras_stride_length +
1801                         ras->ras_stride_bytes - 1) / stride_bytes;
1802                 skip_pages *= fast_read_pages;
1803         } else {
1804                 skip_pages = fast_read_pages;
1805         }
1806
1807         if (ras->ras_window_start_idx + ras->ras_window_pages <
1808             ras->ras_next_readahead_idx + skip_pages ||
1809             kickoff_async_readahead(file, fast_read_pages) > 0)
1810                 return true;
1811
1812         return false;
1813 }
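/*
 * Worked example of the stride branch above (assumed values): with
 * ras_stride_length = 4 MiB, ras_stride_bytes = 1 MiB and fast_read_pages =
 * 256, skip_pages = DIV_ROUND_UP(4 MiB, 1 MiB) * 256 = 1024 pages, so fast
 * IO is used only while the readahead window extends less than 1024 pages
 * past ras_next_readahead_idx (or while async readahead has taken over).
 */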
1814
1815 int ll_readpage(struct file *file, struct page *vmpage)
1816 {
1817         struct inode *inode = file_inode(file);
1818         struct cl_object *clob = ll_i2info(inode)->lli_clob;
1819         struct ll_cl_context *lcc;
1820         const struct lu_env  *env = NULL;
1821         struct cl_io   *io = NULL;
1822         struct cl_page *page;
1823         struct ll_sb_info *sbi = ll_i2sbi(inode);
1824         int result;
1825         ENTRY;
1826
1827         lcc = ll_cl_find(inode);
1828         if (lcc != NULL) {
1829                 env = lcc->lcc_env;
1830                 io  = lcc->lcc_io;
1831         }
1832
1833         if (io == NULL) { /* fast read */
1834                 struct inode *inode = file_inode(file);
1835                 struct ll_file_data *fd = file->private_data;
1836                 struct ll_readahead_state *ras = &fd->fd_ras;
1837                 struct lu_env  *local_env = NULL;
1838                 struct vvp_page *vpg;
1839
1840                 result = -ENODATA;
1841
1842                 /* TODO: need to verify the layout version to make sure
1843                  * the page is not invalid due to layout change. */
1844                 page = cl_vmpage_page(vmpage, clob);
1845                 if (page == NULL) {
1846                         unlock_page(vmpage);
1847                         ll_ra_stats_inc_sbi(sbi, RA_STAT_FAILED_FAST_READ);
1848                         RETURN(result);
1849                 }
1850
1851                 vpg = cl2vvp_page(cl_object_page_slice(page->cp_obj, page));
1852                 if (vpg->vpg_defer_uptodate) {
1853                         enum ras_update_flags flags = LL_RAS_HIT;
1854
1855                         if (lcc && lcc->lcc_type == LCC_MMAP)
1856                                 flags |= LL_RAS_MMAP;
1857
1858                         /* For fast read, update the readahead state only
1859                          * if the page is hit in cache, because the non-cached
1860                          * page case will be handled by slow read later. */
1861                         ras_update(sbi, inode, ras, vvp_index(vpg), flags, io);
1862                         /* avoid duplicate ras_update() call */
1863                         vpg->vpg_ra_updated = 1;
1864
1865                         if (ll_use_fast_io(file, ras, vvp_index(vpg)))
1866                                 result = 0;
1867                 }
1868
1869                 if (!env) {
1870                         local_env = cl_env_percpu_get();
1871                         env = local_env;
1872                 }
1873
1874                 /* export the page and skip io stack */
1875                 if (result == 0) {
1876                         vpg->vpg_ra_used = 1;
1877                         cl_page_export(env, page, 1);
1878                 } else {
1879                         ll_ra_stats_inc_sbi(sbi, RA_STAT_FAILED_FAST_READ);
1880                 }
1881                 /* Release the page refcount before unlocking the page to
1882                  * ensure the object won't be destroyed in the calling path
1883                  * of cl_page_put(). See the comment in ll_releasepage(). */
1884                 cl_page_put(env, page);
1885                 unlock_page(vmpage);
1886                 if (local_env)
1887                         cl_env_percpu_put(local_env);
1888
1889                 RETURN(result);
1890         }
1891
1892         /**
1893          * Direct read can fall back to buffered read, but DIO is done
1894          * with lockless i/o, and buffered read requires LDLM locking, so
1895          * in this case we must restart the IO without lockless mode.
1896          */
1897         if (file->f_flags & O_DIRECT &&
1898             lcc && lcc->lcc_type == LCC_RW &&
1899             !io->ci_dio_lock) {
1900                 unlock_page(vmpage);
1901                 io->ci_dio_lock = 1;
1902                 io->ci_need_restart = 1;
1903                 RETURN(-ENOLCK);
1904         }
1905
1906         LASSERT(io->ci_state == CIS_IO_GOING);
1907         page = cl_page_find(env, clob, vmpage->index, vmpage, CPT_CACHEABLE);
1908         if (!IS_ERR(page)) {
1909                 LASSERT(page->cp_type == CPT_CACHEABLE);
1910                 if (likely(!PageUptodate(vmpage))) {
1911                         cl_page_assume(env, io, page);
1912
1913                         result = ll_io_read_page(env, io, page, file);
1914                 } else {
1915                         /* Page from a non-object file. */
1916                         unlock_page(vmpage);
1917                         result = 0;
1918                 }
1919                 cl_page_put(env, page);
1920         } else {
1921                 unlock_page(vmpage);
1922                 result = PTR_ERR(page);
1923         }
1924         RETURN(result);
1925 }