LU-13386 llite: allow current readahead to exceed reservation
1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.gnu.org/licenses/gpl-2.0.html
19  *
20  * GPL HEADER END
21  */
22 /*
23  * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
24  * Use is subject to license terms.
25  *
26  * Copyright (c) 2011, 2017, Intel Corporation.
27  */
28 /*
29  * This file is part of Lustre, http://www.lustre.org/
30  * Lustre is a trademark of Sun Microsystems, Inc.
31  *
32  * lustre/llite/rw.c
33  *
34  * Lustre Lite I/O page cache routines shared by different kernel revs
35  */
36
37 #include <linux/kernel.h>
38 #include <linux/mm.h>
39 #include <linux/string.h>
40 #include <linux/stat.h>
41 #include <linux/errno.h>
42 #include <linux/unistd.h>
43 #include <linux/writeback.h>
44 #include <asm/uaccess.h>
45
46 #include <linux/fs.h>
47 #include <linux/file.h>
48 #include <linux/stat.h>
49 #include <asm/uaccess.h>
50 #include <linux/mm.h>
51 #include <linux/pagemap.h>
52 /* current_is_kswapd() */
53 #include <linux/swap.h>
54 #include <linux/task_io_accounting_ops.h>
55
56 #define DEBUG_SUBSYSTEM S_LLITE
57
58 #include <obd_cksum.h>
59 #include "llite_internal.h"
60 #include <lustre_compat.h>
61
62 static void ll_ra_stats_inc_sbi(struct ll_sb_info *sbi, enum ra_stat which);
63
64 /**
65  * Get readahead pages from the filesystem readahead pool of the client for a
66  * thread.
67  *
68  * \param sbi superblock for filesystem readahead state ll_ra_info
69  * \param ria per-thread readahead state
70  * \param pages number of pages requested for readahead for the thread.
71  *
72  * WARNING: This algorithm is used to reduce contention on sbi->ll_lock.
73  * It should work well if ra_max_pages is much greater than a single
74  * file's read-ahead window, and not too many threads are contending for
75  * these readahead pages.
76  *
77  * TODO: There may be a 'global sync problem' if many threads are trying
78  * to get an ra budget that is larger than the remaining readahead pages
79  * and reach here at exactly the same time. They will compute \a ret to
80  * consume the remaining pages, but will fail at atomic_add_return() and
81  * get a zero ra window, although there is still ra space remaining. - Jay */
82
83 static unsigned long ll_ra_count_get(struct ll_sb_info *sbi,
84                                      struct ra_io_arg *ria,
85                                      unsigned long pages,
86                                      unsigned long pages_min)
87 {
88         struct ll_ra_info *ra = &sbi->ll_ra_info;
89         long ret;
90         ENTRY;
91
92         /* If fewer than 1M worth of read-ahead pages are left, do not do
93          * read-ahead, otherwise it will form small read RPCs (< 1M), which
94          * hurt server performance a lot. */
95         ret = min(ra->ra_max_pages - atomic_read(&ra->ra_cur_pages),
96                   pages);
97         if (ret < 0 || ret < min_t(long, PTLRPC_MAX_BRW_PAGES, pages))
98                 GOTO(out, ret = 0);
99
100         if (atomic_add_return(ret, &ra->ra_cur_pages) > ra->ra_max_pages) {
101                 atomic_sub(ret, &ra->ra_cur_pages);
102                 ret = 0;
103         }
104
105 out:
106         if (ret < pages_min) {
107                 /* override ra limit for maximum performance */
108                 atomic_add(pages_min - ret, &ra->ra_cur_pages);
109                 ret = pages_min;
110         }
111         RETURN(ret);
112 }
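/*
 * Illustrative example (assuming 4KB pages and PTLRPC_MAX_BRW_PAGES of
 * 256, i.e. 1MB RPCs): with ra_max_pages = 1024 and ra_cur_pages = 900,
 * a request for 512 pages computes ret = min(1024 - 900, 512) = 124.
 * Since 124 < min(256, 512), the reservation is refused and ret becomes
 * 0; but if pages_min were, say, 32, the function would still grant 32
 * pages, letting the current read exceed the global reservation.
 */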
113
114 void ll_ra_count_put(struct ll_sb_info *sbi, unsigned long pages)
115 {
116         struct ll_ra_info *ra = &sbi->ll_ra_info;
117         atomic_sub(pages, &ra->ra_cur_pages);
118 }
119
120 static void ll_ra_stats_inc_sbi(struct ll_sb_info *sbi, enum ra_stat which)
121 {
122         LASSERTF(which < _NR_RA_STAT, "which: %u\n", which);
123         lprocfs_counter_incr(sbi->ll_ra_stats, which);
124 }
125
126 void ll_ra_stats_inc(struct inode *inode, enum ra_stat which)
127 {
128         struct ll_sb_info *sbi = ll_i2sbi(inode);
129         ll_ra_stats_inc_sbi(sbi, which);
130 }
131
132 #define RAS_CDEBUG(ras) \
133         CDEBUG(D_READA,                                                      \
134                "lre %llu cr %lu cb %llu wsi %lu wp %lu nra %lu rpc %lu "     \
135                "r %lu csr %lu so %llu sb %llu sl %llu lr %lu\n",             \
136                ras->ras_last_read_end_bytes, ras->ras_consecutive_requests,  \
137                ras->ras_consecutive_bytes, ras->ras_window_start_idx,        \
138                ras->ras_window_pages, ras->ras_next_readahead_idx,           \
139                ras->ras_rpc_pages, ras->ras_requests,                        \
140                ras->ras_consecutive_stride_requests, ras->ras_stride_offset, \
141                ras->ras_stride_bytes, ras->ras_stride_length,                \
142                ras->ras_async_last_readpage_idx)
143
144 static bool pos_in_window(loff_t pos, loff_t point,
145                           unsigned long before, unsigned long after)
146 {
147         loff_t start = point - before;
148         loff_t end = point + after;
149
150         if (start > point)
151                 start = 0;
152         if (end < point)
153                 end = ~0;
154
155         return start <= pos && pos <= end;
156 }
157
158 /**
159  * Initiates read-ahead of a page with given index.
160  *
161  * \retval +ve: page was already uptodate so it will be skipped
162  *              from being added;
163  * \retval -ve: page wasn't added to \a queue due to an error;
164  * \retval   0: page was added into \a queue for read ahead.
165  */
166 static int ll_read_ahead_page(const struct lu_env *env, struct cl_io *io,
167                               struct cl_page_list *queue, pgoff_t index)
168 {
169         struct cl_object *clob  = io->ci_obj;
170         struct inode     *inode = vvp_object_inode(clob);
171         struct page      *vmpage;
172         struct cl_page   *page;
173         struct vvp_page  *vpg;
174         enum ra_stat      which = _NR_RA_STAT; /* keep gcc happy */
175         int               rc    = 0;
176         const char       *msg   = NULL;
177         ENTRY;
178
179         vmpage = grab_cache_page_nowait(inode->i_mapping, index);
180         if (vmpage == NULL) {
181                 which = RA_STAT_FAILED_GRAB_PAGE;
182                 msg   = "g_c_p_n failed";
183                 GOTO(out, rc = -EBUSY);
184         }
185
186         /* Check if vmpage was truncated or reclaimed */
187         if (vmpage->mapping != inode->i_mapping) {
188                 which = RA_STAT_WRONG_GRAB_PAGE;
189                 msg   = "g_c_p_n returned invalid page";
190                 GOTO(out, rc = -EBUSY);
191         }
192
193         page = cl_page_find(env, clob, vmpage->index, vmpage, CPT_CACHEABLE);
194         if (IS_ERR(page)) {
195                 which = RA_STAT_FAILED_GRAB_PAGE;
196                 msg   = "cl_page_find failed";
197                 GOTO(out, rc = PTR_ERR(page));
198         }
199
200         lu_ref_add(&page->cp_reference, "ra", current);
201         cl_page_assume(env, io, page);
202         vpg = cl2vvp_page(cl_object_page_slice(clob, page));
203         if (!vpg->vpg_defer_uptodate && !PageUptodate(vmpage)) {
204                 vpg->vpg_defer_uptodate = 1;
205                 vpg->vpg_ra_used = 0;
206                 cl_page_list_add(queue, page);
207         } else {
208                 /* skip completed pages */
209                 cl_page_unassume(env, io, page);
210                 /* This page is already uptodate, returning a positive number
211                  * to tell the callers about this */
212                 rc = 1;
213         }
214
215         lu_ref_del(&page->cp_reference, "ra", current);
216         cl_page_put(env, page);
217
218 out:
219         if (vmpage != NULL) {
220                 if (rc != 0)
221                         unlock_page(vmpage);
222                 put_page(vmpage);
223         }
224         if (msg != NULL) {
225                 ll_ra_stats_inc(inode, which);
226                 CDEBUG(D_READA, "%s\n", msg);
228         }
229
230         RETURN(rc);
231 }
232
233 #define RIA_DEBUG(ria)                                                  \
234         CDEBUG(D_READA, "rs %lu re %lu ro %llu rl %llu rb %llu\n",      \
235                ria->ria_start_idx, ria->ria_end_idx, ria->ria_stoff,    \
236                ria->ria_length, ria->ria_bytes)
237
238 static inline int stride_io_mode(struct ll_readahead_state *ras)
239 {
240         return ras->ras_consecutive_stride_requests > 1;
241 }
242
243 /* This function calculates how many bytes will be read in
244  * [off, off + length] within a stride I/O area where
245  * stride_offset = st_off, stride_length = st_len,
246  * stride_bytes = st_bytes
247  *
248  *   |------------------|*****|------------------|*****|------------|*****|....
249  * st_off
250  *   |--- st_bytes     ---|
251  *   |-----     st_len   -----|
252  *
253  *              How many bytes it should read in such pattern
254  *              |-------------------------------------------------------------|
255  *              off
256  *              |<------                  length                      ------->|
257  *
258  *          =   |<----->|  +  |-------------------------------------| +   |---|
259  *             start_left                 st_bytes * i                 end_left
260  */
261 static loff_t stride_byte_count(loff_t st_off, loff_t st_len, loff_t st_bytes,
262                                 loff_t off, loff_t length)
263 {
264         u64 start = off > st_off ? off - st_off : 0;
265         u64 end = off + length > st_off ? off + length - st_off : 0;
266         u64 start_left;
267         u64 end_left;
268         u64 bytes_count;
269
270         if (st_len == 0 || length == 0 || end == 0)
271                 return length;
272
273         start = div64_u64_rem(start, st_len, &start_left);
274         if (start_left < st_bytes)
275                 start_left = st_bytes - start_left;
276         else
277                 start_left = 0;
278
279         end = div64_u64_rem(end, st_len, &end_left);
280         if (end_left > st_bytes)
281                 end_left = st_bytes;
282
283         CDEBUG(D_READA, "start %llu, end %llu start_left %llu end_left %llu\n",
284                start, end, start_left, end_left);
285
286         if (start == end)
287                 bytes_count = end_left - (st_bytes - start_left);
288         else
289                 bytes_count = start_left +
290                         st_bytes * (end - start - 1) + end_left;
291
292         CDEBUG(D_READA,
293                "st_off %llu, st_len %llu st_bytes %llu off %llu length %llu bytescount %llu\n",
294                st_off, st_len, st_bytes, off, length, bytes_count);
295
296         return bytes_count;
297 }
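/*
 * Illustrative example (assuming a 1MB stride): st_off = 0, st_len = 1M,
 * st_bytes = 64K, off = 32K, length = 2M.  Then start = 0 with
 * start_left = 64K - 32K = 32K, end = 2 with end_left = 32K, so
 * bytes_count = 32K + 64K * (2 - 0 - 1) + 32K = 128K: the tail of the
 * first stride chunk, all of the second, and the head of the third.
 */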
298
299 static unsigned long ria_page_count(struct ra_io_arg *ria)
300 {
301         loff_t length_bytes = ria->ria_end_idx >= ria->ria_start_idx ?
302                 (loff_t)(ria->ria_end_idx -
303                          ria->ria_start_idx + 1) << PAGE_SHIFT : 0;
304         loff_t bytes_count;
305
306         if (ria->ria_length > ria->ria_bytes && ria->ria_bytes &&
307             (ria->ria_length & ~PAGE_MASK || ria->ria_bytes & ~PAGE_MASK ||
308              ria->ria_stoff & ~PAGE_MASK)) {
309                 /* Over-estimate un-aligned page stride read */
310                 unsigned long pg_count = ((ria->ria_bytes +
311                                            PAGE_SIZE - 1) >> PAGE_SHIFT) + 1;
312                 pg_count *= length_bytes / ria->ria_length + 1;
313
314                 return pg_count;
315         }
316         bytes_count = stride_byte_count(ria->ria_stoff, ria->ria_length,
317                                         ria->ria_bytes,
318                                         (loff_t)ria->ria_start_idx<<PAGE_SHIFT,
319                                         length_bytes);
320         return (bytes_count + PAGE_SIZE - 1) >> PAGE_SHIFT;
321 }
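/*
 * Illustrative example (assuming 4KB pages): for an unaligned stride of
 * ria_bytes = 5000 and ria_length = 1M over a 3MB span, the over-estimate
 * is (ceil(5000 / 4096) + 1) * (3M / 1M + 1) = 3 * 4 = 12 pages, slightly
 * more than will actually be read.
 */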
322
323 static pgoff_t ras_align(struct ll_readahead_state *ras, pgoff_t index)
324 {
325         return index - (index % ras->ras_rpc_pages);
326 }
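/*
 * Illustrative example: with ras_rpc_pages = 256, ras_align(ras, 300)
 * returns 300 - (300 % 256) = 256, i.e. the index rounded down to the
 * nearest RPC-sized boundary.
 */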
327
328 /* Check whether the index is in the defined ra-window */
329 static bool ras_inside_ra_window(pgoff_t idx, struct ra_io_arg *ria)
330 {
331         loff_t pos = (loff_t)idx << PAGE_SHIFT;
332
333         /* If ria_length == ria_bytes, it means non-stride I/O mode,
334          * and idx is always inside the read-ahead window in this case.
335          * For stride I/O mode, just check whether idx falls inside
336          * the ria_bytes portion of each stride.
337          */
338         if (ria->ria_length == 0 || ria->ria_length == ria->ria_bytes)
339                 return true;
340
341         if (pos >= ria->ria_stoff) {
342                 u64 offset;
343
344                 div64_u64_rem(pos - ria->ria_stoff, ria->ria_length, &offset);
345
346                 if (offset < ria->ria_bytes ||
347                     (ria->ria_length - offset) < PAGE_SIZE)
348                         return true;
349         } else if (pos + PAGE_SIZE > ria->ria_stoff) {
350                 return true;
351         }
352
353         return false;
354 }
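/*
 * Illustrative example (assuming 4KB pages): with ria_stoff = 0,
 * ria_length = 1M and ria_bytes = 64K, pages 0-15 of every 1MB stride
 * period are inside the window (offset < 64K); the second clause also
 * admits a page whose tail crosses into the next period when the stride
 * is not page-aligned.
 */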
355
356 static unsigned long
357 ll_read_ahead_pages(const struct lu_env *env, struct cl_io *io,
358                     struct cl_page_list *queue, struct ll_readahead_state *ras,
359                     struct ra_io_arg *ria, pgoff_t *ra_end)
360 {
361         struct cl_read_ahead ra = { 0 };
362         /* busy page count is per stride */
363         int rc = 0, count = 0, busy_page_count = 0;
364         pgoff_t page_idx;
365
366         LASSERT(ria != NULL);
367         RIA_DEBUG(ria);
368
369         for (page_idx = ria->ria_start_idx;
370              page_idx <= ria->ria_end_idx && ria->ria_reserved > 0;
371              page_idx++) {
372                 if (ras_inside_ra_window(page_idx, ria)) {
373                         if (ra.cra_end_idx == 0 || ra.cra_end_idx < page_idx) {
374                                 pgoff_t end_idx;
375
376                                 /*
377                                  * Do not shrink ria_end_idx in any case until
378                                  * the minimum end of the current read is covered.
379                                  *
380                                  * Do not extend the read lock across stripes if
381                                  * lock contention is detected.
382                                  */
383                                 if (ra.cra_contention &&
384                                     page_idx > ria->ria_end_idx_min) {
385                                         ria->ria_end_idx = *ra_end;
386                                         break;
387                                 }
388
389                                 cl_read_ahead_release(env, &ra);
390
391                                 rc = cl_io_read_ahead(env, io, page_idx, &ra);
392                                 if (rc < 0)
393                                         break;
394
395                                  /*
396                                   * Only shrink ria_end_idx if the matched
397                                   * LDLM lock doesn't cover more.
398                                   */
399                                 if (page_idx > ra.cra_end_idx) {
400                                         ria->ria_end_idx = ra.cra_end_idx;
401                                         break;
402                                 }
403
404                                 CDEBUG(D_READA, "idx: %lu, ra: %lu, rpc: %lu\n",
405                                        page_idx, ra.cra_end_idx,
406                                        ra.cra_rpc_pages);
407                                 LASSERTF(ra.cra_end_idx >= page_idx,
408                                          "object: %p, indices %lu / %lu\n",
409                                          io->ci_obj, ra.cra_end_idx, page_idx);
410                                 /* update read ahead RPC size.
411                                  * NB: it's racy but doesn't matter */
412                                 if (ras->ras_rpc_pages != ra.cra_rpc_pages &&
413                                     ra.cra_rpc_pages > 0)
414                                         ras->ras_rpc_pages = ra.cra_rpc_pages;
415                                 /* trim it to align with optimal RPC size */
416                                 end_idx = ras_align(ras, ria->ria_end_idx + 1);
417                                 if (end_idx > 0 && !ria->ria_eof)
418                                         ria->ria_end_idx = end_idx - 1;
419                                 if (ria->ria_end_idx < ria->ria_end_idx_min)
420                                         ria->ria_end_idx = ria->ria_end_idx_min;
421                         }
422                         if (page_idx > ria->ria_end_idx)
423                                 break;
424
425                         /* If the page is inside the read-ahead window */
426                         rc = ll_read_ahead_page(env, io, queue, page_idx);
427                         if (rc < 0 && rc != -EBUSY)
428                                 break;
429                         if (rc == -EBUSY) {
430                                 busy_page_count++;
431                                 CDEBUG(D_READA,
432                                        "skip busy page: %lu\n", page_idx);
433                                 /* For page-unaligned readahead, the first
434                                  * and last pages of each region can be read
435                                  * by another reader on the same node, and
436                                  * so may be busy. So only stop for > 2 busy
437                                  * pages. */
438                                 if (busy_page_count > 2)
439                                         break;
440                         }
441
442                         *ra_end = page_idx;
443                         /* Only subtract from reserve & count the page if we
444                          * really did readahead on that page. */
445                         if (rc == 0) {
446                                 ria->ria_reserved--;
447                                 count++;
448                         }
449                 } else if (stride_io_mode(ras)) {
450                         /* If it is not in the read-ahead window and we are
451                          * in stride I/O mode, then check whether it should
452                          * skip the stride gap.
453                          */
454                         loff_t pos = (loff_t)page_idx << PAGE_SHIFT;
455                         u64 offset;
456
457                         div64_u64_rem(pos - ria->ria_stoff, ria->ria_length,
458                                       &offset);
459                         if (offset >= ria->ria_bytes) {
460                                 pos += (ria->ria_length - offset);
461                                 if ((pos >> PAGE_SHIFT) >= page_idx + 1)
462                                         page_idx = (pos >> PAGE_SHIFT) - 1;
463                                 busy_page_count = 0;
464                                 CDEBUG(D_READA,
465                                        "Stride: jump %llu pages to %lu\n",
466                                        ria->ria_length - offset, page_idx);
467                                 continue;
468                         }
469                 }
470         }
471
472         cl_read_ahead_release(env, &ra);
473
474         return count;
475 }
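/*
 * Note: ll_read_ahead_pages() returns the number of pages actually
 * queued for read-ahead; *ra_end is set to the last index examined
 * inside the window, so the caller can roll ras_next_readahead_idx
 * forward from there.
 */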
476
477 static void ll_readahead_work_free(struct ll_readahead_work *work)
478 {
479         fput(work->lrw_file);
480         OBD_FREE_PTR(work);
481 }
482
483 static void ll_readahead_handle_work(struct work_struct *wq);
484 static void ll_readahead_work_add(struct inode *inode,
485                                   struct ll_readahead_work *work)
486 {
487         INIT_WORK(&work->lrw_readahead_work, ll_readahead_handle_work);
488         queue_work(ll_i2sbi(inode)->ll_ra_info.ll_readahead_wq,
489                    &work->lrw_readahead_work);
490 }
491
492 static int ll_readahead_file_kms(const struct lu_env *env,
493                                 struct cl_io *io, __u64 *kms)
494 {
495         struct cl_object *clob;
496         struct inode *inode;
497         struct cl_attr *attr = vvp_env_thread_attr(env);
498         int ret;
499
500         clob = io->ci_obj;
501         inode = vvp_object_inode(clob);
502
503         cl_object_attr_lock(clob);
504         ret = cl_object_attr_get(env, clob, attr);
505         cl_object_attr_unlock(clob);
506
507         if (ret != 0)
508                 RETURN(ret);
509
510         *kms = attr->cat_kms;
511         return 0;
512 }
513
514 static void ll_readahead_handle_work(struct work_struct *wq)
515 {
516         struct ll_readahead_work *work;
517         struct lu_env *env;
518         __u16 refcheck;
519         struct ra_io_arg *ria;
520         struct inode *inode;
521         struct ll_file_data *fd;
522         struct ll_readahead_state *ras;
523         struct cl_io *io;
524         struct cl_2queue *queue;
525         pgoff_t ra_end_idx = 0;
526         unsigned long pages, pages_min = 0;
527         struct file *file;
528         __u64 kms;
529         int rc;
530         pgoff_t eof_index;
531         struct ll_sb_info *sbi;
532
533         work = container_of(wq, struct ll_readahead_work,
534                             lrw_readahead_work);
535         fd = work->lrw_file->private_data;
536         ras = &fd->fd_ras;
537         file = work->lrw_file;
538         inode = file_inode(file);
539         sbi = ll_i2sbi(inode);
540
541         env = cl_env_alloc(&refcheck, LCT_NOREF);
542         if (IS_ERR(env))
543                 GOTO(out_free_work, rc = PTR_ERR(env));
544
545         io = vvp_env_thread_io(env);
546         ll_io_init(io, file, CIT_READ, NULL);
547
548         rc = ll_readahead_file_kms(env, io, &kms);
549         if (rc != 0)
550                 GOTO(out_put_env, rc);
551
552         if (kms == 0) {
553                 ll_ra_stats_inc(inode, RA_STAT_ZERO_LEN);
554                 GOTO(out_put_env, rc = 0);
555         }
556
557         ria = &ll_env_info(env)->lti_ria;
558         memset(ria, 0, sizeof(*ria));
559
560         ria->ria_start_idx = work->lrw_start_idx;
561         /* Truncate RA window to end of file */
562         eof_index = (pgoff_t)(kms - 1) >> PAGE_SHIFT;
563         if (eof_index <= work->lrw_end_idx) {
564                 work->lrw_end_idx = eof_index;
565                 ria->ria_eof = true;
566         }
567         if (work->lrw_end_idx <= work->lrw_start_idx)
568                 GOTO(out_put_env, rc = 0);
569
570         ria->ria_end_idx = work->lrw_end_idx;
571         pages = ria->ria_end_idx - ria->ria_start_idx + 1;
572         ria->ria_reserved = ll_ra_count_get(sbi, ria,
573                                             ria_page_count(ria), pages_min);
574
575         CDEBUG(D_READA,
576                "async reserved pages: %lu/%lu/%lu, ra_cur %d, ra_max %lu\n",
577                ria->ria_reserved, pages, pages_min,
578                atomic_read(&ll_i2sbi(inode)->ll_ra_info.ra_cur_pages),
579                ll_i2sbi(inode)->ll_ra_info.ra_max_pages);
580
581         if (ria->ria_reserved < pages) {
582                 ll_ra_stats_inc(inode, RA_STAT_MAX_IN_FLIGHT);
583                 if (PAGES_TO_MiB(ria->ria_reserved) < 1) {
584                         ll_ra_count_put(ll_i2sbi(inode), ria->ria_reserved);
585                         GOTO(out_put_env, rc = 0);
586                 }
587         }
588
589         rc = cl_io_rw_init(env, io, CIT_READ, ria->ria_start_idx, pages);
590         if (rc)
591                 GOTO(out_put_env, rc);
592
593         vvp_env_io(env)->vui_io_subtype = IO_NORMAL;
594         vvp_env_io(env)->vui_fd = fd;
595         io->ci_state = CIS_LOCKED;
596         io->ci_async_readahead = true;
597         rc = cl_io_start(env, io);
598         if (rc)
599                 GOTO(out_io_fini, rc);
600
601         queue = &io->ci_queue;
602         cl_2queue_init(queue);
603
604         rc = ll_read_ahead_pages(env, io, &queue->c2_qin, ras, ria,
605                                  &ra_end_idx);
606         if (ria->ria_reserved != 0)
607                 ll_ra_count_put(ll_i2sbi(inode), ria->ria_reserved);
608         if (queue->c2_qin.pl_nr > 0) {
609                 int count = queue->c2_qin.pl_nr;
610
611                 rc = cl_io_submit_rw(env, io, CRT_READ, queue);
612                 if (rc == 0)
613                         task_io_account_read(PAGE_SIZE * count);
614         }
615         if (ria->ria_end_idx == ra_end_idx && ra_end_idx == (kms >> PAGE_SHIFT))
616                 ll_ra_stats_inc(inode, RA_STAT_EOF);
617
618         if (ra_end_idx != ria->ria_end_idx)
619                 ll_ra_stats_inc(inode, RA_STAT_FAILED_REACH_END);
620
621         /* TODO: discard all pages until page reinit route is implemented */
622         cl_page_list_discard(env, io, &queue->c2_qin);
623
624         /* Unlock unsent read pages in case of error. */
625         cl_page_list_disown(env, io, &queue->c2_qin);
626
627         cl_2queue_fini(env, queue);
628 out_io_fini:
629         cl_io_end(env, io);
630         cl_io_fini(env, io);
631 out_put_env:
632         cl_env_put(env, &refcheck);
633 out_free_work:
634         if (ra_end_idx > 0)
635                 ll_ra_stats_inc_sbi(ll_i2sbi(inode), RA_STAT_ASYNC);
636         atomic_dec(&sbi->ll_ra_info.ra_async_inflight);
637         ll_readahead_work_free(work);
638 }
639
640 static int ll_readahead(const struct lu_env *env, struct cl_io *io,
641                         struct cl_page_list *queue,
642                         struct ll_readahead_state *ras, bool hit,
643                         struct file *file)
644 {
645         struct vvp_io *vio = vvp_env_io(env);
646         struct ll_thread_info *lti = ll_env_info(env);
647         unsigned long pages, pages_min = 0;
648         pgoff_t ra_end_idx = 0, start_idx = 0, end_idx = 0;
649         struct inode *inode;
650         struct ra_io_arg *ria = &lti->lti_ria;
651         struct cl_object *clob;
652         int ret = 0;
653         __u64 kms;
654         ENTRY;
655
656         clob = io->ci_obj;
657         inode = vvp_object_inode(clob);
658
659         memset(ria, 0, sizeof(*ria));
660         ret = ll_readahead_file_kms(env, io, &kms);
661         if (ret != 0)
662                 RETURN(ret);
663
664         if (kms == 0) {
665                 ll_ra_stats_inc(inode, RA_STAT_ZERO_LEN);
666                 RETURN(0);
667         }
668
669         spin_lock(&ras->ras_lock);
670
671         /**
672          * Note: another thread might roll back ras_next_readahead_idx
673          * if it cannot get the full size of prepared pages; see the
674          * end of this function. For stride read-ahead, the offset must
675          * be no less than ras_stride_offset, so that stride read-ahead
676          * can work correctly.
677          */
678         if (stride_io_mode(ras))
679                 start_idx = max_t(pgoff_t, ras->ras_next_readahead_idx,
680                                   ras->ras_stride_offset >> PAGE_SHIFT);
681         else
682                 start_idx = ras->ras_next_readahead_idx;
683
684         if (ras->ras_window_pages > 0)
685                 end_idx = ras->ras_window_start_idx + ras->ras_window_pages - 1;
686
687         /* Enlarge the RA window to encompass the full read */
688         if (vio->vui_ra_valid &&
689             end_idx < vio->vui_ra_start_idx + vio->vui_ra_pages - 1)
690                 end_idx = vio->vui_ra_start_idx + vio->vui_ra_pages - 1;
691
692         if (end_idx != 0) {
693                 pgoff_t eof_index;
694
695                 /* Truncate RA window to end of file */
696                 eof_index = (pgoff_t)((kms - 1) >> PAGE_SHIFT);
697                 if (eof_index <= end_idx) {
698                         end_idx = eof_index;
699                         ria->ria_eof = true;
700                 }
701         }
702         ria->ria_start_idx = start_idx;
703         ria->ria_end_idx = end_idx;
704         /* If stride I/O mode is detected, get the stride window */
705         if (stride_io_mode(ras)) {
706                 ria->ria_stoff = ras->ras_stride_offset;
707                 ria->ria_length = ras->ras_stride_length;
708                 ria->ria_bytes = ras->ras_stride_bytes;
709         }
710         spin_unlock(&ras->ras_lock);
711
712         if (end_idx == 0) {
713                 ll_ra_stats_inc(inode, RA_STAT_ZERO_WINDOW);
714                 RETURN(0);
715         }
716         pages = ria_page_count(ria);
717         if (pages == 0) {
718                 ll_ra_stats_inc(inode, RA_STAT_ZERO_WINDOW);
719                 RETURN(0);
720         }
721
722         RAS_CDEBUG(ras);
723         CDEBUG(D_READA, DFID": ria: %lu/%lu, bead: %lu/%lu, hit: %d\n",
724                PFID(lu_object_fid(&clob->co_lu)),
725                ria->ria_start_idx, ria->ria_end_idx,
726                vio->vui_ra_valid ? vio->vui_ra_start_idx : 0,
727                vio->vui_ra_valid ? vio->vui_ra_pages : 0,
728                hit);
729
730         /* extend the readahead window at least to cover the current read */
731         if (!hit && vio->vui_ra_valid &&
732             vio->vui_ra_start_idx + vio->vui_ra_pages > ria->ria_start_idx) {
733                 ria->ria_end_idx_min =
734                         vio->vui_ra_start_idx + vio->vui_ra_pages - 1;
735                 pages_min = vio->vui_ra_start_idx + vio->vui_ra_pages -
736                                 ria->ria_start_idx;
737         }
738
739         ria->ria_reserved = ll_ra_count_get(ll_i2sbi(inode), ria, pages,
740                                             pages_min);
741         if (ria->ria_reserved < pages)
742                 ll_ra_stats_inc(inode, RA_STAT_MAX_IN_FLIGHT);
743
744         CDEBUG(D_READA, "reserved pages: %lu/%lu/%lu, ra_cur %d, ra_max %lu\n",
745                ria->ria_reserved, pages, pages_min,
746                atomic_read(&ll_i2sbi(inode)->ll_ra_info.ra_cur_pages),
747                ll_i2sbi(inode)->ll_ra_info.ra_max_pages);
748
749         ret = ll_read_ahead_pages(env, io, queue, ras, ria, &ra_end_idx);
750
751         if (ria->ria_reserved != 0)
752                 ll_ra_count_put(ll_i2sbi(inode), ria->ria_reserved);
753
754         if (ra_end_idx == end_idx && ra_end_idx == (kms >> PAGE_SHIFT))
755                 ll_ra_stats_inc(inode, RA_STAT_EOF);
756
757         CDEBUG(D_READA,
758                "ra_end_idx = %lu end_idx = %lu stride end = %lu pages = %d\n",
759                ra_end_idx, end_idx, ria->ria_end_idx, ret);
760
761         if (ra_end_idx != end_idx)
762                 ll_ra_stats_inc(inode, RA_STAT_FAILED_REACH_END);
763         if (ra_end_idx > 0) {
764                 /* update the ras so that the next read-ahead tries from
765                  * where we left off. */
766                 spin_lock(&ras->ras_lock);
767                 ras->ras_next_readahead_idx = ra_end_idx + 1;
768                 spin_unlock(&ras->ras_lock);
769                 RAS_CDEBUG(ras);
770         }
771
772         RETURN(ret);
773 }
774
775 static void ras_set_start(struct ll_readahead_state *ras, pgoff_t index)
776 {
777         ras->ras_window_start_idx = ras_align(ras, index);
778 }
779
780 /* called with the ras_lock held or from places where it doesn't matter */
781 static void ras_reset(struct ll_readahead_state *ras, pgoff_t index)
782 {
783         ras->ras_consecutive_requests = 0;
784         ras->ras_consecutive_bytes = 0;
785         ras->ras_window_pages = 0;
786         ras_set_start(ras, index);
787         ras->ras_next_readahead_idx = max(ras->ras_window_start_idx, index + 1);
788
789         RAS_CDEBUG(ras);
790 }
791
792 /* called with the ras_lock held or from places where it doesn't matter */
793 static void ras_stride_reset(struct ll_readahead_state *ras)
794 {
795         ras->ras_consecutive_stride_requests = 0;
796         ras->ras_stride_length = 0;
797         ras->ras_stride_bytes = 0;
798         RAS_CDEBUG(ras);
799 }
800
801 void ll_readahead_init(struct inode *inode, struct ll_readahead_state *ras)
802 {
803         spin_lock_init(&ras->ras_lock);
804         ras->ras_rpc_pages = PTLRPC_MAX_BRW_PAGES;
805         ras_reset(ras, 0);
806         ras->ras_last_read_end_bytes = 0;
807         ras->ras_requests = 0;
808 }
809
810 /*
811  * Check whether the read request is in the stride window.
812  * Return true if it is, false otherwise.
813  */
814 static bool read_in_stride_window(struct ll_readahead_state *ras,
815                                   loff_t pos, loff_t count)
816 {
817         loff_t stride_gap;
818
819         if (ras->ras_stride_length == 0 || ras->ras_stride_bytes == 0 ||
820             ras->ras_stride_bytes == ras->ras_stride_length)
821                 return false;
822
823         stride_gap = pos - ras->ras_last_read_end_bytes - 1;
824
825         /* If it is contiguous read */
826         if (stride_gap == 0)
827                 return ras->ras_consecutive_bytes + count <=
828                         ras->ras_stride_bytes;
829
830         /* Otherwise check the stride by itself */
831         return (ras->ras_stride_length - ras->ras_stride_bytes) == stride_gap &&
832                 ras->ras_consecutive_bytes == ras->ras_stride_bytes &&
833                 count <= ras->ras_stride_bytes;
834 }
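/*
 * Illustrative example: with stride_length = 1M, stride_bytes = 64K,
 * ras_last_read_end_bytes = 64K - 1 and ras_consecutive_bytes = 64K, a
 * new read at pos = 1M of count = 64K gives stride_gap = 1M - 64K, which
 * equals stride_length - stride_bytes, so the read is inside the stride
 * window.
 */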
835
836 static void ras_init_stride_detector(struct ll_readahead_state *ras,
837                                      loff_t pos, loff_t count)
838 {
839         loff_t stride_gap = pos - ras->ras_last_read_end_bytes - 1;
840
841         LASSERT(ras->ras_consecutive_stride_requests == 0);
842
843         if (pos <= ras->ras_last_read_end_bytes) {
844                 /* Reset stride window: stride detection handles forward reads only */
845                 ras_stride_reset(ras);
846                 return;
847         }
848
849         ras->ras_stride_bytes = ras->ras_consecutive_bytes;
850         ras->ras_stride_length = stride_gap + ras->ras_consecutive_bytes;
851         ras->ras_consecutive_stride_requests++;
852         ras->ras_stride_offset = pos;
853
854         RAS_CDEBUG(ras);
855 }
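/*
 * Illustrative example: after a contiguous read of [0, 64K) followed by
 * a read at pos = 1M, stride_gap = 1M - 64K, so the detector records
 * stride_bytes = 64K, stride_length = stride_gap + 64K = 1M and
 * stride_offset = 1M.
 */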
856
857 static unsigned long
858 stride_page_count(struct ll_readahead_state *ras, loff_t len)
859 {
860         loff_t bytes_count =
861                 stride_byte_count(ras->ras_stride_offset,
862                                   ras->ras_stride_length, ras->ras_stride_bytes,
863                                   ras->ras_window_start_idx << PAGE_SHIFT, len);
864
865         return (bytes_count + PAGE_SIZE - 1) >> PAGE_SHIFT;
866 }
867
868 /* The stride read-ahead window will be increased by inc_bytes according
869  * to the stride I/O pattern */
870 static void ras_stride_increase_window(struct ll_readahead_state *ras,
871                                        struct ll_ra_info *ra, loff_t inc_bytes)
872 {
873         loff_t window_bytes, stride_bytes;
874         u64 left_bytes;
875         u64 step;
876         loff_t end;
877
878         /* temporarily store in page units to reduce LASSERT() cost below */
879         end = ras->ras_window_start_idx + ras->ras_window_pages;
880
881         LASSERT(ras->ras_stride_length > 0);
882         LASSERTF(end >= (ras->ras_stride_offset >> PAGE_SHIFT),
883                  "window_start_idx %lu, window_pages %lu stride_offset %llu\n",
884                  ras->ras_window_start_idx, ras->ras_window_pages,
885                  ras->ras_stride_offset);
886
887         end <<= PAGE_SHIFT;
888         if (end <= ras->ras_stride_offset)
889                 stride_bytes = 0;
890         else
891                 stride_bytes = end - ras->ras_stride_offset;
892
893         div64_u64_rem(stride_bytes, ras->ras_stride_length, &left_bytes);
894         window_bytes = (ras->ras_window_pages << PAGE_SHIFT);
895         if (left_bytes < ras->ras_stride_bytes) {
896                 if (ras->ras_stride_bytes - left_bytes >= inc_bytes) {
897                         window_bytes += inc_bytes;
898                         goto out;
899                 } else {
900                         window_bytes += (ras->ras_stride_bytes - left_bytes);
901                         inc_bytes -= (ras->ras_stride_bytes - left_bytes);
902                 }
903         } else {
904                 window_bytes += (ras->ras_stride_length - left_bytes);
905         }
906
907         LASSERT(ras->ras_stride_bytes != 0);
908
909         step = div64_u64_rem(inc_bytes, ras->ras_stride_bytes, &left_bytes);
910
911         window_bytes += step * ras->ras_stride_length + left_bytes;
912         LASSERT(window_bytes > 0);
913
914 out:
915         if (stride_page_count(ras, window_bytes) <=
916             ra->ra_max_pages_per_file || ras->ras_window_pages == 0)
917                 ras->ras_window_pages = (window_bytes >> PAGE_SHIFT);
918
919         LASSERT(ras->ras_window_pages > 0);
920
921         RAS_CDEBUG(ras);
922 }
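/*
 * Illustrative consequence: because inc_bytes counts useful data rather
 * than file offset, a stride pattern of 64K of data per 1M of stride
 * length grows the window by roughly 16 stride lengths of file offset
 * for every 1M added, subject to the ra_max_pages_per_file cap checked
 * above.
 */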
923
924 static void ras_increase_window(struct inode *inode,
925                                 struct ll_readahead_state *ras,
926                                 struct ll_ra_info *ra)
927 {
928         /* The stretch of the RA window should be aligned with the max
929          * rpc_size, but the current clio architecture does not support
930          * retrieving such information from the lower layer. FIXME later
931          */
932         if (stride_io_mode(ras)) {
933                 ras_stride_increase_window(ras, ra,
934                                       (loff_t)ras->ras_rpc_pages << PAGE_SHIFT);
935         } else {
936                 pgoff_t window_pages;
937
938                 window_pages = min(ras->ras_window_pages + ras->ras_rpc_pages,
939                                    ra->ra_max_pages_per_file);
940                 if (window_pages < ras->ras_rpc_pages)
941                         ras->ras_window_pages = window_pages;
942                 else
943                         ras->ras_window_pages = ras_align(ras, window_pages);
944         }
945 }
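/*
 * Illustrative example: with ras_window_pages = 300, ras_rpc_pages = 256
 * and a large per-file limit, the window becomes ras_align(ras, 556) =
 * 512 pages, keeping growth aligned to full RPCs.
 */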
946
947 /**
948  * Seeks within 8 pages are considered sequential reads for now.
949  */
950 static inline bool is_loose_seq_read(struct ll_readahead_state *ras, loff_t pos)
951 {
952         return pos_in_window(pos, ras->ras_last_read_end_bytes,
953                              8UL << PAGE_SHIFT, 8UL << PAGE_SHIFT);
954 }
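/*
 * Illustrative example (assuming 4KB pages): with
 * ras_last_read_end_bytes = 1M, any pos within [1M - 32K, 1M + 32K] is
 * treated as a loosely sequential read.
 */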
955
956 static void ras_detect_read_pattern(struct ll_readahead_state *ras,
957                                     struct ll_sb_info *sbi,
958                                     loff_t pos, size_t count, bool mmap)
959 {
960         bool stride_detect = false;
961         pgoff_t index = pos >> PAGE_SHIFT;
962
963         /*
964          * Reset the read-ahead window in two cases. First, when the app
965          * seeks or reads to some other part of the file. Second, if we get
966          * a read-ahead miss on a page we think we previously issued. This
967          * can be a symptom of there being so many read-ahead pages that
968          * the VM is reclaiming them before we get to them.
969          */
970         if (!is_loose_seq_read(ras, pos)) {
971                 /* Check whether it is in stride I/O mode */
972                 if (!read_in_stride_window(ras, pos, count)) {
973                         if (ras->ras_consecutive_stride_requests == 0)
974                                 ras_init_stride_detector(ras, pos, count);
975                         else
976                                 ras_stride_reset(ras);
977                         ras->ras_consecutive_bytes = 0;
978                         ras_reset(ras, index);
979                 } else {
980                         ras->ras_consecutive_bytes = 0;
981                         ras->ras_consecutive_requests = 0;
982                         if (++ras->ras_consecutive_stride_requests > 1)
983                                 stride_detect = true;
984                         RAS_CDEBUG(ras);
985                 }
986                 ll_ra_stats_inc_sbi(sbi, RA_STAT_DISTANT_READPAGE);
987         } else if (stride_io_mode(ras)) {
988                 /*
989                  * If this is a contiguous read but we are currently in
990                  * stride I/O mode, check whether the stride step is still
991                  * valid; if invalid, reset the stride RA window to zero.
993                  */
994                 if (!read_in_stride_window(ras, pos, count)) {
995                         ras_stride_reset(ras);
996                         ras->ras_window_pages = 0;
997                         ras->ras_next_readahead_idx = index;
998                 }
999         }
1000
1001         ras->ras_consecutive_bytes += count;
1002         if (mmap) {
1003                 pgoff_t idx = ras->ras_consecutive_bytes >> PAGE_SHIFT;
1004
1005                 if ((idx >= 4 && (idx & 3UL) == 0) || stride_detect)
1006                         ras->ras_need_increase_window = true;
1007         } else if ((ras->ras_consecutive_requests > 1 || stride_detect)) {
1008                 ras->ras_need_increase_window = true;
1009         }
1010
1011         ras->ras_last_read_end_bytes = pos + count - 1;
1012 }
1013
1014 void ll_ras_enter(struct file *f, loff_t pos, size_t count)
1015 {
1016         struct ll_file_data *fd = f->private_data;
1017         struct ll_readahead_state *ras = &fd->fd_ras;
1018         struct inode *inode = file_inode(f);
1019         unsigned long index = pos >> PAGE_SHIFT;
1020         struct ll_sb_info *sbi = ll_i2sbi(inode);
1021
1022         spin_lock(&ras->ras_lock);
1023         ras->ras_requests++;
1024         ras->ras_consecutive_requests++;
1025         ras->ras_need_increase_window = false;
1026         ras->ras_no_miss_check = false;
1027         /*
1028          * On the second access to a file smaller than the tunable
1029          * ra_max_read_ahead_whole_pages, trigger RA on all pages in the
1030          * file up to ra_max_pages_per_file.  This is simply a best effort
1031          * and only occurs once per open file. Normal RA behavior resumes
1032          * for subsequent IO.
1033          */
1034         if (ras->ras_requests >= 2) {
1035                 __u64 kms_pages;
1036                 struct ll_ra_info *ra = &sbi->ll_ra_info;
1037
1038                 kms_pages = (i_size_read(inode) + PAGE_SIZE - 1) >>
1039                             PAGE_SHIFT;
1040
1041                 CDEBUG(D_READA, "kmsp %llu mwp %lu mp %lu\n", kms_pages,
1042                        ra->ra_max_read_ahead_whole_pages,
1043                        ra->ra_max_pages_per_file);
1044
1045                 if (kms_pages &&
1046                     kms_pages <= ra->ra_max_read_ahead_whole_pages) {
1047                         ras->ras_window_start_idx = 0;
1048                         ras->ras_next_readahead_idx = index + 1;
1049                         ras->ras_window_pages = min(ra->ra_max_pages_per_file,
1050                                             ra->ra_max_read_ahead_whole_pages);
1051                         ras->ras_no_miss_check = true;
1052                         GOTO(out_unlock, 0);
1053                 }
1054         }
1055         ras_detect_read_pattern(ras, sbi, pos, count, false);
1056 out_unlock:
1057         spin_unlock(&ras->ras_lock);
1058 }
1059
1060 static bool index_in_stride_window(struct ll_readahead_state *ras,
1061                                    pgoff_t index)
1062 {
1063         loff_t pos = (loff_t)index << PAGE_SHIFT;
1064
1065         if (ras->ras_stride_length == 0 || ras->ras_stride_bytes == 0 ||
1066             ras->ras_stride_bytes == ras->ras_stride_length)
1067                 return false;
1068
1069         if (pos >= ras->ras_stride_offset) {
1070                 u64 offset;
1071
1072                 div64_u64_rem(pos - ras->ras_stride_offset,
1073                               ras->ras_stride_length, &offset);
1074                 if (offset < ras->ras_stride_bytes ||
1075                     ras->ras_stride_length - offset < PAGE_SIZE)
1076                         return true;
1077         } else if (ras->ras_stride_offset - pos < PAGE_SIZE) {
1078                 return true;
1079         }
1080
1081         return false;
1082 }
1083
1084 /*
1085  * ll_ras_enter() is used to detect read pattern according to pos and count.
1086  *
1087  * ras_update() is used to detect cache misses and to
1088  * reset or increase the window accordingly.
1089  */
1090 static void ras_update(struct ll_sb_info *sbi, struct inode *inode,
1091                        struct ll_readahead_state *ras, pgoff_t index,
1092                        enum ras_update_flags flags)
1093 {
1094         struct ll_ra_info *ra = &sbi->ll_ra_info;
1095         bool hit = flags & LL_RAS_HIT;
1096
1097         ENTRY;
1098         spin_lock(&ras->ras_lock);
1099
1100         if (!hit)
1101                 CDEBUG(D_READA, DFID " pages at %lu miss.\n",
1102                        PFID(ll_inode2fid(inode)), index);
1103         ll_ra_stats_inc_sbi(sbi, hit ? RA_STAT_HIT : RA_STAT_MISS);
1104
1105         /*
1106          * The readahead window has been expanded to cover the whole
1107          * file size, so we don't care whether an RA miss happens or not,
1108          * because we will read the whole file into the page cache even
1109          * if some pages are missed.
1110          */
1111         if (ras->ras_no_miss_check)
1112                 GOTO(out_unlock, 0);
1113
1114         if (flags & LL_RAS_MMAP)
1115                 ras_detect_read_pattern(ras, sbi, (loff_t)index << PAGE_SHIFT,
1116                                         PAGE_SIZE, true);
1117
1118         if (!hit && ras->ras_window_pages &&
1119             index < ras->ras_next_readahead_idx &&
1120             pos_in_window(index, ras->ras_window_start_idx, 0,
1121                           ras->ras_window_pages)) {
1122                 ll_ra_stats_inc_sbi(sbi, RA_STAT_MISS_IN_WINDOW);
1123                 ras->ras_need_increase_window = false;
1124
1125                 if (index_in_stride_window(ras, index) &&
1126                     stride_io_mode(ras)) {
1127                         /*
1128                          * if (index != ras->ras_last_readpage + 1)
1129                          *      ras->ras_consecutive_pages = 0;
1130                          */
1131                         ras_reset(ras, index);
1132
1133                         /*
1134                          * If stride-RA hit a cache miss, the stride
1135                          * detector will not be reset, to avoid the
1136                          * overhead of re-detecting read-ahead mode,
1137                          * but only on the condition that the stride
1138                          * window still intersects the normal sequential
1139                          * read-ahead window.
1140                          */
1141                         if (ras->ras_window_start_idx < ras->ras_stride_offset)
1142                                 ras_stride_reset(ras);
1143                         RAS_CDEBUG(ras);
1144                 } else {
1145                         /*
1146                          * Reset both stride window and normal RA
1147                          * window.
1148                          */
1149                         ras_reset(ras, index);
1150                         /* ras->ras_consecutive_pages++; */
1151                         ras->ras_consecutive_bytes = 0;
1152                         ras_stride_reset(ras);
1153                         GOTO(out_unlock, 0);
1154                 }
1155         }
1156         ras_set_start(ras, index);
1157
1158         if (stride_io_mode(ras)) {
1159                 /* Because stride readahead is sensitive to the read-ahead
1160                  * offset, we use the original offset here instead of
1161                  * ras_window_start_idx, which is RPC-aligned.
1162                  */
1163                 ras->ras_next_readahead_idx = max(index + 1,
1164                                                   ras->ras_next_readahead_idx);
1165                 ras->ras_window_start_idx =
1166                                 max_t(pgoff_t, ras->ras_window_start_idx,
1167                                       ras->ras_stride_offset >> PAGE_SHIFT);
1168         } else {
1169                 if (ras->ras_next_readahead_idx < ras->ras_window_start_idx)
1170                         ras->ras_next_readahead_idx = ras->ras_window_start_idx;
1171                 if (!hit)
1172                         ras->ras_next_readahead_idx = index + 1;
1173         }
1174
1175         if (ras->ras_need_increase_window) {
1176                 ras_increase_window(inode, ras, ra);
1177                 ras->ras_need_increase_window = false;
1178         }
1179
1180         EXIT;
1181 out_unlock:
1182         spin_unlock(&ras->ras_lock);
1183 }
1184
1185 int ll_writepage(struct page *vmpage, struct writeback_control *wbc)
1186 {
1187         struct inode           *inode = vmpage->mapping->host;
1188         struct ll_inode_info   *lli   = ll_i2info(inode);
1189         struct lu_env          *env;
1190         struct cl_io           *io;
1191         struct cl_page         *page;
1192         struct cl_object       *clob;
1193         bool redirtied = false;
1194         bool unlocked = false;
1195         int result;
1196         __u16 refcheck;
1197         ENTRY;
1198
1199         LASSERT(PageLocked(vmpage));
1200         LASSERT(!PageWriteback(vmpage));
1201
1202         LASSERT(ll_i2dtexp(inode) != NULL);
1203
1204         env = cl_env_get(&refcheck);
1205         if (IS_ERR(env))
1206                 GOTO(out, result = PTR_ERR(env));
1207
1208         clob  = ll_i2info(inode)->lli_clob;
1209         LASSERT(clob != NULL);
1210
1211         io = vvp_env_thread_io(env);
1212         io->ci_obj = clob;
1213         io->ci_ignore_layout = 1;
1214         result = cl_io_init(env, io, CIT_MISC, clob);
1215         if (result == 0) {
1216                 page = cl_page_find(env, clob, vmpage->index,
1217                                     vmpage, CPT_CACHEABLE);
1218                 if (!IS_ERR(page)) {
1219                         lu_ref_add(&page->cp_reference, "writepage",
1220                                    current);
1221                         cl_page_assume(env, io, page);
1222                         result = cl_page_flush(env, io, page);
1223                         if (result != 0) {
1224                                 /*
1225                                  * Re-dirty page on error so it retries write,
1226                                  * but not in case when IO has actually
1227                                  * occurred and completed with an error.
1228                                  */
1229                                 if (!PageError(vmpage)) {
1230                                         redirty_page_for_writepage(wbc, vmpage);
1231                                         result = 0;
1232                                         redirtied = true;
1233                                 }
1234                         }
1235                         cl_page_disown(env, io, page);
1236                         unlocked = true;
1237                         lu_ref_del(&page->cp_reference,
1238                                    "writepage", current);
1239                         cl_page_put(env, page);
1240                 } else {
1241                         result = PTR_ERR(page);
1242                 }
1243         }
1244         cl_io_fini(env, io);
1245
1246         if (redirtied && wbc->sync_mode == WB_SYNC_ALL) {
1247                 loff_t offset = cl_offset(clob, vmpage->index);
1248
1249                 /* Flushing the page failed because the extent is being
1250                  * written out. Wait for the write of the extent to finish,
1251                  * to avoid breaking the kernel, which assumes ->writepage
1252                  * should mark PageWriteback or clean the page. */
1253                 result = cl_sync_file_range(inode, offset,
1254                                             offset + PAGE_SIZE - 1,
1255                                             CL_FSYNC_LOCAL, 1);
1256                 if (result > 0) {
1257                         /* We may actually have written more than one page;
1258                          * subtract this page from the count because the
1259                          * caller will count it. */
1260                         wbc->nr_to_write -= result - 1;
1261                         result = 0;
1262                 }
1263         }
1264
1265         cl_env_put(env, &refcheck);
1266         GOTO(out, result);
1267
1268 out:
1269         if (result < 0) {
1270                 if (!lli->lli_async_rc)
1271                         lli->lli_async_rc = result;
1272                 SetPageError(vmpage);
1273                 if (!unlocked)
1274                         unlock_page(vmpage);
1275         }
1276         return result;
1277 }
1278
1279 int ll_writepages(struct address_space *mapping, struct writeback_control *wbc)
1280 {
1281         struct inode *inode = mapping->host;
1282         loff_t start;
1283         loff_t end;
1284         enum cl_fsync_mode mode;
1285         int range_whole = 0;
1286         int result;
1287         ENTRY;
1288
1289         if (wbc->range_cyclic) {
1290                 start = (loff_t)mapping->writeback_index << PAGE_SHIFT;
1291                 end = OBD_OBJECT_EOF;
1292         } else {
1293                 start = wbc->range_start;
1294                 end = wbc->range_end;
1295                 if (end == LLONG_MAX) {
1296                         end = OBD_OBJECT_EOF;
1297                         range_whole = start == 0;
1298                 }
1299         }
1300
1301         mode = CL_FSYNC_NONE;
1302         if (wbc->sync_mode == WB_SYNC_ALL)
1303                 mode = CL_FSYNC_LOCAL;
1304
1305         if (ll_i2info(inode)->lli_clob == NULL)
1306                 RETURN(0);
1307
1308         /* for directio, it would call writepages() to evict cached pages
1309          * inside the IO context of write, which will cause deadlock at
1310          * layout_conf since it waits for active IOs to complete. */
1311         result = cl_sync_file_range(inode, start, end, mode, 1);
1312         if (result > 0) {
1313                 wbc->nr_to_write -= result;
1314                 result = 0;
1315         }
1316
1317         if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0)) {
1318                 if (end == OBD_OBJECT_EOF)
1319                         mapping->writeback_index = 0;
1320                 else
1321                         mapping->writeback_index = (end >> PAGE_SHIFT) + 1;
1322         }
1323         RETURN(result);
1324 }
1325
1326 struct ll_cl_context *ll_cl_find(struct file *file)
1327 {
1328         struct ll_file_data *fd = file->private_data;
1329         struct ll_cl_context *lcc;
1330         struct ll_cl_context *found = NULL;
1331
1332         read_lock(&fd->fd_lock);
1333         list_for_each_entry(lcc, &fd->fd_lccs, lcc_list) {
1334                 if (lcc->lcc_cookie == current) {
1335                         found = lcc;
1336                         break;
1337                 }
1338         }
1339         read_unlock(&fd->fd_lock);
1340
1341         return found;
1342 }
1343
1344 void ll_cl_add(struct file *file, const struct lu_env *env, struct cl_io *io,
1345                enum lcc_type type)
1346 {
1347         struct ll_file_data *fd = file->private_data;
1348         struct ll_cl_context *lcc = &ll_env_info(env)->lti_io_ctx;
1349
1350         memset(lcc, 0, sizeof(*lcc));
1351         INIT_LIST_HEAD(&lcc->lcc_list);
1352         lcc->lcc_cookie = current;
1353         lcc->lcc_env = env;
1354         lcc->lcc_io = io;
1355         lcc->lcc_type = type;
1356
1357         write_lock(&fd->fd_lock);
1358         list_add(&lcc->lcc_list, &fd->fd_lccs);
1359         write_unlock(&fd->fd_lock);
1360 }
1361
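/*
 * Remove the context registered by ll_cl_add() once the IO that owns
 * it has finished; must be called with the same env that was added.
 */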
1362 void ll_cl_remove(struct file *file, const struct lu_env *env)
1363 {
1364         struct ll_file_data *fd = file->private_data;
1365         struct ll_cl_context *lcc = &ll_env_info(env)->lti_io_ctx;
1366
1367         write_lock(&fd->fd_lock);
1368         list_del_init(&lcc->lcc_list);
1369         write_unlock(&fd->fd_lock);
1370 }
1371
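/*
 * Read @page for @file inside an existing cl_io.  A page that was
 * already brought in by readahead is exported directly; otherwise it
 * is queued together with freshly added readahead pages, submitted,
 * and waited on through a cl_sync_io anchor before returning.
 */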
1372 int ll_io_read_page(const struct lu_env *env, struct cl_io *io,
1373                     struct cl_page *page, struct file *file)
1374 {
1375         struct inode              *inode  = vvp_object_inode(page->cp_obj);
1376         struct ll_sb_info         *sbi    = ll_i2sbi(inode);
1377         struct ll_file_data       *fd     = file->private_data;
1378         struct ll_readahead_state *ras    = &fd->fd_ras;
1379         struct cl_2queue          *queue  = &io->ci_queue;
1380         struct cl_sync_io         *anchor = NULL;
1381         struct vvp_page           *vpg;
1382         int                        rc = 0;
1383         bool                       uptodate;
1384         ENTRY;
1385
1386         vpg = cl2vvp_page(cl_object_page_slice(page->cp_obj, page));
1387         uptodate = vpg->vpg_defer_uptodate;
1388
1389         if (sbi->ll_ra_info.ra_max_pages_per_file > 0 &&
1390             sbi->ll_ra_info.ra_max_pages > 0 &&
1391             !vpg->vpg_ra_updated) {
1392                 struct vvp_io *vio = vvp_env_io(env);
1393                 enum ras_update_flags flags = 0;
1394
1395                 if (uptodate)
1396                         flags |= LL_RAS_HIT;
1397                 if (!vio->vui_ra_valid)
1398                         flags |= LL_RAS_MMAP;
1399                 ras_update(sbi, inode, ras, vvp_index(vpg), flags);
1400         }
1401
1402         cl_2queue_init(queue);
1403         if (uptodate) {
1404                 vpg->vpg_ra_used = 1;
1405                 cl_page_export(env, page, 1);
1406                 cl_page_disown(env, io, page);
1407         } else {
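                /* The page is not uptodate: attach a sync anchor so that
                 * this one page can be waited on while any readahead pages
                 * complete asynchronously. */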
1408                 anchor = &vvp_env_info(env)->vti_anchor;
1409                 cl_sync_io_init(anchor, 1);
1410                 page->cp_sync_io = anchor;
1411
1412                 cl_2queue_add(queue, page);
1413         }
1414
1415         if (sbi->ll_ra_info.ra_max_pages_per_file > 0 &&
1416             sbi->ll_ra_info.ra_max_pages > 0) {
1417                 int rc2;
1418
1419                 rc2 = ll_readahead(env, io, &queue->c2_qin, ras,
1420                                    uptodate, file);
1421                 CDEBUG(D_READA, DFID " %d pages read ahead at %lu\n",
1422                        PFID(ll_inode2fid(inode)), rc2, vvp_index(vpg));
1423         }
1424
1425         if (queue->c2_qin.pl_nr > 0) {
1426                 int count = queue->c2_qin.pl_nr;
1427                 rc = cl_io_submit_rw(env, io, CRT_READ, queue);
1428                 if (rc == 0)
1429                         task_io_account_read(PAGE_SIZE * count);
1430         }
1431
1433         if (anchor != NULL && !cl_page_is_owned(page, io)) { /* page sent */
1434                 rc = cl_sync_io_wait(env, anchor, 0);
1435
1436                 cl_page_assume(env, io, page);
1437                 cl_page_list_del(env, &queue->c2_qout, page);
1438
1439                 if (!PageUptodate(cl_page_vmpage(page))) {
1440                         /* Failed to read from a mirror; discard this page
1441                          * so a new page can be created with a new mirror.
1442                          *
1443                          * TODO: this is not needed once the page
1444                          * reinit route is implemented */
1445                         cl_page_discard(env, io, page);
1446                 }
1447                 cl_page_disown(env, io, page);
1448         }
1449
1450         /* TODO: discard all pages until page reinit is implemented */
1451         cl_page_list_discard(env, io, &queue->c2_qin);
1452
1453         /* Unlock unsent read pages in case of error. */
1454         cl_page_list_disown(env, io, &queue->c2_qin);
1455
1456         cl_2queue_fini(env, queue);
1457
1458         RETURN(rc);
1459 }
1460
1461 /*
1462  * Possible return values:
1463  * 0: no async readahead triggered; fast read cannot be used.
1464  * 1: no async readahead triggered, but fast read can be used.
1465  * 2: async readahead triggered and fast read can be used too.
1466  * < 0: error.
1467  */
1468 static int kickoff_async_readahead(struct file *file, unsigned long pages)
1469 {
1470         struct ll_readahead_work *lrw;
1471         struct inode *inode = file_inode(file);
1472         struct ll_sb_info *sbi = ll_i2sbi(inode);
1473         struct ll_file_data *fd = file->private_data;
1474         struct ll_readahead_state *ras = &fd->fd_ras;
1475         struct ll_ra_info *ra = &sbi->ll_ra_info;
1476         unsigned long throttle;
1477         pgoff_t start_idx = ras_align(ras, ras->ras_next_readahead_idx);
1478         pgoff_t end_idx = start_idx + pages - 1;
1479
1480         throttle = min(ra->ra_async_pages_per_file_threshold,
1481                        ra->ra_max_pages_per_file);
1482         /*
1483          * If this is strided i/o or the window is smaller than the
1484          * throttle limit, we do not do async readahead. Otherwise,
1485          * we do async readahead, allowing the user thread to do fast i/o.
1486          */
1487         if (stride_io_mode(ras) || !throttle ||
1488             ras->ras_window_pages < throttle ||
1489             atomic_read(&ra->ra_async_inflight) > ra->ra_async_max_active)
1490                 return 0;
1491
1492         if ((atomic_read(&ra->ra_cur_pages) + pages) > ra->ra_max_pages)
1493                 return 0;
1494
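        /* An async readahead starting at this index has already been
         * queued; no new work is needed, but fast read is still fine. */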
1495         if (ras->ras_async_last_readpage_idx == start_idx)
1496                 return 1;
1497
1498         /* freed by ll_readahead_work_free() */
1499         OBD_ALLOC_PTR(lrw);
1500         if (lrw) {
1501                 atomic_inc(&sbi->ll_ra_info.ra_async_inflight);
1502                 lrw->lrw_file = get_file(file);
1503                 lrw->lrw_start_idx = start_idx;
1504                 lrw->lrw_end_idx = end_idx;
1505                 spin_lock(&ras->ras_lock);
1506                 ras->ras_next_readahead_idx = end_idx + 1;
1507                 ras->ras_async_last_readpage_idx = start_idx;
1508                 spin_unlock(&ras->ras_lock);
1509                 ll_readahead_work_add(inode, lrw);
1510         } else {
1511                 return -ENOMEM;
1512         }
1513
1514         return 2;
1515 }
1516
1517 /*
1518  * Check if a readahead RPC needs to be issued; if that is the
1519  * case, we cannot do fast IO because we will need a cl_io to
1520  * issue the RPC.
1521  */
1522 static bool ll_use_fast_io(struct file *file,
1523                            struct ll_readahead_state *ras, pgoff_t index)
1524 {
1525         unsigned long fast_read_pages =
1526                 max(RA_REMAIN_WINDOW_MIN, ras->ras_rpc_pages);
1527         loff_t skip_pages;
1528
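        /* skip_pages is the minimum un-issued part of the readahead
         * window that is worth a new RPC.  For strided IO only
         * ras_stride_bytes of every ras_stride_length bytes are read,
         * so fast_read_pages is scaled by the rounded-up ratio of
         * the two. */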
1529         if (stride_io_mode(ras)) {
1530                 skip_pages = (ras->ras_stride_length +
1531                         ras->ras_stride_bytes - 1) / ras->ras_stride_bytes;
1532                 skip_pages *= fast_read_pages;
1533         } else {
1534                 skip_pages = fast_read_pages;
1535         }
1536
1537         if (ras->ras_window_start_idx + ras->ras_window_pages <
1538             ras->ras_next_readahead_idx + skip_pages ||
1539             kickoff_async_readahead(file, fast_read_pages) > 0)
1540                 return true;
1541
1542         return false;
1543 }
1544
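/*
 * Lustre implementation of ->readpage().  When the calling thread has
 * registered a cl_io (see ll_cl_find()), the page is read through the
 * full cl_io stack.  Otherwise this is a "fast read", which can only
 * be served from pages readahead has already brought into the cache.
 */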
1545 int ll_readpage(struct file *file, struct page *vmpage)
1546 {
1547         struct inode *inode = file_inode(file);
1548         struct cl_object *clob = ll_i2info(inode)->lli_clob;
1549         struct ll_cl_context *lcc;
1550         const struct lu_env  *env = NULL;
1551         struct cl_io   *io = NULL;
1552         struct cl_page *page;
1553         struct ll_sb_info *sbi = ll_i2sbi(inode);
1554         int result;
1555         ENTRY;
1556
1557         lcc = ll_cl_find(file);
1558         if (lcc != NULL) {
1559                 env = lcc->lcc_env;
1560                 io  = lcc->lcc_io;
1561         }
1562
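        /* Fast read path: without a cl_io only cached pages can be
         * served.  -ENODATA makes the fast read caller fall back to
         * the slow path, which sets up a real cl_io and can issue
         * RPCs. */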
1563         if (io == NULL) { /* fast read */
1565                 struct ll_file_data *fd = file->private_data;
1566                 struct ll_readahead_state *ras = &fd->fd_ras;
1567                 struct lu_env  *local_env = NULL;
1568                 struct vvp_page *vpg;
1569
1570                 result = -ENODATA;
1571
1572                 /* TODO: need to verify the layout version to make sure
1573                  * the page has not been invalidated by a layout change. */
1574                 page = cl_vmpage_page(vmpage, clob);
1575                 if (page == NULL) {
1576                         unlock_page(vmpage);
1577                         ll_ra_stats_inc_sbi(sbi, RA_STAT_FAILED_FAST_READ);
1578                         RETURN(result);
1579                 }
1580
1581                 vpg = cl2vvp_page(cl_object_page_slice(page->cp_obj, page));
1582                 if (vpg->vpg_defer_uptodate) {
1583                         enum ras_update_flags flags = LL_RAS_HIT;
1584
1585                         if (lcc && lcc->lcc_type == LCC_MMAP)
1586                                 flags |= LL_RAS_MMAP;
1587
1588                         /* Fast read updates the readahead state only when
1589                          * the page is a cache hit; the non-cached case is
1590                          * handled by the slow read path later. */
1591                         ras_update(sbi, inode, ras, vvp_index(vpg), flags);
1592                         /* avoid duplicate ras_update() call */
1593                         vpg->vpg_ra_updated = 1;
1594
1595                         if (ll_use_fast_io(file, ras, vvp_index(vpg)))
1596                                 result = 0;
1597                 }
1598
1599                 if (!env) {
1600                         local_env = cl_env_percpu_get();
1601                         env = local_env;
1602                 }
1603
1604                 /* export the page and skip the io stack */
1605                 if (result == 0) {
1606                         vpg->vpg_ra_used = 1;
1607                         cl_page_export(env, page, 1);
1608                 } else {
1609                         ll_ra_stats_inc_sbi(sbi, RA_STAT_FAILED_FAST_READ);
1610                 }
1611                 /* Release the page refcount before unlocking the page to
1612                  * ensure the object won't be destroyed in the calling path
1613                  * of cl_page_put().  See the comment in ll_releasepage(). */
1614                 cl_page_put(env, page);
1615                 unlock_page(vmpage);
1616                 if (local_env)
1617                         cl_env_percpu_put(local_env);
1618
1619                 RETURN(result);
1620         }
1621
1622         /*
1623          * Direct read can fall back to buffered read, but DIO is done
1624          * with lockless i/o, and buffered requires LDLM locking, so in
1625          * this case we must restart without lockless.
1626          */
1627         if (file->f_flags & O_DIRECT &&
1628             lcc && lcc->lcc_type == LCC_RW &&
1629             !io->ci_ignore_lockless) {
1630                 unlock_page(vmpage);
1631                 io->ci_ignore_lockless = 1;
1632                 io->ci_need_restart = 1;
1633                 RETURN(-ENOLCK);
1634         }
1635
1636         LASSERT(io->ci_state == CIS_IO_GOING);
1637         page = cl_page_find(env, clob, vmpage->index, vmpage, CPT_CACHEABLE);
1638         if (!IS_ERR(page)) {
1639                 LASSERT(page->cp_type == CPT_CACHEABLE);
1640                 if (likely(!PageUptodate(vmpage))) {
1641                         cl_page_assume(env, io, page);
1642
1643                         result = ll_io_read_page(env, io, page, file);
1644                 } else {
1645                         /* Page from a non-object file. */
1646                         unlock_page(vmpage);
1647                         result = 0;
1648                 }
1649                 cl_page_put(env, page);
1650         } else {
1651                 unlock_page(vmpage);
1652                 result = PTR_ERR(page);
1653         }
1654         RETURN(result);
1655 }