/* fs/lustre-release.git: lustre/osc/osc_cache.c
 * LU-4198 clio: AIO support for direct IO
 */
/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2012, 2017, Intel Corporation.
 *
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * osc cache management.
 *
 * Author: Jinshan Xiong <jinshan.xiong@whamcloud.com>
 */

#define DEBUG_SUBSYSTEM S_OSC

#include <lustre_osc.h>

#include "osc_internal.h"

static int extent_debug; /* set to a non-zero value for extra debugging */

static void osc_update_pending(struct osc_object *obj, int cmd, int delta);
static int osc_extent_wait(const struct lu_env *env, struct osc_extent *ext,
                           enum osc_extent_state state);
static void osc_ap_completion(const struct lu_env *env, struct client_obd *cli,
                              struct osc_async_page *oap, int sent, int rc);
static int osc_make_ready(const struct lu_env *env, struct osc_async_page *oap,
                          int cmd);
static int osc_refresh_count(const struct lu_env *env,
                             struct osc_async_page *oap, int cmd);
static int osc_io_unplug_async(const struct lu_env *env,
                               struct client_obd *cli, struct osc_object *osc);
static void osc_free_grant(struct client_obd *cli, unsigned int nr_pages,
                           unsigned int lost_grant, unsigned int dirty_grant);

static void osc_extent_tree_dump0(int mask, struct osc_object *obj,
                                  const char *func, int line);
#define osc_extent_tree_dump(mask, obj) \
        osc_extent_tree_dump0(mask, obj, __func__, __LINE__)

static void osc_unreserve_grant(struct client_obd *cli, unsigned int reserved,
                                unsigned int unused);

/** \addtogroup osc
 *  @{
 */

/* ------------------ osc extent ------------------ */
static inline char *ext_flags(struct osc_extent *ext, char *flags)
{
        char *buf = flags;
        *buf++ = ext->oe_rw ? 'r' : 'w';
        if (ext->oe_intree)
                *buf++ = 'i';
        if (ext->oe_sync)
                *buf++ = 'S';
        if (ext->oe_srvlock)
                *buf++ = 's';
        if (ext->oe_hp)
                *buf++ = 'h';
        if (ext->oe_urgent)
                *buf++ = 'u';
        if (ext->oe_memalloc)
                *buf++ = 'm';
        if (ext->oe_trunc_pending)
                *buf++ = 't';
        if (ext->oe_fsync_wait)
                *buf++ = 'Y';
        *buf = 0;
        return flags;
}
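
/*
 * Illustration (hypothetical values): a write extent that is in the
 * rbtree and marked urgent prints as "wiu"; a sync read extent waiting
 * for fsync prints as "rSY".  At most 9 flag characters plus the
 * terminating NUL are emitted, so the char __buf[16] used by
 * OSC_EXTENT_DUMP_WITH_LOC() below is always large enough.
 */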

static inline char list_empty_marker(struct list_head *list)
{
        return list_empty(list) ? '-' : '+';
}

#define EXTSTR       "[%lu -> %lu/%lu]"
#define EXTPARA(ext) (ext)->oe_start, (ext)->oe_end, (ext)->oe_max_end
static const char *oes_strings[] = {
        "inv", "active", "cache", "locking", "lockdone", "rpc", "trunc", NULL };

#define OSC_EXTENT_DUMP_WITH_LOC(file, func, line, mask, extent, fmt, ...) do {\
        static struct cfs_debug_limit_state cdls;                             \
        struct osc_extent *__ext = (extent);                                  \
        char __buf[16];                                                       \
                                                                              \
        __CDEBUG_WITH_LOC(file, func, line, mask, &cdls,                      \
                "extent %p@{" EXTSTR ", "                                     \
                "[%d|%d|%c|%s|%s|%p], [%d|%d|%c|%c|%p|%u|%p]} " fmt,          \
                /* ----- extent part 0 ----- */                               \
                __ext, EXTPARA(__ext),                                        \
                /* ----- part 1 ----- */                                      \
                atomic_read(&__ext->oe_refc),                                 \
                atomic_read(&__ext->oe_users),                                \
                list_empty_marker(&__ext->oe_link),                           \
                oes_strings[__ext->oe_state], ext_flags(__ext, __buf),        \
                __ext->oe_obj,                                                \
                /* ----- part 2 ----- */                                      \
                __ext->oe_grants, __ext->oe_nr_pages,                         \
                list_empty_marker(&__ext->oe_pages),                          \
                waitqueue_active(&__ext->oe_waitq) ? '+' : '-',               \
                __ext->oe_dlmlock, __ext->oe_mppr, __ext->oe_owner,           \
                /* ----- part 3 ----- */                                      \
                ## __VA_ARGS__);                                              \
        if (mask == D_ERROR && __ext->oe_dlmlock != NULL)                     \
                LDLM_ERROR(__ext->oe_dlmlock, "extent: %p", __ext);           \
        else                                                                  \
                LDLM_DEBUG(__ext->oe_dlmlock, "extent: %p", __ext);           \
} while (0)

#define OSC_EXTENT_DUMP(mask, ext, fmt, ...)                            \
        OSC_EXTENT_DUMP_WITH_LOC(__FILE__, __func__, __LINE__,          \
                                 mask, ext, fmt, ## __VA_ARGS__)

#undef EASSERTF
#define EASSERTF(expr, ext, fmt, args...) do {                          \
        if (!(expr)) {                                                  \
                OSC_EXTENT_DUMP(D_ERROR, (ext), fmt, ##args);           \
                osc_extent_tree_dump(D_ERROR, (ext)->oe_obj);           \
                LASSERT(expr);                                          \
        }                                                               \
} while (0)

#undef EASSERT
#define EASSERT(expr, ext) EASSERTF(expr, ext, "\n")

static inline struct osc_extent *rb_extent(struct rb_node *n)
{
        if (n == NULL)
                return NULL;

        return container_of(n, struct osc_extent, oe_node);
}

static inline struct osc_extent *next_extent(struct osc_extent *ext)
{
        if (ext == NULL)
                return NULL;

        LASSERT(ext->oe_intree);
        return rb_extent(rb_next(&ext->oe_node));
}

static inline struct osc_extent *prev_extent(struct osc_extent *ext)
{
        if (ext == NULL)
                return NULL;

        LASSERT(ext->oe_intree);
        return rb_extent(rb_prev(&ext->oe_node));
}

static inline struct osc_extent *first_extent(struct osc_object *obj)
{
        return rb_extent(rb_first(&obj->oo_root));
}

/* object must be locked by caller. */
static int osc_extent_sanity_check0(struct osc_extent *ext,
                                    const char *func, const int line)
{
        struct osc_object *obj = ext->oe_obj;
        struct osc_async_page *oap;
        size_t page_count;
        int rc = 0;

        if (!osc_object_is_locked(obj))
                GOTO(out, rc = 9);

        if (ext->oe_state >= OES_STATE_MAX)
                GOTO(out, rc = 10);

        if (atomic_read(&ext->oe_refc) <= 0)
                GOTO(out, rc = 20);

        if (atomic_read(&ext->oe_refc) < atomic_read(&ext->oe_users))
                GOTO(out, rc = 30);

        switch (ext->oe_state) {
        case OES_INV:
                if (ext->oe_nr_pages > 0 || !list_empty(&ext->oe_pages))
                        GOTO(out, rc = 35);
                GOTO(out, rc = 0);
                break;
        case OES_ACTIVE:
                if (atomic_read(&ext->oe_users) == 0)
                        GOTO(out, rc = 40);
                if (ext->oe_hp)
                        GOTO(out, rc = 50);
                if (ext->oe_fsync_wait && !ext->oe_urgent)
                        GOTO(out, rc = 55);
                break;
        case OES_CACHE:
                if (ext->oe_grants == 0)
                        GOTO(out, rc = 60);
                if (ext->oe_fsync_wait && !ext->oe_urgent && !ext->oe_hp)
                        GOTO(out, rc = 65);
                /* fallthrough */
        default:
                if (atomic_read(&ext->oe_users) > 0)
                        GOTO(out, rc = 70);
        }

        if (ext->oe_max_end < ext->oe_end || ext->oe_end < ext->oe_start)
                GOTO(out, rc = 80);

        if (ext->oe_sync && ext->oe_grants > 0)
                GOTO(out, rc = 90);

        if (ext->oe_dlmlock != NULL &&
            ext->oe_dlmlock->l_resource->lr_type == LDLM_EXTENT &&
            !ldlm_is_failed(ext->oe_dlmlock)) {
                struct ldlm_extent *extent;

                extent = &ext->oe_dlmlock->l_policy_data.l_extent;
                if (!(extent->start <= cl_offset(osc2cl(obj), ext->oe_start) &&
                      extent->end   >= cl_offset(osc2cl(obj), ext->oe_max_end)))
                        GOTO(out, rc = 100);

                if (!(ext->oe_dlmlock->l_granted_mode & (LCK_PW | LCK_GROUP)))
                        GOTO(out, rc = 102);
        }

        if (ext->oe_nr_pages > ext->oe_mppr)
                GOTO(out, rc = 105);

        /* Do not verify page list if extent is in RPC. This is because an
         * in-RPC extent is supposed to be exclusively accessible w/o lock. */
        if (ext->oe_state > OES_CACHE)
                GOTO(out, rc = 0);

        if (!extent_debug)
                GOTO(out, rc = 0);

        page_count = 0;
        list_for_each_entry(oap, &ext->oe_pages, oap_pending_item) {
                pgoff_t index = osc_index(oap2osc(oap));
                ++page_count;
                if (index > ext->oe_end || index < ext->oe_start)
                        GOTO(out, rc = 110);
        }
        if (page_count != ext->oe_nr_pages)
                GOTO(out, rc = 120);

out:
        if (rc != 0)
                OSC_EXTENT_DUMP_WITH_LOC(__FILE__, func, line, D_ERROR, ext,
                                         "sanity check %p failed: rc = %d\n",
                                         ext, rc);
        return rc;
}

#define sanity_check_nolock(ext) \
        osc_extent_sanity_check0(ext, __func__, __LINE__)

#define sanity_check(ext) ({                                                   \
        int __res;                                                             \
        osc_object_lock((ext)->oe_obj);                                        \
        __res = sanity_check_nolock(ext);                                      \
        osc_object_unlock((ext)->oe_obj);                                      \
        __res;                                                                 \
})

/**
 * Sanity check - make sure there are no overlapping extents in the tree.
 */
static int osc_extent_is_overlapped(struct osc_object *obj,
                                    struct osc_extent *ext)
{
        struct osc_extent *tmp;

        LASSERT(osc_object_is_locked(obj));

        if (!extent_debug)
                return 0;

        for (tmp = first_extent(obj); tmp != NULL; tmp = next_extent(tmp)) {
                if (tmp == ext)
                        continue;
                if (tmp->oe_end >= ext->oe_start &&
                    tmp->oe_start <= ext->oe_end)
                        return 1;
        }
        return 0;
}

static void osc_extent_state_set(struct osc_extent *ext, int state)
{
        LASSERT(osc_object_is_locked(ext->oe_obj));
        LASSERT(state >= OES_INV && state < OES_STATE_MAX);

        /* Never try to sanity check a state changing extent :-) */
        /* LASSERT(sanity_check_nolock(ext) == 0); */

        /* TODO: validate the state machine */
        ext->oe_state = state;
        wake_up_all(&ext->oe_waitq);
}

static struct osc_extent *osc_extent_alloc(struct osc_object *obj)
{
        struct osc_extent *ext;

        OBD_SLAB_ALLOC_PTR_GFP(ext, osc_extent_kmem, GFP_NOFS);
        if (ext == NULL)
                return NULL;

        RB_CLEAR_NODE(&ext->oe_node);
        ext->oe_obj = obj;
        cl_object_get(osc2cl(obj));
        atomic_set(&ext->oe_refc, 1);
        atomic_set(&ext->oe_users, 0);
        INIT_LIST_HEAD(&ext->oe_link);
        ext->oe_state = OES_INV;
        INIT_LIST_HEAD(&ext->oe_pages);
        init_waitqueue_head(&ext->oe_waitq);
        ext->oe_dlmlock = NULL;

        return ext;
}

static void osc_extent_free(struct osc_extent *ext)
{
        OBD_SLAB_FREE_PTR(ext, osc_extent_kmem);
}

static struct osc_extent *osc_extent_get(struct osc_extent *ext)
{
        LASSERT(atomic_read(&ext->oe_refc) >= 0);
        atomic_inc(&ext->oe_refc);
        return ext;
}

static void osc_extent_put(const struct lu_env *env, struct osc_extent *ext)
{
        LASSERT(atomic_read(&ext->oe_refc) > 0);
        if (atomic_dec_and_test(&ext->oe_refc)) {
                LASSERT(list_empty(&ext->oe_link));
                LASSERT(atomic_read(&ext->oe_users) == 0);
                LASSERT(ext->oe_state == OES_INV);
                LASSERT(!ext->oe_intree);

                if (ext->oe_dlmlock != NULL) {
                        lu_ref_add(&ext->oe_dlmlock->l_reference,
                                   "osc_extent", ext);
                        LDLM_LOCK_PUT(ext->oe_dlmlock);
                        ext->oe_dlmlock = NULL;
                }
                cl_object_put(env, osc2cl(ext->oe_obj));
                osc_extent_free(ext);
        }
}

/**
 * osc_extent_put_trust() is a special version of osc_extent_put() for use
 * when it is known that the caller is not the last user. This works around
 * the lack of a lu_env here ;-).
 */
static void osc_extent_put_trust(struct osc_extent *ext)
{
        LASSERT(atomic_read(&ext->oe_refc) > 1);
        LASSERT(osc_object_is_locked(ext->oe_obj));
        atomic_dec(&ext->oe_refc);
}

/**
 * Return the extent which includes pgoff @index, or return the greatest
 * previous extent in the tree.
 */
static struct osc_extent *osc_extent_search(struct osc_object *obj,
                                            pgoff_t index)
{
        struct rb_node    *n = obj->oo_root.rb_node;
        struct osc_extent *tmp, *p = NULL;

        LASSERT(osc_object_is_locked(obj));
        while (n != NULL) {
                tmp = rb_extent(n);
                if (index < tmp->oe_start) {
                        n = n->rb_left;
                } else if (index > tmp->oe_end) {
                        p = rb_extent(n);
                        n = n->rb_right;
                } else {
                        return tmp;
                }
        }
        return p;
}
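
/*
 * A hypothetical walk, for illustration: with extents [0 -> 3] and
 * [8 -> 11] in the tree, searching for index 9 returns [8 -> 11]
 * because 8 <= 9 <= 11; searching for index 5 misses both and returns
 * [0 -> 3], the greatest extent preceding index 5; searching for index
 * 12 likewise returns [8 -> 11].  Callers such as osc_extent_lookup()
 * must therefore check whether the result actually covers the index.
 */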

/*
 * Return the extent covering @index, otherwise return NULL.
 * caller must have held object lock.
 */
static struct osc_extent *osc_extent_lookup(struct osc_object *obj,
                                            pgoff_t index)
{
        struct osc_extent *ext;

        ext = osc_extent_search(obj, index);
        if (ext != NULL && ext->oe_start <= index && index <= ext->oe_end)
                return osc_extent_get(ext);
        return NULL;
}

/* caller must have held object lock. */
static void osc_extent_insert(struct osc_object *obj, struct osc_extent *ext)
{
        struct rb_node   **n      = &obj->oo_root.rb_node;
        struct rb_node    *parent = NULL;
        struct osc_extent *tmp;

        LASSERT(ext->oe_intree == 0);
        LASSERT(ext->oe_obj == obj);
        LASSERT(osc_object_is_locked(obj));
        while (*n != NULL) {
                tmp = rb_extent(*n);
                parent = *n;

                if (ext->oe_end < tmp->oe_start)
                        n = &(*n)->rb_left;
                else if (ext->oe_start > tmp->oe_end)
                        n = &(*n)->rb_right;
                else
                        EASSERTF(0, tmp, EXTSTR"\n", EXTPARA(ext));
        }
        rb_link_node(&ext->oe_node, parent, n);
        rb_insert_color(&ext->oe_node, &obj->oo_root);
        osc_extent_get(ext);
        ext->oe_intree = 1;
}

/* caller must have held object lock. */
static void osc_extent_erase(struct osc_extent *ext)
{
        struct osc_object *obj = ext->oe_obj;
        LASSERT(osc_object_is_locked(obj));
        if (ext->oe_intree) {
                rb_erase(&ext->oe_node, &obj->oo_root);
                ext->oe_intree = 0;
                /* rbtree held a refcount */
                osc_extent_put_trust(ext);
        }
}

static struct osc_extent *osc_extent_hold(struct osc_extent *ext)
{
        struct osc_object *obj = ext->oe_obj;

        LASSERT(osc_object_is_locked(obj));
        LASSERT(ext->oe_state == OES_ACTIVE || ext->oe_state == OES_CACHE);
        if (ext->oe_state == OES_CACHE) {
                osc_extent_state_set(ext, OES_ACTIVE);
                osc_update_pending(obj, OBD_BRW_WRITE, -ext->oe_nr_pages);
        }
        atomic_inc(&ext->oe_users);
        list_del_init(&ext->oe_link);
        return osc_extent_get(ext);
}

static void __osc_extent_remove(struct osc_extent *ext)
{
        LASSERT(osc_object_is_locked(ext->oe_obj));
        LASSERT(list_empty(&ext->oe_pages));
        osc_extent_erase(ext);
        list_del_init(&ext->oe_link);
        osc_extent_state_set(ext, OES_INV);
        OSC_EXTENT_DUMP(D_CACHE, ext, "destroyed.\n");
}

static void osc_extent_remove(struct osc_extent *ext)
{
        struct osc_object *obj = ext->oe_obj;

        osc_object_lock(obj);
        __osc_extent_remove(ext);
        osc_object_unlock(obj);
}

/**
 * This function is used to merge extents to get better performance. It checks
 * if @cur and @victim are contiguous at the chunk level.
 */
static int osc_extent_merge(const struct lu_env *env, struct osc_extent *cur,
                            struct osc_extent *victim)
{
        struct osc_object       *obj = cur->oe_obj;
        struct client_obd       *cli = osc_cli(obj);
        pgoff_t                  chunk_start;
        pgoff_t                  chunk_end;
        int                      ppc_bits;

        LASSERT(cur->oe_state == OES_CACHE);
        LASSERT(osc_object_is_locked(obj));
        if (victim == NULL)
                return -EINVAL;

        if (victim->oe_state != OES_CACHE || victim->oe_fsync_wait)
                return -EBUSY;

        if (cur->oe_max_end != victim->oe_max_end)
                return -ERANGE;

        LASSERT(cur->oe_dlmlock == victim->oe_dlmlock);
        ppc_bits = cli->cl_chunkbits - PAGE_SHIFT;
        chunk_start = cur->oe_start >> ppc_bits;
        chunk_end   = cur->oe_end   >> ppc_bits;
        if (chunk_start   != (victim->oe_end >> ppc_bits) + 1 &&
            chunk_end + 1 != victim->oe_start >> ppc_bits)
                return -ERANGE;

        /* the overall extent size must not exceed the maximum extent size
         * reported by the server */
        if (cur->oe_end - cur->oe_start + 1 +
            victim->oe_end - victim->oe_start + 1 > cli->cl_max_extent_pages)
                return -ERANGE;

        OSC_EXTENT_DUMP(D_CACHE, victim, "will be merged by %p.\n", cur);

        cur->oe_start     = min(cur->oe_start, victim->oe_start);
        cur->oe_end       = max(cur->oe_end,   victim->oe_end);
        /* per-extent tax should be accounted only once for the whole extent */
        cur->oe_grants   += victim->oe_grants - cli->cl_grant_extent_tax;
        cur->oe_nr_pages += victim->oe_nr_pages;
        /* only the following bits are needed to merge */
        cur->oe_urgent   |= victim->oe_urgent;
        cur->oe_memalloc |= victim->oe_memalloc;
        list_splice_init(&victim->oe_pages, &cur->oe_pages);
        list_del_init(&victim->oe_link);
        victim->oe_nr_pages = 0;

        osc_extent_get(victim);
        __osc_extent_remove(victim);
        osc_extent_put(env, victim);

        OSC_EXTENT_DUMP(D_CACHE, cur, "after merging %p.\n", victim);
        return 0;
}
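
/*
 * A worked example of the chunk-contiguity test above, assuming 64KiB
 * chunks and 4KiB pages (ppc_bits = 16 - 12 = 4, 16 pages per chunk):
 * an extent @cur covering pages [0 -> 15] (chunk 0) and a @victim
 * covering pages [16 -> 31] (chunk 1) satisfy
 * chunk_end + 1 == victim->oe_start >> ppc_bits, so they may merge;
 * a victim at pages [32 -> 47] (chunk 2) leaves a one-chunk gap and
 * fails with -ERANGE.
 */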

/**
 * Drop the user count of an osc_extent, and unplug IO asynchronously.
 */
int osc_extent_release(const struct lu_env *env, struct osc_extent *ext)
{
        struct osc_object *obj = ext->oe_obj;
        struct client_obd *cli = osc_cli(obj);
        int rc = 0;
        ENTRY;

        LASSERT(atomic_read(&ext->oe_users) > 0);
        LASSERT(sanity_check(ext) == 0);
        LASSERT(ext->oe_grants > 0);

        if (atomic_dec_and_lock(&ext->oe_users, &obj->oo_lock)) {
                LASSERT(ext->oe_state == OES_ACTIVE);
                if (ext->oe_trunc_pending) {
                        /* a truncate process is waiting for this extent.
                         * This may happen due to a race; see
                         * osc_cache_truncate_start(). */
                        osc_extent_state_set(ext, OES_TRUNC);
                        ext->oe_trunc_pending = 0;
                        osc_object_unlock(obj);
                } else {
                        int grant = 0;

                        osc_extent_state_set(ext, OES_CACHE);
                        osc_update_pending(obj, OBD_BRW_WRITE,
                                           ext->oe_nr_pages);

                        /* try to merge the previous and next extent. */
                        if (osc_extent_merge(env, ext, prev_extent(ext)) == 0)
                                grant += cli->cl_grant_extent_tax;
                        if (osc_extent_merge(env, ext, next_extent(ext)) == 0)
                                grant += cli->cl_grant_extent_tax;

                        if (ext->oe_urgent)
                                list_move_tail(&ext->oe_link,
                                               &obj->oo_urgent_exts);
                        else if (ext->oe_nr_pages == ext->oe_mppr) {
                                list_move_tail(&ext->oe_link,
                                               &obj->oo_full_exts);
                        }
                        osc_object_unlock(obj);
                        if (grant > 0)
                                osc_unreserve_grant(cli, 0, grant);
                }

                osc_io_unplug_async(env, cli, obj);
        }
        osc_extent_put(env, ext);
        RETURN(rc);
}

static inline bool
overlapped(const struct osc_extent *ex1, const struct osc_extent *ex2)
{
        return !(ex1->oe_end < ex2->oe_start || ex2->oe_end < ex1->oe_start);
}

/**
 * Find or create an extent which includes @index. This is the core function
 * for managing the extent tree.
 */
static struct osc_extent *osc_extent_find(const struct lu_env *env,
                                          struct osc_object *obj, pgoff_t index,
                                          unsigned int *grants)
{
        struct client_obd *cli = osc_cli(obj);
        struct osc_lock   *olck;
        struct cl_lock_descr *descr;
        struct osc_extent *cur;
        struct osc_extent *ext;
        struct osc_extent *conflict = NULL;
        struct osc_extent *found = NULL;
        pgoff_t    chunk;
        pgoff_t    max_end;
        unsigned int max_pages; /* max_pages_per_rpc */
        unsigned int chunksize;
        int        ppc_bits; /* pages per chunk bits */
        pgoff_t    chunk_mask;
        int        rc;
        ENTRY;

        cur = osc_extent_alloc(obj);
        if (cur == NULL)
                RETURN(ERR_PTR(-ENOMEM));

        olck = osc_env_io(env)->oi_write_osclock;
        LASSERTF(olck != NULL, "page %lu is not covered by lock\n", index);
        LASSERT(olck->ols_state == OLS_GRANTED);

        descr = &olck->ols_cl.cls_lock->cll_descr;
        LASSERT(descr->cld_mode >= CLM_WRITE);

        LASSERTF(cli->cl_chunkbits >= PAGE_SHIFT,
                 "chunkbits: %u\n", cli->cl_chunkbits);
        ppc_bits   = cli->cl_chunkbits - PAGE_SHIFT;
        chunk_mask = ~((1 << ppc_bits) - 1);
        chunksize  = 1 << cli->cl_chunkbits;
        chunk      = index >> ppc_bits;

        /* align end to RPC edge. */
        max_pages = cli->cl_max_pages_per_rpc;
        if ((max_pages & ~chunk_mask) != 0) {
                CERROR("max_pages: %#x chunkbits: %u chunk_mask: %#lx\n",
                       max_pages, cli->cl_chunkbits, chunk_mask);
                RETURN(ERR_PTR(-EINVAL));
        }
        max_end = index - (index % max_pages) + max_pages - 1;
        max_end = min_t(pgoff_t, max_end, descr->cld_end);

        /* initialize the new extent with the parameters known so far */
        cur->oe_max_end = max_end;
        cur->oe_start   = index & chunk_mask;
        cur->oe_end     = ((index + ~chunk_mask + 1) & chunk_mask) - 1;
        if (cur->oe_start < descr->cld_start)
                cur->oe_start = descr->cld_start;
        if (cur->oe_end > max_end)
                cur->oe_end = max_end;
        cur->oe_grants  = 0;
        cur->oe_mppr    = max_pages;
        if (olck->ols_dlmlock != NULL) {
                LASSERT(olck->ols_hold);
                cur->oe_dlmlock = LDLM_LOCK_GET(olck->ols_dlmlock);
                lu_ref_add(&olck->ols_dlmlock->l_reference, "osc_extent", cur);
        }

        /* grants have been allocated by the caller */
        LASSERTF(*grants >= chunksize + cli->cl_grant_extent_tax,
                 "%u/%u/%u.\n", *grants, chunksize, cli->cl_grant_extent_tax);
        LASSERTF((max_end - cur->oe_start) < max_pages, EXTSTR"\n",
                 EXTPARA(cur));

restart:
        osc_object_lock(obj);
        ext = osc_extent_search(obj, cur->oe_start);
        if (ext == NULL)
                ext = first_extent(obj);
        while (ext != NULL) {
                pgoff_t ext_chk_start = ext->oe_start >> ppc_bits;
                pgoff_t ext_chk_end   = ext->oe_end   >> ppc_bits;

                LASSERT(sanity_check_nolock(ext) == 0);
                if (chunk > ext_chk_end + 1 || chunk < ext_chk_start)
                        break;

                /* if covered by different locks, there is no chance to match */
                if (olck->ols_dlmlock != ext->oe_dlmlock) {
                        EASSERTF(!overlapped(ext, cur), ext,
                                 EXTSTR"\n", EXTPARA(cur));

                        ext = next_extent(ext);
                        continue;
                }

                /* discontiguous chunks? */
                if (chunk + 1 < ext_chk_start) {
                        ext = next_extent(ext);
                        continue;
                }

                /* ok, from now on, ext and cur have these attrs:
                 * 1. covered by the same lock
                 * 2. contiguous at chunk level or overlapping. */

                if (overlapped(ext, cur)) {
                        /* cur is the minimum unit, so overlapping means
                         * full containment. */
                        EASSERTF((ext->oe_start <= cur->oe_start &&
                                  ext->oe_end >= cur->oe_end),
                                 ext, EXTSTR"\n", EXTPARA(cur));

                        if (ext->oe_state > OES_CACHE || ext->oe_fsync_wait) {
                                /* for simplicity, we wait for this extent to
                                 * finish before going forward. */
                                conflict = osc_extent_get(ext);
                                break;
                        }

                        found = osc_extent_hold(ext);
                        break;
                }

                /* non-overlapped extent */
                if (ext->oe_state != OES_CACHE || ext->oe_fsync_wait) {
                        /* we can't do anything for a non OES_CACHE extent, or
                         * if there is someone waiting for this extent to be
                         * flushed, try the next one. */
                        ext = next_extent(ext);
                        continue;
                }

                /* check if they belong to the same rpc slot before trying to
                 * merge. the extents are not overlapped and contiguous at
                 * chunk level to get here. */
                if (ext->oe_max_end != max_end) {
                        /* if they don't belong to the same RPC slot or
                         * max_pages_per_rpc has ever changed, do not merge. */
                        ext = next_extent(ext);
                        continue;
                }

                /* check whether the maximum extent size would be hit */
                if ((ext_chk_end - ext_chk_start + 1 + 1) << ppc_bits >
                    cli->cl_max_extent_pages) {
                        ext = next_extent(ext);
                        continue;
                }

                /* it's required that an extent must be contiguous at chunk
                 * level so that we know the whole extent is covered by grant
                 * (the pages in the extent are NOT required to be contiguous).
                 * Otherwise, it would be too difficult to know which chunks
                 * have grants allocated. */

                /* try to do a front merge - extend ext's start */
                if (chunk + 1 == ext_chk_start) {
                        /* ext must be chunk size aligned */
                        EASSERT((ext->oe_start & ~chunk_mask) == 0, ext);

                        /* pull ext's start back to cover cur */
                        ext->oe_start   = cur->oe_start;
                        ext->oe_grants += chunksize;
                        LASSERT(*grants >= chunksize);
                        *grants -= chunksize;

                        found = osc_extent_hold(ext);
                } else if (chunk == ext_chk_end + 1) {
                        /* rear merge */
                        ext->oe_end     = cur->oe_end;
                        ext->oe_grants += chunksize;
                        LASSERT(*grants >= chunksize);
                        *grants -= chunksize;

                        /* try to merge with the next one because we just
                         * filled in a gap */
                        if (osc_extent_merge(env, ext, next_extent(ext)) == 0)
                                /* we can save extent tax from next extent */
                                *grants += cli->cl_grant_extent_tax;

                        found = osc_extent_hold(ext);
                }
                if (found != NULL)
                        break;

                ext = next_extent(ext);
        }

        osc_extent_tree_dump(D_CACHE, obj);
        if (found != NULL) {
                LASSERT(conflict == NULL);
                if (!IS_ERR(found)) {
                        LASSERT(found->oe_dlmlock == cur->oe_dlmlock);
                        OSC_EXTENT_DUMP(D_CACHE, found,
                                        "found caching ext for %lu.\n", index);
                }
        } else if (conflict == NULL) {
                /* create a new extent */
                EASSERT(osc_extent_is_overlapped(obj, cur) == 0, cur);
                cur->oe_grants = chunksize + cli->cl_grant_extent_tax;
                LASSERT(*grants >= cur->oe_grants);
                *grants -= cur->oe_grants;

                cur->oe_state = OES_CACHE;
                found = osc_extent_hold(cur);
                osc_extent_insert(obj, cur);
                OSC_EXTENT_DUMP(D_CACHE, cur, "add into tree %lu/%lu.\n",
                                index, descr->cld_end);
        }
        osc_object_unlock(obj);

        if (conflict != NULL) {
                LASSERT(found == NULL);

                /* wait for IO to finish. Note that the conflicting extent
                 * cannot be an OES_TRUNC extent. */
                rc = osc_extent_wait(env, conflict, OES_INV);
                osc_extent_put(env, conflict);
                conflict = NULL;
                if (rc < 0)
                        GOTO(out, found = ERR_PTR(rc));

                goto restart;
        }
        EXIT;

out:
        osc_extent_put(env, cur);
        return found;
}
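
/*
 * A sketch of the alignment arithmetic above, assuming 4KiB pages and
 * 64KiB chunks (ppc_bits = 4, chunk_mask = ~15UL): for index 21,
 * cur->oe_start = 21 & ~15 = 16 and cur->oe_end = ((21 + 16) & ~15) - 1
 * = 31, so the new extent initially covers exactly the chunk holding
 * the page.  With max_pages = 256, max_end = 21 - (21 % 256) + 255 =
 * 255: the extent may later grow up to the end of its RPC slot, but
 * never across it.
 */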

/**
 * Called when IO is finished to an extent.
 */
int osc_extent_finish(const struct lu_env *env, struct osc_extent *ext,
                      int sent, int rc)
{
        struct client_obd *cli = osc_cli(ext->oe_obj);
        struct osc_async_page *oap;
        struct osc_async_page *tmp;
        int nr_pages = ext->oe_nr_pages;
        int lost_grant = 0;
        int blocksize = cli->cl_import->imp_obd->obd_osfs.os_bsize ? : 4096;
        loff_t last_off = 0;
        int last_count = -1;
        ENTRY;

        OSC_EXTENT_DUMP(D_CACHE, ext, "extent finished.\n");

        ext->oe_rc = rc ?: ext->oe_nr_pages;
        EASSERT(ergo(rc == 0, ext->oe_state == OES_RPC), ext);

        osc_lru_add_batch(cli, &ext->oe_pages);
        list_for_each_entry_safe(oap, tmp, &ext->oe_pages,
                                     oap_pending_item) {
                list_del_init(&oap->oap_rpc_item);
                list_del_init(&oap->oap_pending_item);
                if (last_off <= oap->oap_obj_off) {
                        last_off = oap->oap_obj_off;
                        last_count = oap->oap_count;
                }

                --ext->oe_nr_pages;
                osc_ap_completion(env, cli, oap, sent, rc);
        }
        EASSERT(ext->oe_nr_pages == 0, ext);

        if (!sent) {
                lost_grant = ext->oe_grants;
        } else if (blocksize < PAGE_SIZE &&
                   last_count != PAGE_SIZE) {
                /* For short writes we shouldn't count parts of pages that
                 * span a whole chunk on the OST side, or our accounting goes
                 * wrong.  Should match the code in filter_grant_check. */
                int offset = last_off & ~PAGE_MASK;
                int count = last_count + (offset & (blocksize - 1));
                int end = (offset + last_count) & (blocksize - 1);
                if (end)
                        count += blocksize - end;

                lost_grant = PAGE_SIZE - count;
        }
        if (ext->oe_grants > 0)
                osc_free_grant(cli, nr_pages, lost_grant, ext->oe_grants);

        osc_extent_remove(ext);
        /* put the refcount for RPC */
        osc_extent_put(env, ext);
        RETURN(0);
}
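
/*
 * Worked example of the short-write accounting above (hypothetical
 * numbers): with 4KiB pages, a 1024-byte OST block size and a final
 * write of last_count = 100 bytes at in-page offset 500, count starts
 * at 100 + (500 & 1023) = 600, end = (500 + 100) & 1023 = 600, so
 * count is rounded up to 600 + (1024 - 600) = 1024 and lost_grant =
 * 4096 - 1024 = 3072: the client only "loses" grant for the part of
 * the page the server never charges for.
 */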

static int extent_wait_cb(struct osc_extent *ext, enum osc_extent_state state)
{
        int ret;

        osc_object_lock(ext->oe_obj);
        ret = ext->oe_state == state;
        osc_object_unlock(ext->oe_obj);

        return ret;
}

/**
 * Wait for the extent's state to become @state.
 */
static int osc_extent_wait(const struct lu_env *env, struct osc_extent *ext,
                           enum osc_extent_state state)
{
        struct osc_object *obj = ext->oe_obj;
        int rc = 0;
        ENTRY;

        osc_object_lock(obj);
        LASSERT(sanity_check_nolock(ext) == 0);
        /* `Kick' this extent only if the caller is waiting for it to be
         * written out. */
        if (state == OES_INV && !ext->oe_urgent && !ext->oe_hp) {
                if (ext->oe_state == OES_ACTIVE) {
                        ext->oe_urgent = 1;
                } else if (ext->oe_state == OES_CACHE) {
                        ext->oe_urgent = 1;
                        osc_extent_hold(ext);
                        rc = 1;
                }
        }
        osc_object_unlock(obj);
        if (rc == 1)
                osc_extent_release(env, ext);

        /* wait for the extent until its state becomes @state */
        rc = wait_event_idle_timeout(ext->oe_waitq, extent_wait_cb(ext, state),
                                     cfs_time_seconds(600));
        if (rc == 0) {
                OSC_EXTENT_DUMP(D_ERROR, ext,
                        "%s: wait ext to %u timed out, recovery in progress?\n",
                        cli_name(osc_cli(obj)), state);

                wait_event_idle(ext->oe_waitq, extent_wait_cb(ext, state));
        }
        if (ext->oe_rc < 0)
                rc = ext->oe_rc;
        else
                rc = 0;
        RETURN(rc);
}

/**
 * Discard pages with index greater than @trunc_index. If @ext overlaps
 * @trunc_index, then a partial truncate happens.
 */
static int osc_extent_truncate(struct osc_extent *ext, pgoff_t trunc_index,
                                bool partial)
{
        struct lu_env         *env;
        struct cl_io          *io;
        struct osc_object     *obj = ext->oe_obj;
        struct client_obd     *cli = osc_cli(obj);
        struct osc_async_page *oap;
        struct osc_async_page *tmp;
        struct pagevec        *pvec;
        int                    pages_in_chunk = 0;
        int                    ppc_bits    = cli->cl_chunkbits -
                                             PAGE_SHIFT;
        __u64                  trunc_chunk = trunc_index >> ppc_bits;
        int                    grants   = 0;
        int                    nr_pages = 0;
        int                    rc       = 0;
        __u16                  refcheck;
        ENTRY;

        LASSERT(sanity_check(ext) == 0);
        LASSERT(ext->oe_state == OES_TRUNC);
        LASSERT(!ext->oe_urgent);

        /* Request a new lu_env.
         * We can't use the env from osc_cache_truncate_start() because
         * it's from lov_io_sub and not fully initialized. */
        env = cl_env_get(&refcheck);
        if (IS_ERR(env))
                RETURN(PTR_ERR(env));

        io  = osc_env_thread_io(env);
        io->ci_obj = cl_object_top(osc2cl(obj));
        io->ci_ignore_layout = 1;
        pvec = &osc_env_info(env)->oti_pagevec;
        ll_pagevec_init(pvec, 0);
        rc = cl_io_init(env, io, CIT_MISC, io->ci_obj);
        if (rc < 0)
                GOTO(out, rc);

        /* discard all pages with index greater than trunc_index */
        list_for_each_entry_safe(oap, tmp, &ext->oe_pages,
                                     oap_pending_item) {
                pgoff_t index = osc_index(oap2osc(oap));
                struct cl_page  *page = oap2cl_page(oap);

                LASSERT(list_empty(&oap->oap_rpc_item));

                /* only discard the pages with their index greater than
                 * trunc_index, and ... */
                if (index < trunc_index ||
                    (index == trunc_index && partial)) {
                        /* count how many pages remain in the chunk
                         * so that we can calculate grants correctly. */
                        if (index >> ppc_bits == trunc_chunk)
                                ++pages_in_chunk;
                        continue;
                }

                list_del_init(&oap->oap_pending_item);

                cl_page_get(page);
                lu_ref_add(&page->cp_reference, "truncate", current);

                if (cl_page_own(env, io, page) == 0) {
                        cl_page_discard(env, io, page);
                        cl_page_disown(env, io, page);
                } else {
                        LASSERT(page->cp_state == CPS_FREEING);
                        LASSERT(0);
                }

                lu_ref_del(&page->cp_reference, "truncate", current);
                cl_pagevec_put(env, page, pvec);

                --ext->oe_nr_pages;
                ++nr_pages;
        }
        pagevec_release(pvec);

        EASSERTF(ergo(ext->oe_start >= trunc_index + !!partial,
                      ext->oe_nr_pages == 0),
                ext, "trunc_index %lu, partial %d\n", trunc_index, partial);

        osc_object_lock(obj);
        if (ext->oe_nr_pages == 0) {
                LASSERT(pages_in_chunk == 0);
                grants = ext->oe_grants;
                ext->oe_grants = 0;
        } else { /* calculate how many grants we can free */
                int     chunks = (ext->oe_end >> ppc_bits) - trunc_chunk;
                pgoff_t last_index;

                /* if there are no pages left in this chunk, we can also free
                 * the grants for the last chunk */
                if (pages_in_chunk == 0) {
                        /* if this is the 1st chunk and there are no pages in
                         * it, ext->oe_nr_pages must be zero, so we should be
                         * in the other if-clause. */
                        LASSERT(trunc_chunk > 0);
                        --trunc_chunk;
                        ++chunks;
                }

                /* this is what we can free from this extent */
                grants          = chunks << cli->cl_chunkbits;
                ext->oe_grants -= grants;
                last_index      = ((trunc_chunk + 1) << ppc_bits) - 1;
                ext->oe_end     = min(last_index, ext->oe_max_end);
                LASSERT(ext->oe_end >= ext->oe_start);
                LASSERT(ext->oe_grants > 0);
        }
        osc_object_unlock(obj);

        if (grants > 0 || nr_pages > 0)
                osc_free_grant(cli, nr_pages, grants, grants);

out:
        cl_io_fini(env, io);
        cl_env_put(env, &refcheck);
        RETURN(rc);
}
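
/*
 * Illustration of the grant arithmetic above, assuming ppc_bits = 4
 * (16 pages per chunk): truncating an extent [0 -> 47] (chunks 0-2) at
 * trunc_index = 21 gives trunc_chunk = 1 and chunks = (47 >> 4) - 1 = 1.
 * If pages survive in chunk 1, one chunk of grant is freed and oe_end
 * shrinks to ((1 + 1) << 4) - 1 = 31.  If chunk 1 ends up empty (all
 * surviving pages are in chunk 0), trunc_chunk drops to 0 and chunks
 * rises to 2, so chunk 1's grant is freed as well and oe_end becomes 15.
 */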

/**
 * This function is used to prepare the extent for transfer.
 * A race with page flushing - ll_writepage() - has to be handled cautiously.
 */
static int osc_extent_make_ready(const struct lu_env *env,
                                 struct osc_extent *ext)
{
        struct osc_async_page *oap;
        struct osc_async_page *last = NULL;
        struct osc_object *obj = ext->oe_obj;
        unsigned int page_count = 0;
        int rc;
        ENTRY;

        /* we're going to grab page lock, so object lock must not be taken. */
        LASSERT(sanity_check(ext) == 0);
        /* in locking state, any process should not touch this extent. */
        EASSERT(ext->oe_state == OES_LOCKING, ext);
        EASSERT(ext->oe_owner != NULL, ext);

        OSC_EXTENT_DUMP(D_CACHE, ext, "make ready\n");

        list_for_each_entry(oap, &ext->oe_pages, oap_pending_item) {
                ++page_count;
                if (last == NULL || last->oap_obj_off < oap->oap_obj_off)
                        last = oap;

                /* checking ASYNC_READY is race safe */
                if ((oap->oap_async_flags & ASYNC_READY) != 0)
                        continue;

                rc = osc_make_ready(env, oap, OBD_BRW_WRITE);
                switch (rc) {
                case 0:
                        spin_lock(&oap->oap_lock);
                        oap->oap_async_flags |= ASYNC_READY;
                        spin_unlock(&oap->oap_lock);
                        break;
                case -EALREADY:
                        LASSERT((oap->oap_async_flags & ASYNC_READY) != 0);
                        break;
                default:
                        LASSERTF(0, "unknown return code: %d\n", rc);
                }
        }

        LASSERT(page_count == ext->oe_nr_pages);
        LASSERT(last != NULL);
        /* the last page is the only one whose count needs to be refreshed
         * against the file size. */
        if (!(last->oap_async_flags & ASYNC_COUNT_STABLE)) {
                int last_oap_count = osc_refresh_count(env, last, OBD_BRW_WRITE);
                LASSERT(last_oap_count > 0);
                LASSERT(last->oap_page_off + last_oap_count <= PAGE_SIZE);
                last->oap_count = last_oap_count;
                spin_lock(&last->oap_lock);
                last->oap_async_flags |= ASYNC_COUNT_STABLE;
                spin_unlock(&last->oap_lock);
        }

        /* for the rest of the pages, we don't need to call
         * osc_refresh_count() because they are known not to be the last
         * page */
        list_for_each_entry(oap, &ext->oe_pages, oap_pending_item) {
                if (!(oap->oap_async_flags & ASYNC_COUNT_STABLE)) {
                        oap->oap_count = PAGE_SIZE - oap->oap_page_off;
                        spin_lock(&oap->oap_lock);
                        oap->oap_async_flags |= ASYNC_COUNT_STABLE;
                        spin_unlock(&oap->oap_lock);
                }
        }

        osc_object_lock(obj);
        osc_extent_state_set(ext, OES_RPC);
        osc_object_unlock(obj);
        /* get a refcount for RPC. */
        osc_extent_get(ext);

        RETURN(0);
}

/**
 * Quick and simple version of osc_extent_find(). This function is frequently
 * called to expand the extent for the same IO. To expand the extent, the
 * page index must be in the same or the next chunk of ext->oe_end.
 */
static int osc_extent_expand(struct osc_extent *ext, pgoff_t index,
                             unsigned int *grants)
{
        struct osc_object *obj = ext->oe_obj;
        struct client_obd *cli = osc_cli(obj);
        struct osc_extent *next;
        int ppc_bits = cli->cl_chunkbits - PAGE_SHIFT;
        pgoff_t chunk = index >> ppc_bits;
        pgoff_t end_chunk;
        pgoff_t end_index;
        unsigned int chunksize = 1 << cli->cl_chunkbits;
        int rc = 0;
        ENTRY;

        LASSERT(ext->oe_max_end >= index && ext->oe_start <= index);
        osc_object_lock(obj);
        LASSERT(sanity_check_nolock(ext) == 0);
        end_chunk = ext->oe_end >> ppc_bits;
        if (chunk > end_chunk + 1)
                GOTO(out, rc = -ERANGE);

        if (end_chunk >= chunk)
                GOTO(out, rc = 0);

        LASSERT(end_chunk + 1 == chunk);

        /* try to expand this extent to cover @index */
        end_index = min(ext->oe_max_end, ((chunk + 1) << ppc_bits) - 1);

        /* don't go over the maximum extent size reported by the server */
        if (end_index - ext->oe_start + 1 > cli->cl_max_extent_pages)
                GOTO(out, rc = -ERANGE);

        next = next_extent(ext);
        if (next != NULL && next->oe_start <= end_index)
                /* complex mode - overlapped with the next extent,
                 * this case will be handled by osc_extent_find() */
                GOTO(out, rc = -EAGAIN);

        ext->oe_end = end_index;
        ext->oe_grants += chunksize;
        LASSERT(*grants >= chunksize);
        *grants -= chunksize;
        EASSERTF(osc_extent_is_overlapped(obj, ext) == 0, ext,
                 "overlapped after expanding for %lu.\n", index);
        EXIT;

out:
        osc_object_unlock(obj);
        RETURN(rc);
}
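
/*
 * Hypothetical use of osc_extent_expand(), assuming ppc_bits = 4: for
 * an extent currently ending at page 15 (end_chunk = 0), a write to
 * page 16 (chunk 1) hits the end_chunk + 1 == chunk case and grows the
 * extent to end_index = 31, consuming one chunk's worth of *grants;
 * a write to page 40 (chunk 2) returns -ERANGE, and the caller can
 * fall back to the full osc_extent_find() path.
 */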

static void osc_extent_tree_dump0(int mask, struct osc_object *obj,
                                  const char *func, int line)
{
        struct osc_extent *ext;
        int cnt;

        if (!cfs_cdebug_show(mask, DEBUG_SUBSYSTEM))
                return;

        CDEBUG(mask, "Dump object %p extents at %s:%d, mppr: %u.\n",
               obj, func, line, osc_cli(obj)->cl_max_pages_per_rpc);

        /* osc_object_lock(obj); */
        cnt = 1;
        for (ext = first_extent(obj); ext != NULL; ext = next_extent(ext))
                OSC_EXTENT_DUMP(mask, ext, "in tree %d.\n", cnt++);

        cnt = 1;
        list_for_each_entry(ext, &obj->oo_hp_exts, oe_link)
                OSC_EXTENT_DUMP(mask, ext, "hp %d.\n", cnt++);

        cnt = 1;
        list_for_each_entry(ext, &obj->oo_urgent_exts, oe_link)
                OSC_EXTENT_DUMP(mask, ext, "urgent %d.\n", cnt++);

        cnt = 1;
        list_for_each_entry(ext, &obj->oo_reading_exts, oe_link)
                OSC_EXTENT_DUMP(mask, ext, "reading %d.\n", cnt++);
        /* osc_object_unlock(obj); */
}

/* ------------------ osc extent end ------------------ */

static inline int osc_is_ready(struct osc_object *osc)
{
        return !list_empty(&osc->oo_ready_item) ||
               !list_empty(&osc->oo_hp_ready_item);
}

#define OSC_IO_DEBUG(OSC, STR, args...)                                        \
        CDEBUG(D_CACHE, "obj %p ready %d|%c|%c wr %d|%c|%c rd %d|%c " STR,     \
               (OSC), osc_is_ready(OSC),                                       \
               list_empty_marker(&(OSC)->oo_hp_ready_item),                    \
               list_empty_marker(&(OSC)->oo_ready_item),                       \
               atomic_read(&(OSC)->oo_nr_writes),                              \
               list_empty_marker(&(OSC)->oo_hp_exts),                          \
               list_empty_marker(&(OSC)->oo_urgent_exts),                      \
               atomic_read(&(OSC)->oo_nr_reads),                               \
               list_empty_marker(&(OSC)->oo_reading_exts),                     \
               ##args)

static int osc_make_ready(const struct lu_env *env, struct osc_async_page *oap,
                          int cmd)
{
        struct osc_page *opg  = oap2osc_page(oap);
        struct cl_page  *page = oap2cl_page(oap);
        int result;

        LASSERT(cmd == OBD_BRW_WRITE); /* no cached reads */

        ENTRY;
        result = cl_page_make_ready(env, page, CRT_WRITE);
        if (result == 0)
                opg->ops_submit_time = ktime_get();
        RETURN(result);
}

static int osc_refresh_count(const struct lu_env *env,
                             struct osc_async_page *oap, int cmd)
{
        struct osc_page  *opg = oap2osc_page(oap);
        pgoff_t index = osc_index(oap2osc(oap));
        struct cl_object *obj;
        struct cl_attr   *attr = &osc_env_info(env)->oti_attr;
        int result;
        loff_t kms;

        /* readpage queues with _COUNT_STABLE, shouldn't get here. */
        LASSERT(!(cmd & OBD_BRW_READ));
        LASSERT(opg != NULL);
        obj = opg->ops_cl.cpl_obj;

        cl_object_attr_lock(obj);
        result = cl_object_attr_get(env, obj, attr);
        cl_object_attr_unlock(obj);
        if (result < 0)
                return result;
        kms = attr->cat_kms;
        if (cl_offset(obj, index) >= kms)
                /* catch race with truncate */
                return 0;
        else if (cl_offset(obj, index + 1) > kms)
                /* catch sub-page write at end of file */
                return kms & ~PAGE_MASK;
        else
                return PAGE_SIZE;
}
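
/*
 * Example of the kms checks above (hypothetical numbers, 4KiB pages):
 * with known minimum size kms = 10000, page index 1 (bytes 4096-8191)
 * lies fully below kms and keeps PAGE_SIZE; page index 2 (bytes
 * 8192-12287) crosses kms, so its count is trimmed to
 * kms & ~PAGE_MASK = 10000 - 8192 = 1808 bytes; page index 3 starts at
 * 12288 >= kms, which indicates a racing truncate, and 0 is returned.
 */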

static int osc_completion(const struct lu_env *env, struct osc_async_page *oap,
                          int cmd, int rc)
{
        struct osc_page   *opg  = oap2osc_page(oap);
        struct cl_page    *page = oap2cl_page(oap);
        enum cl_req_type   crt;
        int srvlock;

        ENTRY;

        cmd &= ~OBD_BRW_NOQUOTA;
        LASSERTF(equi(page->cp_state == CPS_PAGEIN,  cmd == OBD_BRW_READ),
                 "cp_state:%u, cmd:%d\n", page->cp_state, cmd);
        LASSERTF(equi(page->cp_state == CPS_PAGEOUT, cmd == OBD_BRW_WRITE),
                 "cp_state:%u, cmd:%d\n", page->cp_state, cmd);
        LASSERT(opg->ops_transfer_pinned);

        crt = cmd == OBD_BRW_READ ? CRT_READ : CRT_WRITE;
        /* Clear opg->ops_transfer_pinned before VM lock is released. */
        opg->ops_transfer_pinned = 0;

        opg->ops_submit_time = ktime_set(0, 0);
        srvlock = oap->oap_brw_flags & OBD_BRW_SRVLOCK;

        /* statistics */
        if (rc == 0 && srvlock) {
                struct lu_device *ld    = opg->ops_cl.cpl_obj->co_lu.lo_dev;
                struct osc_stats *stats = &lu2osc_dev(ld)->od_stats;
                size_t bytes = oap->oap_count;

                if (crt == CRT_READ)
                        stats->os_lockless_reads += bytes;
                else
                        stats->os_lockless_writes += bytes;
        }

        /*
         * This has to be the last operation with the page, as locks are
         * released in cl_page_completion() and nothing except for the
         * reference counter protects page from concurrent reclaim.
         */
        lu_ref_del(&page->cp_reference, "transfer", page);

        cl_page_completion(env, page, crt, rc);
        cl_page_put(env, page);

        RETURN(0);
}

#define OSC_DUMP_GRANT(mask, cli, fmt, args...) do {                    \
        struct client_obd *__tmp = (cli);                               \
        CDEBUG(mask, "%s: grant { dirty: %ld/%ld dirty_pages: %ld/%lu " \
               "dropped: %ld avail: %ld, dirty_grant: %ld, "            \
               "reserved: %ld, flight: %d } lru {in list: %ld, "        \
               "left: %ld, waiters: %d }" fmt "\n",                     \
               cli_name(__tmp),                                         \
               __tmp->cl_dirty_pages, __tmp->cl_dirty_max_pages,        \
               atomic_long_read(&obd_dirty_pages), obd_max_dirty_pages, \
               __tmp->cl_lost_grant, __tmp->cl_avail_grant,             \
               __tmp->cl_dirty_grant,                                   \
               __tmp->cl_reserved_grant, __tmp->cl_w_in_flight,         \
               atomic_long_read(&__tmp->cl_lru_in_list),                \
               atomic_long_read(&__tmp->cl_lru_busy),                   \
               atomic_read(&__tmp->cl_lru_shrinkers), ##args);          \
} while (0)
1401
1402 /* caller must hold loi_list_lock */
1403 static void osc_consume_write_grant(struct client_obd *cli,
1404                                     struct brw_page *pga)
1405 {
1406         assert_spin_locked(&cli->cl_loi_list_lock);
1407         LASSERT(!(pga->flag & OBD_BRW_FROM_GRANT));
1408         cli->cl_dirty_pages++;
1409         pga->flag |= OBD_BRW_FROM_GRANT;
1410         CDEBUG(D_CACHE, "using %lu grant credits for brw %p page %p\n",
1411                PAGE_SIZE, pga, pga->pg);
1412         osc_update_next_shrink(cli);
1413 }
1414
1415 /* the companion to osc_consume_write_grant, called when a brw has completed.
1416  * must be called with the loi lock held. */
1417 static void osc_release_write_grant(struct client_obd *cli,
1418                                     struct brw_page *pga)
1419 {
1420         ENTRY;
1421
1422         assert_spin_locked(&cli->cl_loi_list_lock);
1423         if (!(pga->flag & OBD_BRW_FROM_GRANT)) {
1424                 EXIT;
1425                 return;
1426         }
1427
1428         pga->flag &= ~OBD_BRW_FROM_GRANT;
1429         atomic_long_dec(&obd_dirty_pages);
1430         cli->cl_dirty_pages--;
1431         EXIT;
1432 }
1433
1434 /**
1435  * To avoid sleeping with the object lock held, it is best to allocate
1436  * enough grants before entering the critical section.
1437  *
1438  * client_obd_list_lock held by caller
1439  */
1440 static int osc_reserve_grant(struct client_obd *cli, unsigned int bytes)
1441 {
1442         int rc = -EDQUOT;
1443
1444         if (cli->cl_avail_grant >= bytes) {
1445                 cli->cl_avail_grant    -= bytes;
1446                 cli->cl_reserved_grant += bytes;
1447                 rc = 0;
1448         }
1449         return rc;
1450 }
1451
1452 static void __osc_unreserve_grant(struct client_obd *cli,
1453                                   unsigned int reserved, unsigned int unused)
1454 {
1455         /* It's quite normal for us to get more grant than we reserved.
1456          * Consider the case where two extents are merged by adding a new
1457          * chunk: we save one extent tax.  If the extent tax is greater
1458          * than one chunk, adding the new chunk saves even more grant. */
1459         cli->cl_reserved_grant -= reserved;
1460         if (unused > reserved) {
1461                 cli->cl_avail_grant += reserved;
1462                 cli->cl_lost_grant  += unused - reserved;
1463                 cli->cl_dirty_grant -= unused - reserved;
1464         } else {
1465                 cli->cl_avail_grant += unused;
1466                 cli->cl_dirty_grant += reserved - unused;
1467         }
1468 }
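
/*
 * Worked example (illustrative): suppose we reserved 64KiB of grant for
 * a write but only 16KiB of it was consumed, leaving unused = 49152
 * against reserved = 65536.  The else branch above then runs:
 *   cl_reserved_grant -= 65536;  the reservation is over
 *   cl_avail_grant    += 49152;  the unused part becomes available again
 *   cl_dirty_grant    += 16384;  the consumed part now backs dirty data
 * The unused > reserved branch handles the merge case described in the
 * comment: we got back more grant than we reserved (a saved extent tax),
 * and the surplus is accounted in cl_lost_grant to be returned to the OST.
 */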
1469
1470 static void osc_unreserve_grant_nolock(struct client_obd *cli,
1471                                        unsigned int reserved,
1472                                        unsigned int unused)
1473 {
1474         __osc_unreserve_grant(cli, reserved, unused);
1475         if (unused > 0)
1476                 osc_wake_cache_waiters(cli);
1477 }
1478
1479 static void osc_unreserve_grant(struct client_obd *cli,
1480                                 unsigned int reserved, unsigned int unused)
1481 {
1482         spin_lock(&cli->cl_loi_list_lock);
1483         osc_unreserve_grant_nolock(cli, reserved, unused);
1484         spin_unlock(&cli->cl_loi_list_lock);
1485 }
1486
1487 /**
1488  * Free grant after IO is finished or canceled.
1489  *
1490  * @lost_grant remembers how many grants we allocated but did not use;
1491  * these grants should be returned to the OST. There are two cases where
1492  * grants can be lost:
1493  * 1. truncate;
1494  * 2. the blocksize at the OST is less than PAGE_SIZE and a partial page
1495  *    was written. In this case the OST may use fewer chunks to serve the
1496  *    partial write. OSTs don't know the page size on the client side, so
1497  *    clients must calculate the lost grant from the blocksize on the OST.
1498  *    See filter_grant_check() for details.
1499  */
1500 static void osc_free_grant(struct client_obd *cli, unsigned int nr_pages,
1501                            unsigned int lost_grant, unsigned int dirty_grant)
1502 {
1503         unsigned long grant;
1504
1505         grant = (1 << cli->cl_chunkbits) + cli->cl_grant_extent_tax;
1506
1507         spin_lock(&cli->cl_loi_list_lock);
1508         atomic_long_sub(nr_pages, &obd_dirty_pages);
1509         cli->cl_dirty_pages -= nr_pages;
1510         cli->cl_lost_grant += lost_grant;
1511         cli->cl_dirty_grant -= dirty_grant;
1512         if (cli->cl_avail_grant < grant && cli->cl_lost_grant >= grant) {
1513                 /* borrow some grant from truncate to avoid the case that
1514                  * truncate uses up all avail grant */
1515                 cli->cl_lost_grant -= grant;
1516                 cli->cl_avail_grant += grant;
1517         }
1518         osc_wake_cache_waiters(cli);
1519         spin_unlock(&cli->cl_loi_list_lock);
1520         CDEBUG(D_CACHE, "lost %u grant: %lu avail: %lu dirty: %lu/%lu\n",
1521                lost_grant, cli->cl_lost_grant,
1522                cli->cl_avail_grant, cli->cl_dirty_pages << PAGE_SHIFT,
1523                cli->cl_dirty_grant);
1524 }
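
/*
 * Worked example of case 2 above (illustrative numbers): with a 4KiB
 * client page and a 1KiB OST blocksize, a 1KiB write at the start of a
 * page consumes a full page worth of grant on the client, while the OST
 * only needs one 1KiB block to serve it.  The client computes the 3KiB
 * difference from the OST blocksize and passes it in here as @lost_grant
 * so it can be returned to the OST.
 */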
1525
1526 /**
1527  * The companion to osc_enter_cache(), called when @oap is no longer part of
1528  * the dirty accounting due to error.
1529  */
1530 static void osc_exit_cache(struct client_obd *cli, struct osc_async_page *oap)
1531 {
1532         spin_lock(&cli->cl_loi_list_lock);
1533         osc_release_write_grant(cli, &oap->oap_brw_page);
1534         spin_unlock(&cli->cl_loi_list_lock);
1535 }
1536
1537 /**
1538  * Non-blocking version of osc_enter_cache() that consumes grant only when it
1539  * is available.
1540  */
1541 static int osc_enter_cache_try(struct client_obd *cli,
1542                                struct osc_async_page *oap,
1543                                int bytes)
1544 {
1545         int rc;
1546
1547         OSC_DUMP_GRANT(D_CACHE, cli, "need:%d\n", bytes);
1548
1549         rc = osc_reserve_grant(cli, bytes);
1550         if (rc < 0)
1551                 return 0;
1552
1553         if (cli->cl_dirty_pages < cli->cl_dirty_max_pages) {
1554                 if (atomic_long_add_return(1, &obd_dirty_pages) <=
1555                     obd_max_dirty_pages) {
1556                         osc_consume_write_grant(cli, &oap->oap_brw_page);
1557                         rc = 1;
1558                         goto out;
1559                 } else
1560                         atomic_long_dec(&obd_dirty_pages);
1561         }
1562         __osc_unreserve_grant(cli, bytes, bytes);
1563
1564 out:
1565         return rc;
1566 }
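
/*
 * Usage sketch (illustrative): callers hold cl_loi_list_lock and treat
 * the return value as a boolean, e.g.
 *
 *      spin_lock(&cli->cl_loi_list_lock);
 *      if (osc_enter_cache_try(cli, oap, grants))
 *              ... grant and dirty-page accounting both succeeded ...
 *      else
 *              ... fall back to osc_enter_cache(), which may sleep ...
 *      spin_unlock(&cli->cl_loi_list_lock);
 *
 * Note the convention mismatch: osc_reserve_grant() returns 0/-EDQUOT,
 * while this function returns 1 on success and 0 on failure.
 */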
1567
1568 static int ocw_granted(struct client_obd *cli, struct osc_cache_waiter *ocw)
1569 {
1570         int rc;
1571         spin_lock(&cli->cl_loi_list_lock);
1572         rc = list_empty(&ocw->ocw_entry);
1573         spin_unlock(&cli->cl_loi_list_lock);
1574         return rc;
1575 }
1576
1577 /**
1578  * The main entry point to reserve dirty page accounting. Usually the
1579  * grant reserved here is freed in bulk in osc_free_grant(), unless adding
1580  * the page to the osc cache fails; then it is freed in osc_exit_cache().
1581  *
1582  * The process is put to sleep if it has already run out of grant.
1583  */
1584 static int osc_enter_cache(const struct lu_env *env, struct client_obd *cli,
1585                            struct osc_async_page *oap, int bytes)
1586 {
1587         struct osc_object       *osc = oap->oap_obj;
1588         struct lov_oinfo        *loi = osc->oo_oinfo;
1589         struct osc_cache_waiter  ocw;
1590         int                      rc = -EDQUOT;
1591         ENTRY;
1592
1593         OSC_DUMP_GRANT(D_CACHE, cli, "need:%d\n", bytes);
1594
1595         spin_lock(&cli->cl_loi_list_lock);
1596
1597         /* force the caller to try sync io.  this can jump the list
1598          * of queued writes and create a discontiguous rpc stream */
1599         if (OBD_FAIL_CHECK(OBD_FAIL_OSC_NO_GRANT) ||
1600             cli->cl_dirty_max_pages == 0 ||
1601             cli->cl_ar.ar_force_sync || loi->loi_ar.ar_force_sync) {
1602                 OSC_DUMP_GRANT(D_CACHE, cli, "forced sync i/o\n");
1603                 GOTO(out, rc = -EDQUOT);
1604         }
1605
1606         /* Hopefully normal case - cache space and write credits available */
1607         if (list_empty(&cli->cl_cache_waiters) &&
1608             osc_enter_cache_try(cli, oap, bytes)) {
1609                 OSC_DUMP_GRANT(D_CACHE, cli, "granted from cache\n");
1610                 GOTO(out, rc = 0);
1611         }
1612
1613         /* We can get here for two reasons: too many dirty pages in cache,
1614          * or we have run out of grant. In both cases we should write dirty
1615          * pages out. Adding a cache waiter will trigger urgent write-out no
1616          * matter what the RPC size will be.
1617          * The exit condition is no available grant and no dirty pages
1618          * cached; that really means there is no space on the OST. */
1619         init_waitqueue_head(&ocw.ocw_waitq);
1620         ocw.ocw_oap   = oap;
1621         ocw.ocw_grant = bytes;
1622         while (cli->cl_dirty_pages > 0 || cli->cl_w_in_flight > 0) {
1623                 list_add_tail(&ocw.ocw_entry, &cli->cl_cache_waiters);
1624                 ocw.ocw_rc = 0;
1625                 spin_unlock(&cli->cl_loi_list_lock);
1626
1627                 osc_io_unplug_async(env, cli, NULL);
1628
1629                 CDEBUG(D_CACHE, "%s: sleeping for cache space @ %p for %p\n",
1630                        cli_name(cli), &ocw, oap);
1631
1632                 rc = wait_event_idle_timeout(ocw.ocw_waitq,
1633                                              ocw_granted(cli, &ocw),
1634                                              cfs_time_seconds(AT_OFF ?
1635                                                               obd_timeout :
1636                                                               at_max));
1637
1638                 spin_lock(&cli->cl_loi_list_lock);
1639
1640                 if (rc <= 0) {
1641                         /* the wait was interrupted or timed out */
1642                         list_del_init(&ocw.ocw_entry);
1643                         if (rc == 0)
1644                                 rc = -ETIMEDOUT;
1645                         break;
1646                 }
1647                 LASSERT(list_empty(&ocw.ocw_entry));
1648                 rc = ocw.ocw_rc;
1649
1650                 if (rc != -EDQUOT)
1651                         break;
1652                 if (osc_enter_cache_try(cli, oap, bytes)) {
1653                         rc = 0;
1654                         break;
1655                 }
1656         }
1657
1658         switch (rc) {
1659         case 0:
1660                 OSC_DUMP_GRANT(D_CACHE, cli, "finally got grant space\n");
1661                 break;
1662         case -ETIMEDOUT:
1663                 OSC_DUMP_GRANT(D_CACHE, cli,
1664                                "timeout, fall back to sync i/o\n");
1665                 osc_extent_tree_dump(D_CACHE, osc);
1666                 /* fall back to synchronous I/O */
1667                 rc = -EDQUOT;
1668                 break;
1669         case -EINTR:
1670                 /* Ensures restartability - LU-3581 */
1671                 OSC_DUMP_GRANT(D_CACHE, cli, "interrupted\n");
1672                 rc = -ERESTARTSYS;
1673                 break;
1674         case -EDQUOT:
1675                 OSC_DUMP_GRANT(D_CACHE, cli,
1676                                "no grant space, fall back to sync i/o\n");
1677                 break;
1678         default:
1679                 CDEBUG(D_CACHE, "%s: event for cache space @ %p never arrived "
1680                        "due to %d, fall back to sync i/o\n",
1681                        cli_name(cli), &ocw, rc);
1682                 break;
1683         }
1684         EXIT;
1685 out:
1686         spin_unlock(&cli->cl_loi_list_lock);
1687         RETURN(rc);
1688 }
1689
1690 /* caller must hold loi_list_lock */
1691 void osc_wake_cache_waiters(struct client_obd *cli)
1692 {
1693         struct list_head *l, *tmp;
1694         struct osc_cache_waiter *ocw;
1695
1696         ENTRY;
1697         list_for_each_safe(l, tmp, &cli->cl_cache_waiters) {
1698                 ocw = list_entry(l, struct osc_cache_waiter, ocw_entry);
1699
1700                 ocw->ocw_rc = -EDQUOT;
1701
1702                 if (osc_enter_cache_try(cli, ocw->ocw_oap, ocw->ocw_grant))
1703                         ocw->ocw_rc = 0;
1704
1705                 if (ocw->ocw_rc == 0 ||
1706                     !(cli->cl_dirty_pages > 0 || cli->cl_w_in_flight > 0)) {
1707                         list_del_init(&ocw->ocw_entry);
1708                         CDEBUG(D_CACHE, "wake up %p for oap %p, avail grant "
1709                                "%ld, %d\n", ocw, ocw->ocw_oap,
1710                                cli->cl_avail_grant, ocw->ocw_rc);
1711
1712                         wake_up(&ocw->ocw_waitq);
1713                 }
1714         }
1715
1716         EXIT;
1717 }
1718 EXPORT_SYMBOL(osc_wake_cache_waiters);
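
/*
 * Note on the wake-up conditions above (illustrative summary): a waiter
 * is woken either because its grant request was satisfied (ocw_rc == 0),
 * or because nothing remains that could free space (no dirty pages are
 * cached and no writes are in flight), in which case it is woken with
 * -EDQUOT and the caller falls back to synchronous I/O.
 */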
1719
1720 static int osc_max_rpc_in_flight(struct client_obd *cli, struct osc_object *osc)
1721 {
1722         int hprpc = !!list_empty(&osc->oo_hp_exts);
1723         return rpcs_in_flight(cli) >= cli->cl_max_rpcs_in_flight + hprpc;
1724 }
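
/*
 * Illustrative arithmetic: with cl_max_rpcs_in_flight = 8 and an empty
 * oo_hp_exts list (hprpc = 1), this object is only considered at its
 * limit once 9 RPCs are in flight; with high-priority extents queued
 * (hprpc = 0) the limit is the plain maximum of 8.
 */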
1725
1726 /* This maintains the lists of pending pages to read/write for a given
1727  * object.  It is used by osc_check_rpcs()->osc_next_obj() and
1728  * osc_list_maint() to quickly find objects that are ready to send an RPC. */
1729 static int osc_makes_rpc(struct client_obd *cli, struct osc_object *osc,
1730                          int cmd)
1731 {
1732         int invalid_import = 0;
1733         ENTRY;
1734
1735         /* if we have an invalid import we want to drain the queued pages
1736          * by forcing them through rpcs that immediately fail and complete
1737          * the pages.  recovery relies on this to empty the queued pages
1738          * before canceling the locks and evicting the llite pages */
1739         if (cli->cl_import == NULL || cli->cl_import->imp_invalid)
1740                 invalid_import = 1;
1741
1742         if (cmd & OBD_BRW_WRITE) {
1743                 if (atomic_read(&osc->oo_nr_writes) == 0)
1744                         RETURN(0);
1745                 if (invalid_import) {
1746                         CDEBUG(D_CACHE, "invalid import forcing RPC\n");
1747                         RETURN(1);
1748                 }
1749                 if (!list_empty(&osc->oo_hp_exts)) {
1750                         CDEBUG(D_CACHE, "high prio request forcing RPC\n");
1751                         RETURN(1);
1752                 }
1753                 if (!list_empty(&osc->oo_urgent_exts)) {
1754                         CDEBUG(D_CACHE, "urgent request forcing RPC\n");
1755                         RETURN(1);
1756                 }
1757                 /* trigger a write rpc stream as long as there are dirtiers
1758                  * waiting for space.  as they're waiting, they're not going
1759                  * to create more pages to coalesce with what's waiting. */
1760                 if (!list_empty(&cli->cl_cache_waiters)) {
1761                         CDEBUG(D_CACHE, "cache waiters forcing RPC\n");
1762                         RETURN(1);
1763                 }
1764                 if (!list_empty(&osc->oo_full_exts)) {
1765                         CDEBUG(D_CACHE, "full extent ready, make an RPC\n");
1766                         RETURN(1);
1767                 }
1768         } else {
1769                 if (atomic_read(&osc->oo_nr_reads) == 0)
1770                         RETURN(0);
1771                 if (invalid_import) {
1772                         CDEBUG(D_CACHE, "invalid import forcing RPC\n");
1773                         RETURN(1);
1774                 }
1775                 /* all reads are urgent. */
1776                 if (!list_empty(&osc->oo_reading_exts))
1777                         RETURN(1);
1778         }
1779
1780         RETURN(0);
1781 }
1782
1783 static void osc_update_pending(struct osc_object *obj, int cmd, int delta)
1784 {
1785         struct client_obd *cli = osc_cli(obj);
1786         if (cmd & OBD_BRW_WRITE) {
1787                 atomic_add(delta, &obj->oo_nr_writes);
1788                 atomic_add(delta, &cli->cl_pending_w_pages);
1789                 LASSERT(atomic_read(&obj->oo_nr_writes) >= 0);
1790         } else {
1791                 atomic_add(delta, &obj->oo_nr_reads);
1792                 atomic_add(delta, &cli->cl_pending_r_pages);
1793                 LASSERT(atomic_read(&obj->oo_nr_reads) >= 0);
1794         }
1795         OSC_IO_DEBUG(obj, "update pending cmd %d delta %d.\n", cmd, delta);
1796 }
1797
1798 static int osc_makes_hprpc(struct osc_object *obj)
1799 {
1800         return !list_empty(&obj->oo_hp_exts);
1801 }
1802
1803 static void on_list(struct list_head *item, struct list_head *list,
1804                     int should_be_on)
1805 {
1806         if (list_empty(item) && should_be_on)
1807                 list_add_tail(item, list);
1808         else if (!list_empty(item) && !should_be_on)
1809                 list_del_init(item);
1810 }
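
/*
 * on_list() idempotently makes list membership match a predicate: adding
 * an item that is already on the list or removing one that is absent is
 * a no-op.  For example, __osc_list_maint() below uses
 *
 *      on_list(&osc->oo_write_item, &cli->cl_loi_write_list,
 *              atomic_read(&osc->oo_nr_writes) > 0);
 *
 * to keep an object on the write list exactly while it has pending
 * writes.
 */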
1811
1812 /* maintain the osc's cli list membership invariants so that osc_send_oap_rpc
1813  * can find pages to build into rpcs quickly */
1814 static int __osc_list_maint(struct client_obd *cli, struct osc_object *osc)
1815 {
1816         if (osc_makes_hprpc(osc)) {
1817                 /* HP rpc */
1818                 on_list(&osc->oo_ready_item, &cli->cl_loi_ready_list, 0);
1819                 on_list(&osc->oo_hp_ready_item, &cli->cl_loi_hp_ready_list, 1);
1820         } else {
1821                 on_list(&osc->oo_hp_ready_item, &cli->cl_loi_hp_ready_list, 0);
1822                 on_list(&osc->oo_ready_item, &cli->cl_loi_ready_list,
1823                         osc_makes_rpc(cli, osc, OBD_BRW_WRITE) ||
1824                         osc_makes_rpc(cli, osc, OBD_BRW_READ));
1825         }
1826
1827         on_list(&osc->oo_write_item, &cli->cl_loi_write_list,
1828                 atomic_read(&osc->oo_nr_writes) > 0);
1829
1830         on_list(&osc->oo_read_item, &cli->cl_loi_read_list,
1831                 atomic_read(&osc->oo_nr_reads) > 0);
1832
1833         return osc_is_ready(osc);
1834 }
1835
1836 static int osc_list_maint(struct client_obd *cli, struct osc_object *osc)
1837 {
1838         int is_ready;
1839
1840         spin_lock(&cli->cl_loi_list_lock);
1841         is_ready = __osc_list_maint(cli, osc);
1842         spin_unlock(&cli->cl_loi_list_lock);
1843
1844         return is_ready;
1845 }
1846
1847 /* this is trying to propagate async writeback errors back up to the
1848  * application.  As an async write fails we record the error code for later if
1849  * the app does an fsync.  As long as errors persist we force future rpcs to be
1850  * sync so that the app can get a sync error and break the cycle of queueing
1851  * pages for which writeback will fail. */
1852 static void osc_process_ar(struct osc_async_rc *ar, __u64 xid,
1853                            int rc)
1854 {
1855         if (rc) {
1856                 if (!ar->ar_rc)
1857                         ar->ar_rc = rc;
1858
1859                 ar->ar_force_sync = 1;
1860                 ar->ar_min_xid = ptlrpc_sample_next_xid();
1861                 return;
1863         }
1864
1865         if (ar->ar_force_sync && (xid >= ar->ar_min_xid))
1866                 ar->ar_force_sync = 0;
1867 }
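
/*
 * Example timeline (illustrative): a write RPC completes with rc != 0.
 * osc_process_ar() records the first error in ar_rc, sets ar_force_sync
 * and samples the next xid to be allocated (say 105) into ar_min_xid.
 * Later writes are forced synchronous until a write with xid >= 105
 * completes without error, i.e. a write issued after the failure has
 * succeeded, at which point ar_force_sync is cleared.
 */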
1868
1869 /* this must be called holding the loi list lock to give coverage to exit_cache,
1870  * async_flag maintenance, and oap_request */
1871 static void osc_ap_completion(const struct lu_env *env, struct client_obd *cli,
1872                               struct osc_async_page *oap, int sent, int rc)
1873 {
1874         struct osc_object *osc = oap->oap_obj;
1875         struct lov_oinfo  *loi = osc->oo_oinfo;
1876         __u64 xid = 0;
1877
1878         ENTRY;
1879         if (oap->oap_request != NULL) {
1880                 xid = ptlrpc_req_xid(oap->oap_request);
1881                 ptlrpc_req_finished(oap->oap_request);
1882                 oap->oap_request = NULL;
1883         }
1884
1885         /* As the transfer for this page is being done, clear the flags */
1886         spin_lock(&oap->oap_lock);
1887         oap->oap_async_flags = 0;
1888         spin_unlock(&oap->oap_lock);
1889         oap->oap_interrupted = 0;
1890
1891         if (oap->oap_cmd & OBD_BRW_WRITE && xid > 0) {
1892                 spin_lock(&cli->cl_loi_list_lock);
1893                 osc_process_ar(&cli->cl_ar, xid, rc);
1894                 osc_process_ar(&loi->loi_ar, xid, rc);
1895                 spin_unlock(&cli->cl_loi_list_lock);
1896         }
1897
1898         rc = osc_completion(env, oap, oap->oap_cmd, rc);
1899         if (rc)
1900                 CERROR("completion on oap %p obj %p returns %d.\n",
1901                        oap, osc, rc);
1902
1903         EXIT;
1904 }
1905
1906 struct extent_rpc_data {
1907         struct list_head        *erd_rpc_list;
1908         unsigned int            erd_page_count;
1909         unsigned int            erd_max_pages;
1910         unsigned int            erd_max_chunks;
1911         unsigned int            erd_max_extents;
1912 };
1913
1914 static inline unsigned osc_extent_chunks(const struct osc_extent *ext)
1915 {
1916         struct client_obd *cli = osc_cli(ext->oe_obj);
1917         unsigned ppc_bits = cli->cl_chunkbits - PAGE_SHIFT;
1918
1919         return (ext->oe_end >> ppc_bits) - (ext->oe_start >> ppc_bits) + 1;
1920 }
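
/*
 * Worked example (illustrative): with cl_chunkbits = 16 (64KiB chunks)
 * and PAGE_SHIFT = 12, ppc_bits = 4, i.e. 16 pages per chunk.  An extent
 * covering pages [14, 33] then spans chunks 0 through 2, and this
 * returns (33 >> 4) - (14 >> 4) + 1 = 2 - 0 + 1 = 3.
 */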
1921
1922 static inline bool
1923 can_merge(const struct osc_extent *ext, const struct osc_extent *in_rpc)
1924 {
1925         if (ext->oe_no_merge || in_rpc->oe_no_merge)
1926                 return false;
1927
1928         if (ext->oe_srvlock != in_rpc->oe_srvlock)
1929                 return false;
1930
1931         if (ext->oe_ndelay != in_rpc->oe_ndelay)
1932                 return false;
1933
1934         if (!ext->oe_grants != !in_rpc->oe_grants)
1935                 return false;
1936
1937         if (ext->oe_dio != in_rpc->oe_dio)
1938                 return false;
1939
1940         /* It's possible to have overlap on DIO */
1941         if (in_rpc->oe_dio && overlapped(ext, in_rpc))
1942                 return false;
1943
1944         return true;
1945 }
1946
1947 /**
1948  * Try to add extent to one RPC. We need to think about the following things:
1949  * - # of pages must not be over max_pages_per_rpc
1950  * - extent must be compatible with previous ones
1951  */
1952 static int try_to_add_extent_for_io(struct client_obd *cli,
1953                                     struct osc_extent *ext,
1954                                     struct extent_rpc_data *data)
1955 {
1956         struct osc_extent *tmp;
1957         unsigned int chunk_count;
1958         ENTRY;
1959
1960         EASSERT((ext->oe_state == OES_CACHE || ext->oe_state == OES_LOCK_DONE),
1961                 ext);
1962         OSC_EXTENT_DUMP(D_CACHE, ext, "trying to add this extent\n");
1963
1964         if (data->erd_max_extents == 0)
1965                 RETURN(0);
1966
1967         chunk_count = osc_extent_chunks(ext);
1968         EASSERTF(data->erd_page_count != 0 ||
1969                  chunk_count <= data->erd_max_chunks, ext,
1970                  "The first extent to be fit in a RPC contains %u chunks, "
1971                  "which is over the limit %u.\n", chunk_count,
1972                  data->erd_max_chunks);
1973         if (chunk_count > data->erd_max_chunks)
1974                 RETURN(0);
1975
1976         data->erd_max_pages = max(ext->oe_mppr, data->erd_max_pages);
1977         EASSERTF(data->erd_page_count != 0 ||
1978                 ext->oe_nr_pages <= data->erd_max_pages, ext,
1979                 "The first extent to be fit in a RPC contains %u pages, "
1980                 "which is over the limit %u.\n", ext->oe_nr_pages,
1981                 data->erd_max_pages);
1982         if (data->erd_page_count + ext->oe_nr_pages > data->erd_max_pages)
1983                 RETURN(0);
1984
1985         list_for_each_entry(tmp, data->erd_rpc_list, oe_link) {
1986                 EASSERT(tmp->oe_owner == current, tmp);
1987
1988                 if (!can_merge(ext, tmp))
1989                         RETURN(0);
1990         }
1991
1992         data->erd_max_extents--;
1993         data->erd_max_chunks -= chunk_count;
1994         data->erd_page_count += ext->oe_nr_pages;
1995         list_move_tail(&ext->oe_link, data->erd_rpc_list);
1996         ext->oe_owner = current;
1997         RETURN(1);
1998 }
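
/*
 * Illustrative budget trace: starting from { erd_max_extents = 256,
 * erd_max_chunks = 8, erd_page_count = 0, erd_max_pages = 256 }, adding
 * a 3-chunk, 48-page extent leaves { 255, 5, 48, 256 }.  A subsequent
 * 6-chunk extent is then rejected because 6 > 5, even though the page
 * budget would still have allowed it.
 */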
1999
2000 /**
2001  * In order to prevent multiple ptlrpcd threads from breaking contiguous
2002  * extents, get_write_extents() takes all appropriate extents atomically.
2003  *
2004  * The following policy is used to collect extents for IO:
2005  * 1. Add as many HP extents as possible;
2006  * 2. Add the first urgent extent in urgent extent list and take it out of
2007  *    urgent list;
2008  * 3. Add subsequent extents of this urgent extent;
2009  * 4. If urgent list is not empty, goto 2;
2010  * 5. Traverse the extent tree from the 1st extent;
2011  * 6. Above steps exit if there is no space in this RPC.
2012  */
2013 static unsigned int get_write_extents(struct osc_object *obj,
2014                                       struct list_head *rpclist)
2015 {
2016         struct client_obd *cli = osc_cli(obj);
2017         struct osc_extent *ext;
2018         struct extent_rpc_data data = {
2019                 .erd_rpc_list   = rpclist,
2020                 .erd_page_count = 0,
2021                 .erd_max_pages  = cli->cl_max_pages_per_rpc,
2022                 .erd_max_chunks = osc_max_write_chunks(cli),
2023                 .erd_max_extents = 256,
2024         };
2025
2026         LASSERT(osc_object_is_locked(obj));
2027         while (!list_empty(&obj->oo_hp_exts)) {
2028                 ext = list_entry(obj->oo_hp_exts.next, struct osc_extent,
2029                                  oe_link);
2030                 LASSERT(ext->oe_state == OES_CACHE);
2031                 if (!try_to_add_extent_for_io(cli, ext, &data))
2032                         return data.erd_page_count;
2033                 EASSERT(ext->oe_nr_pages <= data.erd_max_pages, ext);
2034         }
2035         if (data.erd_page_count == data.erd_max_pages)
2036                 return data.erd_page_count;
2037
2038         while (!list_empty(&obj->oo_urgent_exts)) {
2039                 ext = list_entry(obj->oo_urgent_exts.next,
2040                                  struct osc_extent, oe_link);
2041                 if (!try_to_add_extent_for_io(cli, ext, &data))
2042                         return data.erd_page_count;
2043         }
2044         if (data.erd_page_count == data.erd_max_pages)
2045                 return data.erd_page_count;
2046
2047         /* One key difference between full extents and other extents: full
2048          * extents can usually only be added if the rpclist was empty, so if
2049          * we can't add one, we continue on to try to add normal extents.
2050          * This way we don't miss adding extra extents to an RPC containing
2051          * high priority or urgent extents. */
2052         while (!list_empty(&obj->oo_full_exts)) {
2053                 ext = list_entry(obj->oo_full_exts.next,
2054                                  struct osc_extent, oe_link);
2055                 if (!try_to_add_extent_for_io(cli, ext, &data))
2056                         break;
2057         }
2058         if (data.erd_page_count == data.erd_max_pages)
2059                 return data.erd_page_count;
2060
2061         ext = first_extent(obj);
2062         while (ext != NULL) {
2063                 if ((ext->oe_state != OES_CACHE) ||
2064                     /* this extent may be already in current rpclist */
2065                     (!list_empty(&ext->oe_link) && ext->oe_owner != NULL)) {
2066                         ext = next_extent(ext);
2067                         continue;
2068                 }
2069
2070                 if (!try_to_add_extent_for_io(cli, ext, &data))
2071                         return data.erd_page_count;
2072
2073                 ext = next_extent(ext);
2074         }
2075         return data.erd_page_count;
2076 }
2077
2078 static int
2079 osc_send_write_rpc(const struct lu_env *env, struct client_obd *cli,
2080                    struct osc_object *osc)
2081 __must_hold(osc)
2082 {
2083         LIST_HEAD(rpclist);
2084         struct osc_extent *ext;
2085         struct osc_extent *tmp;
2086         struct osc_extent *first = NULL;
2087         unsigned int page_count = 0;
2088         int srvlock = 0;
2089         int rc = 0;
2090         ENTRY;
2091
2092         LASSERT(osc_object_is_locked(osc));
2093
2094         page_count = get_write_extents(osc, &rpclist);
2095         LASSERT(equi(page_count == 0, list_empty(&rpclist)));
2096
2097         if (list_empty(&rpclist))
2098                 RETURN(0);
2099
2100         osc_update_pending(osc, OBD_BRW_WRITE, -page_count);
2101
2102         list_for_each_entry(ext, &rpclist, oe_link) {
2103                 LASSERT(ext->oe_state == OES_CACHE ||
2104                         ext->oe_state == OES_LOCK_DONE);
2105                 if (ext->oe_state == OES_CACHE)
2106                         osc_extent_state_set(ext, OES_LOCKING);
2107                 else
2108                         osc_extent_state_set(ext, OES_RPC);
2109         }
2110
2111         /* we're going to grab page lock, so release object lock because
2112          * lock order is page lock -> object lock. */
2113         osc_object_unlock(osc);
2114
2115         list_for_each_entry_safe(ext, tmp, &rpclist, oe_link) {
2116                 if (ext->oe_state == OES_LOCKING) {
2117                         rc = osc_extent_make_ready(env, ext);
2118                         if (unlikely(rc < 0)) {
2119                                 list_del_init(&ext->oe_link);
2120                                 osc_extent_finish(env, ext, 0, rc);
2121                                 continue;
2122                         }
2123                 }
2124                 if (first == NULL) {
2125                         first = ext;
2126                         srvlock = ext->oe_srvlock;
2127                 } else {
2128                         LASSERT(srvlock == ext->oe_srvlock);
2129                 }
2130         }
2131
2132         if (!list_empty(&rpclist)) {
2133                 LASSERT(page_count > 0);
2134                 rc = osc_build_rpc(env, cli, &rpclist, OBD_BRW_WRITE);
2135                 LASSERT(list_empty(&rpclist));
2136         }
2137
2138         osc_object_lock(osc);
2139         RETURN(rc);
2140 }
2141
2142 /**
2143  * Prepare pages for ASYNC io and put pages in the send queue.
2144  *
2145  * \param cli the client OBD device
2146  * \param osc the object whose reading extents are to be sent
2147  *
2148  * \return zero if no page was added to the send queue.
2149  * \return 1 if pages were successfully added to the send queue.
2150  * \return negative on errors.
2151  */
2152 static int
2153 osc_send_read_rpc(const struct lu_env *env, struct client_obd *cli,
2154                   struct osc_object *osc)
2155 __must_hold(osc)
2156 {
2157         struct osc_extent *ext;
2158         struct osc_extent *next;
2159         LIST_HEAD(rpclist);
2160         struct extent_rpc_data data = {
2161                 .erd_rpc_list   = &rpclist,
2162                 .erd_page_count = 0,
2163                 .erd_max_pages  = cli->cl_max_pages_per_rpc,
2164                 .erd_max_chunks = UINT_MAX,
2165                 .erd_max_extents = UINT_MAX,
2166         };
2167         int rc = 0;
2168         ENTRY;
2169
2170         LASSERT(osc_object_is_locked(osc));
2171         list_for_each_entry_safe(ext, next, &osc->oo_reading_exts, oe_link) {
2172                 EASSERT(ext->oe_state == OES_LOCK_DONE, ext);
2173                 if (!try_to_add_extent_for_io(cli, ext, &data))
2174                         break;
2175                 osc_extent_state_set(ext, OES_RPC);
2176                 EASSERT(ext->oe_nr_pages <= data.erd_max_pages, ext);
2177         }
2178         LASSERT(data.erd_page_count <= data.erd_max_pages);
2179
2180         osc_update_pending(osc, OBD_BRW_READ, -data.erd_page_count);
2181
2182         if (!list_empty(&rpclist)) {
2183                 osc_object_unlock(osc);
2184
2185                 rc = osc_build_rpc(env, cli, &rpclist, OBD_BRW_READ);
2186                 LASSERT(list_empty(&rpclist));
2187
2188                 osc_object_lock(osc);
2189         }
2190         RETURN(rc);
2191 }
2192
2193 #define list_to_obj(list, item) ({                                            \
2194         struct list_head *__tmp = (list)->next;                               \
2195         list_del_init(__tmp);                                         \
2196         list_entry(__tmp, struct osc_object, oo_##item);                      \
2197 })
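
/*
 * list_to_obj() pops the first entry off @list and converts it back to
 * its owning osc_object via the named member.  For example,
 *
 *      osc = list_to_obj(&cli->cl_loi_hp_ready_list, hp_ready_item);
 *
 * removes the object's oo_hp_ready_item from the ready list and returns
 * the object itself, as used by osc_next_obj() below.
 */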
2198
2199 /* This is called by osc_check_rpcs() to find which objects have pages that
2200  * we could be sending.  These lists are maintained by osc_makes_rpc(). */
2201 static struct osc_object *osc_next_obj(struct client_obd *cli)
2202 {
2203         ENTRY;
2204
2205         /* First return objects that have blocked locks so that they
2206          * will be flushed quickly and other clients can get the lock,
2207          * then objects which have pages ready to be stuffed into RPCs */
2208         if (!list_empty(&cli->cl_loi_hp_ready_list))
2209                 RETURN(list_to_obj(&cli->cl_loi_hp_ready_list, hp_ready_item));
2210         if (!list_empty(&cli->cl_loi_ready_list))
2211                 RETURN(list_to_obj(&cli->cl_loi_ready_list, ready_item));
2212
2213         /* then if we have cache waiters, return all objects with queued
2214          * writes.  This is especially important when many small files
2215          * have filled up the cache and not been fired into rpcs because
2216          * they don't pass the nr_pending/object threshold */
2217         if (!list_empty(&cli->cl_cache_waiters) &&
2218             !list_empty(&cli->cl_loi_write_list))
2219                 RETURN(list_to_obj(&cli->cl_loi_write_list, write_item));
2220
2221         /* then return all queued objects when we have an invalid import
2222          * so that they get flushed */
2223         if (cli->cl_import == NULL || cli->cl_import->imp_invalid) {
2224                 if (!list_empty(&cli->cl_loi_write_list))
2225                         RETURN(list_to_obj(&cli->cl_loi_write_list,
2226                                            write_item));
2227                 if (!list_empty(&cli->cl_loi_read_list))
2228                         RETURN(list_to_obj(&cli->cl_loi_read_list,
2229                                            read_item));
2230         }
2231         RETURN(NULL);
2232 }
2233
2234 /* called with the loi list lock held */
2235 static void osc_check_rpcs(const struct lu_env *env, struct client_obd *cli)
2236 __must_hold(&cli->cl_loi_list_lock)
2237 {
2238         struct osc_object *osc;
2239         int rc = 0;
2240         ENTRY;
2241
2242         while ((osc = osc_next_obj(cli)) != NULL) {
2243                 struct cl_object *obj = osc2cl(osc);
2244                 struct lu_ref_link link;
2245
2246                 OSC_IO_DEBUG(osc, "%lu in flight\n", rpcs_in_flight(cli));
2247
2248                 if (osc_max_rpc_in_flight(cli, osc)) {
2249                         __osc_list_maint(cli, osc);
2250                         break;
2251                 }
2252
2253                 cl_object_get(obj);
2254                 spin_unlock(&cli->cl_loi_list_lock);
2255                 lu_object_ref_add_at(&obj->co_lu, &link, "check", current);
2256
2257                 /* attempt some read/write balancing by alternating between
2258                  * reads and writes in an object.  The makes_rpc checks here
2259                  * would be redundant if we were getting read/write work items
2260                  * instead of objects.  we don't want send_oap_rpc to drain a
2261                  * partial read pending queue when we're given this object to
2262                  * do io on writes while there are cache waiters */
2263                 osc_object_lock(osc);
2264                 if (osc_makes_rpc(cli, osc, OBD_BRW_WRITE)) {
2265                         rc = osc_send_write_rpc(env, cli, osc);
2266                         if (rc < 0) {
2267                                 CERROR("Write request failed with %d\n", rc);
2268
2269                                 /* osc_send_write_rpc failed, mostly because of
2270                                  * memory pressure.
2271                                  *
2272                                  * It can't break here, because if:
2273                                  *  - a page was submitted by osc_io_submit, so
2274                                  *    page locked;
2275                                  *  - no request in flight
2276                                  *  - no subsequent request
2277                                  * The system will be in live-lock state,
2278                                  * because there is no chance to call
2279                                  * osc_io_unplug() and osc_check_rpcs() any
2280                                  * more. pdflush can't help in this case,
2281                                  * because it might be blocked at grabbing
2282                                  * the page lock as we mentioned.
2283                                  *
2284                                  * Anyway, continue to drain pages. */
2285                                 /* break; */
2286                         }
2287                 }
2288                 if (osc_makes_rpc(cli, osc, OBD_BRW_READ)) {
2289                         rc = osc_send_read_rpc(env, cli, osc);
2290                         if (rc < 0)
2291                                 CERROR("Read request failed with %d\n", rc);
2292                 }
2293                 osc_object_unlock(osc);
2294
2295                 osc_list_maint(cli, osc);
2296                 lu_object_ref_del_at(&obj->co_lu, &link, "check", current);
2297                 cl_object_put(env, obj);
2298
2299                 spin_lock(&cli->cl_loi_list_lock);
2300         }
2301 }
2302
2303 int osc_io_unplug0(const struct lu_env *env, struct client_obd *cli,
2304                    struct osc_object *osc, int async)
2305 {
2306         int rc = 0;
2307
2308         if (osc != NULL && osc_list_maint(cli, osc) == 0)
2309                 return 0;
2310
2311         if (!async) {
2312                 spin_lock(&cli->cl_loi_list_lock);
2313                 osc_check_rpcs(env, cli);
2314                 spin_unlock(&cli->cl_loi_list_lock);
2315         } else {
2316                 CDEBUG(D_CACHE, "Queue writeback work for client %p.\n", cli);
2317                 LASSERT(cli->cl_writeback_work != NULL);
2318                 rc = ptlrpcd_queue_work(cli->cl_writeback_work);
2319         }
2320         return rc;
2321 }
2322 EXPORT_SYMBOL(osc_io_unplug0);
2323
2324 int osc_prep_async_page(struct osc_object *osc, struct osc_page *ops,
2325                         struct page *page, loff_t offset)
2326 {
2327         struct obd_export     *exp = osc_export(osc);
2328         struct osc_async_page *oap = &ops->ops_oap;
2329         ENTRY;
2330
2331         if (!page)
2332                 return cfs_size_round(sizeof(*oap));
2333
2334         oap->oap_magic = OAP_MAGIC;
2335         oap->oap_cli = &exp->exp_obd->u.cli;
2336         oap->oap_obj = osc;
2337
2338         oap->oap_page = page;
2339         oap->oap_obj_off = offset;
2340         LASSERT(!(offset & ~PAGE_MASK));
2341
2342         INIT_LIST_HEAD(&oap->oap_pending_item);
2343         INIT_LIST_HEAD(&oap->oap_rpc_item);
2344
2345         spin_lock_init(&oap->oap_lock);
2346         CDEBUG(D_INFO, "oap %p page %p obj off %llu\n",
2347                oap, page, oap->oap_obj_off);
2348         RETURN(0);
2349 }
2350 EXPORT_SYMBOL(osc_prep_async_page);
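
/*
 * Usage sketch (illustrative): upper layers call this once per page to
 * bind an osc_page to its VM page and object offset, e.g.
 *
 *      rc = osc_prep_async_page(osc, ops, vmpage, offset);
 *
 * Passing page == NULL is a size query: it just returns the rounded size
 * of an osc_async_page so callers can size their per-page allocations.
 */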
2351
2352 int osc_queue_async_io(const struct lu_env *env, struct cl_io *io,
2353                        struct osc_page *ops, cl_commit_cbt cb)
2354 {
2355         struct osc_io *oio = osc_env_io(env);
2356         struct osc_extent     *ext = NULL;
2357         struct osc_async_page *oap = &ops->ops_oap;
2358         struct client_obd     *cli = oap->oap_cli;
2359         struct osc_object     *osc = oap->oap_obj;
2360         struct pagevec        *pvec = &osc_env_info(env)->oti_pagevec;
2361         pgoff_t index;
2362         unsigned int tmp;
2363         unsigned int grants = 0;
2364         u32    brw_flags = OBD_BRW_ASYNC;
2365         int    cmd = OBD_BRW_WRITE;
2366         int    need_release = 0;
2367         int    rc = 0;
2368         ENTRY;
2369
2370         if (oap->oap_magic != OAP_MAGIC)
2371                 RETURN(-EINVAL);
2372
2373         if (cli->cl_import == NULL || cli->cl_import->imp_invalid)
2374                 RETURN(-EIO);
2375
2376         if (!list_empty(&oap->oap_pending_item) ||
2377             !list_empty(&oap->oap_rpc_item))
2378                 RETURN(-EBUSY);
2379
2380         /* Set the OBD_BRW_SRVLOCK before the page is queued. */
2381         brw_flags |= ops->ops_srvlock ? OBD_BRW_SRVLOCK : 0;
2382         if (oio->oi_cap_sys_resource) {
2383                 brw_flags |= OBD_BRW_NOQUOTA;
2384                 cmd |= OBD_BRW_NOQUOTA;
2385         }
2386
2387         /* check if the file's owner/group is over quota */
2388         if (!(cmd & OBD_BRW_NOQUOTA)) {
2389                 struct cl_object *obj;
2390                 struct cl_attr   *attr;
2391                 unsigned int qid[LL_MAXQUOTAS];
2392
2393                 obj = cl_object_top(&osc->oo_cl);
2394                 attr = &osc_env_info(env)->oti_attr;
2395
2396                 cl_object_attr_lock(obj);
2397                 rc = cl_object_attr_get(env, obj, attr);
2398                 cl_object_attr_unlock(obj);
2399
2400                 qid[USRQUOTA] = attr->cat_uid;
2401                 qid[GRPQUOTA] = attr->cat_gid;
2402                 qid[PRJQUOTA] = attr->cat_projid;
2403                 if (rc == 0 && osc_quota_chkdq(cli, qid) == NO_QUOTA)
2404                         rc = -EDQUOT;
2405                 if (rc)
2406                         RETURN(rc);
2407         }
2408
2409         oap->oap_cmd = cmd;
2410         oap->oap_page_off = ops->ops_from;
2411         oap->oap_count = ops->ops_to - ops->ops_from;
2412         /* No need to hold a lock here,
2413          * since this page is not in any list yet. */
2414         oap->oap_async_flags = 0;
2415         oap->oap_brw_flags = brw_flags;
2416
2417         OSC_IO_DEBUG(osc, "oap %p page %p added for cmd %d\n",
2418                      oap, oap->oap_page, oap->oap_cmd & OBD_BRW_RWMASK);
2419
2420         index = osc_index(oap2osc(oap));
2421
2422         /* Add this page into the extent by the following steps:
2423          * 1. if there exists an active extent for this IO, usually this
2424          *    page can be added to it, though sometimes we need to expand
2425          *    the extent to accommodate this page;
2426          * 2. otherwise, a new extent will be allocated. */
2427
2428         ext = oio->oi_active;
2429         if (ext != NULL && ext->oe_start <= index && ext->oe_max_end >= index) {
2430                 /* one chunk plus extent overhead must be enough to write this
2431                  * page */
2432                 grants = (1 << cli->cl_chunkbits) + cli->cl_grant_extent_tax;
2433                 if (ext->oe_end >= index)
2434                         grants = 0;
2435
2436                 /* it doesn't need any grant to dirty this page */
2437                 spin_lock(&cli->cl_loi_list_lock);
2438                 rc = osc_enter_cache_try(cli, oap, grants);
2439                 if (rc == 0) { /* try failed */
2440                         grants = 0;
2441                         need_release = 1;
2442                 } else if (ext->oe_end < index) {
2443                         tmp = grants;
2444                         /* try to expand this extent */
2445                         rc = osc_extent_expand(ext, index, &tmp);
2446                         if (rc < 0) {
2447                                 need_release = 1;
2448                                 /* don't free reserved grant */
2449                         } else {
2450                                 OSC_EXTENT_DUMP(D_CACHE, ext,
2451                                                 "expanded for %lu.\n", index);
2452                                 osc_unreserve_grant_nolock(cli, grants, tmp);
2453                                 grants = 0;
2454                         }
2455                 }
2456                 spin_unlock(&cli->cl_loi_list_lock);
2457                 rc = 0;
2458         } else if (ext != NULL) {
2459                 /* index is located outside of active extent */
2460                 need_release = 1;
2461         }
2462         if (need_release) {
2463                 osc_extent_release(env, ext);
2464                 oio->oi_active = NULL;
2465                 ext = NULL;
2466         }
2467
2468         if (ext == NULL) {
2469                 tmp = (1 << cli->cl_chunkbits) + cli->cl_grant_extent_tax;
2470
2471                 /* try to find new extent to cover this page */
2472                 LASSERT(oio->oi_active == NULL);
2473                 /* we may have allocated grant for this page if we failed
2474                  * to expand the previous active extent. */
2475                 LASSERT(ergo(grants > 0, grants >= tmp));
2476
2477                 rc = 0;
2478                 if (grants == 0) {
2479                         /* We haven't allocated grant for this page, and we
2480                          * must not hold a page lock while we do enter_cache,
2481                          * so we must mark dirty & unlock any pages in the
2482                          * write commit pagevec. */
2483                         if (pagevec_count(pvec)) {
2484                                 cb(env, io, pvec);
2485                                 pagevec_reinit(pvec);
2486                         }
2487                         rc = osc_enter_cache(env, cli, oap, tmp);
2488                         if (rc == 0)
2489                                 grants = tmp;
2490                 }
2491
2492                 tmp = grants;
2493                 if (rc == 0) {
2494                         ext = osc_extent_find(env, osc, index, &tmp);
2495                         if (IS_ERR(ext)) {
2496                                 LASSERT(tmp == grants);
2497                                 osc_exit_cache(cli, oap);
2498                                 rc = PTR_ERR(ext);
2499                                 ext = NULL;
2500                         } else {
2501                                 oio->oi_active = ext;
2502                         }
2503                 }
2504                 if (grants > 0)
2505                         osc_unreserve_grant(cli, grants, tmp);
2506         }
2507
2508         LASSERT(ergo(rc == 0, ext != NULL));
2509         if (ext != NULL) {
2510                 EASSERTF(ext->oe_end >= index && ext->oe_start <= index,
2511                          ext, "index = %lu.\n", index);
2512                 LASSERT((oap->oap_brw_flags & OBD_BRW_FROM_GRANT) != 0);
2513
2514                 osc_object_lock(osc);
2515                 if (ext->oe_nr_pages == 0)
2516                         ext->oe_srvlock = ops->ops_srvlock;
2517                 else
2518                         LASSERT(ext->oe_srvlock == ops->ops_srvlock);
2519                 ++ext->oe_nr_pages;
2520                 list_add_tail(&oap->oap_pending_item, &ext->oe_pages);
2521                 osc_object_unlock(osc);
2522
2523                 if (!ext->oe_layout_version)
2524                         ext->oe_layout_version = io->ci_layout_version;
2525         }
2526
2527         RETURN(rc);
2528 }
2529
2530 int osc_teardown_async_page(const struct lu_env *env,
2531                             struct osc_object *obj, struct osc_page *ops)
2532 {
2533         struct osc_async_page *oap = &ops->ops_oap;
2534         int rc = 0;
2535         ENTRY;
2536
2537         LASSERT(oap->oap_magic == OAP_MAGIC);
2538
2539         CDEBUG(D_INFO, "teardown oap %p page %p at index %lu.\n",
2540                oap, ops, osc_index(oap2osc(oap)));
2541
2542         if (!list_empty(&oap->oap_rpc_item)) {
2543                 CDEBUG(D_CACHE, "oap %p is not in cache.\n", oap);
2544                 rc = -EBUSY;
2545         } else if (!list_empty(&oap->oap_pending_item)) {
2546                 struct osc_extent *ext = NULL;
2547
2548                 osc_object_lock(obj);
2549                 ext = osc_extent_lookup(obj, osc_index(oap2osc(oap)));
2550                 osc_object_unlock(obj);
2551                 /* only truncated pages are allowed to be taken out.
2552                  * See osc_extent_truncate() and osc_cache_truncate_start()
2553                  * for details. */
2554                 if (ext != NULL && ext->oe_state != OES_TRUNC) {
2555                         OSC_EXTENT_DUMP(D_ERROR, ext, "trunc at %lu.\n",
2556                                         osc_index(oap2osc(oap)));
2557                         rc = -EBUSY;
2558                 }
2559                 if (ext != NULL)
2560                         osc_extent_put(env, ext);
2561         }
2562         RETURN(rc);
2563 }
2564
2565 /**
2566  * This is called when a page is picked up by kernel to write out.
2567  *
2568  * We should find out the corresponding extent and add the whole extent
2569  * into urgent list. The extent may be being truncated or used, handle it
2570  * carefully.
2571  */
2572 int osc_flush_async_page(const struct lu_env *env, struct cl_io *io,
2573                          struct osc_page *ops)
2574 {
2575         struct osc_extent *ext   = NULL;
2576         struct osc_object *obj   = cl2osc(ops->ops_cl.cpl_obj);
2577         struct cl_page    *cp    = ops->ops_cl.cpl_page;
2578         pgoff_t            index = osc_index(ops);
2579         struct osc_async_page *oap = &ops->ops_oap;
2580         bool unplug = false;
2581         int rc = 0;
2582         ENTRY;
2583
2584         osc_object_lock(obj);
2585         ext = osc_extent_lookup(obj, index);
2586         if (ext == NULL) {
2587                 osc_extent_tree_dump(D_ERROR, obj);
2588                 LASSERTF(0, "page index %lu is NOT covered.\n", index);
2589         }
2590
2591         switch (ext->oe_state) {
2592         case OES_RPC:
2593         case OES_LOCK_DONE:
2594                 CL_PAGE_DEBUG(D_ERROR, env, cp, "flush an in-rpc page?\n");
2595                 LASSERT(0);
2596                 break;
2597         case OES_LOCKING:
2598                 /* If we know this extent is being written out, we should abort
2599                  * so that the writer can make this page ready. Otherwise, there
2600                  * exists a deadlock problem because other process can wait for
2601                  * page writeback bit holding page lock; and meanwhile in
2602                  * vvp_page_make_ready(), we need to grab page lock before
2603                  * really sending the RPC. */
2604         case OES_TRUNC:
2605                 /* race with truncate, page will be redirtied */
2606         case OES_ACTIVE:
2607                 /* The extent is active so we need to abort and let the caller
2608                  * re-dirty the page. If we continued on here, and we were the
2609                  * one making the extent active, we could deadlock waiting for
2610                  * the page writeback to clear but it won't because the extent
2611                  * is active and won't be written out. */
2612                 GOTO(out, rc = -EAGAIN);
2613         default:
2614                 break;
2615         }
2616
2617         rc = cl_page_prep(env, io, cp, CRT_WRITE);
2618         if (rc)
2619                 GOTO(out, rc);
2620
2621         spin_lock(&oap->oap_lock);
2622         oap->oap_async_flags |= ASYNC_READY|ASYNC_URGENT;
2623         spin_unlock(&oap->oap_lock);
2624
2625         if (memory_pressure_get())
2626                 ext->oe_memalloc = 1;
2627
2628         ext->oe_urgent = 1;
2629         if (ext->oe_state == OES_CACHE) {
2630                 OSC_EXTENT_DUMP(D_CACHE, ext,
2631                                 "flush page %p make it urgent.\n", oap);
2632                 if (list_empty(&ext->oe_link))
2633                         list_add_tail(&ext->oe_link, &obj->oo_urgent_exts);
2634                 unplug = true;
2635         }
2636         rc = 0;
2637         EXIT;
2638
2639 out:
2640         osc_object_unlock(obj);
2641         osc_extent_put(env, ext);
2642         if (unplug)
2643                 osc_io_unplug_async(env, osc_cli(obj), obj);
2644         return rc;
2645 }
2646
2647 /**
2648  * this is called when a sync waiter receives an interruption.  Its job is to
2649  * get the caller woken as soon as possible.  If its page hasn't been put in an
2650  * rpc yet it can dequeue immediately.  Otherwise it has to mark the rpc as
2651  * desiring interruption which will forcefully complete the rpc once the rpc
2652  * has timed out.
2653  */
2654 int osc_cancel_async_page(const struct lu_env *env, struct osc_page *ops)
2655 {
2656         struct osc_async_page *oap = &ops->ops_oap;
2657         struct osc_object     *obj = oap->oap_obj;
2658         struct client_obd     *cli = osc_cli(obj);
2659         struct osc_extent     *ext;
2660         struct osc_extent     *found = NULL;
2661         struct list_head            *plist;
2662         pgoff_t index = osc_index(ops);
2663         int     rc = -EBUSY;
2664         int     cmd;
2665         ENTRY;
2666
2667         LASSERT(!oap->oap_interrupted);
2668         oap->oap_interrupted = 1;
2669
2670         /* Find out the caching extent */
2671         osc_object_lock(obj);
2672         if (oap->oap_cmd & OBD_BRW_WRITE) {
2673                 plist = &obj->oo_urgent_exts;
2674                 cmd   = OBD_BRW_WRITE;
2675         } else {
2676                 plist = &obj->oo_reading_exts;
2677                 cmd   = OBD_BRW_READ;
2678         }
2679         list_for_each_entry(ext, plist, oe_link) {
2680                 if (ext->oe_start <= index && ext->oe_end >= index) {
2681                         LASSERT(ext->oe_state == OES_LOCK_DONE);
2682                         /* For OES_LOCK_DONE state extent, it has already held
2683                          * a refcount for RPC. */
2684                         found = osc_extent_get(ext);
2685                         break;
2686                 }
2687         }
2688         if (found != NULL) {
2689                 list_del_init(&found->oe_link);
2690                 osc_update_pending(obj, cmd, -found->oe_nr_pages);
2691                 osc_object_unlock(obj);
2692
2693                 osc_extent_finish(env, found, 0, -EINTR);
2694                 osc_extent_put(env, found);
2695                 rc = 0;
2696         } else {
2697                 osc_object_unlock(obj);
2698                 /* OK, it's been put in an RPC; only one oap gets the
2699                  * request reference */
2700                 if (oap->oap_request != NULL) {
2701                         ptlrpc_mark_interrupted(oap->oap_request);
2702                         ptlrpcd_wake(oap->oap_request);
2703                         ptlrpc_req_finished(oap->oap_request);
2704                         oap->oap_request = NULL;
2705                 }
2706         }
2707
2708         osc_list_maint(cli, obj);
2709         RETURN(rc);
2710 }
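/*
 * Illustrative sketch (an assumption, not code from this file): how a sync
 * waiter might use osc_cancel_async_page() once its wait is interrupted.
 * my_wait_for_sync_page() and my_wait_interruptible() are hypothetical names
 * used only for this example.
 *
 *	static int my_wait_for_sync_page(const struct lu_env *env,
 *					 struct osc_page *ops)
 *	{
 *		int rc = my_wait_interruptible(ops);
 *
 *		if (rc == -EINTR)
 *			rc = osc_cancel_async_page(env, ops);
 *		return rc;
 *	}
 */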
2711
2712 int osc_queue_sync_pages(const struct lu_env *env, const struct cl_io *io,
2713                          struct osc_object *obj, struct list_head *list,
2714                          int brw_flags)
2715 {
2716         struct client_obd     *cli = osc_cli(obj);
2717         struct osc_extent     *ext;
2718         struct osc_async_page *oap;
2719         int     page_count = 0;
2720         int     mppr       = cli->cl_max_pages_per_rpc;
2721         bool    can_merge   = true;
2722         pgoff_t start      = CL_PAGE_EOF;
2723         pgoff_t end        = 0;
2724         ENTRY;
2725
2726         list_for_each_entry(oap, list, oap_pending_item) {
2727                 struct osc_page *opg = oap2osc_page(oap);
2728                 pgoff_t index = osc_index(opg);
2729
2730                 if (index > end)
2731                         end = index;
2732                 if (index < start)
2733                         start = index;
2734                 ++page_count;
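                     /* grow mppr by powers of two until it covers page_count */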
2735                 mppr <<= (page_count > mppr);
2736
2737                 if (unlikely(opg->ops_from > 0 || opg->ops_to < PAGE_SIZE))
2738                         can_merge = false;
2739         }
2740
2741         ext = osc_extent_alloc(obj);
2742         if (ext == NULL) {
2743                 struct osc_async_page *tmp;
2744
2745                 list_for_each_entry_safe(oap, tmp, list, oap_pending_item) {
2746                         list_del_init(&oap->oap_pending_item);
2747                         osc_ap_completion(env, cli, oap, 0, -ENOMEM);
2748                 }
2749                 RETURN(-ENOMEM);
2750         }
2751
2752         ext->oe_rw = !!(brw_flags & OBD_BRW_READ);
2753         ext->oe_sync = 1;
2754         ext->oe_no_merge = !can_merge;
2755         ext->oe_urgent = 1;
2756         ext->oe_start = start;
2757         ext->oe_end = ext->oe_max_end = end;
2758         ext->oe_obj = obj;
2759         ext->oe_srvlock = !!(brw_flags & OBD_BRW_SRVLOCK);
2760         ext->oe_ndelay = !!(brw_flags & OBD_BRW_NDELAY);
2761         ext->oe_dio = !!(brw_flags & OBD_BRW_NOCACHE);
2762         ext->oe_nr_pages = page_count;
2763         ext->oe_mppr = mppr;
2764         list_splice_init(list, &ext->oe_pages);
2765         ext->oe_layout_version = io->ci_layout_version;
2766
2767         osc_object_lock(obj);
2768         /* Reuse the initial refcount for RPC, don't drop it */
2769         osc_extent_state_set(ext, OES_LOCK_DONE);
2770         if (!ext->oe_rw) { /* write */
2771                 list_add_tail(&ext->oe_link, &obj->oo_urgent_exts);
2772                 osc_update_pending(obj, OBD_BRW_WRITE, page_count);
2773         } else {
2774                 list_add_tail(&ext->oe_link, &obj->oo_reading_exts);
2775                 osc_update_pending(obj, OBD_BRW_READ, page_count);
2776         }
2777         osc_object_unlock(obj);
2778
2779         osc_io_unplug_async(env, cli, obj);
2780         RETURN(0);
2781 }
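/*
 * Illustrative sketch (an assumption): a sync submit path - for example an
 * unbuffered/direct IO path - could collect its pages on a local list and
 * queue them in one shot.  The call consumes the list and unplugs the client:
 *
 *	LIST_HEAD(list);
 *	int brw_flags = OBD_BRW_NOCACHE | OBD_BRW_SRVLOCK;
 *
 *	... link each struct osc_async_page via its oap_pending_item ...
 *	rc = osc_queue_sync_pages(env, io, osc, &list, brw_flags);
 */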
2782
2783 /**
2784  * Called by osc_io_setattr_start() to freeze and destroy covering extents.
2785  */
2786 int osc_cache_truncate_start(const struct lu_env *env, struct osc_object *obj,
2787                              __u64 size, struct osc_extent **extp)
2788 {
2789         struct client_obd *cli = osc_cli(obj);
2790         struct osc_extent *ext;
2791         struct osc_extent *waiting = NULL;
2792         pgoff_t index;
2793         LIST_HEAD(list);
2794         int result = 0;
2795         bool partial;
2796         ENTRY;
2797
2798         /* pages with index greater than or equal to @index are truncated. */
2799         index = cl_index(osc2cl(obj), size);
2800         partial = size > cl_offset(osc2cl(obj), index);
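        /* e.g. with 4 KiB pages, size = 10000 yields index = 2 and
         * partial = true (10000 > 8192): page 2 is truncated within the
         * page, and pages with index >= 3 are dropped entirely */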
2801
2802 again:
2803         osc_object_lock(obj);
2804         ext = osc_extent_search(obj, index);
2805         if (ext == NULL)
2806                 ext = first_extent(obj);
2807         else if (ext->oe_end < index)
2808                 ext = next_extent(ext);
2809         while (ext != NULL) {
2810                 EASSERT(ext->oe_state != OES_TRUNC, ext);
2811
2812                 if (ext->oe_state > OES_CACHE || ext->oe_urgent) {
2813                         /* if ext is in urgent state, a page in it must
2814                          * already have been flushed by write_page().  We
2815                          * have to wait for this extent because we can't
2816                          * truncate that page. */
2817                         OSC_EXTENT_DUMP(D_CACHE, ext,
2818                                         "waiting for busy extent\n");
2819                         waiting = osc_extent_get(ext);
2820                         break;
2821                 }
2822
2823                 OSC_EXTENT_DUMP(D_CACHE, ext, "try to trunc:%llu.\n", size);
2824
2825                 osc_extent_get(ext);
2826                 if (ext->oe_state == OES_ACTIVE) {
2827                         /* we grab the inode mutex for the write path but
2828                          * release it before releasing the extent (in
2829                          * osc_io_end()), so there is a race window in which
2830                          * an extent is still OES_ACTIVE when truncate starts. */
2831                         LASSERT(!ext->oe_trunc_pending);
2832                         ext->oe_trunc_pending = 1;
2833                 } else {
2834                         EASSERT(ext->oe_state == OES_CACHE, ext);
2835                         osc_extent_state_set(ext, OES_TRUNC);
2836                         osc_update_pending(obj, OBD_BRW_WRITE,
2837                                            -ext->oe_nr_pages);
2838                 }
2839                 /* This extent could be on the full extents list, that's OK */
2840                 EASSERT(!ext->oe_hp && !ext->oe_urgent, ext);
2841                 if (!list_empty(&ext->oe_link))
2842                         list_move_tail(&ext->oe_link, &list);
2843                 else
2844                         list_add_tail(&ext->oe_link, &list);
2845
2846                 ext = next_extent(ext);
2847         }
2848         osc_object_unlock(obj);
2849
2850         osc_list_maint(cli, obj);
2851
2852         while (!list_empty(&list)) {
2853                 int rc;
2854
2855                 ext = list_entry(list.next, struct osc_extent, oe_link);
2856                 list_del_init(&ext->oe_link);
2857
2858                 /* the extent may be in OES_ACTIVE state because the inode
2859                  * mutex is released before osc_io_end() in the file write case */
2860                 if (ext->oe_state != OES_TRUNC)
2861                         osc_extent_wait(env, ext, OES_TRUNC);
2862
2863                 rc = osc_extent_truncate(ext, index, partial);
2864                 if (rc < 0) {
2865                         if (result == 0)
2866                                 result = rc;
2867
2868                         OSC_EXTENT_DUMP(D_ERROR, ext,
2869                                         "truncate error %d\n", rc);
2870                 } else if (ext->oe_nr_pages == 0) {
2871                         osc_extent_remove(ext);
2872                 } else {
2873                         /* this must be an overlapping extent, which means
2874                          * only part of the pages in it have been truncated.
2875                          */
2876                         EASSERTF(ext->oe_start <= index, ext,
2877                                  "trunc index = %lu/%d.\n", index, partial);
2878                         /* fix index to skip this partially truncated extent */
2879                         index = ext->oe_end + 1;
2880                         partial = false;
2881
2882                         /* we need to hold this extent in OES_TRUNC state so
2883                          * that no writeback will happen.  This is to avoid
2884                          * BUG 17397.
2885                          * Only a partial truncate can reach here; if @size
2886                          * is not zero, the caller must provide a valid @extp. */
2887                         LASSERT(*extp == NULL);
2888                         *extp = osc_extent_get(ext);
2889                         OSC_EXTENT_DUMP(D_CACHE, ext,
2890                                         "trunc at %llu\n", size);
2891                 }
2892                 osc_extent_put(env, ext);
2893         }
2894         if (waiting != NULL) {
2895                 int rc;
2896
2897                 /* ignore the result of osc_extent_wait; the write
2898                  * initiator should take care of it. */
2899                 rc = osc_extent_wait(env, waiting, OES_INV);
2900                 if (rc < 0)
2901                         OSC_EXTENT_DUMP(D_CACHE, waiting, "error: %d.\n", rc);
2902
2903                 osc_extent_put(env, waiting);
2904                 waiting = NULL;
2905                 goto again;
2906         }
2907         RETURN(result);
2908 }
2909 EXPORT_SYMBOL(osc_cache_truncate_start);
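/*
 * Illustrative sketch (an assumption): the setattr path is expected to pair
 * the two calls, keeping the extent returned via @extp held across the
 * actual truncate:
 *
 *	struct osc_extent *ext = NULL;
 *
 *	rc = osc_cache_truncate_start(env, obj, size, &ext);
 *	... send the punch/setattr to the OST ...
 *	osc_cache_truncate_end(env, ext);	(a NULL ext is a no-op)
 */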
2910
2911 /**
2912  * Called after osc_io_setattr_end() to add oio->oi_trunc back to the cache.
2913  */
2914 void osc_cache_truncate_end(const struct lu_env *env, struct osc_extent *ext)
2915 {
2916         if (ext != NULL) {
2917                 struct osc_object *obj = ext->oe_obj;
2918                 bool unplug = false;
2919
2920                 EASSERT(ext->oe_nr_pages > 0, ext);
2921                 EASSERT(ext->oe_state == OES_TRUNC, ext);
2922                 EASSERT(!ext->oe_urgent, ext);
2923
2924                 OSC_EXTENT_DUMP(D_CACHE, ext, "trunc -> cache.\n");
2925                 osc_object_lock(obj);
2926                 osc_extent_state_set(ext, OES_CACHE);
2927                 if (ext->oe_fsync_wait && !ext->oe_urgent) {
2928                         ext->oe_urgent = 1;
2929                         list_move_tail(&ext->oe_link, &obj->oo_urgent_exts);
2930                         unplug = true;
2931                 }
2932                 osc_update_pending(obj, OBD_BRW_WRITE, ext->oe_nr_pages);
2933                 osc_object_unlock(obj);
2934                 osc_extent_put(env, ext);
2935
2936                 if (unplug)
2937                         osc_io_unplug_async(env, osc_cli(obj), obj);
2938         }
2939 }
2940
2941 /**
2942  * Wait for extents in a specific range to be written out.
2943  * The caller must have called osc_cache_writeback_range() to issue the
2944  * IO; otherwise it will take a long time for this function to finish.
2945  *
2946  * The caller must hold the inode mutex, or cancel the exclusive DLM lock,
2947  * so that nobody else can dirty this range of the file while we're
2948  * waiting for the extents to be written.
2949  */
2950 int osc_cache_wait_range(const struct lu_env *env, struct osc_object *obj,
2951                          pgoff_t start, pgoff_t end)
2952 {
2953         struct osc_extent *ext;
2954         pgoff_t index = start;
2955         int     result = 0;
2956         ENTRY;
2957
2958 again:
2959         osc_object_lock(obj);
2960         ext = osc_extent_search(obj, index);
2961         if (ext == NULL)
2962                 ext = first_extent(obj);
2963         else if (ext->oe_end < index)
2964                 ext = next_extent(ext);
2965         while (ext != NULL) {
2966                 int rc;
2967
2968                 if (ext->oe_start > end)
2969                         break;
2970
2971                 if (!ext->oe_fsync_wait) {
2972                         ext = next_extent(ext);
2973                         continue;
2974                 }
2975
2976                 EASSERT(ergo(ext->oe_state == OES_CACHE,
2977                              ext->oe_hp || ext->oe_urgent), ext);
2978                 EASSERT(ergo(ext->oe_state == OES_ACTIVE,
2979                              !ext->oe_hp && ext->oe_urgent), ext);
2980
2981                 index = ext->oe_end + 1;
2982                 osc_extent_get(ext);
2983                 osc_object_unlock(obj);
2984
2985                 rc = osc_extent_wait(env, ext, OES_INV);
2986                 if (result == 0)
2987                         result = rc;
2988                 osc_extent_put(env, ext);
2989                 goto again;
2990         }
2991         osc_object_unlock(obj);
2992
2993         OSC_IO_DEBUG(obj, "sync file range.\n");
2994         RETURN(result);
2995 }
2996 EXPORT_SYMBOL(osc_cache_wait_range);
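/*
 * Illustrative sketch (an assumption): an fsync-style flush first starts
 * writeback on the range and only then waits, as required by the comment
 * above:
 *
 *	result = osc_cache_writeback_range(env, obj, start, end, 0, 0);
 *	if (result >= 0)
 *		result = osc_cache_wait_range(env, obj, start, end);
 */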
2997
2998 /**
2999  * Called to write out a range of osc object.
3000  *
3001  * @hp     : should be set if this is caused by lock cancellation;
3002  * @discard: is set if dirty pages should be dropped - the file will be deleted
3003  *         or truncated; this implies no extent is discarded partially.
3004  *
3005  * Returns how many pages will be issued, or an error code if an error occurred.
3006  */
3007 int osc_cache_writeback_range(const struct lu_env *env, struct osc_object *obj,
3008                               pgoff_t start, pgoff_t end, int hp, int discard)
3009 {
3010         struct osc_extent *ext;
3011         LIST_HEAD(discard_list);
3012         bool unplug = false;
3013         int result = 0;
3014         ENTRY;
3015
3016         osc_object_lock(obj);
3017         ext = osc_extent_search(obj, start);
3018         if (ext == NULL)
3019                 ext = first_extent(obj);
3020         else if (ext->oe_end < start)
3021                 ext = next_extent(ext);
3022         while (ext != NULL) {
3023                 if (ext->oe_start > end)
3024                         break;
3025
3026                 ext->oe_fsync_wait = 1;
3027                 switch (ext->oe_state) {
3028                 case OES_CACHE:
3029                         result += ext->oe_nr_pages;
3030                         if (!discard) {
3031                                 struct list_head *list = NULL;
3032                                 if (hp) {
3033                                         EASSERT(!ext->oe_hp, ext);
3034                                         ext->oe_hp = 1;
3035                                         list = &obj->oo_hp_exts;
3036                                 } else if (!ext->oe_urgent) {
3037                                         ext->oe_urgent = 1;
3038                                         list = &obj->oo_urgent_exts;
3039                                 }
3040                                 if (list != NULL)
3041                                         list_move_tail(&ext->oe_link, list);
3042                                 unplug = true;
3043                         } else {
3044                                 struct client_obd *cli = osc_cli(obj);
3045                                 int pcc_bits = cli->cl_chunkbits - PAGE_SHIFT;
3046                                 pgoff_t align_by = (1 << pcc_bits);
3047                                 pgoff_t a_start = round_down(start, align_by);
3048                                 pgoff_t a_end = round_up(end, align_by);
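                                /* e.g. 1 MiB chunks with 4 KiB pages give
                                 * align_by = 256; start = 300 and end = 600
                                 * then align to a_start = 256, a_end = 768 */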
3049
3050                                 /* overflow case */
3051                                 if (end && !a_end)
3052                                         a_end = CL_PAGE_EOF;
3053                                 /* the only discarder is lock cancellation,
3054                                  * so [start, end], aligned by chunk size,
3055                                  * must contain this extent */
3056                                 LASSERTF(ext->oe_start >= a_start &&
3057                                          ext->oe_end <= a_end,
3058                                          "ext [%lu, %lu] reg [%lu, %lu] "
3059                                          "orig [%lu %lu] align %lu bits "
3060                                          "%d\n", ext->oe_start, ext->oe_end,
3061                                          a_start, a_end, start, end,
3062                                          align_by, pcc_bits);
3063                                 osc_extent_state_set(ext, OES_LOCKING);
3064                                 ext->oe_owner = current;
3065                                 list_move_tail(&ext->oe_link,
3066                                                    &discard_list);
3067                                 osc_update_pending(obj, OBD_BRW_WRITE,
3068                                                    -ext->oe_nr_pages);
3069                         }
3070                         break;
3071                 case OES_ACTIVE:
3072                         /* It's pretty bad to wait for ACTIVE extents, because
3073                          * we don't know how long we will wait for them to be
3074                          * flushed; they may be blocked waiting for more
3075                          * grant.  We do this for the correctness of fsync. */
3076                         LASSERT(hp == 0 && discard == 0);
3077                         ext->oe_urgent = 1;
3078                         break;
3079                 case OES_TRUNC:
3080                         /* this extent is being truncated, so we can't do
3081                          * anything for it now.  It will be set to urgent
3082                          * after truncate finishes in osc_cache_truncate_end(). */
3083                 default:
3084                         break;
3085                 }
3086                 ext = next_extent(ext);
3087         }
3088         osc_object_unlock(obj);
3089
3090         LASSERT(ergo(!discard, list_empty(&discard_list)));
3091         if (!list_empty(&discard_list)) {
3092                 struct osc_extent *tmp;
3093                 int rc;
3094
3095                 osc_list_maint(osc_cli(obj), obj);
3096                 list_for_each_entry_safe(ext, tmp, &discard_list, oe_link) {
3097                         list_del_init(&ext->oe_link);
3098                         EASSERT(ext->oe_state == OES_LOCKING, ext);
3099
3100                         /* Discard cached pages.  We don't actually write
3101                          * this extent out but we complete it as if we did. */
3102                         rc = osc_extent_make_ready(env, ext);
3103                         if (unlikely(rc < 0)) {
3104                                 OSC_EXTENT_DUMP(D_ERROR, ext,
3105                                                 "make_ready returned %d\n", rc);
3106                                 if (result >= 0)
3107                                         result = rc;
3108                         }
3109
3110                         /* finish the extent as if the pages were sent */
3111                         osc_extent_finish(env, ext, 0, 0);
3112                 }
3113         }
3114
3115         if (unplug)
3116                 osc_io_unplug(env, osc_cli(obj), obj);
3117
3118         if (hp || discard) {
3119                 int rc;
3120                 rc = osc_cache_wait_range(env, obj, start, end);
3121                 if (result >= 0 && rc < 0)
3122                         result = rc;
3123         }
3124
3125         OSC_IO_DEBUG(obj, "pageout [%lu, %lu], %d.\n", start, end, result);
3126         RETURN(result);
3127 }
3128 EXPORT_SYMBOL(osc_cache_writeback_range);
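/*
 * Illustrative sketch (an assumption): the two flag combinations described
 * above.  Since @hp or @discard makes the call wait on the range, it only
 * returns once the range is quiescent:
 *
 *	rc = osc_cache_writeback_range(env, obj, start, end, 1, 0);
 *		(high-priority flush for a cancelled lock with dirty pages)
 *	rc = osc_cache_writeback_range(env, obj, start, end, 0, 1);
 *		(drop the pages without writing: file deleted or truncated)
 */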
3129
3130 /**
3131  * Invokes \a cb on each page of \a obj within the given [start, end].
3132  *
3133  * If the lookup is about to hog the CPU for too long, it gives up and
3134  * returns CLP_GANG_RESCHED; in that case the caller should reschedule
3135  * and retry from the index where the lookup stopped.
3136  *
3137  * Gang tree lookup (radix_tree_gang_lookup()) optimization is absolutely
3138  * crucial in the face of [offset, EOF] locks.
3139  *
3140  * Visits at least one page unless there is no covered page.
3141  */
3142 int osc_page_gang_lookup(const struct lu_env *env, struct cl_io *io,
3143                         struct osc_object *osc, pgoff_t start, pgoff_t end,
3144                         osc_page_gang_cbt cb, void *cbdata)
3145 {
3146         struct osc_page *ops;
3147         struct pagevec  *pagevec;
3148         void            **pvec;
3149         pgoff_t         idx;
3150         unsigned int    nr;
3151         unsigned int    i;
3152         unsigned int    j;
3153         int             res = CLP_GANG_OKAY;
3154         bool            tree_lock = true;
3155         ENTRY;
3156
3157         idx = start;
3158         pvec = osc_env_info(env)->oti_pvec;
3159         pagevec = &osc_env_info(env)->oti_pagevec;
3160         ll_pagevec_init(pagevec, 0);
3161         spin_lock(&osc->oo_tree_lock);
3162         while ((nr = radix_tree_gang_lookup(&osc->oo_tree, pvec,
3163                                             idx, OTI_PVEC_SIZE)) > 0) {
3164                 struct cl_page *page;
3165                 bool end_of_region = false;
3166
3167                 for (i = 0, j = 0; i < nr; ++i) {
3168                         ops = pvec[i];
3169                         pvec[i] = NULL;
3170
3171                         idx = osc_index(ops);
3172                         if (idx > end) {
3173                                 end_of_region = true;
3174                                 break;
3175                         }
3176
3177                         page = ops->ops_cl.cpl_page;
3178                         LASSERT(page->cp_type == CPT_CACHEABLE);
3179                         if (page->cp_state == CPS_FREEING)
3180                                 continue;
3181
3182                         cl_page_get(page);
3183                         lu_ref_add_atomic(&page->cp_reference,
3184                                           "gang_lookup", current);
3185                         pvec[j++] = ops;
3186                 }
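                /* resume the next gang lookup just past the highest
                 * index seen in this batch */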
3187                 ++idx;
3188
3189                 /*
3190                  * Here a delicate locking dance is performed. Current thread
3191                  * holds a reference to a page, but has to own it before it
3192                  * can be placed into queue. Owning implies waiting, so
3193                  * radix-tree lock is to be released. After a wait one has to
3194                  * check that pages weren't truncated (cl_page_own() returns
3195                  * error in the latter case).
3196                  */
3197                 spin_unlock(&osc->oo_tree_lock);
3198                 tree_lock = false;
3199
3200                 for (i = 0; i < j; ++i) {
3201                         ops = pvec[i];
3202                         if (res == CLP_GANG_OKAY)
3203                                 res = (*cb)(env, io, ops, cbdata);
3204
3205                         page = ops->ops_cl.cpl_page;
3206                         lu_ref_del(&page->cp_reference, "gang_lookup", current);
3207                         cl_pagevec_put(env, page, pagevec);
3208                 }
3209                 pagevec_release(pagevec);
3210
3211                 if (nr < OTI_PVEC_SIZE || end_of_region)
3212                         break;
3213
3214                 if (res == CLP_GANG_OKAY && need_resched())
3215                         res = CLP_GANG_RESCHED;
3216                 if (res != CLP_GANG_OKAY)
3217                         break;
3218
3219                 spin_lock(&osc->oo_tree_lock);
3220                 tree_lock = true;
3221         }
3222         if (tree_lock)
3223                 spin_unlock(&osc->oo_tree_lock);
3224         RETURN(res);
3225 }
3226 EXPORT_SYMBOL(osc_page_gang_lookup);
3227
3228 /**
3229  * Check whether page @page is covered by an extra lock; if not, discard it.
3230  */
3231 static int check_and_discard_cb(const struct lu_env *env, struct cl_io *io,
3232                                 struct osc_page *ops, void *cbdata)
3233 {
3234         struct osc_thread_info *info = osc_env_info(env);
3235         struct osc_object *osc = cbdata;
3236         pgoff_t index;
3237
3238         index = osc_index(ops);
3239         if (index >= info->oti_fn_index) {
3240                 struct ldlm_lock *tmp;
3241                 struct cl_page *page = ops->ops_cl.cpl_page;
3242
3243                 /* refresh non-overlapped index */
3244                 tmp = osc_dlmlock_at_pgoff(env, osc, index,
3245                                            OSC_DAP_FL_TEST_LOCK);
3246                 if (tmp != NULL) {
3247                         __u64 end = tmp->l_policy_data.l_extent.end;
3248                         /* Cache the first non-overlapped index so as to skip
3249                          * all pages within [index, oti_fn_index).  This is
3250                          * safe because when the tmp lock is canceled it will
3251                          * discard these pages. */
3252                         info->oti_fn_index = cl_index(osc2cl(osc), end + 1);
3253                         if (end == OBD_OBJECT_EOF)
3254                                 info->oti_fn_index = CL_PAGE_EOF;
3255                         LDLM_LOCK_PUT(tmp);
3256                 } else if (cl_page_own(env, io, page) == 0) {
3257                         /* discard the page */
3258                         cl_page_discard(env, io, page);
3259                         cl_page_disown(env, io, page);
3260                 } else {
3261                         LASSERT(page->cp_state == CPS_FREEING);
3262                 }
3263         }
3264
3265         info->oti_next_index = index + 1;
3266         return CLP_GANG_OKAY;
3267 }
3268
3269 int osc_discard_cb(const struct lu_env *env, struct cl_io *io,
3270                    struct osc_page *ops, void *cbdata)
3271 {
3272         struct osc_thread_info *info = osc_env_info(env);
3273         struct cl_page *page = ops->ops_cl.cpl_page;
3274
3275         /* page is top page. */
3276         info->oti_next_index = osc_index(ops) + 1;
3277         if (cl_page_own(env, io, page) == 0) {
3278                 if (!ergo(page->cp_type == CPT_CACHEABLE,
3279                           !PageDirty(cl_page_vmpage(page))))
3280                         CL_PAGE_DEBUG(D_ERROR, env, page,
3281                                         "discard dirty page?\n");
3282
3283                 /* discard the page */
3284                 cl_page_discard(env, io, page);
3285                 cl_page_disown(env, io, page);
3286         } else {
3287                 LASSERT(page->cp_state == CPS_FREEING);
3288         }
3289
3290         return CLP_GANG_OKAY;
3291 }
3292 EXPORT_SYMBOL(osc_discard_cb);
3293
3294 /**
3295  * Discard pages protected by the given lock.  This function traverses the
3296  * radix tree to find all covering pages and discards them.  If a page is
3297  * covered by another lock, it should remain in the cache.
3298  *
3299  * If an error happens at any step, the process continues anyway (the reasoning
3300  * behind this being that lock cancellation cannot be delayed indefinitely).
3301  */
3302 int osc_lock_discard_pages(const struct lu_env *env, struct osc_object *osc,
3303                            pgoff_t start, pgoff_t end, bool discard)
3304 {
3305         struct osc_thread_info *info = osc_env_info(env);
3306         struct cl_io *io = osc_env_thread_io(env);
3307         osc_page_gang_cbt cb;
3308         int res;
3309         int result;
3310
3311         ENTRY;
3312
3313         io->ci_obj = cl_object_top(osc2cl(osc));
3314         io->ci_ignore_layout = 1;
3315         result = cl_io_init(env, io, CIT_MISC, io->ci_obj);
3316         if (result != 0)
3317                 GOTO(out, result);
3318
3319         cb = discard ? osc_discard_cb : check_and_discard_cb;
3320         info->oti_fn_index = info->oti_next_index = start;
3321         do {
3322                 res = osc_page_gang_lookup(env, io, osc,
3323                                            info->oti_next_index, end, cb, osc);
3324                 if (info->oti_next_index > end)
3325                         break;
3326
3327                 if (res == CLP_GANG_RESCHED)
3328                         cond_resched();
3329         } while (res != CLP_GANG_OKAY);
3330 out:
3331         cl_io_fini(env, io);
3332         RETURN(result);
3333 }
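/*
 * Illustrative sketch (an assumption): a lock-cancel path would discard the
 * pages its DLM extent lock protected, with start/end taken from the
 * cancelled lock's extent; discard = false keeps pages that another lock
 * still covers:
 *
 *	rc = osc_lock_discard_pages(env, osc, start, end, discard);
 */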
3334
3335
3336 /** @} osc */