lustre/osc/osc_page.c
/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, Whamcloud, Inc.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * Implementation of cl_page for OSC layer.
 *
 *   Author: Nikita Danilov <nikita.danilov@sun.com>
 */

#define DEBUG_SUBSYSTEM S_OSC

#include "osc_cl_internal.h"

static void osc_lru_del(struct client_obd *cli, struct osc_page *opg, bool del);
static void osc_lru_add(struct client_obd *cli, struct osc_page *opg);
static int osc_lru_reserve(const struct lu_env *env, struct osc_object *obj,
                           struct osc_page *opg);

/** \addtogroup osc
 *  @{
 */

/*
 * Comment out osc_page_protected because it may sleep inside the
 * client_obd_list_lock.
 * client_obd_list_lock -> osc_ap_completion -> osc_completion ->
 *   -> osc_page_protected -> osc_page_is_dlocked -> osc_match_base
 *   -> ldlm_lock_match -> sptlrpc_import_check_ctx -> sleep.
 */
#if 0
static int osc_page_is_dlocked(const struct lu_env *env,
                               const struct osc_page *opg,
                               enum cl_lock_mode mode, int pending, int unref)
{
        struct cl_page         *page;
        struct osc_object      *obj;
        struct osc_thread_info *info;
        struct ldlm_res_id     *resname;
        struct lustre_handle   *lockh;
        ldlm_policy_data_t     *policy;
        ldlm_mode_t             dlmmode;
        int                     flags;

        cfs_might_sleep();

        info = osc_env_info(env);
        resname = &info->oti_resname;
        policy = &info->oti_policy;
        lockh = &info->oti_handle;
        page = opg->ops_cl.cpl_page;
        obj = cl2osc(opg->ops_cl.cpl_obj);

        flags = LDLM_FL_TEST_LOCK | LDLM_FL_BLOCK_GRANTED;
        if (pending)
                flags |= LDLM_FL_CBPENDING;

        dlmmode = osc_cl_lock2ldlm(mode) | LCK_PW;
        osc_lock_build_res(env, obj, resname);
        osc_index2policy(policy, page->cp_obj, page->cp_index, page->cp_index);
        return osc_match_base(osc_export(obj), resname, LDLM_EXTENT, policy,
                              dlmmode, &flags, NULL, lockh, unref);
}

/**
 * Checks an invariant that a page in the cache is covered by a lock, as
 * needed.
 */
static int osc_page_protected(const struct lu_env *env,
                              const struct osc_page *opg,
                              enum cl_lock_mode mode, int unref)
{
        struct cl_object_header *hdr;
        struct cl_lock          *scan;
        struct cl_page          *page;
        struct cl_lock_descr    *descr;
        int result;

        LINVRNT(!opg->ops_temp);

        page = opg->ops_cl.cpl_page;
        if (page->cp_owner != NULL &&
            cl_io_top(page->cp_owner)->ci_lockreq == CILR_NEVER)
                /*
                 * If IO is done without locks (liblustre, or lloop), a lock
                 * is not required.
                 */
                result = 1;
        else
                /* otherwise check for a DLM lock */
                result = osc_page_is_dlocked(env, opg, mode, 1, unref);
        if (result == 0) {
                /* maybe this page is a part of a lockless io? */
                hdr = cl_object_header(opg->ops_cl.cpl_obj);
                descr = &osc_env_info(env)->oti_descr;
                descr->cld_mode = mode;
                descr->cld_start = page->cp_index;
                descr->cld_end   = page->cp_index;
                spin_lock(&hdr->coh_lock_guard);
                cfs_list_for_each_entry(scan, &hdr->coh_locks, cll_linkage) {
                        /*
                         * Lock-less sub-lock has to be either in HELD state
                         * (when io is actively going on), or in CACHED state,
                         * when top-lock is being unlocked:
                         * cl_io_unlock()->cl_unuse()->...->lov_lock_unuse().
                         */
                        if ((scan->cll_state == CLS_HELD ||
                             scan->cll_state == CLS_CACHED) &&
                            cl_lock_ext_match(&scan->cll_descr, descr)) {
                                struct osc_lock *olck;

                                olck = osc_lock_at(scan);
                                result = osc_lock_is_lockless(olck);
                                break;
                        }
                }
                spin_unlock(&hdr->coh_lock_guard);
        }
        return result;
}
#else
static int osc_page_protected(const struct lu_env *env,
                              const struct osc_page *opg,
                              enum cl_lock_mode mode, int unref)
{
        return 1;
}
#endif

/*****************************************************************************
 *
 * Page operations.
 *
 */
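/**
 * Finalize an osc_page: called when the enclosing cl_page is destroyed;
 * returns the slice to the osc_page slab.
 */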
static void osc_page_fini(const struct lu_env *env,
                          struct cl_page_slice *slice)
{
        struct osc_page *opg = cl2osc_page(slice);
        CDEBUG(D_TRACE, "%p\n", opg);
        LASSERT(opg->ops_lock == NULL);
        OBD_SLAB_FREE_PTR(opg, osc_page_kmem);
}

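/**
 * Pin the page for a transfer: take an extra reference on the top-level
 * cl_page and mark the osc_page as transfer-pinned.
 */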
static void osc_page_transfer_get(struct osc_page *opg, const char *label)
{
        struct cl_page *page = cl_page_top(opg->ops_cl.cpl_page);

        LASSERT(!opg->ops_transfer_pinned);
        cl_page_get(page);
        lu_ref_add_atomic(&page->cp_reference, label, page);
        opg->ops_transfer_pinned = 1;
}

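/**
 * Counterpart of osc_page_transfer_get(): drop the transfer reference if the
 * page is still pinned.
 */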
static void osc_page_transfer_put(const struct lu_env *env,
                                  struct osc_page *opg)
{
        struct cl_page *page = cl_page_top(opg->ops_cl.cpl_page);

        if (opg->ops_transfer_pinned) {
                lu_ref_del(&page->cp_reference, "transfer", page);
                opg->ops_transfer_pinned = 0;
                cl_page_put(env, page);
        }
}

/**
 * This is called once for every page when it is submitted for a transfer
 * either opportunistic (osc_page_cache_add()), or immediate
 * (osc_page_submit()).
 */
static void osc_page_transfer_add(const struct lu_env *env,
                                  struct osc_page *opg, enum cl_req_type crt)
{
        struct osc_object *obj = cl2osc(opg->ops_cl.cpl_obj);

        /* ops_lru and ops_inflight share the same field, so take it from LRU
         * first and then use it as inflight. */
        osc_lru_del(osc_cli(obj), opg, false);

        spin_lock(&obj->oo_seatbelt);
        cfs_list_add(&opg->ops_inflight, &obj->oo_inflight[crt]);
        opg->ops_submitter = cfs_current();
        spin_unlock(&obj->oo_seatbelt);
}

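/**
 * Queue the page for an opportunistic (cached) write: pin it for transfer
 * and hand it to the async IO engine; the pin is dropped again on failure.
 */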
static int osc_page_cache_add(const struct lu_env *env,
                              const struct cl_page_slice *slice,
                              struct cl_io *io)
{
        struct osc_io   *oio = osc_env_io(env);
        struct osc_page *opg = cl2osc_page(slice);
        int result;
        ENTRY;

        LINVRNT(osc_page_protected(env, opg, CLM_WRITE, 0));

        osc_page_transfer_get(opg, "transfer\0cache");
        result = osc_queue_async_io(env, io, opg);
        if (result != 0)
                osc_page_transfer_put(env, opg);
        else
                osc_page_transfer_add(env, opg, CRT_WRITE);

        /* for a sync write, the kernel will wait for this page to be flushed
         * before osc_io_end() is called, so release it earlier.
         * for mkwrite(), it's known that there are no further pages. */
        if (cl_io_is_sync_write(io) || cl_io_is_mkwrite(io)) {
                if (oio->oi_active != NULL) {
                        osc_extent_release(env, oio->oi_active);
                        oio->oi_active = NULL;
                }
        }

        RETURN(result);
}

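/**
 * Build an LDLM extent policy covering the byte range spanned by the page
 * indices [start, end] of @obj.
 */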
void osc_index2policy(ldlm_policy_data_t *policy, const struct cl_object *obj,
                      pgoff_t start, pgoff_t end)
{
        memset(policy, 0, sizeof *policy);
        policy->l_extent.start = cl_offset(obj, start);
        policy->l_extent.end   = cl_offset(obj, end + 1) - 1;
}

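/**
 * Pin @lock on behalf of @opg: bump the osc_lock page reference count and
 * remember the lock in opg->ops_lock. Returns -ENODATA if the lock can no
 * longer cover pages.
 */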
static int osc_page_addref_lock(const struct lu_env *env,
                                struct osc_page *opg,
                                struct cl_lock *lock)
{
        struct osc_lock *olock;
        int              rc;

        LASSERT(opg->ops_lock == NULL);

        olock = osc_lock_at(lock);
        if (cfs_atomic_inc_return(&olock->ols_pageref) <= 0) {
                cfs_atomic_dec(&olock->ols_pageref);
                rc = -ENODATA;
        } else {
                cl_lock_get(lock);
                opg->ops_lock = lock;
                rc = 0;
        }
        return rc;
}

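/**
 * Release the lock reference taken by osc_page_addref_lock().
 */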
static void osc_page_putref_lock(const struct lu_env *env,
                                 struct osc_page *opg)
{
        struct cl_lock  *lock = opg->ops_lock;
        struct osc_lock *olock;

        LASSERT(lock != NULL);
        olock = osc_lock_at(lock);

        cfs_atomic_dec(&olock->ols_pageref);
        opg->ops_lock = NULL;

        cl_lock_put(env, lock);
}

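/**
 * Check whether the page is covered by an existing DLM lock; returns -EBUSY
 * if so (keeping the lock pinned until the page is disowned), -ENODATA
 * otherwise.
 */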
static int osc_page_is_under_lock(const struct lu_env *env,
                                  const struct cl_page_slice *slice,
                                  struct cl_io *unused)
{
        struct cl_lock *lock;
        int             result = -ENODATA;

        ENTRY;
        lock = cl_lock_at_page(env, slice->cpl_obj, slice->cpl_page,
                               NULL, 1, 0);
        if (lock != NULL) {
                if (osc_page_addref_lock(env, cl2osc_page(slice), lock) == 0)
                        result = -EBUSY;
                cl_lock_put(env, lock);
        }
        RETURN(result);
}

static void osc_page_disown(const struct lu_env *env,
                            const struct cl_page_slice *slice,
                            struct cl_io *io)
{
        struct osc_page *opg = cl2osc_page(slice);

        if (unlikely(opg->ops_lock))
                osc_page_putref_lock(env, opg);
}

static void osc_page_completion_read(const struct lu_env *env,
                                     const struct cl_page_slice *slice,
                                     int ioret)
{
        struct osc_page   *opg = cl2osc_page(slice);
        struct osc_object *obj = cl2osc(opg->ops_cl.cpl_obj);

        if (likely(opg->ops_lock))
                osc_page_putref_lock(env, opg);
        osc_lru_add(osc_cli(obj), opg);
}

static void osc_page_completion_write(const struct lu_env *env,
                                      const struct cl_page_slice *slice,
                                      int ioret)
{
        struct osc_page   *opg = cl2osc_page(slice);
        struct osc_object *obj = cl2osc(slice->cpl_obj);

        osc_lru_add(osc_cli(obj), opg);
}

static int osc_page_fail(const struct lu_env *env,
                         const struct cl_page_slice *slice,
                         struct cl_io *unused)
{
        /*
         * Cached read?
         */
        LBUG();
        return 0;
}

static const char *osc_list(cfs_list_t *head)
{
        return cfs_list_empty(head) ? "-" : "+";
}

static inline cfs_time_t osc_submit_duration(struct osc_page *opg)
{
        if (opg->ops_submit_time == 0)
                return 0;

        return (cfs_time_current() - opg->ops_submit_time);
}

static int osc_page_print(const struct lu_env *env,
                          const struct cl_page_slice *slice,
                          void *cookie, lu_printer_t printer)
{
        struct osc_page       *opg = cl2osc_page(slice);
        struct osc_async_page *oap = &opg->ops_oap;
        struct osc_object     *obj = cl2osc(slice->cpl_obj);
        struct client_obd     *cli = &osc_export(obj)->exp_obd->u.cli;

        return (*printer)(env, cookie, LUSTRE_OSC_NAME"-page@%p: "
                          "1< %#x %d %u %s %s > "
                          "2< "LPU64" %u %u %#x %#x | %p %p %p > "
                          "3< %s %p %d %lu %d > "
                          "4< %d %d %d %lu %s | %s %s %s %s > "
                          "5< %s %s %s %s | %d %s | %d %s %s>\n",
                          opg,
                          /* 1 */
                          oap->oap_magic, oap->oap_cmd,
                          oap->oap_interrupted,
                          osc_list(&oap->oap_pending_item),
                          osc_list(&oap->oap_rpc_item),
                          /* 2 */
                          oap->oap_obj_off, oap->oap_page_off, oap->oap_count,
                          oap->oap_async_flags, oap->oap_brw_flags,
                          oap->oap_request, oap->oap_cli, obj,
                          /* 3 */
                          osc_list(&opg->ops_inflight),
                          opg->ops_submitter, opg->ops_transfer_pinned,
                          osc_submit_duration(opg), opg->ops_srvlock,
                          /* 4 */
                          cli->cl_r_in_flight, cli->cl_w_in_flight,
                          cli->cl_max_rpcs_in_flight,
                          cli->cl_avail_grant,
                          osc_list(&cli->cl_cache_waiters),
                          osc_list(&cli->cl_loi_ready_list),
                          osc_list(&cli->cl_loi_hp_ready_list),
                          osc_list(&cli->cl_loi_write_list),
                          osc_list(&cli->cl_loi_read_list),
                          /* 5 */
                          osc_list(&obj->oo_ready_item),
                          osc_list(&obj->oo_hp_ready_item),
                          osc_list(&obj->oo_write_item),
                          osc_list(&obj->oo_read_item),
                          cfs_atomic_read(&obj->oo_nr_reads),
                          osc_list(&obj->oo_reading_exts),
                          cfs_atomic_read(&obj->oo_nr_writes),
                          osc_list(&obj->oo_hp_exts),
                          osc_list(&obj->oo_urgent_exts));
}

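/**
 * Remove the page from the OSC layer when it is being destroyed: tear down
 * the async page, take it off the in-flight list, and drop it from the LRU.
 */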
static void osc_page_delete(const struct lu_env *env,
                            const struct cl_page_slice *slice)
{
        struct osc_page   *opg = cl2osc_page(slice);
        struct osc_object *obj = cl2osc(opg->ops_cl.cpl_obj);
        int rc;

        LINVRNT(opg->ops_temp || osc_page_protected(env, opg, CLM_READ, 1));

        ENTRY;
        CDEBUG(D_TRACE, "%p\n", opg);
        osc_page_transfer_put(env, opg);
        rc = osc_teardown_async_page(env, obj, opg);
        if (rc) {
                CL_PAGE_DEBUG(D_ERROR, env, cl_page_top(slice->cpl_page),
                              "Trying to teardown failed: %d\n", rc);
                LASSERT(0);
        }

        spin_lock(&obj->oo_seatbelt);
        if (opg->ops_submitter != NULL) {
                LASSERT(!cfs_list_empty(&opg->ops_inflight));
                cfs_list_del_init(&opg->ops_inflight);
                opg->ops_submitter = NULL;
        }
        spin_unlock(&obj->oo_seatbelt);

        osc_lru_del(osc_cli(obj), opg, true);
        EXIT;
}

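/**
 * Restrict the transfer to the byte range [from, to) within the page and
 * mark the transfer count as stable (ASYNC_COUNT_STABLE).
 */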
void osc_page_clip(const struct lu_env *env, const struct cl_page_slice *slice,
                   int from, int to)
{
        struct osc_page       *opg = cl2osc_page(slice);
        struct osc_async_page *oap = &opg->ops_oap;

        LINVRNT(osc_page_protected(env, opg, CLM_READ, 0));

        opg->ops_from = from;
        opg->ops_to   = to;
        spin_lock(&oap->oap_lock);
        oap->oap_async_flags |= ASYNC_COUNT_STABLE;
        spin_unlock(&oap->oap_lock);
}

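/**
 * Cancel a page that is still pinned for transfer; on success the transfer
 * pin must have been dropped.
 */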
static int osc_page_cancel(const struct lu_env *env,
                           const struct cl_page_slice *slice)
{
        struct osc_page *opg = cl2osc_page(slice);
        int rc = 0;

        LINVRNT(osc_page_protected(env, opg, CLM_READ, 0));

        /* Check if the transfer of this page has completed,
         * or was never queued in the first place. */
        if (opg->ops_transfer_pinned)
                /* FIXME: may not be interrupted.. */
                rc = osc_cancel_async_page(env, opg);
        LASSERT(ergo(rc == 0, opg->ops_transfer_pinned == 0));
        return rc;
}

static int osc_page_flush(const struct lu_env *env,
                          const struct cl_page_slice *slice,
                          struct cl_io *io)
{
        struct osc_page *opg = cl2osc_page(slice);
        int rc = 0;
        ENTRY;
        rc = osc_flush_async_page(env, io, opg);
        RETURN(rc);
}

static const struct cl_page_operations osc_page_ops = {
        .cpo_fini          = osc_page_fini,
        .cpo_print         = osc_page_print,
        .cpo_delete        = osc_page_delete,
        .cpo_is_under_lock = osc_page_is_under_lock,
        .cpo_disown        = osc_page_disown,
        .io = {
                [CRT_READ] = {
                        .cpo_cache_add  = osc_page_fail,
                        .cpo_completion = osc_page_completion_read
                },
                [CRT_WRITE] = {
                        .cpo_cache_add  = osc_page_cache_add,
                        .cpo_completion = osc_page_completion_write
                }
        },
        .cpo_clip           = osc_page_clip,
        .cpo_cancel         = osc_page_cancel,
        .cpo_flush          = osc_page_flush
};

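/**
 * Allocate and initialize the OSC slice of @page: prepare the async page and
 * reserve an LRU slot for cacheable pages.
 */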
struct cl_page *osc_page_init(const struct lu_env *env,
                              struct cl_object *obj,
                              struct cl_page *page, cfs_page_t *vmpage)
{
        struct osc_object *osc = cl2osc(obj);
        struct osc_page   *opg;
        int result;

        OBD_SLAB_ALLOC_PTR_GFP(opg, osc_page_kmem, CFS_ALLOC_IO);
        if (opg != NULL) {
                opg->ops_from = 0;
                opg->ops_to   = CFS_PAGE_SIZE;

                result = osc_prep_async_page(osc, opg, vmpage,
                                             cl_offset(obj, page->cp_index));
                if (result == 0) {
                        struct osc_io *oio = osc_env_io(env);
                        opg->ops_srvlock = osc_io_srvlock(oio);
                        cl_page_slice_add(page, &opg->ops_cl, obj,
                                          &osc_page_ops);
                }
                /*
                 * Cannot assert osc_page_protected() here as read-ahead
                 * creates temporary pages outside of a lock.
                 */
#ifdef INVARIANT_CHECK
                opg->ops_temp = !osc_page_protected(env, opg, CLM_READ, 1);
#endif
                /* ops_inflight and ops_lru are the same field, but it doesn't
                 * hurt to initialize it twice :-) */
                CFS_INIT_LIST_HEAD(&opg->ops_inflight);
                CFS_INIT_LIST_HEAD(&opg->ops_lru);
        } else
                result = -ENOMEM;

        /* reserve an LRU space for this page */
        if (page->cp_type == CPT_CACHEABLE && result == 0)
                result = osc_lru_reserve(env, osc, opg);

        return ERR_PTR(result);
}

/**
 * Helper function called by osc_io_submit() for every page in an immediate
 * transfer (i.e., transferred synchronously).
 */
void osc_page_submit(const struct lu_env *env, struct osc_page *opg,
                     enum cl_req_type crt, int brw_flags)
{
        struct osc_async_page *oap = &opg->ops_oap;
        struct osc_object     *obj = oap->oap_obj;

        LINVRNT(osc_page_protected(env, opg,
                                   crt == CRT_WRITE ? CLM_WRITE : CLM_READ, 1));

        LASSERTF(oap->oap_magic == OAP_MAGIC, "Bad oap magic: oap %p, "
                 "magic 0x%x\n", oap, oap->oap_magic);
        LASSERT(oap->oap_async_flags & ASYNC_READY);
        LASSERT(oap->oap_async_flags & ASYNC_COUNT_STABLE);

        oap->oap_cmd       = crt == CRT_WRITE ? OBD_BRW_WRITE : OBD_BRW_READ;
        oap->oap_page_off  = opg->ops_from;
        oap->oap_count     = opg->ops_to - opg->ops_from;
        oap->oap_brw_flags = OBD_BRW_SYNC | brw_flags;

        if (!client_is_remote(osc_export(obj)) &&
            cfs_capable(CFS_CAP_SYS_RESOURCE)) {
                oap->oap_brw_flags |= OBD_BRW_NOQUOTA;
                oap->oap_cmd |= OBD_BRW_NOQUOTA;
        }

        opg->ops_submit_time = cfs_time_current();
        osc_page_transfer_get(opg, "transfer\0imm");
        osc_page_transfer_add(env, opg, crt);
}

/* --------------- LRU page management ------------------ */

/* The OSC is a natural place to manage LRU pages, as applications write
 * data OSC by OSC. Ideally, an OSC that is used more frequently should
 * occupy more LRU slots. On the other hand, we should avoid using up all
 * LRU slots (client_obd::cl_lru_left), otherwise processes would have to
 * sleep waiting for free slots - this would be very bad, so the algorithm
 * requires each OSC to free slots voluntarily in order to maintain a
 * reasonable number of free slots at any time.
 */

static CFS_DECL_WAITQ(osc_lru_waitq);
static cfs_atomic_t osc_lru_waiters = CFS_ATOMIC_INIT(0);
/* LRU pages are freed in batch mode. An OSC should free at least this
 * number of pages to avoid running out of LRU budget, and... */
static const int lru_shrink_min = 2 << (20 - CFS_PAGE_SHIFT);  /* 2M */
/* ...at most this number, otherwise it would take too long to finish. */
static const int lru_shrink_max = 32 << (20 - CFS_PAGE_SHIFT); /* 32M */

/* Check if we can free LRU slots from this OSC. If there are LRU waiters,
 * we should free slots aggressively. In this way, slots are freed at a
 * steady pace to maintain fairness among OSCs.
 *
 * Return how many LRU pages should be freed. */
static int osc_cache_too_much(struct client_obd *cli)
{
        struct cl_client_cache *cache = cli->cl_cache;
        int pages = cfs_atomic_read(&cli->cl_lru_in_list) >> 1;

        if (cfs_atomic_read(&osc_lru_waiters) > 0 &&
            cfs_atomic_read(cli->cl_lru_left) < lru_shrink_max)
                /* drop lru pages aggressively */
                return min(pages, lru_shrink_max);

        /* if it's about to run out of LRU slots, we should free some, but
         * not too many, to maintain fairness among OSCs. */
        if (cfs_atomic_read(cli->cl_lru_left) < cache->ccc_lru_max >> 4) {
                unsigned long tmp;

                tmp = cache->ccc_lru_max / cfs_atomic_read(&cache->ccc_users);
                if (pages > tmp)
                        return min(pages, lru_shrink_max);

                return pages > lru_shrink_min ? lru_shrink_min : 0;
        }

        return 0;
}

/* Return how many pages are not discarded in @pvec. */
static int discard_pagevec(const struct lu_env *env, struct cl_io *io,
                           struct cl_page **pvec, int max_index)
{
        int count;
        int i;

        for (count = 0, i = 0; i < max_index; i++) {
                struct cl_page *page = pvec[i];
                if (cl_page_own_try(env, io, page) == 0) {
                        /* free LRU page only if nobody is using it.
                         * This check is necessary to avoid freeing the pages
                         * having already been removed from LRU and pinned
                         * for IO. */
                        if (!cl_page_in_use(page)) {
                                cl_page_unmap(env, io, page);
                                cl_page_discard(env, io, page);
                                ++count;
                        }
                        cl_page_disown(env, io, page);
                }
                cl_page_put(env, page);
                pvec[i] = NULL;
        }
        return max_index - count;
}

/**
 * Drop at most @target pages from the LRU.
 */
int osc_lru_shrink(struct client_obd *cli, int target)
{
        struct cl_env_nest nest;
        struct lu_env *env;
        struct cl_io *io;
        struct cl_object *clobj = NULL;
        struct cl_page **pvec;
        struct osc_page *opg;
        int maxscan = 0;
        int count = 0;
        int index = 0;
        int rc = 0;
        ENTRY;

        LASSERT(cfs_atomic_read(&cli->cl_lru_in_list) >= 0);
        if (cfs_atomic_read(&cli->cl_lru_in_list) == 0 || target <= 0)
                RETURN(0);

        env = cl_env_nested_get(&nest);
        if (IS_ERR(env))
                RETURN(PTR_ERR(env));

        pvec = osc_env_info(env)->oti_pvec;
        io = &osc_env_info(env)->oti_io;

        client_obd_list_lock(&cli->cl_lru_list_lock);
        cfs_atomic_inc(&cli->cl_lru_shrinkers);
        maxscan = min(target << 1, cfs_atomic_read(&cli->cl_lru_in_list));
        while (!cfs_list_empty(&cli->cl_lru_list)) {
                struct cl_page *page;

                if (--maxscan < 0)
                        break;

                opg = cfs_list_entry(cli->cl_lru_list.next, struct osc_page,
                                     ops_lru);
                page = cl_page_top(opg->ops_cl.cpl_page);
                if (cl_page_in_use_noref(page)) {
                        cfs_list_move_tail(&opg->ops_lru, &cli->cl_lru_list);
                        continue;
                }

                LASSERT(page->cp_obj != NULL);
                if (clobj != page->cp_obj) {
                        struct cl_object *tmp = page->cp_obj;

                        cl_object_get(tmp);
                        client_obd_list_unlock(&cli->cl_lru_list_lock);

                        if (clobj != NULL) {
                                count -= discard_pagevec(env, io, pvec, index);
                                index = 0;

                                cl_io_fini(env, io);
                                cl_object_put(env, clobj);
                                clobj = NULL;
                        }

                        clobj = tmp;
                        io->ci_obj = clobj;
                        rc = cl_io_init(env, io, CIT_MISC, clobj);
                        if (rc != 0)
                                break;

                        ++maxscan;
                        client_obd_list_lock(&cli->cl_lru_list_lock);
                        continue;
                }

                /* move this page to the end of list as it will be discarded
                 * soon. The page will be finally removed from LRU list in
                 * osc_page_delete().  */
                cfs_list_move_tail(&opg->ops_lru, &cli->cl_lru_list);

                /* it's okay to grab a refcount here w/o holding lock because
                 * it has to grab cl_lru_list_lock to delete the page. */
                cl_page_get(page);
                pvec[index++] = page;
                if (++count >= target)
                        break;

                if (unlikely(index == OTI_PVEC_SIZE)) {
                        client_obd_list_unlock(&cli->cl_lru_list_lock);
                        count -= discard_pagevec(env, io, pvec, index);
                        index = 0;

                        client_obd_list_lock(&cli->cl_lru_list_lock);
                }
        }
        client_obd_list_unlock(&cli->cl_lru_list_lock);

        if (clobj != NULL) {
                count -= discard_pagevec(env, io, pvec, index);

                cl_io_fini(env, io);
                cl_object_put(env, clobj);
        }
        cl_env_nested_put(&nest, env);

        cfs_atomic_dec(&cli->cl_lru_shrinkers);
        RETURN(count > 0 ? count : rc);
}

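/**
 * Put the page back onto the LRU list when its transfer completes, waking up
 * LRU waiters if the list has grown.
 */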
static void osc_lru_add(struct client_obd *cli, struct osc_page *opg)
{
        bool wakeup = false;

        if (!opg->ops_in_lru)
                return;

        cfs_atomic_dec(&cli->cl_lru_busy);
        client_obd_list_lock(&cli->cl_lru_list_lock);
        if (cfs_list_empty(&opg->ops_lru)) {
                cfs_list_move_tail(&opg->ops_lru, &cli->cl_lru_list);
                cfs_atomic_inc_return(&cli->cl_lru_in_list);
                wakeup = cfs_atomic_read(&osc_lru_waiters) > 0;
        }
        client_obd_list_unlock(&cli->cl_lru_list_lock);

        if (wakeup)
                cfs_waitq_broadcast(&osc_lru_waitq);
}

/* delete the page from the LRU list. A page can be deleted from the LRU
 * list for two reasons: it has been redirtied, or it has been removed from
 * the page cache. */
static void osc_lru_del(struct client_obd *cli, struct osc_page *opg, bool del)
{
        if (opg->ops_in_lru) {
                client_obd_list_lock(&cli->cl_lru_list_lock);
                if (!cfs_list_empty(&opg->ops_lru)) {
                        LASSERT(cfs_atomic_read(&cli->cl_lru_in_list) > 0);
                        cfs_list_del_init(&opg->ops_lru);
                        cfs_atomic_dec(&cli->cl_lru_in_list);
                        if (!del)
                                cfs_atomic_inc(&cli->cl_lru_busy);
                } else if (del) {
                        LASSERT(cfs_atomic_read(&cli->cl_lru_busy) > 0);
                        cfs_atomic_dec(&cli->cl_lru_busy);
                }
                client_obd_list_unlock(&cli->cl_lru_list_lock);
                if (del) {
                        cfs_atomic_inc(cli->cl_lru_left);
                        /* this is a great place to release more LRU pages if
                         * this osc occupies too many LRU pages and kernel is
                         * stealing one of them.
                         * cl_lru_shrinkers is to avoid recursive call in case
                         * we're already in the context of osc_lru_shrink(). */
                        if (cfs_atomic_read(&cli->cl_lru_shrinkers) == 0)
                                osc_lru_shrink(cli, osc_cache_too_much(cli));
                        cfs_waitq_signal(&osc_lru_waitq);
                }
        } else {
                LASSERT(cfs_list_empty(&opg->ops_lru));
        }
}

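/**
 * Reclaim LRU slots, first by shrinking this client's own LRU list and, if
 * that frees nothing, by shrinking the LRU of another client_obd sharing the
 * same cl_client_cache.
 */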
static int osc_lru_reclaim(struct client_obd *cli)
{
        struct cl_client_cache *cache = cli->cl_cache;
        struct client_obd *victim;
        struct client_obd *tmp;
        int rc;

        LASSERT(cache != NULL);
        LASSERT(!cfs_list_empty(&cache->ccc_lru));

        rc = osc_lru_shrink(cli, lru_shrink_min);
        if (rc > 0) {
                CDEBUG(D_CACHE, "%s: Free %d pages from own LRU: %p.\n",
                        cli->cl_import->imp_obd->obd_name, rc, cli);
                return rc;
        }

        CDEBUG(D_CACHE, "%s: cli %p no free slots, pages: %d, busy: %d.\n",
                cli->cl_import->imp_obd->obd_name, cli,
                cfs_atomic_read(&cli->cl_lru_in_list),
                cfs_atomic_read(&cli->cl_lru_busy));

        /* Reclaim LRU slots from other client_obd as it can't free enough
         * from its own. This should rarely happen. */
        spin_lock(&cache->ccc_lru_lock);
        cache->ccc_lru_shrinkers++;
        cfs_list_move_tail(&cli->cl_lru_osc, &cache->ccc_lru);
        cfs_list_for_each_entry_safe(victim, tmp, &cache->ccc_lru, cl_lru_osc) {
                if (victim == cli)
                        break;

                CDEBUG(D_CACHE, "%s: cli %p LRU pages: %d, busy: %d.\n",
                        victim->cl_import->imp_obd->obd_name, victim,
                        cfs_atomic_read(&victim->cl_lru_in_list),
                        cfs_atomic_read(&victim->cl_lru_busy));

                cfs_list_move_tail(&victim->cl_lru_osc, &cache->ccc_lru);
                if (cfs_atomic_read(&victim->cl_lru_in_list) > 0)
                        break;
        }
        spin_unlock(&cache->ccc_lru_lock);
        if (victim == cli) {
                CDEBUG(D_CACHE, "%s: can't get any free LRU slots.\n",
                        cli->cl_import->imp_obd->obd_name);
                return 0;
        }

        rc = osc_lru_shrink(victim,
                            min(cfs_atomic_read(&victim->cl_lru_in_list) >> 1,
                                lru_shrink_max));

        CDEBUG(D_CACHE, "%s: Free %d pages from other cli: %p.\n",
                cli->cl_import->imp_obd->obd_name, rc, victim);

        return rc;
}

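/**
 * Reserve one LRU slot for the page, reclaiming or waiting for a free slot
 * if the budget (cli->cl_lru_left) is exhausted.
 */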
static int osc_lru_reserve(const struct lu_env *env, struct osc_object *obj,
                           struct osc_page *opg)
{
        struct l_wait_info lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP, NULL);
        struct client_obd *cli = osc_cli(obj);
        int rc = 0;
        ENTRY;

        if (cli->cl_cache == NULL) /* shall not be in LRU */
                RETURN(0);

        LASSERT(cfs_atomic_read(cli->cl_lru_left) >= 0);
        while (!cfs_atomic_add_unless(cli->cl_lru_left, -1, 0)) {
                int gen;

                /* run out of LRU slots, try to free some ourselves */
                rc = osc_lru_reclaim(cli);
                if (rc < 0)
                        break;
                if (rc > 0)
                        continue;

                cfs_cond_resched();

                /* slowest case: all cached pages are busy, so notify
                 * other OSCs that we are short of LRU slots. */
                cfs_atomic_inc(&osc_lru_waiters);

                gen = cfs_atomic_read(&cli->cl_lru_in_list);
                rc = l_wait_event(osc_lru_waitq,
                                cfs_atomic_read(cli->cl_lru_left) > 0 ||
                                (cfs_atomic_read(&cli->cl_lru_in_list) > 0 &&
                                 gen != cfs_atomic_read(&cli->cl_lru_in_list)),
                                &lwi);

                cfs_atomic_dec(&osc_lru_waiters);
                if (rc < 0)
                        break;
        }

        if (rc >= 0) {
                cfs_atomic_inc(&cli->cl_lru_busy);
                opg->ops_in_lru = 1;
                rc = 0;
        }

        RETURN(rc);
}

/** @} osc */