/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * Implementation of cl_page for OSC layer.
 *
 *   Author: Nikita Danilov <nikita.danilov@sun.com>
 */

/** \addtogroup osc osc @{ */

#define DEBUG_SUBSYSTEM S_OSC

#include "osc_cl_internal.h"

/*
 * osc_page_protected() is compiled out below because it may sleep while
 * client_obd_list_lock is held:
 * client_obd_list_lock -> osc_ap_completion -> osc_completion ->
 *   -> osc_page_protected -> osc_page_is_dlocked -> osc_match_base
 *   -> ldlm_lock_match -> sptlrpc_import_check_ctx -> sleep.
 */
#if 0
static int osc_page_is_dlocked(const struct lu_env *env,
                               const struct osc_page *opg,
                               enum cl_lock_mode mode, int pending, int unref)
{
        struct cl_page         *page;
        struct osc_object      *obj;
        struct osc_thread_info *info;
        struct ldlm_res_id     *resname;
        struct lustre_handle   *lockh;
        ldlm_policy_data_t     *policy;
        ldlm_mode_t             dlmmode;
        int                     flags;

        might_sleep();

        info = osc_env_info(env);
        resname = &info->oti_resname;
        policy = &info->oti_policy;
        lockh = &info->oti_handle;
        page = opg->ops_cl.cpl_page;
        obj = cl2osc(opg->ops_cl.cpl_obj);

        flags = LDLM_FL_TEST_LOCK | LDLM_FL_BLOCK_GRANTED;
        if (pending)
                flags |= LDLM_FL_CBPENDING;

        dlmmode = osc_cl_lock2ldlm(mode) | LCK_PW;
        osc_lock_build_res(env, obj, resname);
        osc_index2policy(policy, page->cp_obj, page->cp_index, page->cp_index);
        return osc_match_base(osc_export(obj), resname, LDLM_EXTENT, policy,
                              dlmmode, &flags, NULL, lockh, unref);
}

/**
 * Checks an invariant that a page in the cache is covered by a lock, as
 * needed.
 */
static int osc_page_protected(const struct lu_env *env,
                              const struct osc_page *opg,
                              enum cl_lock_mode mode, int unref)
{
        struct cl_object_header *hdr;
        struct cl_lock          *scan;
        struct cl_page          *page;
        struct cl_lock_descr    *descr;
        int result;

        LINVRNT(!opg->ops_temp);

        page = opg->ops_cl.cpl_page;
        if (page->cp_owner != NULL &&
            cl_io_top(page->cp_owner)->ci_lockreq == CILR_NEVER)
                /*
                 * If IO is done without locks (liblustre, or lloop), lock is
                 * not required.
                 */
                result = 1;
        else
                /* otherwise check for a DLM lock */
                result = osc_page_is_dlocked(env, opg, mode, 1, unref);
        if (result == 0) {
                /* maybe this page is a part of a lockless io? */
                hdr = cl_object_header(opg->ops_cl.cpl_obj);
                descr = &osc_env_info(env)->oti_descr;
                descr->cld_mode = mode;
                descr->cld_start = page->cp_index;
                descr->cld_end   = page->cp_index;
                spin_lock(&hdr->coh_lock_guard);
                list_for_each_entry(scan, &hdr->coh_locks, cll_linkage) {
                        /*
                         * Lock-less sub-lock has to be either in HELD state
                         * (when io is actively going on), or in CACHED state,
                         * when top-lock is being unlocked:
                         * cl_io_unlock()->cl_unuse()->...->lov_lock_unuse().
                         */
                        if ((scan->cll_state == CLS_HELD ||
                             scan->cll_state == CLS_CACHED) &&
                            cl_lock_ext_match(&scan->cll_descr, descr)) {
                                struct osc_lock *olck;

                                olck = osc_lock_at(scan);
                                result = osc_lock_is_lockless(olck);
                                break;
                        }
                }
                spin_unlock(&hdr->coh_lock_guard);
        }
        return result;
}
#else
static int osc_page_protected(const struct lu_env *env,
                              const struct osc_page *opg,
                              enum cl_lock_mode mode, int unref)
{
        return 1;
}
#endif

/*****************************************************************************
 *
 * Page operations.
 *
 */
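/**
 * Finalizes the osc slice of \a slice's page, releasing the osc_page back
 * to the osc_page_kmem slab it was allocated from in osc_page_init().
 */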
static void osc_page_fini(const struct lu_env *env,
                          struct cl_page_slice *slice)
{
        struct osc_page *opg = cl2osc_page(slice);

        CDEBUG(D_TRACE, "%p\n", opg);
        OBD_SLAB_FREE_PTR(opg, osc_page_kmem);
}

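/**
 * Takes a transfer reference on the top page under \a label and marks the
 * slice as pinned. The reference is released by osc_page_transfer_put()
 * when the transfer completes or fails to start.
 */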
static void osc_page_transfer_get(struct osc_page *opg, const char *label)
{
        struct cl_page *page = cl_page_top(opg->ops_cl.cpl_page);

        LASSERT(!opg->ops_transfer_pinned);
        cl_page_get(page);
        lu_ref_add_atomic(&page->cp_reference, label, page);
        opg->ops_transfer_pinned = 1;
}

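/**
 * Releases the transfer reference taken by osc_page_transfer_get(), if it
 * is still held. Safe to call on a page that was never pinned.
 */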
static void osc_page_transfer_put(const struct lu_env *env,
                                  struct osc_page *opg)
{
        struct cl_page *page = cl_page_top(opg->ops_cl.cpl_page);

        if (opg->ops_transfer_pinned) {
                lu_ref_del(&page->cp_reference, "transfer", page);
                opg->ops_transfer_pinned = 0;
                cl_page_put(env, page);
        }
}

/**
 * This is called once for every page when it is submitted for a transfer,
 * either opportunistically (osc_page_cache_add()) or immediately
 * (osc_page_submit()).
 */
static void osc_page_transfer_add(const struct lu_env *env,
                                  struct osc_page *opg, enum cl_req_type crt)
{
        struct osc_object *obj;

        LINVRNT(cl_page_is_vmlocked(env, opg->ops_cl.cpl_page));

        obj = cl2osc(opg->ops_cl.cpl_obj);
        spin_lock(&obj->oo_seatbelt);
        list_add(&opg->ops_inflight, &obj->oo_inflight[crt]);
        opg->ops_submitter = cfs_current();
        spin_unlock(&obj->oo_seatbelt);
}

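/**
 * Queues a dirty page for an opportunistic (write-back) transfer via
 * osc_queue_async_io(). Quota enforcement is skipped (OBD_BRW_NOQUOTA)
 * for local clients with CFS_CAP_SYS_RESOURCE.
 */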
static int osc_page_cache_add(const struct lu_env *env,
                              const struct cl_page_slice *slice,
                              struct cl_io *unused)
{
        struct osc_page   *opg = cl2osc_page(slice);
        struct osc_object *obj = cl2osc(opg->ops_cl.cpl_obj);
        struct osc_io     *oio = osc_env_io(env);
        int result;
        int brw_flags;
        int noquota = 0;

        LINVRNT(osc_page_protected(env, opg, CLM_WRITE, 0));
        ENTRY;

        /* Set the OBD_BRW_SRVLOCK before the page is queued. */
        brw_flags = osc_io_srvlock(oio) ? OBD_BRW_SRVLOCK : 0;
        if (!client_is_remote(osc_export(obj)) &&
            cfs_capable(CFS_CAP_SYS_RESOURCE)) {
                brw_flags |= OBD_BRW_NOQUOTA;
                noquota = OBD_BRW_NOQUOTA;
        }

        osc_page_transfer_get(opg, "transfer\0cache");
        result = osc_queue_async_io(env, osc_export(obj), NULL, obj->oo_oinfo,
                                    &opg->ops_oap, OBD_BRW_WRITE | noquota,
                                    0, 0, brw_flags, 0);
        if (result != 0)
                osc_page_transfer_put(env, opg);
        else
                osc_page_transfer_add(env, opg, CRT_WRITE);
        RETURN(result);
}

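/**
 * Converts the page index range [\a start, \a end] of \a obj into a
 * byte-granular ldlm extent policy covering the same pages.
 */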
void osc_index2policy(ldlm_policy_data_t *policy, const struct cl_object *obj,
                      pgoff_t start, pgoff_t end)
{
        memset(policy, 0, sizeof *policy);
        policy->l_extent.start = cl_offset(obj, start);
        policy->l_extent.end   = cl_offset(obj, end + 1) - 1;
}

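/**
 * Checks whether a cl_lock covering this page exists: returns -EBUSY if
 * one is found (and drops the reference cl_lock_at_page() took on it),
 * -ENODATA otherwise.
 */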
static int osc_page_is_under_lock(const struct lu_env *env,
                                  const struct cl_page_slice *slice,
                                  struct cl_io *unused)
{
        struct cl_lock *lock;
        int             result;

        ENTRY;
        lock = cl_lock_at_page(env, slice->cpl_obj, slice->cpl_page,
                               NULL, 1, 0);
        if (lock != NULL) {
                cl_lock_put(env, lock);
                result = -EBUSY;
        } else
                result = -ENODATA;
        RETURN(result);
}

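/**
 * CRT_READ cpo_cache_add method (see osc_page_ops below). Read pages are
 * never queued for opportunistic transfer, so reaching this function
 * indicates a bug.
 */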
static int osc_page_fail(const struct lu_env *env,
                         const struct cl_page_slice *slice,
                         struct cl_io *unused)
{
        /*
         * Cached read?
         */
        LBUG();
        return 0;
}

static const char *osc_list(struct list_head *head)
{
        return list_empty(head) ? "-" : "+";
}

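/**
 * Returns how long the page has been in flight, or 0 if it has not been
 * submitted yet.
 */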
static inline cfs_time_t osc_submit_duration(struct osc_page *opg)
{
        if (opg->ops_submit_time == 0)
                return 0;

        return (cfs_time_current() - opg->ops_submit_time);
}

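/**
 * Prints the state of the osc_page slice and its embedded osc_async_page
 * for debugging.
 */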
static int osc_page_print(const struct lu_env *env,
                          const struct cl_page_slice *slice,
                          void *cookie, lu_printer_t printer)
{
        struct osc_page       *opg = cl2osc_page(slice);
        struct osc_async_page *oap = &opg->ops_oap;

        return (*printer)(env, cookie, LUSTRE_OSC_NAME"-page@%p: "
                          "< %#x %d %u %s %s %s >"
                          "< %llu %u %#x %#x %p %p %p %p %p >"
                          "< %s %p %d %lu >\n",
                          opg,
                          /* 1 */
                          oap->oap_magic, oap->oap_cmd,
                          oap->oap_interrupted,
                          osc_list(&oap->oap_pending_item),
                          osc_list(&oap->oap_urgent_item),
                          osc_list(&oap->oap_rpc_item),
                          /* 2 */
                          oap->oap_obj_off, oap->oap_page_off,
                          oap->oap_async_flags, oap->oap_brw_flags,
                          oap->oap_request,
                          oap->oap_cli, oap->oap_loi, oap->oap_caller_ops,
                          oap->oap_caller_data,
                          /* 3 */
                          osc_list(&opg->ops_inflight),
                          opg->ops_submitter, opg->ops_transfer_pinned,
                          osc_submit_duration(opg));
}

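/**
 * Implements cl_page_operations::cpo_delete(): drops the transfer
 * reference, tears down the async page and removes the page from the
 * object's in-flight list.
 */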
static void osc_page_delete(const struct lu_env *env,
                            const struct cl_page_slice *slice)
{
        struct osc_page       *opg = cl2osc_page(slice);
        struct osc_object     *obj = cl2osc(opg->ops_cl.cpl_obj);
        struct osc_async_page *oap = &opg->ops_oap;
        int rc;

        LINVRNT(opg->ops_temp || osc_page_protected(env, opg, CLM_READ, 1));

        ENTRY;
        CDEBUG(D_TRACE, "%p\n", opg);
        osc_page_transfer_put(env, opg);
        rc = osc_teardown_async_page(osc_export(obj), NULL, obj->oo_oinfo, oap);
        if (rc) {
                CL_PAGE_DEBUG(D_ERROR, env, cl_page_top(slice->cpl_page),
                              "Failed to tear down the page: %d\n", rc);
                LASSERT(0);
        }
        spin_lock(&obj->oo_seatbelt);
        list_del_init(&opg->ops_inflight);
        spin_unlock(&obj->oo_seatbelt);
        EXIT;
}

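/**
 * Narrows the range of page contents to be transferred to
 * [\a from, \a to] and marks the resulting byte count as stable.
 */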
void osc_page_clip(const struct lu_env *env, const struct cl_page_slice *slice,
                   int from, int to)
{
        struct osc_page       *opg = cl2osc_page(slice);
        struct osc_async_page *oap = &opg->ops_oap;

        LINVRNT(osc_page_protected(env, opg, CLM_READ, 0));

        opg->ops_from = from;
        opg->ops_to   = to;
        spin_lock(&oap->oap_lock);
        oap->oap_async_flags |= ASYNC_COUNT_STABLE;
        spin_unlock(&oap->oap_lock);
}

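/**
 * Attempts to cancel a transfer that is still pinned for this page by
 * interrupting its osc_async_page under cl_loi_list_lock; a transfer that
 * has already completed (or was never queued) needs no cancellation.
 */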
static int osc_page_cancel(const struct lu_env *env,
                           const struct cl_page_slice *slice)
{
        struct osc_page       *opg = cl2osc_page(slice);
        struct osc_async_page *oap = &opg->ops_oap;
        int rc = 0;

        LINVRNT(osc_page_protected(env, opg, CLM_READ, 0));

        client_obd_list_lock(&oap->oap_cli->cl_loi_list_lock);
        /* Check whether the transfer of this page has completed, or was
         * never queued in the first place. */
        if (opg->ops_transfer_pinned)
                /* FIXME: may not be interrupted.. */
                rc = osc_oap_interrupted(env, oap);
        LASSERT(ergo(rc == 0, opg->ops_transfer_pinned == 0));
        client_obd_list_unlock(&oap->oap_cli->cl_loi_list_lock);
        return rc;
}

static const struct cl_page_operations osc_page_ops = {
        .cpo_fini          = osc_page_fini,
        .cpo_print         = osc_page_print,
        .cpo_delete        = osc_page_delete,
        .cpo_is_under_lock = osc_page_is_under_lock,
        .io = {
                [CRT_READ] = {
                        .cpo_cache_add = osc_page_fail
                },
                [CRT_WRITE] = {
                        .cpo_cache_add = osc_page_cache_add
                }
        },
        .cpo_clip          = osc_page_clip,
        .cpo_cancel        = osc_page_cancel
};

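/**
 * obd_async_page_ops::ap_make_ready() method: prepares a dirty page for
 * writeback through cl_page_make_ready() and records the submission time.
 */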
static int osc_make_ready(const struct lu_env *env, void *data, int cmd)
{
        struct osc_page *opg  = data;
        struct cl_page  *page = cl_page_top(opg->ops_cl.cpl_page);
        int result;

        LASSERT(cmd == OBD_BRW_WRITE); /* no cached reads */
        LINVRNT(osc_page_protected(env, opg, CLM_WRITE, 1));

        ENTRY;
        result = cl_page_make_ready(env, page, CRT_WRITE);
        if (result == 0)
                opg->ops_submit_time = cfs_time_current();
        RETURN(result);
}

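/**
 * obd_async_page_ops::ap_refresh_count() method: returns the number of
 * bytes of the page to write out, clipped against the object's known
 * minimum size (kms) so that data beyond end of file is not transferred.
 */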
static int osc_refresh_count(const struct lu_env *env, void *data, int cmd)
{
        struct cl_page   *page;
        struct osc_page  *osc = data;
        struct cl_object *obj;
        struct cl_attr   *attr = &osc_env_info(env)->oti_attr;
        int result;
        loff_t kms;

        LINVRNT(osc_page_protected(env, osc, CLM_READ, 1));

        /* readpage queues with ASYNC_COUNT_STABLE, so reads should never
         * get here. */
        LASSERT(!(cmd & OBD_BRW_READ));
        LASSERT(osc != NULL);
        page = osc->ops_cl.cpl_page;
        obj = osc->ops_cl.cpl_obj;

        cl_object_attr_lock(obj);
        result = cl_object_attr_get(env, obj, attr);
        cl_object_attr_unlock(obj);
        if (result < 0)
                return result;
        kms = attr->cat_kms;
        if (cl_offset(obj, page->cp_index) >= kms)
                /* catch race with truncate */
                return 0;
        else if (cl_offset(obj, page->cp_index + 1) > kms)
                /* catch sub-page write at end of file */
                return kms % CFS_PAGE_SIZE;
        else
                return CFS_PAGE_SIZE;
}

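/**
 * obd_async_page_ops::ap_completion() method: called when a transfer for
 * the page finishes. Notifies the cl_req, removes the page from the
 * object's in-flight list, updates lockless transfer statistics and drops
 * the transfer reference taken at submission.
 */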
static int osc_completion(const struct lu_env *env,
                          void *data, int cmd, struct obdo *oa, int rc)
{
        struct osc_page       *opg  = data;
        struct osc_async_page *oap  = &opg->ops_oap;
        struct cl_page        *page = cl_page_top(opg->ops_cl.cpl_page);
        struct osc_object     *obj  = cl2osc(opg->ops_cl.cpl_obj);
        enum cl_req_type crt;

        LINVRNT(osc_page_protected(env, opg, CLM_READ, 1));
        LINVRNT(cl_page_is_vmlocked(env, page));

        ENTRY;

        cmd &= ~OBD_BRW_NOQUOTA;
        LASSERT(equi(page->cp_state == CPS_PAGEIN,  cmd == OBD_BRW_READ));
        LASSERT(equi(page->cp_state == CPS_PAGEOUT, cmd == OBD_BRW_WRITE));
        LASSERT(opg->ops_transfer_pinned);

        /*
         * page->cp_req can be NULL if io submission failed before
         * cl_req was allocated.
         */
        if (page->cp_req != NULL)
                cl_req_page_done(env, page);
        LASSERT(page->cp_req == NULL);

        /* The transfer for this page is done; clear the async flags. */
        spin_lock(&oap->oap_lock);
        oap->oap_async_flags = 0;
        spin_unlock(&oap->oap_lock);

        crt = cmd == OBD_BRW_READ ? CRT_READ : CRT_WRITE;
        /* Clear opg->ops_transfer_pinned before VM lock is released. */
        opg->ops_transfer_pinned = 0;

        spin_lock(&obj->oo_seatbelt);
        LASSERT(opg->ops_submitter != NULL);
        LASSERT(!list_empty(&opg->ops_inflight));
        list_del_init(&opg->ops_inflight);
        spin_unlock(&obj->oo_seatbelt);

        opg->ops_submit_time = 0;

        cl_page_completion(env, page, crt, rc);

        /* Update lockless transfer statistics. */
        if (rc == 0 && oap->oap_brw_flags & OBD_BRW_SRVLOCK) {
                struct lu_device *ld    = opg->ops_cl.cpl_obj->co_lu.lo_dev;
                struct osc_stats *stats = &lu2osc_dev(ld)->od_stats;
                int bytes = opg->ops_to - opg->ops_from;

                if (crt == CRT_READ)
                        stats->os_lockless_reads += bytes;
                else
                        stats->os_lockless_writes += bytes;
        }

        /*
         * This has to be the last operation with the page, as locks are
         * released in cl_page_completion() and nothing except for the
         * reference counter protects the page from concurrent reclaim.
         */
        lu_ref_del(&page->cp_reference, "transfer", page);
        /*
         * As page->cp_obj is pinned by a reference from page->cp_req, it is
         * safe to call cl_page_put() without risking object destruction in a
         * non-blocking context.
         */
        cl_page_put(env, page);
        RETURN(0);
}

static const struct obd_async_page_ops osc_async_page_ops = {
        .ap_make_ready    = osc_make_ready,
        .ap_refresh_count = osc_refresh_count,
        .ap_completion    = osc_completion
};

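/**
 * Allocates the osc slice for \a page and sets up its embedded
 * osc_async_page via osc_prep_async_page(). Returns ERR_PTR(result),
 * which is NULL when initialization succeeds (result == 0).
 */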
struct cl_page *osc_page_init(const struct lu_env *env,
                              struct cl_object *obj,
                              struct cl_page *page, cfs_page_t *vmpage)
{
        struct osc_object *osc = cl2osc(obj);
        struct osc_page   *opg;
        int result;

        OBD_SLAB_ALLOC_PTR_GFP(opg, osc_page_kmem, CFS_ALLOC_IO);
        if (opg != NULL) {
                void *oap = &opg->ops_oap;

                opg->ops_from = 0;
                opg->ops_to   = CFS_PAGE_SIZE;

                result = osc_prep_async_page(osc_export(osc),
                                             NULL, osc->oo_oinfo, vmpage,
                                             cl_offset(obj, page->cp_index),
                                             &osc_async_page_ops,
                                             opg, (void **)&oap, 1, NULL);
                if (result == 0)
                        cl_page_slice_add(page, &opg->ops_cl, obj,
                                          &osc_page_ops);
                /*
                 * Cannot assert osc_page_protected() here as read-ahead
                 * creates temporary pages outside of a lock.
                 */
#ifdef INVARIANT_CHECK
                opg->ops_temp = !osc_page_protected(env, opg, CLM_READ, 1);
#endif
                CFS_INIT_LIST_HEAD(&opg->ops_inflight);
        } else
                result = -ENOMEM;
        return ERR_PTR(result);
}

/**
 * Helper function called by osc_io_submit() for every page in an immediate
 * transfer (i.e., transferred synchronously).
 */
void osc_io_submit_page(const struct lu_env *env,
                        struct osc_io *oio, struct osc_page *opg,
                        enum cl_req_type crt)
{
        struct osc_async_page *oap = &opg->ops_oap;
        struct client_obd     *cli = oap->oap_cli;
        int flags = 0;

        LINVRNT(osc_page_protected(env, opg,
                                   crt == CRT_WRITE ? CLM_WRITE : CLM_READ, 1));

        oap->oap_page_off   = opg->ops_from;
        oap->oap_count      = opg->ops_to - opg->ops_from;
        /* Give a hint to OST that requests are coming from kswapd - bug19529 */
        if (libcfs_memory_pressure_get())
                oap->oap_brw_flags |= OBD_BRW_MEMALLOC;
        oap->oap_brw_flags |= OBD_BRW_SYNC;
        if (osc_io_srvlock(oio))
                oap->oap_brw_flags |= OBD_BRW_SRVLOCK;

        oap->oap_cmd = crt == CRT_WRITE ? OBD_BRW_WRITE : OBD_BRW_READ;
        if (!client_is_remote(osc_export(cl2osc(opg->ops_cl.cpl_obj))) &&
            cfs_capable(CFS_CAP_SYS_RESOURCE)) {
                oap->oap_brw_flags |= OBD_BRW_NOQUOTA;
                oap->oap_cmd |= OBD_BRW_NOQUOTA;
        }

        if (oap->oap_cmd & OBD_BRW_READ)
                flags = ASYNC_COUNT_STABLE;
        else if (!(oap->oap_brw_page.flag & OBD_BRW_FROM_GRANT))
                osc_enter_cache_try(env, cli, oap->oap_loi, oap, 1);

        spin_lock(&oap->oap_lock);
        oap->oap_async_flags |= OSC_FLAGS | flags;
        spin_unlock(&oap->oap_lock);

        osc_oap_to_pending(oap);
        osc_page_transfer_get(opg, "transfer\0imm");
        osc_page_transfer_add(env, opg, crt);
}

/** @} osc */