lustre/osc/osc_page.c (fs/lustre-release.git, b=18881)
/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * Implementation of cl_page for OSC layer.
 *
 *   Author: Nikita Danilov <nikita.danilov@sun.com>
 */

/** \addtogroup osc osc @{ */

#define DEBUG_SUBSYSTEM S_OSC

#include "osc_cl_internal.h"

static int osc_page_is_dlocked(const struct lu_env *env,
                               const struct osc_page *opg,
                               enum cl_lock_mode mode, int pending, int unref)
{
        struct cl_page         *page;
        struct osc_object      *obj;
        struct osc_thread_info *info;
        struct ldlm_res_id     *resname;
        struct lustre_handle   *lockh;
        ldlm_policy_data_t     *policy;
        ldlm_mode_t             dlmmode;
        int                     flags;

        info = osc_env_info(env);
        resname = &info->oti_resname;
        policy = &info->oti_policy;
        lockh = &info->oti_handle;
        page = opg->ops_cl.cpl_page;
        obj = cl2osc(opg->ops_cl.cpl_obj);

        flags = LDLM_FL_TEST_LOCK | LDLM_FL_BLOCK_GRANTED;
        if (pending)
                flags |= LDLM_FL_CBPENDING;

        dlmmode = osc_cl_lock2ldlm(mode) | LCK_PW;
        osc_lock_build_res(env, obj, resname);
        osc_index2policy(policy, page->cp_obj, page->cp_index, page->cp_index);
        return osc_match_base(osc_export(obj), resname, LDLM_EXTENT, policy,
                              dlmmode, &flags, NULL, lockh, unref);
}
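
/*
 * Note on the mode computation in osc_page_is_dlocked(): LCK_PW is OR-ed
 * into the match mask, so a granted write lock always satisfies the check.
 * Assuming osc_cl_lock2ldlm() maps CLM_READ to LCK_PR and CLM_WRITE to
 * LCK_PW, a CLM_READ query therefore matches either a PR or a PW DLM lock,
 * while a CLM_WRITE query matches only PW.
 */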

/**
 * Checks an invariant that a page in the cache is covered by a lock, as
 * needed. Returns non-zero iff the page is adequately protected: the
 * top-level IO requires no locking (CILR_NEVER), a matching DLM lock
 * covers the page, or the covering sub-lock belongs to a lockless IO.
 */
static int osc_page_protected(const struct lu_env *env,
                              const struct osc_page *opg,
                              enum cl_lock_mode mode, int unref)
{
        struct cl_object_header *hdr;
        struct cl_lock          *scan;
        struct cl_page          *page;
        struct cl_lock_descr    *descr;
        int result;

        LINVRNT(!opg->ops_temp);

        page = opg->ops_cl.cpl_page;
        if (page->cp_owner != NULL &&
            cl_io_top(page->cp_owner)->ci_lockreq == CILR_NEVER)
                /*
                 * If IO is done without locks (liblustre, or lloop), a lock
                 * is not required.
                 */
                result = 1;
        else
                /* otherwise check for a DLM lock */
                result = osc_page_is_dlocked(env, opg, mode, 1, unref);
        if (result == 0) {
                /* maybe this page is a part of a lockless io? */
                hdr = cl_object_header(opg->ops_cl.cpl_obj);
                descr = &osc_env_info(env)->oti_descr;
                descr->cld_mode = mode;
                descr->cld_start = page->cp_index;
                descr->cld_end   = page->cp_index;
                spin_lock(&hdr->coh_lock_guard);
                list_for_each_entry(scan, &hdr->coh_locks, cll_linkage) {
                        /*
                         * Lock-less sub-lock has to be either in HELD state
                         * (when io is actively going on), or in CACHED state,
                         * when top-lock is being unlocked:
                         * cl_io_unlock()->cl_unuse()->...->lov_lock_unuse().
                         */
                        if ((scan->cll_state == CLS_HELD ||
                             scan->cll_state == CLS_CACHED) &&
                            cl_lock_ext_match(&scan->cll_descr, descr)) {
                                struct osc_lock *olck;

                                olck = osc_lock_at(scan);
                                result = osc_lock_is_lockless(olck);
                                break;
                        }
                }
                spin_unlock(&hdr->coh_lock_guard);
        }
        return result;
}

/*****************************************************************************
 *
 * Page operations.
 *
 */
static void osc_page_fini(const struct lu_env *env,
                          struct cl_page_slice *slice)
{
        struct osc_page *opg = cl2osc_page(slice);
        CDEBUG(D_TRACE, "%p\n", opg);
        OBD_SLAB_FREE_PTR(opg, osc_page_kmem);
}

static void osc_page_transfer_get(struct osc_page *opg, const char *label)
{
        struct cl_page *page = cl_page_top(opg->ops_cl.cpl_page);

        LASSERT(!opg->ops_transfer_pinned);
        cl_page_get(page);
        lu_ref_add_atomic(&page->cp_reference, label, page);
        opg->ops_transfer_pinned = 1;
}
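
/*
 * Note on the "transfer\0cache" / "transfer\0imm" labels passed to
 * osc_page_transfer_get() below: the embedded NUL makes the runtime lu_ref
 * label plain "transfer" in both cases (matching the lu_ref_del() in
 * osc_page_transfer_put()), while the full literal still documents the
 * call site in the source.
 */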

static void osc_page_transfer_put(const struct lu_env *env,
                                  struct osc_page *opg)
{
        struct cl_page *page = cl_page_top(opg->ops_cl.cpl_page);

        if (opg->ops_transfer_pinned) {
                lu_ref_del(&page->cp_reference, "transfer", page);
                opg->ops_transfer_pinned = 0;
                cl_page_put(env, page);
        }
}

/**
 * This is called once for every page when it is submitted for a transfer,
 * either opportunistic (osc_page_cache_add()) or immediate
 * (osc_page_submit()).
 */
static void osc_page_transfer_add(const struct lu_env *env,
                                  struct osc_page *opg, enum cl_req_type crt)
{
        struct osc_object *obj;

        LINVRNT(cl_page_is_vmlocked(env, opg->ops_cl.cpl_page));

        obj = cl2osc(opg->ops_cl.cpl_obj);
        spin_lock(&obj->oo_seatbelt);
        list_add(&opg->ops_inflight, &obj->oo_inflight[crt]);
        opg->ops_submitter = cfs_current();
        spin_unlock(&obj->oo_seatbelt);
}

static int osc_page_cache_add(const struct lu_env *env,
                              const struct cl_page_slice *slice,
                              struct cl_io *_)
{
        struct osc_page   *opg = cl2osc_page(slice);
        struct osc_object *obj = cl2osc(opg->ops_cl.cpl_obj);
        struct osc_io     *oio = osc_env_io(env);
        int result;
        int brw_flags;
        int noquota = 0;

        LINVRNT(osc_page_protected(env, opg, CLM_WRITE, 0));
        ENTRY;

        /* Set the OBD_BRW_SRVLOCK before the page is queued. */
        brw_flags = osc_io_srvlock(oio) ? OBD_BRW_SRVLOCK : 0;
        if (!client_is_remote(osc_export(obj)) &&
            cfs_capable(CFS_CAP_SYS_RESOURCE)) {
                brw_flags |= OBD_BRW_NOQUOTA;
                noquota = OBD_BRW_NOQUOTA;
        }

        osc_page_transfer_get(opg, "transfer\0cache");
        result = osc_queue_async_io(env, osc_export(obj), NULL, obj->oo_oinfo,
                                    &opg->ops_oap, OBD_BRW_WRITE | noquota,
                                    0, 0, brw_flags, 0);
        if (result != 0)
                osc_page_transfer_put(env, opg);
        else
                osc_page_transfer_add(env, opg, CRT_WRITE);
        RETURN(result);
}

void osc_index2policy(ldlm_policy_data_t *policy, const struct cl_object *obj,
                      pgoff_t start, pgoff_t end)
{
        memset(policy, 0, sizeof *policy);
        policy->l_extent.start = cl_offset(obj, start);
        policy->l_extent.end   = cl_offset(obj, end + 1) - 1;
}
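
/*
 * Worked example (illustrative only, assuming a 4096-byte page size):
 * osc_index2policy(policy, obj, 3, 3) yields
 *
 *     l_extent.start = 3 * 4096     = 12288
 *     l_extent.end   = 4 * 4096 - 1 = 16383
 *
 * i.e., the single page [12288, 16384) expressed as the inclusive byte
 * extent used by the DLM.
 */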

/**
 * Implementation of cl_page_operations::cpo_is_under_lock(). Following the
 * cl_page convention (as assumed here), -EBUSY means a lock covering the
 * page exists, while -ENODATA means no such lock was found.
 */
static int osc_page_is_under_lock(const struct lu_env *env,
                                  const struct cl_page_slice *slice,
                                  struct cl_io *_)
{
        struct cl_lock *lock;
        int             result;

        ENTRY;
        lock = cl_lock_at_page(env, slice->cpl_obj, slice->cpl_page,
                               NULL, 1, 0);
        if (lock != NULL) {
                cl_lock_put(env, lock);
                result = -EBUSY;
        } else
                result = -ENODATA;
        RETURN(result);
}

static int osc_page_fail(const struct lu_env *env,
                         const struct cl_page_slice *slice, struct cl_io *_)
{
        /*
         * Cached read?
         */
        LBUG();
        return 0;
}

static const char *osc_list(struct list_head *head)
{
        return list_empty(head) ? "-" : "+";
}

static int osc_page_print(const struct lu_env *env,
                          const struct cl_page_slice *slice,
                          void *cookie, lu_printer_t printer)
{
        struct osc_page       *opg = cl2osc_page(slice);
        struct osc_async_page *oap = &opg->ops_oap;

        return (*printer)(env, cookie, LUSTRE_OSC_NAME"-page@%p: "
                          "< %#x %d %u %s %s %s >"
                          "< %llu %u %#x %#x %p %p %p %p %p >"
                          "< %s %p %d >\n",
                          opg,
                          /* 1 */
                          oap->oap_magic, oap->oap_cmd,
                          oap->oap_interrupted,
                          osc_list(&oap->oap_pending_item),
                          osc_list(&oap->oap_urgent_item),
                          osc_list(&oap->oap_rpc_item),
                          /* 2 */
                          oap->oap_obj_off, oap->oap_page_off,
                          oap->oap_async_flags, oap->oap_brw_flags,
                          oap->oap_request,
                          oap->oap_cli, oap->oap_loi, oap->oap_caller_ops,
                          oap->oap_caller_data,
                          /* 3 */
                          osc_list(&opg->ops_inflight),
                          opg->ops_submitter, opg->ops_transfer_pinned);
}

static void osc_page_delete(const struct lu_env *env,
                            const struct cl_page_slice *slice)
{
        struct osc_page       *opg = cl2osc_page(slice);
        struct osc_object     *obj = cl2osc(opg->ops_cl.cpl_obj);
        struct osc_async_page *oap = &opg->ops_oap;
        int rc;

        LINVRNT(opg->ops_temp || osc_page_protected(env, opg, CLM_READ, 1));

        ENTRY;
        CDEBUG(D_TRACE, "%p\n", opg);
        osc_page_transfer_put(env, opg);
        rc = osc_teardown_async_page(osc_export(obj), NULL, obj->oo_oinfo, oap);
        if (rc) {
                CL_PAGE_DEBUG(D_ERROR, env, cl_page_top(slice->cpl_page),
                              "Trying to teardown failed: %d\n", rc);
                LASSERT(0);
        }
        spin_lock(&obj->oo_seatbelt);
        list_del_init(&opg->ops_inflight);
        spin_unlock(&obj->oo_seatbelt);
        EXIT;
}

void osc_page_clip(const struct lu_env *env, const struct cl_page_slice *slice,
                   int from, int to)
{
        struct osc_page       *opg = cl2osc_page(slice);
        struct osc_async_page *oap = &opg->ops_oap;

        LINVRNT(osc_page_protected(env, opg, CLM_READ, 0));

        opg->ops_from = from;
        opg->ops_to   = to;
        oap->oap_async_flags |= ASYNC_COUNT_STABLE;
}

static int osc_page_cancel(const struct lu_env *env,
                           const struct cl_page_slice *slice)
{
        struct osc_page *opg       = cl2osc_page(slice);
        struct osc_async_page *oap = &opg->ops_oap;
        int rc = 0;

        LINVRNT(osc_page_protected(env, opg, CLM_READ, 0));

        client_obd_list_lock(&oap->oap_cli->cl_loi_list_lock);
        /* Check whether the transfer against this page has completed, or
         * was never queued at all. */
        if (opg->ops_transfer_pinned)
                /* FIXME: may not be interrupted.. */
                rc = osc_oap_interrupted(env, oap);
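        /*
         * ergo(a, b) below is logical implication, !(a) || (b): if the
         * cancel path reports success (rc == 0), the page must no longer
         * be pinned for transfer.
         */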
        LASSERT(ergo(rc == 0, opg->ops_transfer_pinned == 0));
        client_obd_list_unlock(&oap->oap_cli->cl_loi_list_lock);
        return rc;
}

static const struct cl_page_operations osc_page_ops = {
        .cpo_fini          = osc_page_fini,
        .cpo_print         = osc_page_print,
        .cpo_delete        = osc_page_delete,
        .cpo_is_under_lock = osc_page_is_under_lock,
        .io = {
                [CRT_READ] = {
                        .cpo_cache_add = osc_page_fail
                },
                [CRT_WRITE] = {
                        .cpo_cache_add = osc_page_cache_add
                }
        },
        .cpo_clip          = osc_page_clip,
        .cpo_cancel        = osc_page_cancel
};

static int osc_make_ready(const struct lu_env *env, void *data, int cmd)
{
        struct osc_page *opg  = data;
        struct cl_page  *page = cl_page_top(opg->ops_cl.cpl_page);
        int result;

        LASSERT(cmd == OBD_BRW_WRITE); /* no cached reads */
        LINVRNT(osc_page_protected(env, opg, CLM_WRITE, 1));

        ENTRY;
        result = cl_page_make_ready(env, page, CRT_WRITE);
        RETURN(result);
}

static int osc_refresh_count(const struct lu_env *env, void *data, int cmd)
{
        struct cl_page   *page;
        struct osc_page  *osc = data;
        struct cl_object *obj;
        struct cl_attr   *attr = &osc_env_info(env)->oti_attr;
        int result;
        loff_t kms;

        LINVRNT(osc_page_protected(env, osc, CLM_READ, 1));

        /* readpage queues with _COUNT_STABLE, shouldn't get here. */
        LASSERT(!(cmd & OBD_BRW_READ));
        LASSERT(osc != NULL);
        page = osc->ops_cl.cpl_page;
        obj = osc->ops_cl.cpl_obj;

        cl_object_attr_lock(obj);
        result = cl_object_attr_get(env, obj, attr);
        cl_object_attr_unlock(obj);
        if (result < 0)
                return result;
        kms = attr->cat_kms;
        if (cl_offset(obj, page->cp_index) >= kms)
                /* catch race with truncate */
                return 0;
        else if (cl_offset(obj, page->cp_index + 1) > kms)
                /* catch sub-page write at end of file */
                return kms % CFS_PAGE_SIZE;
        else
                return CFS_PAGE_SIZE;
}
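
/*
 * Worked example for the kms logic above (illustrative only, assuming
 * CFS_PAGE_SIZE == 4096 and kms == 10000):
 *
 *     page index 0 covers [0, 4096)       -> return 4096 (fully below kms)
 *     page index 2 covers [8192, 12288)   -> return 10000 % 4096 == 1808
 *     page index 3 starts at 12288 >= kms -> return 0 (raced with truncate)
 */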

static int osc_completion(const struct lu_env *env,
                          void *data, int cmd, struct obdo *oa, int rc)
{
        struct osc_page       *opg  = data;
        struct osc_async_page *oap  = &opg->ops_oap;
        struct cl_page        *page = cl_page_top(opg->ops_cl.cpl_page);
        struct osc_object     *obj  = cl2osc(opg->ops_cl.cpl_obj);
        enum cl_req_type crt;

        LINVRNT(osc_page_protected(env, opg, CLM_READ, 1));
        LINVRNT(cl_page_is_vmlocked(env, page));

        ENTRY;

        cmd &= ~OBD_BRW_NOQUOTA;
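        /*
         * equi(a, b) asserts "a if and only if b": a read completion must
         * find the page in CPS_PAGEIN and a write completion must find it
         * in CPS_PAGEOUT, and vice versa.
         */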
        LASSERT(equi(page->cp_state == CPS_PAGEIN,  cmd == OBD_BRW_READ));
        LASSERT(equi(page->cp_state == CPS_PAGEOUT, cmd == OBD_BRW_WRITE));
        LASSERT(opg->ops_transfer_pinned);

        /*
         * page->cp_req can be NULL if io submission failed before
         * cl_req was allocated.
         */
        if (page->cp_req != NULL)
                cl_req_page_done(env, page);
        LASSERT(page->cp_req == NULL);

        /* As the transfer for this page is being done, clear the flags */
        oap->oap_async_flags = 0;

        crt = cmd == OBD_BRW_READ ? CRT_READ : CRT_WRITE;
        /* Clear opg->ops_transfer_pinned before VM lock is released. */
        opg->ops_transfer_pinned = 0;

        spin_lock(&obj->oo_seatbelt);
        LASSERT(opg->ops_submitter != NULL);
        LASSERT(!list_empty(&opg->ops_inflight));
        list_del_init(&opg->ops_inflight);
        spin_unlock(&obj->oo_seatbelt);

        cl_page_completion(env, page, crt, rc);

        /* statistics */
        if (rc == 0 && oap->oap_brw_flags & OBD_BRW_SRVLOCK) {
                struct lu_device *ld    = opg->ops_cl.cpl_obj->co_lu.lo_dev;
                struct osc_stats *stats = &lu2osc_dev(ld)->od_stats;
                int bytes = opg->ops_to - opg->ops_from;

                if (crt == CRT_READ)
                        stats->os_lockless_reads += bytes;
                else
                        stats->os_lockless_writes += bytes;
        }

        /*
         * This has to be the last operation with the page, as locks are
         * released in cl_page_completion() and nothing except for the
         * reference counter protects page from concurrent reclaim.
         */
        lu_ref_del(&page->cp_reference, "transfer", page);
        /*
         * As page->cp_obj is pinned by a reference from page->cp_req, it is
         * safe to call cl_page_put() without risking object destruction in a
         * non-blocking context.
         */
        cl_page_put(env, page);
        RETURN(0);
}

static const struct obd_async_page_ops osc_async_page_ops = {
        .ap_make_ready    = osc_make_ready,
        .ap_refresh_count = osc_refresh_count,
        .ap_completion    = osc_completion
};

struct cl_page *osc_page_init(const struct lu_env *env,
                              struct cl_object *obj,
                              struct cl_page *page, cfs_page_t *vmpage)
{
        struct osc_object *osc = cl2osc(obj);
        struct osc_page   *opg;
        int result;

        OBD_SLAB_ALLOC_PTR_GFP(opg, osc_page_kmem, CFS_ALLOC_IO);
        if (opg != NULL) {
                void *oap = &opg->ops_oap;

                opg->ops_from = 0;
                opg->ops_to   = CFS_PAGE_SIZE;

                result = osc_prep_async_page(osc_export(osc),
                                             NULL, osc->oo_oinfo, vmpage,
                                             cl_offset(obj, page->cp_index),
                                             &osc_async_page_ops,
                                             opg, (void **)&oap, 1, NULL);
                if (result == 0)
                        cl_page_slice_add(page, &opg->ops_cl, obj,
                                          &osc_page_ops);
                /*
                 * Cannot assert osc_page_protected() here as read-ahead
                 * creates temporary pages outside of a lock.
                 */
#ifdef INVARIANT_CHECK
                opg->ops_temp = !osc_page_protected(env, opg, CLM_READ, 1);
#endif
                CFS_INIT_LIST_HEAD(&opg->ops_inflight);
        } else
                result = -ENOMEM;
        return ERR_PTR(result);
}
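
/*
 * Note on the return convention above: on success result == 0, so
 * ERR_PTR(result) is NULL; the caller is assumed to treat a NULL return
 * from coo_page_init() as "slice installed, no error" and any IS_ERR()
 * pointer as a failure.
 */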

/**
 * Helper function called by osc_io_submit() for every page in an immediate
 * transfer (i.e., transferred synchronously).
 */
void osc_io_submit_page(const struct lu_env *env,
                        struct osc_io *oio, struct osc_page *opg,
                        enum cl_req_type crt)
{
        struct osc_async_page *oap = &opg->ops_oap;
        struct client_obd     *cli = oap->oap_cli;

        LINVRNT(osc_page_protected(env, opg,
                                   crt == CRT_WRITE ? CLM_WRITE : CLM_READ, 1));

        oap->oap_page_off   = opg->ops_from;
        oap->oap_count      = opg->ops_to - opg->ops_from;
        oap->oap_brw_flags |= OBD_BRW_SYNC;
        if (osc_io_srvlock(oio))
                oap->oap_brw_flags |= OBD_BRW_SRVLOCK;

        oap->oap_cmd = crt == CRT_WRITE ? OBD_BRW_WRITE : OBD_BRW_READ;
        if (!client_is_remote(osc_export(cl2osc(opg->ops_cl.cpl_obj))) &&
            cfs_capable(CFS_CAP_SYS_RESOURCE)) {
                oap->oap_brw_flags |= OBD_BRW_NOQUOTA;
                oap->oap_cmd |= OBD_BRW_NOQUOTA;
        }

        oap->oap_async_flags |= OSC_FLAGS;
        if (oap->oap_cmd & OBD_BRW_READ)
                oap->oap_async_flags |= ASYNC_COUNT_STABLE;
        else if (!(oap->oap_brw_page.flag & OBD_BRW_FROM_GRANT))
                osc_enter_cache_try(env, cli, oap->oap_loi, oap, 1);

        osc_oap_to_pending(oap);
        osc_page_transfer_get(opg, "transfer\0imm");
        osc_page_transfer_add(env, opg, crt);
}
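
/*
 * Illustrative summary of the flag composition above (assuming the
 * conventional meaning of the OBD_BRW_* and ASYNC_* flags): a CRT_READ
 * page is queued as OBD_BRW_READ with ASYNC_COUNT_STABLE set, since the
 * amount to read is already known, while a CRT_WRITE page that was not
 * charged against the client's grant (OBD_BRW_FROM_GRANT unset) first
 * tries to enter the cache accounting via osc_enter_cache_try().
 */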

/** @} osc */