/* lustre/osc/osc_page.c */
/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, Whamcloud, Inc.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * Implementation of cl_page for OSC layer.
 *
 *   Author: Nikita Danilov <nikita.danilov@sun.com>
 */

#define DEBUG_SUBSYSTEM S_OSC

#include "osc_cl_internal.h"

/** \addtogroup osc
 *  @{
 */

/*
 * osc_page_protected() is commented out because it may sleep inside
 * the client_obd_list_lock:
 * client_obd_list_lock -> osc_ap_completion -> osc_completion ->
 *   -> osc_page_protected -> osc_page_is_dlocked -> osc_match_base
 *   -> ldlm_lock_match -> sptlrpc_import_check_ctx -> sleep.
 */
#if 0
static int osc_page_is_dlocked(const struct lu_env *env,
                               const struct osc_page *opg,
                               enum cl_lock_mode mode, int pending, int unref)
{
        struct cl_page         *page;
        struct osc_object      *obj;
        struct osc_thread_info *info;
        struct ldlm_res_id     *resname;
        struct lustre_handle   *lockh;
        ldlm_policy_data_t     *policy;
        ldlm_mode_t             dlmmode;
        int                     flags;

        cfs_might_sleep();

        info = osc_env_info(env);
        resname = &info->oti_resname;
        policy = &info->oti_policy;
        lockh = &info->oti_handle;
        page = opg->ops_cl.cpl_page;
        obj = cl2osc(opg->ops_cl.cpl_obj);

        flags = LDLM_FL_TEST_LOCK | LDLM_FL_BLOCK_GRANTED;
        if (pending)
                flags |= LDLM_FL_CBPENDING;

        dlmmode = osc_cl_lock2ldlm(mode) | LCK_PW;
        osc_lock_build_res(env, obj, resname);
        osc_index2policy(policy, page->cp_obj, page->cp_index, page->cp_index);
        return osc_match_base(osc_export(obj), resname, LDLM_EXTENT, policy,
                              dlmmode, &flags, NULL, lockh, unref);
}

/**
 * Checks an invariant that a page in the cache is covered by a lock, as
 * needed.
 */
static int osc_page_protected(const struct lu_env *env,
                              const struct osc_page *opg,
                              enum cl_lock_mode mode, int unref)
{
        struct cl_object_header *hdr;
        struct cl_lock          *scan;
        struct cl_page          *page;
        struct cl_lock_descr    *descr;
        int result;

        LINVRNT(!opg->ops_temp);

        page = opg->ops_cl.cpl_page;
        if (page->cp_owner != NULL &&
            cl_io_top(page->cp_owner)->ci_lockreq == CILR_NEVER)
                /*
                 * If IO is done without locks (liblustre, or lloop), lock is
                 * not required.
                 */
                result = 1;
        else
                /* otherwise check for a DLM lock */
                result = osc_page_is_dlocked(env, opg, mode, 1, unref);
        if (result == 0) {
                /* maybe this page is a part of a lockless io? */
                hdr = cl_object_header(opg->ops_cl.cpl_obj);
                descr = &osc_env_info(env)->oti_descr;
                descr->cld_mode = mode;
                descr->cld_start = page->cp_index;
                descr->cld_end   = page->cp_index;
                cfs_spin_lock(&hdr->coh_lock_guard);
                cfs_list_for_each_entry(scan, &hdr->coh_locks, cll_linkage) {
                        /*
                         * Lock-less sub-lock has to be either in HELD state
                         * (when io is actively going on), or in CACHED state,
                         * when top-lock is being unlocked:
                         * cl_io_unlock()->cl_unuse()->...->lov_lock_unuse().
                         */
                        if ((scan->cll_state == CLS_HELD ||
                             scan->cll_state == CLS_CACHED) &&
                            cl_lock_ext_match(&scan->cll_descr, descr)) {
                                struct osc_lock *olck;

                                olck = osc_lock_at(scan);
                                result = osc_lock_is_lockless(olck);
                                break;
                        }
                }
                cfs_spin_unlock(&hdr->coh_lock_guard);
        }
        return result;
}
#else
static int osc_page_protected(const struct lu_env *env,
                              const struct osc_page *opg,
                              enum cl_lock_mode mode, int unref)
{
        return 1;
}
#endif

/*****************************************************************************
 *
 * Page operations.
 *
 */
static void osc_page_fini(const struct lu_env *env,
                          struct cl_page_slice *slice)
{
        struct osc_page *opg = cl2osc_page(slice);
        CDEBUG(D_TRACE, "%p\n", opg);
        LASSERT(opg->ops_lock == NULL);
        OBD_SLAB_FREE_PTR(opg, osc_page_kmem);
}

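/**
 * Pins the top-level cl_page for a transfer: takes a page reference and
 * marks the page as transfer-pinned.  The pin is dropped again by
 * osc_page_transfer_put().
 */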
static void osc_page_transfer_get(struct osc_page *opg, const char *label)
{
        struct cl_page *page = cl_page_top(opg->ops_cl.cpl_page);

        LASSERT(!opg->ops_transfer_pinned);
        cl_page_get(page);
        lu_ref_add_atomic(&page->cp_reference, label, page);
        opg->ops_transfer_pinned = 1;
}

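/**
 * Releases the transfer pin taken by osc_page_transfer_get(), if it is
 * still held, and drops the corresponding page reference.
 */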
static void osc_page_transfer_put(const struct lu_env *env,
                                  struct osc_page *opg)
{
        struct cl_page *page = cl_page_top(opg->ops_cl.cpl_page);

        if (opg->ops_transfer_pinned) {
                lu_ref_del(&page->cp_reference, "transfer", page);
                opg->ops_transfer_pinned = 0;
                cl_page_put(env, page);
        }
}

/**
 * This is called once for every page when it is submitted for a transfer,
 * either opportunistic (osc_page_cache_add()) or immediate
 * (osc_page_submit()).
 */
static void osc_page_transfer_add(const struct lu_env *env,
                                  struct osc_page *opg, enum cl_req_type crt)
{
        struct osc_object *obj;

        obj = cl2osc(opg->ops_cl.cpl_obj);
        cfs_spin_lock(&obj->oo_seatbelt);
        cfs_list_add(&opg->ops_inflight, &obj->oo_inflight[crt]);
        opg->ops_submitter = cfs_current();
        cfs_spin_unlock(&obj->oo_seatbelt);
}

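/**
 * cl_page_operations::cpo_cache_add() implementation for writes: pins the
 * page and queues it for opportunistic (write-back) transfer through
 * osc_queue_async_io().
 */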
static int osc_page_cache_add(const struct lu_env *env,
                              const struct cl_page_slice *slice,
                              struct cl_io *io)
{
        struct osc_io   *oio = osc_env_io(env);
        struct osc_page *opg = cl2osc_page(slice);
        int result;
        ENTRY;

        LINVRNT(osc_page_protected(env, opg, CLM_WRITE, 0));

        osc_page_transfer_get(opg, "transfer\0cache");
        result = osc_queue_async_io(env, io, opg);
        if (result != 0)
                osc_page_transfer_put(env, opg);
        else
                osc_page_transfer_add(env, opg, CRT_WRITE);

        /* For a sync write, the kernel waits for this page to be flushed
         * before osc_io_end() is called, so release the active extent
         * earlier.  For mkwrite(), it is known that no further pages will
         * follow. */
        if (cl_io_is_sync_write(io) || cl_io_is_mkwrite(io)) {
                if (oio->oi_active != NULL) {
                        osc_extent_release(env, oio->oi_active);
                        oio->oi_active = NULL;
                }
        }

        RETURN(result);
}

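/**
 * Converts a page index range [\a start, \a end] into an LDLM extent
 * policy expressed in byte offsets.
 */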
void osc_index2policy(ldlm_policy_data_t *policy, const struct cl_object *obj,
                      pgoff_t start, pgoff_t end)
{
        memset(policy, 0, sizeof *policy);
        policy->l_extent.start = cl_offset(obj, start);
        policy->l_extent.end   = cl_offset(obj, end + 1) - 1;
}

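/**
 * Records \a lock as the lock covering \a opg, taking a page reference on
 * the matching osc_lock.  Returns 0 on success; returns 1 and drops the
 * cl_lock reference if the lock is already going away (its ols_pageref
 * count has gone non-positive).
 */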
static int osc_page_addref_lock(const struct lu_env *env,
                                struct osc_page *opg,
                                struct cl_lock *lock)
{
        struct osc_lock *olock;
        int              rc;

        LASSERT(opg->ops_lock == NULL);

        olock = osc_lock_at(lock);
        if (cfs_atomic_inc_return(&olock->ols_pageref) <= 0) {
                cfs_atomic_dec(&olock->ols_pageref);
                cl_lock_put(env, lock);
                rc = 1;
        } else {
                opg->ops_lock = lock;
                rc = 0;
        }
        return rc;
}

static void osc_page_putref_lock(const struct lu_env *env,
                                 struct osc_page *opg)
{
        struct cl_lock  *lock = opg->ops_lock;
        struct osc_lock *olock;

        LASSERT(lock != NULL);
        olock = osc_lock_at(lock);

        cfs_atomic_dec(&olock->ols_pageref);
        opg->ops_lock = NULL;

        cl_lock_put(env, lock);
}

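/**
 * cl_page_operations::cpo_is_under_lock() implementation.  Returns -EBUSY
 * if a lock covering the page exists (and pins it via
 * osc_page_addref_lock()), -ENODATA otherwise.
 */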
static int osc_page_is_under_lock(const struct lu_env *env,
                                  const struct cl_page_slice *slice,
                                  struct cl_io *unused)
{
        struct cl_lock *lock;
        int             result;

        ENTRY;
        lock = cl_lock_at_page(env, slice->cpl_obj, slice->cpl_page,
                               NULL, 1, 0);
        if (lock != NULL &&
            osc_page_addref_lock(env, cl2osc_page(slice), lock) == 0)
                result = -EBUSY;
        else
                result = -ENODATA;
        RETURN(result);
}

static void osc_page_disown(const struct lu_env *env,
                            const struct cl_page_slice *slice,
                            struct cl_io *io)
{
        struct osc_page *opg = cl2osc_page(slice);

        if (unlikely(opg->ops_lock))
                osc_page_putref_lock(env, opg);
}

static void osc_page_completion_read(const struct lu_env *env,
                                     const struct cl_page_slice *slice,
                                     int ioret)
{
        struct osc_page *opg = cl2osc_page(slice);

        if (likely(opg->ops_lock))
                osc_page_putref_lock(env, opg);
}

static int osc_page_fail(const struct lu_env *env,
                         const struct cl_page_slice *slice,
                         struct cl_io *unused)
{
        /*
         * Cached read?  Cached reads are never added at the OSC layer, so
         * reaching this function is a bug.
         */
        LBUG();
        return 0;
}

static const char *osc_list(cfs_list_t *head)
{
        return cfs_list_empty(head) ? "-" : "+";
}

static inline cfs_time_t osc_submit_duration(struct osc_page *opg)
{
        if (opg->ops_submit_time == 0)
                return 0;

        return (cfs_time_current() - opg->ops_submit_time);
}

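/**
 * cl_page_operations::cpo_print() implementation: dumps the async page
 * state, the per-object lists and the client_obd transfer counters for
 * debugging.
 */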
static int osc_page_print(const struct lu_env *env,
                          const struct cl_page_slice *slice,
                          void *cookie, lu_printer_t printer)
{
        struct osc_page       *opg = cl2osc_page(slice);
        struct osc_async_page *oap = &opg->ops_oap;
        struct osc_object     *obj = cl2osc(slice->cpl_obj);
        struct client_obd     *cli = &osc_export(obj)->exp_obd->u.cli;

        return (*printer)(env, cookie, LUSTRE_OSC_NAME"-page@%p: "
                          "1< %#x %d %u %s %s > "
                          "2< "LPU64" %u %u %#x %#x | %p %p %p > "
                          "3< %s %p %d %lu %d > "
                          "4< %d %d %d %lu %s | %s %s %s %s > "
                          "5< %s %s %s %s | %d %s | %d %s %s>\n",
                          opg,
                          /* 1 */
                          oap->oap_magic, oap->oap_cmd,
                          oap->oap_interrupted,
                          osc_list(&oap->oap_pending_item),
                          osc_list(&oap->oap_rpc_item),
                          /* 2 */
                          oap->oap_obj_off, oap->oap_page_off, oap->oap_count,
                          oap->oap_async_flags, oap->oap_brw_flags,
                          oap->oap_request, oap->oap_cli, obj,
                          /* 3 */
                          osc_list(&opg->ops_inflight),
                          opg->ops_submitter, opg->ops_transfer_pinned,
                          osc_submit_duration(opg), opg->ops_srvlock,
                          /* 4 */
                          cli->cl_r_in_flight, cli->cl_w_in_flight,
                          cli->cl_max_rpcs_in_flight,
                          cli->cl_avail_grant,
                          osc_list(&cli->cl_cache_waiters),
                          osc_list(&cli->cl_loi_ready_list),
                          osc_list(&cli->cl_loi_hp_ready_list),
                          osc_list(&cli->cl_loi_write_list),
                          osc_list(&cli->cl_loi_read_list),
                          /* 5 */
                          osc_list(&obj->oo_ready_item),
                          osc_list(&obj->oo_hp_ready_item),
                          osc_list(&obj->oo_write_item),
                          osc_list(&obj->oo_read_item),
                          cfs_atomic_read(&obj->oo_nr_reads),
                          osc_list(&obj->oo_reading_exts),
                          cfs_atomic_read(&obj->oo_nr_writes),
                          osc_list(&obj->oo_hp_exts),
                          osc_list(&obj->oo_urgent_exts));
}

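/**
 * cl_page_operations::cpo_delete() implementation: drops the transfer pin,
 * tears down the async page state and removes the page from the object's
 * in-flight list.
 */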
static void osc_page_delete(const struct lu_env *env,
                            const struct cl_page_slice *slice)
{
        struct osc_page   *opg = cl2osc_page(slice);
        struct osc_object *obj = cl2osc(opg->ops_cl.cpl_obj);
        int rc;

        LINVRNT(opg->ops_temp || osc_page_protected(env, opg, CLM_READ, 1));

        ENTRY;
        CDEBUG(D_TRACE, "%p\n", opg);
        osc_page_transfer_put(env, opg);
        rc = osc_teardown_async_page(env, obj, opg);
        if (rc) {
                CL_PAGE_DEBUG(D_ERROR, env, cl_page_top(slice->cpl_page),
                              "Trying to teardown failed: %d\n", rc);
                LASSERT(0);
        }
        cfs_spin_lock(&obj->oo_seatbelt);
        cfs_list_del_init(&opg->ops_inflight);
        cfs_spin_unlock(&obj->oo_seatbelt);
        EXIT;
}

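/**
 * Clips the byte range [\a from, \a to) that will actually be transferred
 * for this page and marks the transfer count as stable.
 */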
void osc_page_clip(const struct lu_env *env, const struct cl_page_slice *slice,
                   int from, int to)
{
        struct osc_page       *opg = cl2osc_page(slice);
        struct osc_async_page *oap = &opg->ops_oap;

        LINVRNT(osc_page_protected(env, opg, CLM_READ, 0));

        opg->ops_from = from;
        opg->ops_to   = to;
        cfs_spin_lock(&oap->oap_lock);
        oap->oap_async_flags |= ASYNC_COUNT_STABLE;
        cfs_spin_unlock(&oap->oap_lock);
}

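/**
 * cl_page_operations::cpo_cancel() implementation: cancels an in-flight or
 * queued transfer of this page, if any.
 */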
static int osc_page_cancel(const struct lu_env *env,
                           const struct cl_page_slice *slice)
{
        struct osc_page *opg = cl2osc_page(slice);
        int rc = 0;

        LINVRNT(osc_page_protected(env, opg, CLM_READ, 0));

        /* Check if the transfer of this page has completed, or was never
         * queued in the first place. */
        if (opg->ops_transfer_pinned)
                /* FIXME: may not be interrupted.. */
                rc = osc_cancel_async_page(env, opg);
        LASSERT(ergo(rc == 0, opg->ops_transfer_pinned == 0));
        return rc;
}

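/**
 * cl_page_operations::cpo_flush() implementation: forces a dirty page out
 * to the OST through osc_flush_async_page().
 */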
static int osc_page_flush(const struct lu_env *env,
                          const struct cl_page_slice *slice,
                          struct cl_io *io)
{
        struct osc_page *opg = cl2osc_page(slice);
        int rc = 0;
        ENTRY;
        rc = osc_flush_async_page(env, io, opg);
        RETURN(rc);
}

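/**
 * Per-layer cl_page operations for the OSC layer.  Note that cached reads
 * never reach this layer through cpo_cache_add(), so the CRT_READ slot is
 * wired to osc_page_fail().
 */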
static const struct cl_page_operations osc_page_ops = {
        .cpo_fini          = osc_page_fini,
        .cpo_print         = osc_page_print,
        .cpo_delete        = osc_page_delete,
        .cpo_is_under_lock = osc_page_is_under_lock,
        .cpo_disown        = osc_page_disown,
        .io = {
                [CRT_READ] = {
                        .cpo_cache_add  = osc_page_fail,
                        .cpo_completion = osc_page_completion_read
                },
                [CRT_WRITE] = {
                        .cpo_cache_add  = osc_page_cache_add
                }
        },
        .cpo_clip           = osc_page_clip,
        .cpo_cancel         = osc_page_cancel,
        .cpo_flush          = osc_page_flush
};

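/**
 * Allocates and initializes the OSC-private part of \a page: sets up the
 * async page descriptor and attaches the page slice to \a page.  Returns
 * NULL (i.e. ERR_PTR(0)) on success, or ERR_PTR(-ENOMEM) if the slice
 * cannot be allocated.
 */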
struct cl_page *osc_page_init(const struct lu_env *env,
                              struct cl_object *obj,
                              struct cl_page *page, cfs_page_t *vmpage)
{
        struct osc_object *osc = cl2osc(obj);
        struct osc_page   *opg;
        int result;

        OBD_SLAB_ALLOC_PTR_GFP(opg, osc_page_kmem, CFS_ALLOC_IO);
        if (opg != NULL) {
                opg->ops_from = 0;
                opg->ops_to   = CFS_PAGE_SIZE;

                result = osc_prep_async_page(osc, opg, vmpage,
                                             cl_offset(obj, page->cp_index));
                if (result == 0) {
                        struct osc_io *oio = osc_env_io(env);
                        opg->ops_srvlock = osc_io_srvlock(oio);
                        cl_page_slice_add(page, &opg->ops_cl, obj,
                                          &osc_page_ops);
                }
                /*
                 * Cannot assert osc_page_protected() here as read-ahead
                 * creates temporary pages outside of a lock.
                 */
#ifdef INVARIANT_CHECK
                opg->ops_temp = !osc_page_protected(env, opg, CLM_READ, 1);
#endif
                CFS_INIT_LIST_HEAD(&opg->ops_inflight);
        } else
                result = -ENOMEM;
        return ERR_PTR(result);
}

/**
 * Helper function called by osc_io_submit() for every page in an immediate
 * transfer (i.e., transferred synchronously).
 */
void osc_page_submit(const struct lu_env *env, struct osc_page *opg,
                     enum cl_req_type crt, int brw_flags)
{
        struct osc_async_page *oap = &opg->ops_oap;
        struct osc_object     *obj = oap->oap_obj;

        LINVRNT(osc_page_protected(env, opg,
                                   crt == CRT_WRITE ? CLM_WRITE : CLM_READ, 1));

        LASSERTF(oap->oap_magic == OAP_MAGIC, "Bad oap magic: oap %p, "
                 "magic 0x%x\n", oap, oap->oap_magic);
        LASSERT(oap->oap_async_flags & ASYNC_READY);
        LASSERT(oap->oap_async_flags & ASYNC_COUNT_STABLE);

        oap->oap_cmd       = crt == CRT_WRITE ? OBD_BRW_WRITE : OBD_BRW_READ;
        oap->oap_page_off  = opg->ops_from;
        oap->oap_count     = opg->ops_to - opg->ops_from;
        oap->oap_brw_flags = OBD_BRW_SYNC | brw_flags;

        if (!client_is_remote(osc_export(obj)) &&
            cfs_capable(CFS_CAP_SYS_RESOURCE)) {
                oap->oap_brw_flags |= OBD_BRW_NOQUOTA;
                oap->oap_cmd |= OBD_BRW_NOQUOTA;
        }

        opg->ops_submit_time = cfs_time_current();
        osc_page_transfer_get(opg, "transfer\0imm");
        osc_page_transfer_add(env, opg, crt);
}

/** @} osc */