/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, Whamcloud, Inc.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * Implementation of cl_page for OSC layer.
 *
 *   Author: Nikita Danilov <nikita.danilov@sun.com>
 */

#define DEBUG_SUBSYSTEM S_OSC

#include "osc_cl_internal.h"

/** \addtogroup osc
 *  @{
 */

/*
 * osc_page_protected() is commented out because it may sleep while
 * client_obd_list_lock is held:
 * client_obd_list_lock -> osc_ap_completion -> osc_completion ->
 *   -> osc_page_protected -> osc_page_is_dlocked -> osc_match_base
 *   -> ldlm_lock_match -> sptlrpc_import_check_ctx -> sleep.
 */
#if 0
static int osc_page_is_dlocked(const struct lu_env *env,
                               const struct osc_page *opg,
                               enum cl_lock_mode mode, int pending, int unref)
{
        struct cl_page         *page;
        struct osc_object      *obj;
        struct osc_thread_info *info;
        struct ldlm_res_id     *resname;
        struct lustre_handle   *lockh;
        ldlm_policy_data_t     *policy;
        ldlm_mode_t             dlmmode;
        int                     flags;

        cfs_might_sleep();

        info = osc_env_info(env);
        resname = &info->oti_resname;
        policy = &info->oti_policy;
        lockh = &info->oti_handle;
        page = opg->ops_cl.cpl_page;
        obj = cl2osc(opg->ops_cl.cpl_obj);

        flags = LDLM_FL_TEST_LOCK | LDLM_FL_BLOCK_GRANTED;
        if (pending)
                flags |= LDLM_FL_CBPENDING;

        dlmmode = osc_cl_lock2ldlm(mode) | LCK_PW;
        osc_lock_build_res(env, obj, resname);
        osc_index2policy(policy, page->cp_obj, page->cp_index, page->cp_index);
        return osc_match_base(osc_export(obj), resname, LDLM_EXTENT, policy,
                              dlmmode, &flags, NULL, lockh, unref);
}

/**
 * Checks an invariant that a page in the cache is covered by a lock, as
 * needed.
 */
static int osc_page_protected(const struct lu_env *env,
                              const struct osc_page *opg,
                              enum cl_lock_mode mode, int unref)
{
        struct cl_object_header *hdr;
        struct cl_lock          *scan;
        struct cl_page          *page;
        struct cl_lock_descr    *descr;
        int result;

        LINVRNT(!opg->ops_temp);

        page = opg->ops_cl.cpl_page;
        if (page->cp_owner != NULL &&
            cl_io_top(page->cp_owner)->ci_lockreq == CILR_NEVER)
                /*
                 * If IO is done without locks (liblustre, or lloop), lock is
                 * not required.
                 */
                result = 1;
        else
                /* otherwise check for a DLM lock */
                result = osc_page_is_dlocked(env, opg, mode, 1, unref);
        if (result == 0) {
                /* maybe this page is a part of a lockless io? */
                hdr = cl_object_header(opg->ops_cl.cpl_obj);
                descr = &osc_env_info(env)->oti_descr;
                descr->cld_mode = mode;
                descr->cld_start = page->cp_index;
                descr->cld_end   = page->cp_index;
                cfs_spin_lock(&hdr->coh_lock_guard);
                cfs_list_for_each_entry(scan, &hdr->coh_locks, cll_linkage) {
                        /*
                         * Lock-less sub-lock has to be either in HELD state
                         * (when io is actively going on), or in CACHED state,
                         * when top-lock is being unlocked:
                         * cl_io_unlock()->cl_unuse()->...->lov_lock_unuse().
                         */
                        if ((scan->cll_state == CLS_HELD ||
                             scan->cll_state == CLS_CACHED) &&
                            cl_lock_ext_match(&scan->cll_descr, descr)) {
                                struct osc_lock *olck;

                                olck = osc_lock_at(scan);
                                result = osc_lock_is_lockless(olck);
                                break;
                        }
                }
                cfs_spin_unlock(&hdr->coh_lock_guard);
        }
        return result;
}
#else
static int osc_page_protected(const struct lu_env *env,
                              const struct osc_page *opg,
                              enum cl_lock_mode mode, int unref)
{
        return 1;
}
#endif

/*****************************************************************************
 *
 * Page operations.
 *
 */
static void osc_page_fini(const struct lu_env *env,
                          struct cl_page_slice *slice)
{
        struct osc_page *opg = cl2osc_page(slice);
        CDEBUG(D_TRACE, "%p\n", opg);
        LASSERT(opg->ops_lock == NULL);
        OBD_SLAB_FREE_PTR(opg, osc_page_kmem);
}

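/**
 * Takes a transfer reference on the top page: an extra cl_page reference,
 * tracked under \a label, keeps the page alive while a transfer against it
 * may be in flight. Released by osc_page_transfer_put().
 */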
static void osc_page_transfer_get(struct osc_page *opg, const char *label)
{
        struct cl_page *page = cl_page_top(opg->ops_cl.cpl_page);

        LASSERT(!opg->ops_transfer_pinned);
        cl_page_get(page);
        lu_ref_add_atomic(&page->cp_reference, label, page);
        opg->ops_transfer_pinned = 1;
}

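/**
 * Releases the transfer reference taken by osc_page_transfer_get(), if one
 * is still held; a no-op otherwise.
 */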
static void osc_page_transfer_put(const struct lu_env *env,
                                  struct osc_page *opg)
{
        struct cl_page *page = cl_page_top(opg->ops_cl.cpl_page);

        if (opg->ops_transfer_pinned) {
                lu_ref_del(&page->cp_reference, "transfer", page);
                opg->ops_transfer_pinned = 0;
                cl_page_put(env, page);
        }
}

/**
 * Called once for every page when it is submitted for a transfer, either
 * opportunistically (osc_page_cache_add()) or immediately
 * (osc_io_submit_page()).
 */
static void osc_page_transfer_add(const struct lu_env *env,
                                  struct osc_page *opg, enum cl_req_type crt)
{
        struct osc_object *obj;

        obj = cl2osc(opg->ops_cl.cpl_obj);
        cfs_spin_lock(&obj->oo_seatbelt);
        cfs_list_add(&opg->ops_inflight, &obj->oo_inflight[crt]);
        opg->ops_submitter = cfs_current();
        cfs_spin_unlock(&obj->oo_seatbelt);
}

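/**
 * Queues a page for opportunistic (write-back) transfer by handing it to
 * osc_queue_async_io(). On success the page is added to the object's
 * in-flight list; on failure the transfer reference is dropped again.
 */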
static int osc_page_cache_add(const struct lu_env *env,
                              const struct cl_page_slice *slice,
                              struct cl_io *unused)
{
        struct osc_page   *opg = cl2osc_page(slice);
        struct osc_object *obj = cl2osc(opg->ops_cl.cpl_obj);
        int result;
        /* All cacheable IO is async-capable */
        int brw_flags = OBD_BRW_ASYNC;
        int noquota = 0;

        LINVRNT(osc_page_protected(env, opg, CLM_WRITE, 0));
        ENTRY;

        /* Set the OBD_BRW_SRVLOCK before the page is queued. */
        brw_flags |= opg->ops_srvlock ? OBD_BRW_SRVLOCK : 0;
        if (!client_is_remote(osc_export(obj)) &&
            cfs_capable(CFS_CAP_SYS_RESOURCE)) {
                brw_flags |= OBD_BRW_NOQUOTA;
                noquota = OBD_BRW_NOQUOTA;
        }

        osc_page_transfer_get(opg, "transfer\0cache");
        result = osc_queue_async_io(env, osc_export(obj), NULL, obj->oo_oinfo,
                                    &opg->ops_oap, OBD_BRW_WRITE | noquota,
                                    opg->ops_from, opg->ops_to - opg->ops_from,
                                    brw_flags, 0);
        if (result != 0)
                osc_page_transfer_put(env, opg);
        else
                osc_page_transfer_add(env, opg, CRT_WRITE);
        RETURN(result);
}

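/**
 * Builds an LDLM extent policy covering the byte range spanned by page
 * indices [\a start, \a end] inclusive. For example, assuming 4096-byte
 * pages, start = 0 and end = 0 yield the byte extent [0, 4095].
 */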
void osc_index2policy(ldlm_policy_data_t *policy, const struct cl_object *obj,
                      pgoff_t start, pgoff_t end)
{
        memset(policy, 0, sizeof *policy);
        policy->l_extent.start = cl_offset(obj, start);
        policy->l_extent.end   = cl_offset(obj, end + 1) - 1;
}

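/**
 * Pins \a lock for the duration of a page operation by taking an osc_lock
 * page reference. On success caches the lock in opg->ops_lock and returns
 * 0; returns 1 (dropping the cl_lock reference) if the lock is already on
 * its way out, i.e. ols_pageref went non-positive.
 */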
static int osc_page_addref_lock(const struct lu_env *env,
                                struct osc_page *opg,
                                struct cl_lock *lock)
{
        struct osc_lock *olock;
        int              rc;

        LASSERT(opg->ops_lock == NULL);

        olock = osc_lock_at(lock);
        if (cfs_atomic_inc_return(&olock->ols_pageref) <= 0) {
                cfs_atomic_dec(&olock->ols_pageref);
                cl_lock_put(env, lock);
                rc = 1;
        } else {
                opg->ops_lock = lock;
                rc = 0;
        }
        return rc;
}

static void osc_page_putref_lock(const struct lu_env *env,
                                 struct osc_page *opg)
{
        struct cl_lock  *lock = opg->ops_lock;
        struct osc_lock *olock;

        LASSERT(lock != NULL);
        olock = osc_lock_at(lock);

        cfs_atomic_dec(&olock->ols_pageref);
        opg->ops_lock = NULL;

        /*
         * Note: usually this is not the last reference on the lock, but
         * even if it is, all cl_lock_put() does in that case is, at most,
         * free some memory, so it is safe for the caller to be holding
         * spinlocks.
         */
        LASSERT(cfs_atomic_read(&lock->cll_ref) > 1 || olock->ols_hold == 0);
        cl_lock_put(env, lock);
}

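/**
 * Implements cl_page_operations::cpo_is_under_lock(). Returns -EBUSY if
 * the page is covered by a lock (which is then pinned through
 * osc_page_addref_lock() until disown or read completion), and -ENODATA
 * otherwise.
 */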
static int osc_page_is_under_lock(const struct lu_env *env,
                                  const struct cl_page_slice *slice,
                                  struct cl_io *unused)
{
        struct cl_lock *lock;
        int             result;

        ENTRY;
        lock = cl_lock_at_page(env, slice->cpl_obj, slice->cpl_page,
                               NULL, 1, 0);
        if (lock != NULL &&
            osc_page_addref_lock(env, cl2osc_page(slice), lock) == 0)
                result = -EBUSY;
        else
                result = -ENODATA;
        RETURN(result);
}

static void osc_page_disown(const struct lu_env *env,
                            const struct cl_page_slice *slice,
                            struct cl_io *io)
{
        struct osc_page *opg = cl2osc_page(slice);

        if (unlikely(opg->ops_lock))
                osc_page_putref_lock(env, opg);
}

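/**
 * Drops the lock pin taken in osc_page_is_under_lock() once a read against
 * the page has completed.
 */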
static void osc_page_completion_read(const struct lu_env *env,
                                     const struct cl_page_slice *slice,
                                     int ioret)
{
        struct osc_page *opg = cl2osc_page(slice);

        if (likely(opg->ops_lock))
                osc_page_putref_lock(env, opg);
}

static int osc_page_fail(const struct lu_env *env,
                         const struct cl_page_slice *slice,
                         struct cl_io *unused)
{
        /*
         * Adding a page to the cache for read is not supported; this
         * method must never be called (see osc_page_ops).
         */
        LBUG();
        return 0;
}

static const char *osc_list(cfs_list_t *head)
{
        return cfs_list_empty(head) ? "-" : "+";
}

static inline cfs_time_t osc_submit_duration(struct osc_page *opg)
{
        if (opg->ops_submit_time == 0)
                return 0;

        return (cfs_time_current() - opg->ops_submit_time);
}

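/**
 * Implements cl_page_operations::cpo_print(). The numbered groups in the
 * output correspond to: 1) osc_async_page command and queue membership,
 * 2) transfer offsets, flags and back-pointers, 3) osc_page submission
 * state, 4) per-client RPC and cache state, and 5) per-object (lov_oinfo)
 * pending queues.
 */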
static int osc_page_print(const struct lu_env *env,
                          const struct cl_page_slice *slice,
                          void *cookie, lu_printer_t printer)
{
        struct osc_page       *opg = cl2osc_page(slice);
        struct osc_async_page *oap = &opg->ops_oap;
        struct osc_object     *obj = cl2osc(slice->cpl_obj);
        struct client_obd     *cli = &osc_export(obj)->exp_obd->u.cli;
        struct lov_oinfo      *loi = obj->oo_oinfo;

        return (*printer)(env, cookie, LUSTRE_OSC_NAME"-page@%p: "
                          "1< %#x %d %u %s %s %s > "
                          "2< "LPU64" %u %u %#x %#x | %p %p %p %p %p > "
                          "3< %s %p %d %lu %d > "
                          "4< %d %d %d %lu %s | %s %s %s %s > "
                          "5< %s %s %s %s | %d %s %s | %d %s %s>\n",
                          opg,
                          /* 1 */
                          oap->oap_magic, oap->oap_cmd,
                          oap->oap_interrupted,
                          osc_list(&oap->oap_pending_item),
                          osc_list(&oap->oap_urgent_item),
                          osc_list(&oap->oap_rpc_item),
                          /* 2 */
                          oap->oap_obj_off, oap->oap_page_off, oap->oap_count,
                          oap->oap_async_flags, oap->oap_brw_flags,
                          oap->oap_request,
                          oap->oap_cli, oap->oap_loi, oap->oap_caller_ops,
                          oap->oap_caller_data,
                          /* 3 */
                          osc_list(&opg->ops_inflight),
                          opg->ops_submitter, opg->ops_transfer_pinned,
                          osc_submit_duration(opg), opg->ops_srvlock,
                          /* 4 */
                          cli->cl_r_in_flight, cli->cl_w_in_flight,
                          cli->cl_max_rpcs_in_flight,
                          cli->cl_avail_grant,
                          osc_list(&cli->cl_cache_waiters),
                          osc_list(&cli->cl_loi_ready_list),
                          osc_list(&cli->cl_loi_hp_ready_list),
                          osc_list(&cli->cl_loi_write_list),
                          osc_list(&cli->cl_loi_read_list),
                          /* 5 */
                          osc_list(&loi->loi_ready_item),
                          osc_list(&loi->loi_hp_ready_item),
                          osc_list(&loi->loi_write_item),
                          osc_list(&loi->loi_read_item),
                          loi->loi_read_lop.lop_num_pending,
                          osc_list(&loi->loi_read_lop.lop_pending),
                          osc_list(&loi->loi_read_lop.lop_urgent),
                          loi->loi_write_lop.lop_num_pending,
                          osc_list(&loi->loi_write_lop.lop_pending),
                          osc_list(&loi->loi_write_lop.lop_urgent));
}

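/**
 * Implements cl_page_operations::cpo_delete(). Called when the page is
 * removed from the cache: tears down the underlying async page and unlinks
 * the page from the object's in-flight list.
 */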
static void osc_page_delete(const struct lu_env *env,
                            const struct cl_page_slice *slice)
{
        struct osc_page       *opg = cl2osc_page(slice);
        struct osc_object     *obj = cl2osc(opg->ops_cl.cpl_obj);
        struct osc_async_page *oap = &opg->ops_oap;
        int rc;

        LINVRNT(opg->ops_temp || osc_page_protected(env, opg, CLM_READ, 1));

        ENTRY;
        CDEBUG(D_TRACE, "%p\n", opg);
        osc_page_transfer_put(env, opg);
        rc = osc_teardown_async_page(osc_export(obj), NULL, obj->oo_oinfo, oap);
        if (rc) {
                CL_PAGE_DEBUG(D_ERROR, env, cl_page_top(slice->cpl_page),
                              "Tearing down the async page failed: %d\n", rc);
                LASSERT(0);
        }
        cfs_spin_lock(&obj->oo_seatbelt);
        cfs_list_del_init(&opg->ops_inflight);
        cfs_spin_unlock(&obj->oo_seatbelt);
        EXIT;
}

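/**
 * Implements cl_page_operations::cpo_clip(). Records [\a from, \a to) as
 * the byte range within the page that holds valid data, and marks the
 * transfer count as stable so that it is not recomputed at RPC time.
 */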
void osc_page_clip(const struct lu_env *env, const struct cl_page_slice *slice,
                   int from, int to)
{
        struct osc_page       *opg = cl2osc_page(slice);
        struct osc_async_page *oap = &opg->ops_oap;

        LINVRNT(osc_page_protected(env, opg, CLM_READ, 0));

        opg->ops_from = from;
        opg->ops_to   = to;
        cfs_spin_lock(&oap->oap_lock);
        oap->oap_async_flags |= ASYNC_COUNT_STABLE;
        cfs_spin_unlock(&oap->oap_lock);
}

static int osc_page_cancel(const struct lu_env *env,
                           const struct cl_page_slice *slice)
{
        struct osc_page *opg       = cl2osc_page(slice);
        struct osc_async_page *oap = &opg->ops_oap;
        int rc = 0;

        LINVRNT(osc_page_protected(env, opg, CLM_READ, 0));

        client_obd_list_lock(&oap->oap_cli->cl_loi_list_lock);
        /* Check whether the transfer for this page has completed, or was
         * never queued at all. */
        if (opg->ops_transfer_pinned)
                /* FIXME: may not be interrupted.. */
                rc = osc_oap_interrupted(env, oap);
        LASSERT(ergo(rc == 0, opg->ops_transfer_pinned == 0));
        client_obd_list_unlock(&oap->oap_cli->cl_loi_list_lock);
        return rc;
}

static const struct cl_page_operations osc_page_ops = {
        .cpo_fini          = osc_page_fini,
        .cpo_print         = osc_page_print,
        .cpo_delete        = osc_page_delete,
        .cpo_is_under_lock = osc_page_is_under_lock,
        .cpo_disown        = osc_page_disown,
        .io = {
                [CRT_READ] = {
                        .cpo_cache_add  = osc_page_fail,
                        .cpo_completion = osc_page_completion_read
                },
                [CRT_WRITE] = {
                        .cpo_cache_add  = osc_page_cache_add
                }
        },
        .cpo_clip           = osc_page_clip,
        .cpo_cancel         = osc_page_cancel
};

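/**
 * Implements obd_async_page_ops::ap_make_ready(). Called for a write when
 * the page is picked up for an RPC: moves the page into transfer state via
 * cl_page_make_ready() and stamps the submission time on success.
 */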
static int osc_make_ready(const struct lu_env *env, void *data, int cmd)
{
        struct osc_page *opg  = data;
        struct cl_page  *page = cl_page_top(opg->ops_cl.cpl_page);
        int result;

        LASSERT(cmd == OBD_BRW_WRITE); /* no cached reads */
        LINVRNT(osc_page_protected(env, opg, CLM_WRITE, 1));

        ENTRY;
        result = cl_page_make_ready(env, page, CRT_WRITE);
        if (result == 0)
                opg->ops_submit_time = cfs_time_current();
        RETURN(result);
}

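/**
 * Implements obd_async_page_ops::ap_refresh_count(). Recomputes, at RPC
 * time, how many bytes of a dirty page should actually be written, based
 * on the known minimal size (kms) of the file. For example, assuming
 * 4096-byte pages and kms = 10000, page index 2 (bytes 8192..12287)
 * straddles kms, so only 10000 % 4096 = 1808 bytes are written; a page
 * wholly beyond kms was truncated and transfers 0 bytes.
 */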
static int osc_refresh_count(const struct lu_env *env, void *data, int cmd)
{
        struct cl_page   *page;
        struct osc_page  *osc = data;
        struct cl_object *obj;
        struct cl_attr   *attr = &osc_env_info(env)->oti_attr;

        int result;
        loff_t kms;

        LINVRNT(osc_page_protected(env, osc, CLM_READ, 1));

        /* readpage queues with _COUNT_STABLE, shouldn't get here. */
        LASSERT(!(cmd & OBD_BRW_READ));
        LASSERT(osc != NULL);
        page = osc->ops_cl.cpl_page;
        obj = osc->ops_cl.cpl_obj;

        cl_object_attr_lock(obj);
        result = cl_object_attr_get(env, obj, attr);
        cl_object_attr_unlock(obj);
        if (result < 0)
                return result;
        kms = attr->cat_kms;
        if (cl_offset(obj, page->cp_index) >= kms)
                /* catch race with truncate */
                return 0;
        else if (cl_offset(obj, page->cp_index + 1) > kms)
                /* catch sub-page write at end of file */
                return kms % CFS_PAGE_SIZE;
        else
                return CFS_PAGE_SIZE;
}

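/**
 * Implements obd_async_page_ops::ap_completion(). Called once the transfer
 * of the page has finished (or failed, with \a rc != 0): detaches the page
 * from its request, removes it from the object's in-flight list, accounts
 * lockless I/O statistics, and drops the transfer reference taken at
 * submission time.
 */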
static int osc_completion(const struct lu_env *env,
                          void *data, int cmd, struct obdo *oa, int rc)
{
        struct osc_page       *opg  = data;
        struct osc_async_page *oap  = &opg->ops_oap;
        struct cl_page        *page = cl_page_top(opg->ops_cl.cpl_page);
        struct osc_object     *obj  = cl2osc(opg->ops_cl.cpl_obj);
        enum cl_req_type crt;
        int srvlock;

        LINVRNT(osc_page_protected(env, opg, CLM_READ, 1));

        ENTRY;

        cmd &= ~OBD_BRW_NOQUOTA;
        LASSERT(equi(page->cp_state == CPS_PAGEIN,  cmd == OBD_BRW_READ));
        LASSERT(equi(page->cp_state == CPS_PAGEOUT, cmd == OBD_BRW_WRITE));
        LASSERT(opg->ops_transfer_pinned);

        /*
         * page->cp_req can be NULL if io submission failed before
         * cl_req was allocated.
         */
        if (page->cp_req != NULL)
                cl_req_page_done(env, page);
        LASSERT(page->cp_req == NULL);

        /* The transfer for this page is done; clear the async flags. */
        cfs_spin_lock(&oap->oap_lock);
        oap->oap_async_flags = 0;
        cfs_spin_unlock(&oap->oap_lock);

        crt = cmd == OBD_BRW_READ ? CRT_READ : CRT_WRITE;
        /* Clear opg->ops_transfer_pinned before VM lock is released. */
        opg->ops_transfer_pinned = 0;

        cfs_spin_lock(&obj->oo_seatbelt);
        LASSERT(opg->ops_submitter != NULL);
        LASSERT(!cfs_list_empty(&opg->ops_inflight));
        cfs_list_del_init(&opg->ops_inflight);
        cfs_spin_unlock(&obj->oo_seatbelt);

        opg->ops_submit_time = 0;
        srvlock = oap->oap_brw_flags & OBD_BRW_SRVLOCK;

        cl_page_completion(env, page, crt, rc);

        /* statistic */
        if (rc == 0 && srvlock) {
                struct lu_device *ld    = opg->ops_cl.cpl_obj->co_lu.lo_dev;
                struct osc_stats *stats = &lu2osc_dev(ld)->od_stats;
                int bytes = oap->oap_count;

                if (crt == CRT_READ)
                        stats->os_lockless_reads += bytes;
                else
                        stats->os_lockless_writes += bytes;
        }

        /*
         * This has to be the last operation with the page, as locks are
         * released in cl_page_completion() and nothing except for the
         * reference counter protects page from concurrent reclaim.
         */
        lu_ref_del(&page->cp_reference, "transfer", page);
        /*
         * As page->cp_obj is pinned by a reference from page->cp_req, it is
         * safe to call cl_page_put() without risking object destruction in a
         * non-blocking context.
         */
        cl_page_put(env, page);
        RETURN(0);
}

static const struct obd_async_page_ops osc_async_page_ops = {
        .ap_make_ready    = osc_make_ready,
        .ap_refresh_count = osc_refresh_count,
        .ap_completion    = osc_completion
};

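/**
 * Constructor for the osc layer of \a page: allocates an osc_page slice,
 * sets up the underlying osc_async_page and links the slice into \a page.
 * Returns NULL (that is, ERR_PTR(0)) on success, or ERR_PTR(-ENOMEM) if
 * allocation fails.
 */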
struct cl_page *osc_page_init(const struct lu_env *env,
                              struct cl_object *obj,
                              struct cl_page *page, cfs_page_t *vmpage)
{
        struct osc_object *osc = cl2osc(obj);
        struct osc_page   *opg;
        int result;

        OBD_SLAB_ALLOC_PTR_GFP(opg, osc_page_kmem, CFS_ALLOC_IO);
        if (opg != NULL) {
                void *oap = &opg->ops_oap;

                opg->ops_from = 0;
                opg->ops_to   = CFS_PAGE_SIZE;

                result = osc_prep_async_page(osc_export(osc),
                                             NULL, osc->oo_oinfo, vmpage,
                                             cl_offset(obj, page->cp_index),
                                             &osc_async_page_ops,
                                             opg, (void **)&oap, 1, NULL);
                if (result == 0) {
                        struct osc_io *oio = osc_env_io(env);
                        opg->ops_srvlock = osc_io_srvlock(oio);
                        cl_page_slice_add(page, &opg->ops_cl, obj,
                                          &osc_page_ops);
                }
                /*
                 * Cannot assert osc_page_protected() here as read-ahead
                 * creates temporary pages outside of a lock.
                 */
#ifdef INVARIANT_CHECK
                opg->ops_temp = !osc_page_protected(env, opg, CLM_READ, 1);
#endif
                CFS_INIT_LIST_HEAD(&opg->ops_inflight);
        } else
                result = -ENOMEM;
        return ERR_PTR(result);
}

/**
 * Helper function called by osc_io_submit() for every page in an immediate
 * transfer (i.e., transferred synchronously).
 */
void osc_io_submit_page(const struct lu_env *env,
                        struct osc_io *oio, struct osc_page *opg,
                        enum cl_req_type crt)
{
        struct osc_async_page *oap = &opg->ops_oap;
        struct client_obd     *cli = oap->oap_cli;
        int flags = 0;

        LINVRNT(osc_page_protected(env, opg,
                                   crt == CRT_WRITE ? CLM_WRITE : CLM_READ, 1));

        oap->oap_page_off   = opg->ops_from;
        oap->oap_count      = opg->ops_to - opg->ops_from;
        /* Give a hint to OST that requests are coming from kswapd - bug19529 */
        if (cfs_memory_pressure_get())
                oap->oap_brw_flags |= OBD_BRW_MEMALLOC;
        oap->oap_brw_flags |= OBD_BRW_SYNC;
        if (osc_io_srvlock(oio))
                oap->oap_brw_flags |= OBD_BRW_SRVLOCK;

        oap->oap_cmd = crt == CRT_WRITE ? OBD_BRW_WRITE : OBD_BRW_READ;
        if (!client_is_remote(osc_export(cl2osc(opg->ops_cl.cpl_obj))) &&
            cfs_capable(CFS_CAP_SYS_RESOURCE)) {
                oap->oap_brw_flags |= OBD_BRW_NOQUOTA;
                oap->oap_cmd |= OBD_BRW_NOQUOTA;
        }

        if (oap->oap_cmd & OBD_BRW_READ)
                flags = ASYNC_COUNT_STABLE;
        else if (!(oap->oap_brw_page.flag & OBD_BRW_FROM_GRANT))
                osc_enter_cache_try(env, cli, oap->oap_loi, oap, 1);

        cfs_spin_lock(&oap->oap_lock);
        oap->oap_async_flags |= OSC_FLAGS | flags;
        cfs_spin_unlock(&oap->oap_lock);

        osc_oap_to_pending(oap);
        osc_page_transfer_get(opg, "transfer\0imm");
        osc_page_transfer_add(env, opg, crt);
}

/** @} osc */