/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011 Whamcloud, Inc.
 *
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * Implementation of cl_page for OSC layer.
 *
 *   Author: Nikita Danilov <nikita.danilov@sun.com>
 */

#define DEBUG_SUBSYSTEM S_OSC

#include "osc_cl_internal.h"

/** \addtogroup osc
 *  @{
 */

/*
 * Comment out osc_page_protected because it may sleep inside the
 * client_obd_list_lock:
 * client_obd_list_lock -> osc_ap_completion -> osc_completion ->
 *   -> osc_page_protected -> osc_page_is_dlocked -> osc_match_base
 *   -> ldlm_lock_match -> sptlrpc_import_check_ctx -> sleep.
 */
#if 0
static int osc_page_is_dlocked(const struct lu_env *env,
                               const struct osc_page *opg,
                               enum cl_lock_mode mode, int pending, int unref)
{
        struct cl_page         *page;
        struct osc_object      *obj;
        struct osc_thread_info *info;
        struct ldlm_res_id     *resname;
        struct lustre_handle   *lockh;
        ldlm_policy_data_t     *policy;
        ldlm_mode_t             dlmmode;
        int                     flags;

        cfs_might_sleep();

        info = osc_env_info(env);
        resname = &info->oti_resname;
        policy = &info->oti_policy;
        lockh = &info->oti_handle;
        page = opg->ops_cl.cpl_page;
        obj = cl2osc(opg->ops_cl.cpl_obj);

        flags = LDLM_FL_TEST_LOCK | LDLM_FL_BLOCK_GRANTED;
        if (pending)
                flags |= LDLM_FL_CBPENDING;

        dlmmode = osc_cl_lock2ldlm(mode) | LCK_PW;
        osc_lock_build_res(env, obj, resname);
        osc_index2policy(policy, page->cp_obj, page->cp_index, page->cp_index);
        return osc_match_base(osc_export(obj), resname, LDLM_EXTENT, policy,
                              dlmmode, &flags, NULL, lockh, unref);
}

/**
 * Checks an invariant that a page in the cache is covered by a lock, as
 * needed.
 */
static int osc_page_protected(const struct lu_env *env,
                              const struct osc_page *opg,
                              enum cl_lock_mode mode, int unref)
{
        struct cl_object_header *hdr;
        struct cl_lock          *scan;
        struct cl_page          *page;
        struct cl_lock_descr    *descr;
        int result;

        LINVRNT(!opg->ops_temp);

        page = opg->ops_cl.cpl_page;
        if (page->cp_owner != NULL &&
            cl_io_top(page->cp_owner)->ci_lockreq == CILR_NEVER)
                /*
                 * If IO is done without locks (liblustre, or lloop), lock is
                 * not required.
                 */
                result = 1;
        else
                /* otherwise check for a DLM lock */
                result = osc_page_is_dlocked(env, opg, mode, 1, unref);
        if (result == 0) {
                /* maybe this page is a part of a lockless io? */
                hdr = cl_object_header(opg->ops_cl.cpl_obj);
                descr = &osc_env_info(env)->oti_descr;
                descr->cld_mode = mode;
                descr->cld_start = page->cp_index;
                descr->cld_end   = page->cp_index;
                cfs_spin_lock(&hdr->coh_lock_guard);
                cfs_list_for_each_entry(scan, &hdr->coh_locks, cll_linkage) {
                        /*
                         * Lock-less sub-lock has to be either in HELD state
                         * (when io is actively going on), or in CACHED state,
                         * when top-lock is being unlocked:
                         * cl_io_unlock()->cl_unuse()->...->lov_lock_unuse().
                         */
                        if ((scan->cll_state == CLS_HELD ||
                             scan->cll_state == CLS_CACHED) &&
                            cl_lock_ext_match(&scan->cll_descr, descr)) {
                                struct osc_lock *olck;

                                olck = osc_lock_at(scan);
                                result = osc_lock_is_lockless(olck);
                                break;
                        }
                }
                cfs_spin_unlock(&hdr->coh_lock_guard);
        }
        return result;
}
#else
static int osc_page_protected(const struct lu_env *env,
                              const struct osc_page *opg,
                              enum cl_lock_mode mode, int unref)
{
        return 1;
}
#endif

/*****************************************************************************
 *
 * Page operations.
 *
 */
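/**
 * Implements cl_page_operations::cpo_fini(): releases the osc_page slice
 * back to its slab cache when the cl_page is destroyed.
 */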
static void osc_page_fini(const struct lu_env *env,
                          struct cl_page_slice *slice)
{
        struct osc_page *opg = cl2osc_page(slice);
        CDEBUG(D_TRACE, "%p\n", opg);
        LASSERT(opg->ops_lock == NULL);
        OBD_SLAB_FREE_PTR(opg, osc_page_kmem);
}

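/**
 * Pins the top page for a transfer: takes a cl_page reference, tagged with
 * \a label in the page's lu_ref history.
 */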
static void osc_page_transfer_get(struct osc_page *opg, const char *label)
{
        struct cl_page *page = cl_page_top(opg->ops_cl.cpl_page);

        LASSERT(!opg->ops_transfer_pinned);
        cl_page_get(page);
        lu_ref_add_atomic(&page->cp_reference, label, page);
        opg->ops_transfer_pinned = 1;
}

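/**
 * Drops the transfer reference taken by osc_page_transfer_get(), if the
 * page is still pinned.
 */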
static void osc_page_transfer_put(const struct lu_env *env,
                                  struct osc_page *opg)
{
        struct cl_page *page = cl_page_top(opg->ops_cl.cpl_page);

        if (opg->ops_transfer_pinned) {
                lu_ref_del(&page->cp_reference, "transfer", page);
                opg->ops_transfer_pinned = 0;
                cl_page_put(env, page);
        }
}

/**
 * This is called once for every page when it is submitted for a transfer
 * either opportunistic (osc_page_cache_add()), or immediate
 * (osc_page_submit()).
 */
static void osc_page_transfer_add(const struct lu_env *env,
                                  struct osc_page *opg, enum cl_req_type crt)
{
        struct osc_object *obj;

        obj = cl2osc(opg->ops_cl.cpl_obj);
        cfs_spin_lock(&obj->oo_seatbelt);
        cfs_list_add(&opg->ops_inflight, &obj->oo_inflight[crt]);
        opg->ops_submitter = cfs_current();
        cfs_spin_unlock(&obj->oo_seatbelt);
}

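/**
 * Implements cl_page_operations::cpo_cache_add() for writes: queues the
 * page for an opportunistic (asynchronous) write-out.
 */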
static int osc_page_cache_add(const struct lu_env *env,
                              const struct cl_page_slice *slice,
                              struct cl_io *unused)
{
        struct osc_page   *opg = cl2osc_page(slice);
        struct osc_object *obj = cl2osc(opg->ops_cl.cpl_obj);
        int result;
        /* All cacheable IO is async-capable */
        int brw_flags = OBD_BRW_ASYNC;
        int noquota = 0;

        LINVRNT(osc_page_protected(env, opg, CLM_WRITE, 0));
        ENTRY;

        /* Set the OBD_BRW_SRVLOCK before the page is queued. */
        brw_flags |= opg->ops_srvlock ? OBD_BRW_SRVLOCK : 0;
        if (!client_is_remote(osc_export(obj)) &&
            cfs_capable(CFS_CAP_SYS_RESOURCE)) {
                brw_flags |= OBD_BRW_NOQUOTA;
                noquota = OBD_BRW_NOQUOTA;
        }

        osc_page_transfer_get(opg, "transfer\0cache");
        result = osc_queue_async_io(env, osc_export(obj), NULL, obj->oo_oinfo,
                                    &opg->ops_oap, OBD_BRW_WRITE | noquota,
                                    opg->ops_from, opg->ops_to - opg->ops_from,
                                    brw_flags, 0);
        if (result != 0)
                osc_page_transfer_put(env, opg);
        else
                osc_page_transfer_add(env, opg, CRT_WRITE);
        RETURN(result);
}

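/**
 * Converts a [\a start, \a end] page-index range on \a obj into a
 * byte-granular ldlm extent \a policy.
 */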
void osc_index2policy(ldlm_policy_data_t *policy, const struct cl_object *obj,
                      pgoff_t start, pgoff_t end)
{
        memset(policy, 0, sizeof *policy);
        policy->l_extent.start = cl_offset(obj, start);
        policy->l_extent.end   = cl_offset(obj, end + 1) - 1;
}

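/**
 * Pins \a lock on behalf of \a opg. Returns 0 and records the lock in
 * opg->ops_lock if a page reference was obtained; returns 1 and drops the
 * lock reference if ols_pageref was already non-positive.
 */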
static int osc_page_addref_lock(const struct lu_env *env,
                                struct osc_page *opg,
                                struct cl_lock *lock)
{
        struct osc_lock *olock;
        int              rc;

        LASSERT(opg->ops_lock == NULL);

        olock = osc_lock_at(lock);
        if (cfs_atomic_inc_return(&olock->ols_pageref) <= 0) {
                cfs_atomic_dec(&olock->ols_pageref);
                cl_lock_put(env, lock);
                rc = 1;
        } else {
                opg->ops_lock = lock;
                rc = 0;
        }
        return rc;
}

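/**
 * Releases the lock reference taken in osc_page_addref_lock() and clears
 * opg->ops_lock.
 */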
static void osc_page_putref_lock(const struct lu_env *env,
                                 struct osc_page *opg)
{
        struct cl_lock  *lock = opg->ops_lock;
        struct osc_lock *olock;

        LASSERT(lock != NULL);
        olock = osc_lock_at(lock);

        cfs_atomic_dec(&olock->ols_pageref);
        opg->ops_lock = NULL;

        /*
         * Note: usually this won't be the last reference on the lock, but
         * if it is, then all cl_lock_put() does is free some memory, so it
         * is safe for the caller to be holding spinlocks.
         */
        LASSERT(cfs_atomic_read(&lock->cll_ref) > 1 || olock->ols_hold == 0);
        cl_lock_put(env, lock);
}

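/**
 * Implements cl_page_operations::cpo_is_under_lock(). Returns -EBUSY if a
 * lock covering the page was found (and pinned via osc_page_addref_lock()),
 * -ENODATA otherwise.
 */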
static int osc_page_is_under_lock(const struct lu_env *env,
                                  const struct cl_page_slice *slice,
                                  struct cl_io *unused)
{
        struct cl_lock *lock;
        int             result;

        ENTRY;
        lock = cl_lock_at_page(env, slice->cpl_obj, slice->cpl_page,
                               NULL, 1, 0);
        if (lock != NULL &&
            osc_page_addref_lock(env, cl2osc_page(slice), lock) == 0)
                result = -EBUSY;
        else
                result = -ENODATA;
        RETURN(result);
}

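/**
 * Implements cl_page_operations::cpo_disown(): drops the lock pinned by
 * osc_page_is_under_lock(), if any.
 */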
static void osc_page_disown(const struct lu_env *env,
                            const struct cl_page_slice *slice,
                            struct cl_io *io)
{
        struct osc_page *opg = cl2osc_page(slice);

        if (unlikely(opg->ops_lock))
                osc_page_putref_lock(env, opg);
}

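/**
 * Read completion handler: releases the lock pinned for the read, if any.
 */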
static void osc_page_completion_read(const struct lu_env *env,
                                     const struct cl_page_slice *slice,
                                     int ioret)
{
        struct osc_page *opg = cl2osc_page(slice);

        if (likely(opg->ops_lock))
                osc_page_putref_lock(env, opg);
}

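/**
 * cpo_cache_add() method for reads. Reads are never cached, so reaching
 * this is a bug.
 */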
static int osc_page_fail(const struct lu_env *env,
                         const struct cl_page_slice *slice,
                         struct cl_io *unused)
{
        /*
         * Cached read?
         */
        LBUG();
        return 0;
}

static const char *osc_list(cfs_list_t *head)
{
        return cfs_list_empty(head) ? "-" : "+";
}

static inline cfs_time_t osc_submit_duration(struct osc_page *opg)
{
        if (opg->ops_submit_time == 0)
                return 0;

        return (cfs_time_current() - opg->ops_submit_time);
}

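/**
 * Implements cl_page_operations::cpo_print(): dumps the state of the async
 * page, its client_obd and its lov_oinfo for debugging.
 */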
static int osc_page_print(const struct lu_env *env,
                          const struct cl_page_slice *slice,
                          void *cookie, lu_printer_t printer)
{
        struct osc_page       *opg = cl2osc_page(slice);
        struct osc_async_page *oap = &opg->ops_oap;
        struct osc_object     *obj = cl2osc(slice->cpl_obj);
        struct client_obd     *cli = &osc_export(obj)->exp_obd->u.cli;
        struct lov_oinfo      *loi = obj->oo_oinfo;

        return (*printer)(env, cookie, LUSTRE_OSC_NAME"-page@%p: "
                          "1< %#x %d %u %s %s %s > "
                          "2< "LPU64" %u %u %#x %#x | %p %p %p %p %p > "
                          "3< %s %p %d %lu %d > "
                          "4< %d %d %d %lu %s | %s %s %s %s > "
                          "5< %s %s %s %s | %d %s %s | %d %s %s>\n",
                          opg,
                          /* 1 */
                          oap->oap_magic, oap->oap_cmd,
                          oap->oap_interrupted,
                          osc_list(&oap->oap_pending_item),
                          osc_list(&oap->oap_urgent_item),
                          osc_list(&oap->oap_rpc_item),
                          /* 2 */
                          oap->oap_obj_off, oap->oap_page_off, oap->oap_count,
                          oap->oap_async_flags, oap->oap_brw_flags,
                          oap->oap_request,
                          oap->oap_cli, oap->oap_loi, oap->oap_caller_ops,
                          oap->oap_caller_data,
                          /* 3 */
                          osc_list(&opg->ops_inflight),
                          opg->ops_submitter, opg->ops_transfer_pinned,
                          osc_submit_duration(opg), opg->ops_srvlock,
                          /* 4 */
                          cli->cl_r_in_flight, cli->cl_w_in_flight,
                          cli->cl_max_rpcs_in_flight,
                          cli->cl_avail_grant,
                          osc_list(&cli->cl_cache_waiters),
                          osc_list(&cli->cl_loi_ready_list),
                          osc_list(&cli->cl_loi_hp_ready_list),
                          osc_list(&cli->cl_loi_write_list),
                          osc_list(&cli->cl_loi_read_list),
                          /* 5 */
                          osc_list(&loi->loi_ready_item),
                          osc_list(&loi->loi_hp_ready_item),
                          osc_list(&loi->loi_write_item),
                          osc_list(&loi->loi_read_item),
                          loi->loi_read_lop.lop_num_pending,
                          osc_list(&loi->loi_read_lop.lop_pending),
                          osc_list(&loi->loi_read_lop.lop_urgent),
                          loi->loi_write_lop.lop_num_pending,
                          osc_list(&loi->loi_write_lop.lop_pending),
                          osc_list(&loi->loi_write_lop.lop_urgent));
}

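/**
 * Implements cl_page_operations::cpo_delete(): tears down the async page
 * and removes it from the object's in-flight list.
 */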
static void osc_page_delete(const struct lu_env *env,
                            const struct cl_page_slice *slice)
{
        struct osc_page       *opg = cl2osc_page(slice);
        struct osc_object     *obj = cl2osc(opg->ops_cl.cpl_obj);
        struct osc_async_page *oap = &opg->ops_oap;
        int rc;

        LINVRNT(opg->ops_temp || osc_page_protected(env, opg, CLM_READ, 1));

        ENTRY;
        CDEBUG(D_TRACE, "%p\n", opg);
        osc_page_transfer_put(env, opg);
        rc = osc_teardown_async_page(osc_export(obj), NULL, obj->oo_oinfo, oap);
        if (rc) {
                CL_PAGE_DEBUG(D_ERROR, env, cl_page_top(slice->cpl_page),
                              "Trying to teardown failed: %d\n", rc);
                LASSERT(0);
        }
        cfs_spin_lock(&obj->oo_seatbelt);
        cfs_list_del_init(&opg->ops_inflight);
        cfs_spin_unlock(&obj->oo_seatbelt);
        EXIT;
}

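/**
 * Implements cl_page_operations::cpo_clip(): records the valid byte range
 * [\a from, \a to) of the page and marks the transfer count as stable.
 */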
void osc_page_clip(const struct lu_env *env, const struct cl_page_slice *slice,
                   int from, int to)
{
        struct osc_page       *opg = cl2osc_page(slice);
        struct osc_async_page *oap = &opg->ops_oap;

        LINVRNT(osc_page_protected(env, opg, CLM_READ, 0));

        opg->ops_from = from;
        opg->ops_to   = to;
        cfs_spin_lock(&oap->oap_lock);
        oap->oap_async_flags |= ASYNC_COUNT_STABLE;
        cfs_spin_unlock(&oap->oap_lock);
}

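/**
 * Implements cl_page_operations::cpo_cancel(): interrupts the transfer of
 * this page, if one is still pinned.
 */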
static int osc_page_cancel(const struct lu_env *env,
                           const struct cl_page_slice *slice)
{
        struct osc_page *opg       = cl2osc_page(slice);
        struct osc_async_page *oap = &opg->ops_oap;
        int rc = 0;

        LINVRNT(osc_page_protected(env, opg, CLM_READ, 0));

        client_obd_list_lock(&oap->oap_cli->cl_loi_list_lock);
        /* Check whether the transfer of this page has completed, or was
         * never queued in the first place. */
        if (opg->ops_transfer_pinned)
                /* FIXME: may not be interrupted.. */
                rc = osc_oap_interrupted(env, oap);
        LASSERT(ergo(rc == 0, opg->ops_transfer_pinned == 0));
        client_obd_list_unlock(&oap->oap_cli->cl_loi_list_lock);
        return rc;
}

static const struct cl_page_operations osc_page_ops = {
        .cpo_fini          = osc_page_fini,
        .cpo_print         = osc_page_print,
        .cpo_delete        = osc_page_delete,
        .cpo_is_under_lock = osc_page_is_under_lock,
        .cpo_disown        = osc_page_disown,
        .io = {
                [CRT_READ] = {
                        .cpo_cache_add  = osc_page_fail,
                        .cpo_completion = osc_page_completion_read
                },
                [CRT_WRITE] = {
                        .cpo_cache_add  = osc_page_cache_add
                }
        },
        .cpo_clip          = osc_page_clip,
        .cpo_cancel        = osc_page_cancel
};

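/**
 * Implements obd_async_page_ops::ap_make_ready(): prepares the top page
 * for write-out via cl_page_make_ready() and records the submission time.
 */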
static int osc_make_ready(const struct lu_env *env, void *data, int cmd)
{
        struct osc_page *opg  = data;
        struct cl_page  *page = cl_page_top(opg->ops_cl.cpl_page);
        int result;

        LASSERT(cmd == OBD_BRW_WRITE); /* no cached reads */
        LINVRNT(osc_page_protected(env, opg, CLM_WRITE, 1));

        ENTRY;
        result = cl_page_make_ready(env, page, CRT_WRITE);
        if (result == 0)
                opg->ops_submit_time = cfs_time_current();
        RETURN(result);
}

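/**
 * Implements obd_async_page_ops::ap_refresh_count(): recomputes the number
 * of bytes to write for this page from the known minimum size (kms),
 * guarding against a racing truncate and a sub-page write at end of file.
 */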
static int osc_refresh_count(const struct lu_env *env, void *data, int cmd)
{
        struct cl_page   *page;
        struct osc_page  *osc = data;
        struct cl_object *obj;
        struct cl_attr   *attr = &osc_env_info(env)->oti_attr;
        int result;
        loff_t kms;

        LINVRNT(osc_page_protected(env, osc, CLM_READ, 1));

        /* readpage queues with _COUNT_STABLE, shouldn't get here. */
        LASSERT(!(cmd & OBD_BRW_READ));
        LASSERT(osc != NULL);
        page = osc->ops_cl.cpl_page;
        obj = osc->ops_cl.cpl_obj;

        cl_object_attr_lock(obj);
        result = cl_object_attr_get(env, obj, attr);
        cl_object_attr_unlock(obj);
        if (result < 0)
                return result;
        kms = attr->cat_kms;
        if (cl_offset(obj, page->cp_index) >= kms)
                /* catch race with truncate */
                return 0;
        else if (cl_offset(obj, page->cp_index + 1) > kms)
                /* catch sub-page write at end of file */
                return kms % CFS_PAGE_SIZE;
        else
                return CFS_PAGE_SIZE;
}

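/**
 * Implements obd_async_page_ops::ap_completion(): called when the transfer
 * of this page finishes; completes the cl_page, updates lockless IO
 * statistics and drops the transfer reference.
 */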
static int osc_completion(const struct lu_env *env,
                          void *data, int cmd, struct obdo *oa, int rc)
{
        struct osc_page       *opg  = data;
        struct osc_async_page *oap  = &opg->ops_oap;
        struct cl_page        *page = cl_page_top(opg->ops_cl.cpl_page);
        struct osc_object     *obj  = cl2osc(opg->ops_cl.cpl_obj);
        enum cl_req_type crt;
        int srvlock;

        LINVRNT(osc_page_protected(env, opg, CLM_READ, 1));

        ENTRY;

        cmd &= ~OBD_BRW_NOQUOTA;
        LASSERT(equi(page->cp_state == CPS_PAGEIN,  cmd == OBD_BRW_READ));
        LASSERT(equi(page->cp_state == CPS_PAGEOUT, cmd == OBD_BRW_WRITE));
        LASSERT(opg->ops_transfer_pinned);

        /*
         * page->cp_req can be NULL if io submission failed before
         * cl_req was allocated.
         */
        if (page->cp_req != NULL)
                cl_req_page_done(env, page);
        LASSERT(page->cp_req == NULL);

        /* As the transfer for this page is done, clear the flags */
        cfs_spin_lock(&oap->oap_lock);
        oap->oap_async_flags = 0;
        cfs_spin_unlock(&oap->oap_lock);

        crt = cmd == OBD_BRW_READ ? CRT_READ : CRT_WRITE;
        /* Clear opg->ops_transfer_pinned before VM lock is released. */
        opg->ops_transfer_pinned = 0;

        cfs_spin_lock(&obj->oo_seatbelt);
        LASSERT(opg->ops_submitter != NULL);
        LASSERT(!cfs_list_empty(&opg->ops_inflight));
        cfs_list_del_init(&opg->ops_inflight);
        cfs_spin_unlock(&obj->oo_seatbelt);

        opg->ops_submit_time = 0;
        srvlock = oap->oap_brw_flags & OBD_BRW_SRVLOCK;

        cl_page_completion(env, page, crt, rc);

        /* statistics */
        if (rc == 0 && srvlock) {
                struct lu_device *ld    = opg->ops_cl.cpl_obj->co_lu.lo_dev;
                struct osc_stats *stats = &lu2osc_dev(ld)->od_stats;
                int bytes = oap->oap_count;

                if (crt == CRT_READ)
                        stats->os_lockless_reads += bytes;
                else
                        stats->os_lockless_writes += bytes;
        }

        /*
         * This has to be the last operation with the page, as locks are
         * released in cl_page_completion() and nothing except for the
         * reference counter protects page from concurrent reclaim.
         */
        lu_ref_del(&page->cp_reference, "transfer", page);
        /*
         * As page->cp_obj is pinned by a reference from page->cp_req, it is
         * safe to call cl_page_put() without risking object destruction in a
         * non-blocking context.
         */
        cl_page_put(env, page);
        RETURN(0);
}

static const struct obd_async_page_ops osc_async_page_ops = {
        .ap_make_ready    = osc_make_ready,
        .ap_refresh_count = osc_refresh_count,
        .ap_completion    = osc_completion
};

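/**
 * Allocates and initializes the osc_page slice for \a page: sets up the
 * underlying async page and adds the slice to the cl_page.
 */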
struct cl_page *osc_page_init(const struct lu_env *env,
                              struct cl_object *obj,
                              struct cl_page *page, cfs_page_t *vmpage)
{
        struct osc_object *osc = cl2osc(obj);
        struct osc_page   *opg;
        int result;

        OBD_SLAB_ALLOC_PTR_GFP(opg, osc_page_kmem, CFS_ALLOC_IO);
        if (opg != NULL) {
                void *oap = &opg->ops_oap;

                opg->ops_from = 0;
                opg->ops_to   = CFS_PAGE_SIZE;

                result = osc_prep_async_page(osc_export(osc),
                                             NULL, osc->oo_oinfo, vmpage,
                                             cl_offset(obj, page->cp_index),
                                             &osc_async_page_ops,
                                             opg, (void **)&oap, 1, NULL);
                if (result == 0) {
                        struct osc_io *oio = osc_env_io(env);
                        opg->ops_srvlock = osc_io_srvlock(oio);
                        cl_page_slice_add(page, &opg->ops_cl, obj,
                                          &osc_page_ops);
                }
                /*
                 * Cannot assert osc_page_protected() here as read-ahead
                 * creates temporary pages outside of a lock.
                 */
#ifdef INVARIANT_CHECK
                opg->ops_temp = !osc_page_protected(env, opg, CLM_READ, 1);
#endif
                CFS_INIT_LIST_HEAD(&opg->ops_inflight);
        } else
                result = -ENOMEM;
        return ERR_PTR(result);
}

/**
 * Helper function called by osc_io_submit() for every page in an immediate
 * transfer (i.e., transferred synchronously).
 */
void osc_io_submit_page(const struct lu_env *env,
                        struct osc_io *oio, struct osc_page *opg,
                        enum cl_req_type crt)
{
        struct osc_async_page *oap = &opg->ops_oap;
        struct client_obd     *cli = oap->oap_cli;
        int flags = 0;

        LINVRNT(osc_page_protected(env, opg,
                                   crt == CRT_WRITE ? CLM_WRITE : CLM_READ, 1));

        oap->oap_page_off   = opg->ops_from;
        oap->oap_count      = opg->ops_to - opg->ops_from;
        /* Give a hint to OST that requests are coming from kswapd - bug19529 */
        if (cfs_memory_pressure_get())
                oap->oap_brw_flags |= OBD_BRW_MEMALLOC;
        oap->oap_brw_flags |= OBD_BRW_SYNC;
        if (osc_io_srvlock(oio))
                oap->oap_brw_flags |= OBD_BRW_SRVLOCK;

        oap->oap_cmd = crt == CRT_WRITE ? OBD_BRW_WRITE : OBD_BRW_READ;
        if (!client_is_remote(osc_export(cl2osc(opg->ops_cl.cpl_obj))) &&
            cfs_capable(CFS_CAP_SYS_RESOURCE)) {
                oap->oap_brw_flags |= OBD_BRW_NOQUOTA;
                oap->oap_cmd |= OBD_BRW_NOQUOTA;
        }

        if (oap->oap_cmd & OBD_BRW_READ)
                flags = ASYNC_COUNT_STABLE;
        else if (!(oap->oap_brw_page.flag & OBD_BRW_FROM_GRANT))
                osc_enter_cache_try(env, cli, oap->oap_loi, oap, 1);

        cfs_spin_lock(&oap->oap_lock);
        oap->oap_async_flags |= OSC_FLAGS | flags;
        cfs_spin_unlock(&oap->oap_lock);

        osc_oap_to_pending(oap);
        osc_page_transfer_get(opg, "transfer\0imm");
        osc_page_transfer_add(env, opg, crt);
}

/** @} osc */