/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011 Whamcloud, Inc.
 *
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * Implementation of cl_page for OSC layer.
 *
 *   Author: Nikita Danilov <nikita.danilov@sun.com>
 */

#define DEBUG_SUBSYSTEM S_OSC

#include "osc_cl_internal.h"

/** \addtogroup osc
 *  @{
 */

/*
 * osc_page_protected() is compiled out below because it may sleep while
 * holding the client_obd_list_lock:
 * client_obd_list_lock -> osc_ap_completion -> osc_completion ->
 *   -> osc_page_protected -> osc_page_is_dlocked -> osc_match_base
 *   -> ldlm_lock_match -> sptlrpc_import_check_ctx -> sleep.
 */
#if 0
static int osc_page_is_dlocked(const struct lu_env *env,
                               const struct osc_page *opg,
                               enum cl_lock_mode mode, int pending, int unref)
{
        struct cl_page         *page;
        struct osc_object      *obj;
        struct osc_thread_info *info;
        struct ldlm_res_id     *resname;
        struct lustre_handle   *lockh;
        ldlm_policy_data_t     *policy;
        ldlm_mode_t             dlmmode;
        int                     flags;

        cfs_might_sleep();

        info = osc_env_info(env);
        resname = &info->oti_resname;
        policy = &info->oti_policy;
        lockh = &info->oti_handle;
        page = opg->ops_cl.cpl_page;
        obj = cl2osc(opg->ops_cl.cpl_obj);

        flags = LDLM_FL_TEST_LOCK | LDLM_FL_BLOCK_GRANTED;
        if (pending)
                flags |= LDLM_FL_CBPENDING;

        dlmmode = osc_cl_lock2ldlm(mode) | LCK_PW;
        osc_lock_build_res(env, obj, resname);
        osc_index2policy(policy, page->cp_obj, page->cp_index, page->cp_index);
        return osc_match_base(osc_export(obj), resname, LDLM_EXTENT, policy,
                              dlmmode, &flags, NULL, lockh, unref);
}

/**
 * Checks the invariant that a page in the cache is covered by a lock, as
 * needed.
 */
static int osc_page_protected(const struct lu_env *env,
                              const struct osc_page *opg,
                              enum cl_lock_mode mode, int unref)
{
        struct cl_object_header *hdr;
        struct cl_lock          *scan;
        struct cl_page          *page;
        struct cl_lock_descr    *descr;
        int result;

        LINVRNT(!opg->ops_temp);

        page = opg->ops_cl.cpl_page;
        if (page->cp_owner != NULL &&
            cl_io_top(page->cp_owner)->ci_lockreq == CILR_NEVER)
                /*
                 * If IO is done without locks (liblustre, or lloop), a lock
                 * is not required.
                 */
                result = 1;
        else
                /* otherwise check for a DLM lock */
                result = osc_page_is_dlocked(env, opg, mode, 1, unref);
        if (result == 0) {
                /* maybe this page is a part of a lockless io? */
                hdr = cl_object_header(opg->ops_cl.cpl_obj);
                descr = &osc_env_info(env)->oti_descr;
                descr->cld_mode = mode;
                descr->cld_start = page->cp_index;
                descr->cld_end   = page->cp_index;
                cfs_spin_lock(&hdr->coh_lock_guard);
                cfs_list_for_each_entry(scan, &hdr->coh_locks, cll_linkage) {
                        /*
                         * A lockless sub-lock has to be either in HELD state
                         * (when io is actively going on), or in CACHED state,
                         * when the top-lock is being unlocked:
                         * cl_io_unlock()->cl_unuse()->...->lov_lock_unuse().
                         */
                        if ((scan->cll_state == CLS_HELD ||
                             scan->cll_state == CLS_CACHED) &&
                            cl_lock_ext_match(&scan->cll_descr, descr)) {
                                struct osc_lock *olck;

                                olck = osc_lock_at(scan);
                                result = osc_lock_is_lockless(olck);
                                break;
                        }
                }
                cfs_spin_unlock(&hdr->coh_lock_guard);
        }
        return result;
}
#else
static int osc_page_protected(const struct lu_env *env,
                              const struct osc_page *opg,
                              enum cl_lock_mode mode, int unref)
{
        return 1;
}
#endif

/*****************************************************************************
 *
 * Page operations.
 *
 */
static void osc_page_fini(const struct lu_env *env,
                          struct cl_page_slice *slice)
{
        struct osc_page *opg = cl2osc_page(slice);
        CDEBUG(D_TRACE, "%p\n", opg);
        LASSERT(opg->ops_lock == NULL);
        OBD_SLAB_FREE_PTR(opg, osc_page_kmem);
}

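/**
 * Pins \a opg for the duration of a transfer: takes a reference on the
 * top-level cl_page and records \a label in its lu_ref tracking.
 */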
static void osc_page_transfer_get(struct osc_page *opg, const char *label)
{
        struct cl_page *page = cl_page_top(opg->ops_cl.cpl_page);

        LASSERT(!opg->ops_transfer_pinned);
        cl_page_get(page);
        lu_ref_add_atomic(&page->cp_reference, label, page);
        opg->ops_transfer_pinned = 1;
}

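/**
 * Releases the transfer pin taken by osc_page_transfer_get(), if it is
 * still held, dropping the page reference along with it.
 */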
static void osc_page_transfer_put(const struct lu_env *env,
                                  struct osc_page *opg)
{
        struct cl_page *page = cl_page_top(opg->ops_cl.cpl_page);

        if (opg->ops_transfer_pinned) {
                lu_ref_del(&page->cp_reference, "transfer", page);
                opg->ops_transfer_pinned = 0;
                cl_page_put(env, page);
        }
}

/**
 * This is called once for every page when it is submitted for a transfer,
 * either opportunistic (osc_page_cache_add()) or immediate
 * (osc_page_submit()).
 */
static void osc_page_transfer_add(const struct lu_env *env,
                                  struct osc_page *opg, enum cl_req_type crt)
{
        struct osc_object *obj;

        LINVRNT(cl_page_is_vmlocked(env, opg->ops_cl.cpl_page));

        obj = cl2osc(opg->ops_cl.cpl_obj);
        cfs_spin_lock(&obj->oo_seatbelt);
        cfs_list_add(&opg->ops_inflight, &obj->oo_inflight[crt]);
        opg->ops_submitter = cfs_current();
        cfs_spin_unlock(&obj->oo_seatbelt);
}

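/**
 * Implementation of cl_page_operations::io[CRT_WRITE].cpo_cache_add().
 * Queues the page for an opportunistic (asynchronous write-back) transfer,
 * deciding the srvlock and quota flags before the page is queued.
 */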
static int osc_page_cache_add(const struct lu_env *env,
                              const struct cl_page_slice *slice,
                              struct cl_io *unused)
{
        struct osc_page   *opg = cl2osc_page(slice);
        struct osc_object *obj = cl2osc(opg->ops_cl.cpl_obj);
        int result;
        /* All cacheable IO is async-capable */
        int brw_flags = OBD_BRW_ASYNC;
        int noquota = 0;

        LINVRNT(osc_page_protected(env, opg, CLM_WRITE, 0));
        ENTRY;

        /* Set the OBD_BRW_SRVLOCK before the page is queued. */
        brw_flags |= opg->ops_srvlock ? OBD_BRW_SRVLOCK : 0;
        if (!client_is_remote(osc_export(obj)) &&
            cfs_capable(CFS_CAP_SYS_RESOURCE)) {
                brw_flags |= OBD_BRW_NOQUOTA;
                noquota = OBD_BRW_NOQUOTA;
        }

        osc_page_transfer_get(opg, "transfer\0cache");
        result = osc_queue_async_io(env, osc_export(obj), NULL, obj->oo_oinfo,
                                    &opg->ops_oap, OBD_BRW_WRITE | noquota,
                                    opg->ops_from, opg->ops_to - opg->ops_from,
                                    brw_flags, 0);
        if (result != 0)
                osc_page_transfer_put(env, opg);
        else
                osc_page_transfer_add(env, opg, CRT_WRITE);
        RETURN(result);
}

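/**
 * Converts a page index range [\a start, \a end] into a byte-granular
 * LDLM extent policy covering those pages.
 */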
void osc_index2policy(ldlm_policy_data_t *policy, const struct cl_object *obj,
                      pgoff_t start, pgoff_t end)
{
        memset(policy, 0, sizeof *policy);
        policy->l_extent.start = cl_offset(obj, start);
        policy->l_extent.end   = cl_offset(obj, end + 1) - 1;
}

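/**
 * Records a page reference on \a lock in opg->ops_lock. Returns 0 on
 * success, or 1 if the lock no longer accepts page references (its
 * pageref count has already gone non-positive) and was released instead.
 */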
static int osc_page_addref_lock(const struct lu_env *env,
                                struct osc_page *opg,
                                struct cl_lock *lock)
{
        struct osc_lock *olock;
        int              rc;

        LASSERT(opg->ops_lock == NULL);

        olock = osc_lock_at(lock);
        if (cfs_atomic_inc_return(&olock->ols_pageref) <= 0) {
                cfs_atomic_dec(&olock->ols_pageref);
                cl_lock_put(env, lock);
                rc = 1;
        } else {
                opg->ops_lock = lock;
                rc = 0;
        }
        return rc;
}

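/**
 * Drops the page reference on opg->ops_lock taken by
 * osc_page_addref_lock() and releases the lock itself.
 */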
static void osc_page_putref_lock(const struct lu_env *env,
                                 struct osc_page *opg)
{
        struct cl_lock  *lock = opg->ops_lock;
        struct osc_lock *olock;

        LASSERT(lock != NULL);
        olock = osc_lock_at(lock);

        cfs_atomic_dec(&olock->ols_pageref);
        opg->ops_lock = NULL;

        /*
         * Note: usually this won't be the last reference on the lock, but if
         * it is, then all cl_lock_put() does is free some memory, so it is
         * safe even when the caller is holding spinlocks.
         */
        LASSERT(cfs_atomic_read(&lock->cll_ref) > 1 || olock->ols_hold == 0);
        cl_lock_put(env, lock);
}

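/**
 * Implementation of cl_page_operations::cpo_is_under_lock(). Returns
 * -EBUSY if the page is covered by a lock (caching a reference to it in
 * opg->ops_lock), and -ENODATA otherwise.
 */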
static int osc_page_is_under_lock(const struct lu_env *env,
                                  const struct cl_page_slice *slice,
                                  struct cl_io *unused)
{
        struct cl_lock *lock;
        int             result;

        ENTRY;
        lock = cl_lock_at_page(env, slice->cpl_obj, slice->cpl_page,
                               NULL, 1, 0);
        if (lock != NULL &&
            osc_page_addref_lock(env, cl2osc_page(slice), lock) == 0)
                result = -EBUSY;
        else
                result = -ENODATA;
        RETURN(result);
}

static void osc_page_disown(const struct lu_env *env,
                            const struct cl_page_slice *slice,
                            struct cl_io *io)
{
        struct osc_page *opg = cl2osc_page(slice);

        if (unlikely(opg->ops_lock))
                osc_page_putref_lock(env, opg);
}

static void osc_page_completion_read(const struct lu_env *env,
                                     const struct cl_page_slice *slice,
                                     int ioret)
{
        struct osc_page *opg = cl2osc_page(slice);

        if (likely(opg->ops_lock))
                osc_page_putref_lock(env, opg);
}

static int osc_page_fail(const struct lu_env *env,
                         const struct cl_page_slice *slice,
                         struct cl_io *unused)
{
        /*
         * Cached read? Reads are never added to the transfer cache, so
         * this must not be reached.
         */
        LBUG();
        return 0;
}

static const char *osc_list(cfs_list_t *head)
{
        return cfs_list_empty(head) ? "-" : "+";
}

static inline cfs_time_t osc_submit_duration(struct osc_page *opg)
{
        if (opg->ops_submit_time == 0)
                return 0;

        return (cfs_time_current() - opg->ops_submit_time);
}

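/**
 * Implementation of cl_page_operations::cpo_print(). Dumps the state of
 * the async page, the in-flight lists, and the per-client and per-object
 * queues for debugging.
 */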
static int osc_page_print(const struct lu_env *env,
                          const struct cl_page_slice *slice,
                          void *cookie, lu_printer_t printer)
{
        struct osc_page       *opg = cl2osc_page(slice);
        struct osc_async_page *oap = &opg->ops_oap;
        struct osc_object     *obj = cl2osc(slice->cpl_obj);
        struct client_obd     *cli = &osc_export(obj)->exp_obd->u.cli;
        struct lov_oinfo      *loi = obj->oo_oinfo;

        return (*printer)(env, cookie, LUSTRE_OSC_NAME"-page@%p: "
                          "1< %#x %d %u %s %s %s > "
                          "2< "LPU64" %u %u %#x %#x | %p %p %p %p %p > "
                          "3< %s %p %d %lu %d > "
                          "4< %d %d %d %lu %s | %s %s %s %s > "
                          "5< %s %s %s %s | %d %s %s | %d %s %s>\n",
                          opg,
                          /* 1 */
                          oap->oap_magic, oap->oap_cmd,
                          oap->oap_interrupted,
                          osc_list(&oap->oap_pending_item),
                          osc_list(&oap->oap_urgent_item),
                          osc_list(&oap->oap_rpc_item),
                          /* 2 */
                          oap->oap_obj_off, oap->oap_page_off, oap->oap_count,
                          oap->oap_async_flags, oap->oap_brw_flags,
                          oap->oap_request,
                          oap->oap_cli, oap->oap_loi, oap->oap_caller_ops,
                          oap->oap_caller_data,
                          /* 3 */
                          osc_list(&opg->ops_inflight),
                          opg->ops_submitter, opg->ops_transfer_pinned,
                          osc_submit_duration(opg), opg->ops_srvlock,
                          /* 4 */
                          cli->cl_r_in_flight, cli->cl_w_in_flight,
                          cli->cl_max_rpcs_in_flight,
                          cli->cl_avail_grant,
                          osc_list(&cli->cl_cache_waiters),
                          osc_list(&cli->cl_loi_ready_list),
                          osc_list(&cli->cl_loi_hp_ready_list),
                          osc_list(&cli->cl_loi_write_list),
                          osc_list(&cli->cl_loi_read_list),
                          /* 5 */
                          osc_list(&loi->loi_ready_item),
                          osc_list(&loi->loi_hp_ready_item),
                          osc_list(&loi->loi_write_item),
                          osc_list(&loi->loi_read_item),
                          loi->loi_read_lop.lop_num_pending,
                          osc_list(&loi->loi_read_lop.lop_pending),
                          osc_list(&loi->loi_read_lop.lop_urgent),
                          loi->loi_write_lop.lop_num_pending,
                          osc_list(&loi->loi_write_lop.lop_pending),
                          osc_list(&loi->loi_write_lop.lop_urgent));
}

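/**
 * Implementation of cl_page_operations::cpo_delete(). Drops the transfer
 * pin, tears down the async page, and removes the page from the object's
 * in-flight list.
 */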
static void osc_page_delete(const struct lu_env *env,
                            const struct cl_page_slice *slice)
{
        struct osc_page       *opg = cl2osc_page(slice);
        struct osc_object     *obj = cl2osc(opg->ops_cl.cpl_obj);
        struct osc_async_page *oap = &opg->ops_oap;
        int rc;

        LINVRNT(opg->ops_temp || osc_page_protected(env, opg, CLM_READ, 1));

        ENTRY;
        CDEBUG(D_TRACE, "%p\n", opg);
        osc_page_transfer_put(env, opg);
        rc = osc_teardown_async_page(osc_export(obj), NULL, obj->oo_oinfo, oap);
        if (rc) {
                CL_PAGE_DEBUG(D_ERROR, env, cl_page_top(slice->cpl_page),
                              "Trying to teardown failed: %d\n", rc);
                LASSERT(0);
        }
        cfs_spin_lock(&obj->oo_seatbelt);
        cfs_list_del_init(&opg->ops_inflight);
        cfs_spin_unlock(&obj->oo_seatbelt);
        EXIT;
}

void osc_page_clip(const struct lu_env *env, const struct cl_page_slice *slice,
                   int from, int to)
{
        struct osc_page       *opg = cl2osc_page(slice);
        struct osc_async_page *oap = &opg->ops_oap;

        LINVRNT(osc_page_protected(env, opg, CLM_READ, 0));

        opg->ops_from = from;
        opg->ops_to   = to;
        cfs_spin_lock(&oap->oap_lock);
        oap->oap_async_flags |= ASYNC_COUNT_STABLE;
        cfs_spin_unlock(&oap->oap_lock);
}

static int osc_page_cancel(const struct lu_env *env,
                           const struct cl_page_slice *slice)
{
        struct osc_page *opg       = cl2osc_page(slice);
        struct osc_async_page *oap = &opg->ops_oap;
        int rc = 0;

        LINVRNT(osc_page_protected(env, opg, CLM_READ, 0));

        client_obd_list_lock(&oap->oap_cli->cl_loi_list_lock);
        /* Check whether the transfer of this page has completed, or was
         * never queued in the first place. */
        if (opg->ops_transfer_pinned)
                /* FIXME: may not be interrupted.. */
                rc = osc_oap_interrupted(env, oap);
        LASSERT(ergo(rc == 0, opg->ops_transfer_pinned == 0));
        client_obd_list_unlock(&oap->oap_cli->cl_loi_list_lock);
        return rc;
}

static const struct cl_page_operations osc_page_ops = {
        .cpo_fini          = osc_page_fini,
        .cpo_print         = osc_page_print,
        .cpo_delete        = osc_page_delete,
        .cpo_is_under_lock = osc_page_is_under_lock,
        .cpo_disown        = osc_page_disown,
        .io = {
                [CRT_READ] = {
                        .cpo_cache_add  = osc_page_fail,
                        .cpo_completion = osc_page_completion_read
                },
                [CRT_WRITE] = {
                        .cpo_cache_add  = osc_page_cache_add
                }
        },
        .cpo_clip           = osc_page_clip,
        .cpo_cancel         = osc_page_cancel
};

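/**
 * obd_async_page_ops::ap_make_ready() hook: asks the cl_page layer to make
 * the page ready for a write transfer and, on success, records the
 * submission time. Cached reads never take this path.
 */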
static int osc_make_ready(const struct lu_env *env, void *data, int cmd)
{
        struct osc_page *opg  = data;
        struct cl_page  *page = cl_page_top(opg->ops_cl.cpl_page);
        int result;

        LASSERT(cmd == OBD_BRW_WRITE); /* no cached reads */
        LINVRNT(osc_page_protected(env, opg, CLM_WRITE, 1));

        ENTRY;
        result = cl_page_make_ready(env, page, CRT_WRITE);
        if (result == 0)
                opg->ops_submit_time = cfs_time_current();
        RETURN(result);
}

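/**
 * obd_async_page_ops::ap_refresh_count() hook: returns how many bytes of
 * the page should actually be written, clipping against the known minimum
 * size (kms) to avoid racing with truncate and to handle a partial page
 * at end of file.
 */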
static int osc_refresh_count(const struct lu_env *env, void *data, int cmd)
{
        struct cl_page   *page;
        struct osc_page  *osc = data;
        struct cl_object *obj;
        struct cl_attr   *attr = &osc_env_info(env)->oti_attr;

        int result;
        loff_t kms;

        LINVRNT(osc_page_protected(env, osc, CLM_READ, 1));

        /* readpage queues with _COUNT_STABLE, shouldn't get here. */
        LASSERT(!(cmd & OBD_BRW_READ));
        LASSERT(osc != NULL);
        page = osc->ops_cl.cpl_page;
        obj = osc->ops_cl.cpl_obj;

        cl_object_attr_lock(obj);
        result = cl_object_attr_get(env, obj, attr);
        cl_object_attr_unlock(obj);
        if (result < 0)
                return result;
        kms = attr->cat_kms;
        if (cl_offset(obj, page->cp_index) >= kms)
                /* catch race with truncate */
                return 0;
        else if (cl_offset(obj, page->cp_index + 1) > kms)
                /* catch sub-page write at end of file */
                return kms % CFS_PAGE_SIZE;
        else
                return CFS_PAGE_SIZE;
}

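/**
 * obd_async_page_ops::ap_completion() hook: called when the read or write
 * RPC covering this page finishes. Completes the cl_page, updates lockless
 * I/O statistics, and drops the transfer pin taken at submission time.
 */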
static int osc_completion(const struct lu_env *env,
                          void *data, int cmd, struct obdo *oa, int rc)
{
        struct osc_page       *opg  = data;
        struct osc_async_page *oap  = &opg->ops_oap;
        struct cl_page        *page = cl_page_top(opg->ops_cl.cpl_page);
        struct osc_object     *obj  = cl2osc(opg->ops_cl.cpl_obj);
        enum cl_req_type crt;

        LINVRNT(osc_page_protected(env, opg, CLM_READ, 1));
        LINVRNT(cl_page_is_vmlocked(env, page));

        ENTRY;

        cmd &= ~OBD_BRW_NOQUOTA;
        LASSERT(equi(page->cp_state == CPS_PAGEIN,  cmd == OBD_BRW_READ));
        LASSERT(equi(page->cp_state == CPS_PAGEOUT, cmd == OBD_BRW_WRITE));
        LASSERT(opg->ops_transfer_pinned);

        /*
         * page->cp_req can be NULL if io submission failed before
         * cl_req was allocated.
         */
        if (page->cp_req != NULL)
                cl_req_page_done(env, page);
        LASSERT(page->cp_req == NULL);

        /* Now that the transfer for this page is done, clear the flags */
        cfs_spin_lock(&oap->oap_lock);
        oap->oap_async_flags = 0;
        cfs_spin_unlock(&oap->oap_lock);

        crt = cmd == OBD_BRW_READ ? CRT_READ : CRT_WRITE;
        /* Clear opg->ops_transfer_pinned before the VM lock is released. */
        opg->ops_transfer_pinned = 0;

        cfs_spin_lock(&obj->oo_seatbelt);
        LASSERT(opg->ops_submitter != NULL);
        LASSERT(!cfs_list_empty(&opg->ops_inflight));
        cfs_list_del_init(&opg->ops_inflight);
        cfs_spin_unlock(&obj->oo_seatbelt);

        opg->ops_submit_time = 0;

        cl_page_completion(env, page, crt, rc);

        /* statistics */
        if (rc == 0 && oap->oap_brw_flags & OBD_BRW_SRVLOCK) {
                struct lu_device *ld    = opg->ops_cl.cpl_obj->co_lu.lo_dev;
                struct osc_stats *stats = &lu2osc_dev(ld)->od_stats;
                int bytes = oap->oap_count;

                if (crt == CRT_READ)
                        stats->os_lockless_reads += bytes;
                else
                        stats->os_lockless_writes += bytes;
        }

        /*
         * This has to be the last operation with the page, as locks are
         * released in cl_page_completion() and nothing except for the
         * reference counter protects the page from concurrent reclaim.
         */
        lu_ref_del(&page->cp_reference, "transfer", page);
        /*
         * As page->cp_obj is pinned by a reference from page->cp_req, it is
         * safe to call cl_page_put() without risking object destruction in a
         * non-blocking context.
         */
        cl_page_put(env, page);
        RETURN(0);
}

static const struct obd_async_page_ops osc_async_page_ops = {
        .ap_make_ready    = osc_make_ready,
        .ap_refresh_count = osc_refresh_count,
        .ap_completion    = osc_completion
};

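/**
 * Allocates the OSC layer of a new cl_page and wires up its embedded
 * osc_async_page. Returns ERR_PTR(result), which is NULL on success.
 */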
struct cl_page *osc_page_init(const struct lu_env *env,
                              struct cl_object *obj,
                              struct cl_page *page, cfs_page_t *vmpage)
{
        struct osc_object *osc = cl2osc(obj);
        struct osc_page   *opg;
        int result;

        OBD_SLAB_ALLOC_PTR_GFP(opg, osc_page_kmem, CFS_ALLOC_IO);
        if (opg != NULL) {
                void *oap = &opg->ops_oap;

                opg->ops_from = 0;
                opg->ops_to   = CFS_PAGE_SIZE;

                result = osc_prep_async_page(osc_export(osc),
                                             NULL, osc->oo_oinfo, vmpage,
                                             cl_offset(obj, page->cp_index),
                                             &osc_async_page_ops,
                                             opg, (void **)&oap, 1, NULL);
                if (result == 0) {
                        struct osc_io *oio = osc_env_io(env);
                        opg->ops_srvlock = osc_io_srvlock(oio);
                        cl_page_slice_add(page, &opg->ops_cl, obj,
                                          &osc_page_ops);
                }
                /*
                 * Cannot assert osc_page_protected() here as read-ahead
                 * creates temporary pages outside of a lock.
                 */
#ifdef INVARIANT_CHECK
                opg->ops_temp = !osc_page_protected(env, opg, CLM_READ, 1);
#endif
                CFS_INIT_LIST_HEAD(&opg->ops_inflight);
        } else
                result = -ENOMEM;
        return ERR_PTR(result);
}

/**
 * Helper function called by osc_io_submit() for every page in an immediate
 * transfer (i.e., transferred synchronously).
 */
void osc_io_submit_page(const struct lu_env *env,
                        struct osc_io *oio, struct osc_page *opg,
                        enum cl_req_type crt)
{
        struct osc_async_page *oap = &opg->ops_oap;
        struct client_obd     *cli = oap->oap_cli;
        int flags = 0;

        LINVRNT(osc_page_protected(env, opg,
                                   crt == CRT_WRITE ? CLM_WRITE : CLM_READ, 1));

        oap->oap_page_off   = opg->ops_from;
        oap->oap_count      = opg->ops_to - opg->ops_from;
        /* Give a hint to the OST that requests are coming from kswapd - bug 19529 */
        if (cfs_memory_pressure_get())
                oap->oap_brw_flags |= OBD_BRW_MEMALLOC;
        oap->oap_brw_flags |= OBD_BRW_SYNC;
        if (osc_io_srvlock(oio))
                oap->oap_brw_flags |= OBD_BRW_SRVLOCK;

        oap->oap_cmd = crt == CRT_WRITE ? OBD_BRW_WRITE : OBD_BRW_READ;
        if (!client_is_remote(osc_export(cl2osc(opg->ops_cl.cpl_obj))) &&
            cfs_capable(CFS_CAP_SYS_RESOURCE)) {
                oap->oap_brw_flags |= OBD_BRW_NOQUOTA;
                oap->oap_cmd |= OBD_BRW_NOQUOTA;
        }

        if (oap->oap_cmd & OBD_BRW_READ)
                flags = ASYNC_COUNT_STABLE;
        else if (!(oap->oap_brw_page.flag & OBD_BRW_FROM_GRANT))
                osc_enter_cache_try(env, cli, oap->oap_loi, oap, 1);

        cfs_spin_lock(&oap->oap_lock);
        oap->oap_async_flags |= OSC_FLAGS | flags;
        cfs_spin_unlock(&oap->oap_lock);

        osc_oap_to_pending(oap);
        osc_page_transfer_get(opg, "transfer\0imm");
        osc_page_transfer_add(env, opg, crt);
}

/** @} osc */