b=16919 Async journal commit support
lustre/osc/osc_page.c (fs/lustre-release.git)
/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * Implementation of cl_page for OSC layer.
 *
 *   Author: Nikita Danilov <nikita.danilov@sun.com>
 */

#define DEBUG_SUBSYSTEM S_OSC

#include "osc_cl_internal.h"

/** \addtogroup osc
 *  @{
 */

/*
 * osc_page_protected() is commented out because it may sleep inside the
 * client_obd_list_lock:
 * client_obd_list_lock -> osc_ap_completion -> osc_completion ->
 *   -> osc_page_protected -> osc_page_is_dlocked -> osc_match_base
 *   -> ldlm_lock_match -> sptlrpc_import_check_ctx -> sleep.
 */
#if 0
static int osc_page_is_dlocked(const struct lu_env *env,
                               const struct osc_page *opg,
                               enum cl_lock_mode mode, int pending, int unref)
{
        struct cl_page         *page;
        struct osc_object      *obj;
        struct osc_thread_info *info;
        struct ldlm_res_id     *resname;
        struct lustre_handle   *lockh;
        ldlm_policy_data_t     *policy;
        ldlm_mode_t             dlmmode;
        int                     flags;

        cfs_might_sleep();

        info = osc_env_info(env);
        resname = &info->oti_resname;
        policy = &info->oti_policy;
        lockh = &info->oti_handle;
        page = opg->ops_cl.cpl_page;
        obj = cl2osc(opg->ops_cl.cpl_obj);

        flags = LDLM_FL_TEST_LOCK | LDLM_FL_BLOCK_GRANTED;
        if (pending)
                flags |= LDLM_FL_CBPENDING;

        dlmmode = osc_cl_lock2ldlm(mode) | LCK_PW;
        osc_lock_build_res(env, obj, resname);
        osc_index2policy(policy, page->cp_obj, page->cp_index, page->cp_index);
        return osc_match_base(osc_export(obj), resname, LDLM_EXTENT, policy,
                              dlmmode, &flags, NULL, lockh, unref);
}

/**
 * Checks an invariant that a page in the cache is covered by a lock, as
 * needed.
 */
static int osc_page_protected(const struct lu_env *env,
                              const struct osc_page *opg,
                              enum cl_lock_mode mode, int unref)
{
        struct cl_object_header *hdr;
        struct cl_lock          *scan;
        struct cl_page          *page;
        struct cl_lock_descr    *descr;
        int result;

        LINVRNT(!opg->ops_temp);

        page = opg->ops_cl.cpl_page;
        if (page->cp_owner != NULL &&
            cl_io_top(page->cp_owner)->ci_lockreq == CILR_NEVER)
                /*
                 * If IO is done without locks (liblustre, or lloop), lock is
                 * not required.
                 */
                result = 1;
        else
                /* otherwise check for a DLM lock */
                result = osc_page_is_dlocked(env, opg, mode, 1, unref);
        if (result == 0) {
                /* maybe this page is a part of a lockless io? */
                hdr = cl_object_header(opg->ops_cl.cpl_obj);
                descr = &osc_env_info(env)->oti_descr;
                descr->cld_mode = mode;
                descr->cld_start = page->cp_index;
                descr->cld_end   = page->cp_index;
                cfs_spin_lock(&hdr->coh_lock_guard);
                cfs_list_for_each_entry(scan, &hdr->coh_locks, cll_linkage) {
                        /*
                         * Lock-less sub-lock has to be either in HELD state
                         * (when io is actively going on), or in CACHED state,
                         * when top-lock is being unlocked:
                         * cl_io_unlock()->cl_unuse()->...->lov_lock_unuse().
                         */
                        if ((scan->cll_state == CLS_HELD ||
                             scan->cll_state == CLS_CACHED) &&
                            cl_lock_ext_match(&scan->cll_descr, descr)) {
                                struct osc_lock *olck;

                                olck = osc_lock_at(scan);
                                result = osc_lock_is_lockless(olck);
                                break;
                        }
                }
                cfs_spin_unlock(&hdr->coh_lock_guard);
        }
        return result;
}
#else
static int osc_page_protected(const struct lu_env *env,
                              const struct osc_page *opg,
                              enum cl_lock_mode mode, int unref)
{
        return 1;
}
#endif

/*****************************************************************************
 *
 * Page operations.
 *
 */
static void osc_page_fini(const struct lu_env *env,
                          struct cl_page_slice *slice)
{
        struct osc_page *opg = cl2osc_page(slice);
        CDEBUG(D_TRACE, "%p\n", opg);
        OBD_SLAB_FREE_PTR(opg, osc_page_kmem);
}

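/**
 * Pins the top cl_page for a transfer: takes a page reference and a lu_ref
 * link on it and marks the osc_page as transfer-pinned.  Undone by
 * osc_page_transfer_put().
 */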
static void osc_page_transfer_get(struct osc_page *opg, const char *label)
{
        struct cl_page *page = cl_page_top(opg->ops_cl.cpl_page);

        LASSERT(!opg->ops_transfer_pinned);
        cl_page_get(page);
        lu_ref_add_atomic(&page->cp_reference, label, page);
        opg->ops_transfer_pinned = 1;
}

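/**
 * Releases the transfer pin taken by osc_page_transfer_get(), if it is
 * still held.
 */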
static void osc_page_transfer_put(const struct lu_env *env,
                                  struct osc_page *opg)
{
        struct cl_page *page = cl_page_top(opg->ops_cl.cpl_page);

        if (opg->ops_transfer_pinned) {
                lu_ref_del(&page->cp_reference, "transfer", page);
                opg->ops_transfer_pinned = 0;
                cl_page_put(env, page);
        }
}

/**
 * This is called once for every page when it is submitted for a transfer,
 * either opportunistic (osc_page_cache_add()) or immediate
 * (osc_page_submit()).
 */
static void osc_page_transfer_add(const struct lu_env *env,
                                  struct osc_page *opg, enum cl_req_type crt)
{
        struct osc_object *obj;

        LINVRNT(cl_page_is_vmlocked(env, opg->ops_cl.cpl_page));

        obj = cl2osc(opg->ops_cl.cpl_obj);
        cfs_spin_lock(&obj->oo_seatbelt);
        cfs_list_add(&opg->ops_inflight, &obj->oo_inflight[crt]);
        opg->ops_submitter = cfs_current();
        cfs_spin_unlock(&obj->oo_seatbelt);
}

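/**
 * Implements cl_page_operations::io[CRT_WRITE].cpo_cache_add(): queues a
 * dirty page for an opportunistic, asynchronous write-back RPC.
 */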
static int osc_page_cache_add(const struct lu_env *env,
                              const struct cl_page_slice *slice,
                              struct cl_io *unused)
{
        struct osc_page   *opg = cl2osc_page(slice);
        struct osc_object *obj = cl2osc(opg->ops_cl.cpl_obj);
        int result;
        /* All cacheable IO is async-capable */
        int brw_flags = OBD_BRW_ASYNC;
        int noquota = 0;

        LINVRNT(osc_page_protected(env, opg, CLM_WRITE, 0));
        ENTRY;

        /* Set the OBD_BRW_SRVLOCK before the page is queued. */
        brw_flags |= opg->ops_srvlock ? OBD_BRW_SRVLOCK : 0;
        if (!client_is_remote(osc_export(obj)) &&
            cfs_capable(CFS_CAP_SYS_RESOURCE)) {
                brw_flags |= OBD_BRW_NOQUOTA;
                noquota = OBD_BRW_NOQUOTA;
        }

        osc_page_transfer_get(opg, "transfer\0cache");
        result = osc_queue_async_io(env, osc_export(obj), NULL, obj->oo_oinfo,
                                    &opg->ops_oap, OBD_BRW_WRITE | noquota,
                                    0, 0, brw_flags, 0);
        if (result != 0)
                osc_page_transfer_put(env, opg);
        else
                osc_page_transfer_add(env, opg, CRT_WRITE);
        RETURN(result);
}

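/**
 * Converts a [start, end] range of page indices into a byte-granular LDLM
 * extent policy.  Assuming cl_offset() maps a page index to its byte offset
 * and 4096-byte pages, start = end = 2 yields the extent [8192, 12287].
 */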
void osc_index2policy(ldlm_policy_data_t *policy, const struct cl_object *obj,
                      pgoff_t start, pgoff_t end)
{
        memset(policy, 0, sizeof *policy);
        policy->l_extent.start = cl_offset(obj, start);
        policy->l_extent.end   = cl_offset(obj, end + 1) - 1;
}

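/**
 * Implements cl_page_operations::cpo_is_under_lock(): returns -EBUSY if a
 * cl_lock is found at the page's index, -ENODATA otherwise.
 */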
static int osc_page_is_under_lock(const struct lu_env *env,
                                  const struct cl_page_slice *slice,
                                  struct cl_io *unused)
{
        struct cl_lock *lock;
        int             result;

        ENTRY;
        lock = cl_lock_at_page(env, slice->cpl_obj, slice->cpl_page,
                               NULL, 1, 0);
        if (lock != NULL) {
                cl_lock_put(env, lock);
                result = -EBUSY;
        } else
                result = -ENODATA;
        RETURN(result);
}

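/**
 * Trap for the read-side cpo_cache_add() path, which must never be reached:
 * reads are not queued as opportunistic transfers.
 */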
static int osc_page_fail(const struct lu_env *env,
                         const struct cl_page_slice *slice,
                         struct cl_io *unused)
{
        /*
         * Cached read?
         */
        LBUG();
        return 0;
}

static const char *osc_list(cfs_list_t *head)
{
        return cfs_list_empty(head) ? "-" : "+";
}

static inline cfs_time_t osc_submit_duration(struct osc_page *opg)
{
        if (opg->ops_submit_time == 0)
                return 0;

        return (cfs_time_current() - opg->ops_submit_time);
}

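/**
 * Implements cl_page_operations::cpo_print().  The numbered groups in the
 * format string cover, in order: the osc_async_page (1 and 2), the osc_page
 * itself (3), the client_obd (4) and the lov_oinfo (5).
 */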
static int osc_page_print(const struct lu_env *env,
                          const struct cl_page_slice *slice,
                          void *cookie, lu_printer_t printer)
{
        struct osc_page       *opg = cl2osc_page(slice);
        struct osc_async_page *oap = &opg->ops_oap;
        struct osc_object     *obj = cl2osc(slice->cpl_obj);
        struct client_obd     *cli = &osc_export(obj)->exp_obd->u.cli;
        struct lov_oinfo      *loi = obj->oo_oinfo;

        return (*printer)(env, cookie, LUSTRE_OSC_NAME"-page@%p: "
                          "1< %#x %d %u %s %s %s > "
                          "2< "LPU64" %u %#x %#x | %p %p %p %p %p > "
                          "3< %s %p %d %lu %d > "
                          "4< %d %d %d %lu %s | %s %s %s %s > "
                          "5< %s %s %s %s | %d %s %s | %d %s %s >\n",
                          opg,
                          /* 1 */
                          oap->oap_magic, oap->oap_cmd,
                          oap->oap_interrupted,
                          osc_list(&oap->oap_pending_item),
                          osc_list(&oap->oap_urgent_item),
                          osc_list(&oap->oap_rpc_item),
                          /* 2 */
                          oap->oap_obj_off, oap->oap_page_off,
                          oap->oap_async_flags, oap->oap_brw_flags,
                          oap->oap_request,
                          oap->oap_cli, oap->oap_loi, oap->oap_caller_ops,
                          oap->oap_caller_data,
                          /* 3 */
                          osc_list(&opg->ops_inflight),
                          opg->ops_submitter, opg->ops_transfer_pinned,
                          osc_submit_duration(opg), opg->ops_srvlock,
                          /* 4 */
                          cli->cl_r_in_flight, cli->cl_w_in_flight,
                          cli->cl_max_rpcs_in_flight,
                          cli->cl_avail_grant,
                          osc_list(&cli->cl_cache_waiters),
                          osc_list(&cli->cl_loi_ready_list),
                          osc_list(&cli->cl_loi_hp_ready_list),
                          osc_list(&cli->cl_loi_write_list),
                          osc_list(&cli->cl_loi_read_list),
                          /* 5 */
                          osc_list(&loi->loi_ready_item),
                          osc_list(&loi->loi_hp_ready_item),
                          osc_list(&loi->loi_write_item),
                          osc_list(&loi->loi_read_item),
                          loi->loi_read_lop.lop_num_pending,
                          osc_list(&loi->loi_read_lop.lop_pending),
                          osc_list(&loi->loi_read_lop.lop_urgent),
                          loi->loi_write_lop.lop_num_pending,
                          osc_list(&loi->loi_write_lop.lop_pending),
                          osc_list(&loi->loi_write_lop.lop_urgent));
}

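/**
 * Implements cl_page_operations::cpo_delete(): tears the async page down
 * and takes the page off the object's in-flight list.
 */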
static void osc_page_delete(const struct lu_env *env,
                            const struct cl_page_slice *slice)
{
        struct osc_page       *opg = cl2osc_page(slice);
        struct osc_object     *obj = cl2osc(opg->ops_cl.cpl_obj);
        struct osc_async_page *oap = &opg->ops_oap;
        int rc;

        LINVRNT(opg->ops_temp || osc_page_protected(env, opg, CLM_READ, 1));

        ENTRY;
        CDEBUG(D_TRACE, "%p\n", opg);
        osc_page_transfer_put(env, opg);
        rc = osc_teardown_async_page(osc_export(obj), NULL, obj->oo_oinfo, oap);
        if (rc) {
                CL_PAGE_DEBUG(D_ERROR, env, cl_page_top(slice->cpl_page),
                              "Failed to tear down the async page: %d\n", rc);
                LASSERT(0);
        }
        cfs_spin_lock(&obj->oo_seatbelt);
        cfs_list_del_init(&opg->ops_inflight);
        cfs_spin_unlock(&obj->oo_seatbelt);
        EXIT;
}

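/**
 * Implements cl_page_operations::cpo_clip(): narrows the byte range
 * [from, to) that a transfer of this page will cover and marks the transfer
 * count stable.
 */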
void osc_page_clip(const struct lu_env *env, const struct cl_page_slice *slice,
                   int from, int to)
{
        struct osc_page       *opg = cl2osc_page(slice);
        struct osc_async_page *oap = &opg->ops_oap;

        LINVRNT(osc_page_protected(env, opg, CLM_READ, 0));

        opg->ops_from = from;
        opg->ops_to   = to;
        cfs_spin_lock(&oap->oap_lock);
        oap->oap_async_flags |= ASYNC_COUNT_STABLE;
        cfs_spin_unlock(&oap->oap_lock);
}

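/**
 * Implements cl_page_operations::cpo_cancel(): interrupts a transfer that
 * is still pinned to this page, under the client's loi list lock.
 */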
static int osc_page_cancel(const struct lu_env *env,
                           const struct cl_page_slice *slice)
{
        struct osc_page       *opg = cl2osc_page(slice);
        struct osc_async_page *oap = &opg->ops_oap;
        int rc = 0;

        LINVRNT(osc_page_protected(env, opg, CLM_READ, 0));

        client_obd_list_lock(&oap->oap_cli->cl_loi_list_lock);
        /* Check if the transfer against this page has completed, or was
         * never queued in the first place. */
        if (opg->ops_transfer_pinned)
                /* FIXME: may not be interrupted.. */
                rc = osc_oap_interrupted(env, oap);
        LASSERT(ergo(rc == 0, opg->ops_transfer_pinned == 0));
        client_obd_list_unlock(&oap->oap_cli->cl_loi_list_lock);
        return rc;
}

static const struct cl_page_operations osc_page_ops = {
        .cpo_fini          = osc_page_fini,
        .cpo_print         = osc_page_print,
        .cpo_delete        = osc_page_delete,
        .cpo_is_under_lock = osc_page_is_under_lock,
        .io = {
                [CRT_READ] = {
                        .cpo_cache_add = osc_page_fail
                },
                [CRT_WRITE] = {
                        .cpo_cache_add = osc_page_cache_add
                }
        },
        .cpo_clip          = osc_page_clip,
        .cpo_cancel        = osc_page_cancel
};

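/**
 * ap_make_ready() callback: prepares a cached page for write-out via
 * cl_page_make_ready() and records the submission time.
 */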
static int osc_make_ready(const struct lu_env *env, void *data, int cmd)
{
        struct osc_page *opg  = data;
        struct cl_page  *page = cl_page_top(opg->ops_cl.cpl_page);
        int result;

        LASSERT(cmd == OBD_BRW_WRITE); /* no cached reads */
        LINVRNT(osc_page_protected(env, opg, CLM_WRITE, 1));

        ENTRY;
        result = cl_page_make_ready(env, page, CRT_WRITE);
        if (result == 0)
                opg->ops_submit_time = cfs_time_current();
        RETURN(result);
}

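/**
 * ap_refresh_count() callback: returns how many bytes of the page lie below
 * kms, so that a write at the end of file does not send a full page of
 * garbage.  Assuming 4096-byte pages and kms = 10000, page index 2 covers
 * bytes [8192, 12287] and the result is 10000 % 4096 = 1808.
 */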
static int osc_refresh_count(const struct lu_env *env, void *data, int cmd)
{
        struct cl_page   *page;
        struct osc_page  *osc = data;
        struct cl_object *obj;
        struct cl_attr   *attr = &osc_env_info(env)->oti_attr;
        int result;
        loff_t kms;

        LINVRNT(osc_page_protected(env, osc, CLM_READ, 1));

        /* readpage queues with _COUNT_STABLE, shouldn't get here. */
        LASSERT(!(cmd & OBD_BRW_READ));
        LASSERT(osc != NULL);
        page = osc->ops_cl.cpl_page;
        obj = osc->ops_cl.cpl_obj;

        cl_object_attr_lock(obj);
        result = cl_object_attr_get(env, obj, attr);
        cl_object_attr_unlock(obj);
        if (result < 0)
                return result;
        kms = attr->cat_kms;
        if (cl_offset(obj, page->cp_index) >= kms)
                /* catch race with truncate */
                return 0;
        else if (cl_offset(obj, page->cp_index + 1) > kms)
                /* catch sub-page write at end of file */
                return kms % CFS_PAGE_SIZE;
        else
                return CFS_PAGE_SIZE;
}

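/**
 * ap_completion() callback: finishes the transfer of a page, updates the
 * lockless IO statistics, and drops the transfer pin and page reference.
 */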
static int osc_completion(const struct lu_env *env,
                          void *data, int cmd, struct obdo *oa, int rc)
{
        struct osc_page       *opg  = data;
        struct osc_async_page *oap  = &opg->ops_oap;
        struct cl_page        *page = cl_page_top(opg->ops_cl.cpl_page);
        struct osc_object     *obj  = cl2osc(opg->ops_cl.cpl_obj);
        enum cl_req_type crt;

        LINVRNT(osc_page_protected(env, opg, CLM_READ, 1));
        LINVRNT(cl_page_is_vmlocked(env, page));

        ENTRY;

        cmd &= ~OBD_BRW_NOQUOTA;
        LASSERT(equi(page->cp_state == CPS_PAGEIN,  cmd == OBD_BRW_READ));
        LASSERT(equi(page->cp_state == CPS_PAGEOUT, cmd == OBD_BRW_WRITE));
        LASSERT(opg->ops_transfer_pinned);

        /*
         * page->cp_req can be NULL if io submission failed before
         * cl_req was allocated.
         */
        if (page->cp_req != NULL)
                cl_req_page_done(env, page);
        LASSERT(page->cp_req == NULL);

        /* As the transfer for this page is being done, clear the flags */
        cfs_spin_lock(&oap->oap_lock);
        oap->oap_async_flags = 0;
        cfs_spin_unlock(&oap->oap_lock);

        crt = cmd == OBD_BRW_READ ? CRT_READ : CRT_WRITE;
        /* Clear opg->ops_transfer_pinned before VM lock is released. */
        opg->ops_transfer_pinned = 0;

        cfs_spin_lock(&obj->oo_seatbelt);
        LASSERT(opg->ops_submitter != NULL);
        LASSERT(!cfs_list_empty(&opg->ops_inflight));
        cfs_list_del_init(&opg->ops_inflight);
        cfs_spin_unlock(&obj->oo_seatbelt);

        opg->ops_submit_time = 0;

        cl_page_completion(env, page, crt, rc);

        /* statistics */
        if (rc == 0 && oap->oap_brw_flags & OBD_BRW_SRVLOCK) {
                struct lu_device *ld    = opg->ops_cl.cpl_obj->co_lu.lo_dev;
                struct osc_stats *stats = &lu2osc_dev(ld)->od_stats;
                int bytes = opg->ops_to - opg->ops_from;

                if (crt == CRT_READ)
                        stats->os_lockless_reads += bytes;
                else
                        stats->os_lockless_writes += bytes;
        }

        /*
         * This has to be the last operation with the page, as locks are
         * released in cl_page_completion() and nothing except for the
         * reference counter protects page from concurrent reclaim.
         */
        lu_ref_del(&page->cp_reference, "transfer", page);
        /*
         * As page->cp_obj is pinned by a reference from page->cp_req, it is
         * safe to call cl_page_put() without risking object destruction in a
         * non-blocking context.
         */
        cl_page_put(env, page);
        RETURN(0);
}

static const struct obd_async_page_ops osc_async_page_ops = {
        .ap_make_ready    = osc_make_ready,
        .ap_refresh_count = osc_refresh_count,
        .ap_completion    = osc_completion
};

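/**
 * Allocates the OSC-private part of a cl_page and registers it with the
 * async IO machinery via osc_prep_async_page().  Returns ERR_PTR(result),
 * i.e. NULL when initialization succeeded.
 */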
struct cl_page *osc_page_init(const struct lu_env *env,
                              struct cl_object *obj,
                              struct cl_page *page, cfs_page_t *vmpage)
{
        struct osc_object *osc = cl2osc(obj);
        struct osc_page   *opg;
        int result;

        OBD_SLAB_ALLOC_PTR_GFP(opg, osc_page_kmem, CFS_ALLOC_IO);
        if (opg != NULL) {
                void *oap = &opg->ops_oap;

                opg->ops_from = 0;
                opg->ops_to   = CFS_PAGE_SIZE;

                result = osc_prep_async_page(osc_export(osc),
                                             NULL, osc->oo_oinfo, vmpage,
                                             cl_offset(obj, page->cp_index),
                                             &osc_async_page_ops,
                                             opg, (void **)&oap, 1, NULL);
                if (result == 0) {
                        struct osc_io *oio = osc_env_io(env);

                        opg->ops_srvlock = osc_io_srvlock(oio);
                        cl_page_slice_add(page, &opg->ops_cl, obj,
                                          &osc_page_ops);
                }
                /*
                 * Cannot assert osc_page_protected() here as read-ahead
                 * creates temporary pages outside of a lock.
                 */
#ifdef INVARIANT_CHECK
                opg->ops_temp = !osc_page_protected(env, opg, CLM_READ, 1);
#endif
                CFS_INIT_LIST_HEAD(&opg->ops_inflight);
        } else
                result = -ENOMEM;
        return ERR_PTR(result);
}

/**
 * Helper function called by osc_io_submit() for every page in an immediate
 * transfer (i.e., transferred synchronously).
 */
void osc_io_submit_page(const struct lu_env *env,
                        struct osc_io *oio, struct osc_page *opg,
                        enum cl_req_type crt)
{
        struct osc_async_page *oap = &opg->ops_oap;
        struct client_obd     *cli = oap->oap_cli;
        int flags = 0;

        LINVRNT(osc_page_protected(env, opg,
                                   crt == CRT_WRITE ? CLM_WRITE : CLM_READ, 1));

        oap->oap_page_off   = opg->ops_from;
        oap->oap_count      = opg->ops_to - opg->ops_from;
        /* Give a hint to the OST that requests are coming from kswapd - bug19529 */
        if (cfs_memory_pressure_get())
                oap->oap_brw_flags |= OBD_BRW_MEMALLOC;
        oap->oap_brw_flags |= OBD_BRW_SYNC;
        if (osc_io_srvlock(oio))
                oap->oap_brw_flags |= OBD_BRW_SRVLOCK;

        oap->oap_cmd = crt == CRT_WRITE ? OBD_BRW_WRITE : OBD_BRW_READ;
        if (!client_is_remote(osc_export(cl2osc(opg->ops_cl.cpl_obj))) &&
            cfs_capable(CFS_CAP_SYS_RESOURCE)) {
                oap->oap_brw_flags |= OBD_BRW_NOQUOTA;
                oap->oap_cmd |= OBD_BRW_NOQUOTA;
        }

        if (oap->oap_cmd & OBD_BRW_READ)
                flags = ASYNC_COUNT_STABLE;
        else if (!(oap->oap_brw_page.flag & OBD_BRW_FROM_GRANT))
                osc_enter_cache_try(env, cli, oap->oap_loi, oap, 1);

        cfs_spin_lock(&oap->oap_lock);
        oap->oap_async_flags |= OSC_FLAGS | flags;
        cfs_spin_unlock(&oap->oap_lock);

        osc_oap_to_pending(oap);
        osc_page_transfer_get(opg, "transfer\0imm");
        osc_page_transfer_add(env, opg, crt);
}

/** @} osc */