LU-5683 clio: add CIT_DATA_VERSION
fs/lustre-release.git: lustre/osc/osc_io.c
index 5cb13ee..34679c9 100644
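
This change adds OSC-layer handling for the new CIT_DATA_VERSION io type: osc_io_data_version_start() packs an OST_GETATTR request (optionally setting OBD_FL_SRVLOCK, plus OBD_FL_FLUSH for LL_DV_WR_FLUSH, so cached data is flushed first), sends it through ptlrpcd, and osc_io_data_version_end() waits for the interpret callback to complete and copies o_data_version back into the io descriptor. The diff also carries this file forward to the newer clio interfaces (cio_submit, cio_commit_async, cio_read_ahead, and the CIT_SETATTR/CIT_FSYNC handlers). As a rough, hedged sketch of how a layer above OSC might drive the new io type — the function name and the caller-supplied, zero-initialized cl_io are assumptions, while cl_io_init()/cl_io_loop()/cl_io_fini() and the ci_data_version fields are the clio API used in the patch below:

/*
 * Hypothetical caller sketch (not part of this patch): run a
 * CIT_DATA_VERSION io against an object and return the version.
 * @io must arrive zero-initialized (e.g. from per-thread io storage).
 */
static int example_data_version(const struct lu_env *env, struct cl_io *io,
                                struct cl_object *obj, __u64 *version,
                                int flags)
{
        int rc;

        io->ci_obj = obj;
        io->u.ci_data_version.dv_flags = flags; /* e.g. LL_DV_RD_FLUSH */

        rc = cl_io_init(env, io, CIT_DATA_VERSION, obj);
        if (rc == 0)
                rc = cl_io_loop(env, io);
        else
                rc = io->ci_result;

        if (rc == 0)
                *version = io->u.ci_data_version.dv_data_version;
        cl_io_fini(env, io);

        return rc;
}
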
@@ -1,6 +1,4 @@
-/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
- * vim:expandtab:shiftwidth=8:tabstop=8:
- *
+/*
  * GPL HEADER START
  *
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  * GPL HEADER END
  */
 /*
- * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
+ * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
  * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2014, Intel Corporation.
  */
 /*
  * This file is part of Lustre, http://www.lustre.org/
  * Implementation of cl_io for OSC layer.
  *
  *   Author: Nikita Danilov <nikita.danilov@sun.com>
+ *   Author: Jinshan Xiong <jinshan.xiong@whamcloud.com>
  */
 
-/** \addtogroup osc osc @{ */
-
 #define DEBUG_SUBSYSTEM S_OSC
 
 #include "osc_cl_internal.h"
 
+/** \addtogroup osc 
+ *  @{ 
+ */
+
 /*****************************************************************************
  *
  * Type conversions.
@@ -64,14 +67,18 @@ static struct osc_io *cl2osc_io(const struct lu_env *env,
         return oio;
 }
 
-static struct osc_page *osc_cl_page_osc(struct cl_page *page)
+static struct osc_page *osc_cl_page_osc(struct cl_page *page,
+                                       struct osc_object *osc)
 {
-        const struct cl_page_slice *slice;
+       const struct cl_page_slice *slice;
 
-        slice = cl_page_at(page, &osc_device_type);
-        LASSERT(slice != NULL);
+       if (osc != NULL)
+               slice = cl_object_page_slice(&osc->oo_cl, page);
+       else
+               slice = cl_page_at(page, &osc_device_type);
+       LASSERT(slice != NULL);
 
-        return cl2osc_page(slice);
+       return cl2osc_page(slice);
 }
 
 
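The extra osc_object argument lets osc_cl_page_osc() resolve the OSC slice with cl_object_page_slice() when the owning object is already known, falling back to the cl_page_at() device-type scan only for callers such as osc_req_attr_set() that pass NULL. A loose conceptual sketch of why the object-based lookup is cheaper — this is not the real cl_object_page_slice(); the helper name is illustrative:

/*
 * Conceptual sketch only: with the single-allocation cl_page layout,
 * each layer records the offset of its slice inside the page buffer
 * (co_slice_off in the real code), so the object-based lookup is
 * pointer arithmetic instead of a walk over the page's slice list.
 */
static inline const struct cl_page_slice *
slice_at_offset(const struct cl_page *page, size_t slice_off)
{
        return (const struct cl_page_slice *)((const char *)page + slice_off);
}
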
@@ -85,117 +92,108 @@ static void osc_io_fini(const struct lu_env *env, const struct cl_io_slice *io)
 {
 }
 
-struct cl_page *osc_oap2cl_page(struct osc_async_page *oap)
+static void osc_read_ahead_release(const struct lu_env *env,
+                                  void *cbdata)
 {
-        return container_of(oap, struct osc_page, ops_oap)->ops_cl.cpl_page;
+       struct ldlm_lock *dlmlock = cbdata;
+       struct lustre_handle lockh;
+
+       ldlm_lock2handle(dlmlock, &lockh);
+       ldlm_lock_decref(&lockh, LCK_PR);
+       LDLM_LOCK_PUT(dlmlock);
 }
 
-static void osc_io_unplug(const struct lu_env *env, struct osc_object *osc,
-                          struct client_obd *cli)
+static int osc_io_read_ahead(const struct lu_env *env,
+                            const struct cl_io_slice *ios,
+                            pgoff_t start, struct cl_read_ahead *ra)
 {
-        loi_list_maint(cli, osc->oo_oinfo);
-        osc_check_rpcs(env, cli);
-        client_obd_list_unlock(&cli->cl_loi_list_lock);
+       struct osc_object       *osc = cl2osc(ios->cis_obj);
+       struct ldlm_lock        *dlmlock;
+       int                     result = -ENODATA;
+       ENTRY;
+
+       dlmlock = osc_dlmlock_at_pgoff(env, osc, start, 0);
+       if (dlmlock != NULL) {
+               if (dlmlock->l_req_mode != LCK_PR) {
+                       struct lustre_handle lockh;
+                       ldlm_lock2handle(dlmlock, &lockh);
+                       ldlm_lock_addref(&lockh, LCK_PR);
+                       ldlm_lock_decref(&lockh, dlmlock->l_req_mode);
+               }
+
+               ra->cra_end = cl_index(osc2cl(osc),
+                                      dlmlock->l_policy_data.l_extent.end);
+               ra->cra_release = osc_read_ahead_release;
+               ra->cra_cbdata = dlmlock;
+               result = 0;
+       }
+
+       RETURN(result);
 }
 
 /**
- * How many pages osc_io_submit() queues before checking whether an RPC is
- * ready.
- */
-#define OSC_QUEUE_GRAIN (32)
-
-/**
  * An implementation of cl_io_operations::cio_io_submit() method for osc
  * layer. Iterates over pages in the in-queue, prepares each for io by calling
  * cl_page_prep() and then either submits them through osc_io_submit_page()
  * or, if page is already submitted, changes osc flags through
- * osc_set_async_flags_base().
+ * osc_set_async_flags().
  */
 static int osc_io_submit(const struct lu_env *env,
                          const struct cl_io_slice *ios,
-                         enum cl_req_type crt, struct cl_2queue *queue,
-                         enum cl_req_priority priority)
+                        enum cl_req_type crt, struct cl_2queue *queue)
 {
-        struct cl_page    *page;
-        struct cl_page    *tmp;
-        struct osc_object *osc0 = NULL;
-        struct client_obd *cli  = NULL;
-        struct osc_object *osc  = NULL; /* to keep gcc happy */
-        struct osc_page   *opg;
-        struct cl_io      *io;
-
-        struct cl_page_list *qin      = &queue->c2_qin;
-        struct cl_page_list *qout     = &queue->c2_qout;
-        int queued = 0;
-        int result = 0;
+       struct cl_page    *page;
+       struct cl_page    *tmp;
+       struct client_obd *cli  = NULL;
+       struct osc_object *osc  = NULL; /* to keep gcc happy */
+       struct osc_page   *opg;
+       struct cl_io      *io;
+       struct list_head  list = LIST_HEAD_INIT(list);
+
+       struct cl_page_list *qin      = &queue->c2_qin;
+       struct cl_page_list *qout     = &queue->c2_qout;
+       unsigned int queued = 0;
+       int result = 0;
+       int cmd;
+       int brw_flags;
+       unsigned int max_pages;
+
+       LASSERT(qin->pl_nr > 0);
 
-        LASSERT(qin->pl_nr > 0);
+       CDEBUG(D_CACHE, "%d %d\n", qin->pl_nr, crt);
+
+       osc = cl2osc(ios->cis_obj);
+       cli = osc_cli(osc);
+       max_pages = cli->cl_max_pages_per_rpc;
+
+       cmd = crt == CRT_WRITE ? OBD_BRW_WRITE : OBD_BRW_READ;
+       brw_flags = osc_io_srvlock(cl2osc_io(env, ios)) ? OBD_BRW_SRVLOCK : 0;
 
-        CDEBUG(D_INFO, "%i %i\n", qin->pl_nr, crt);
         /*
          * NOTE: here @page is a top-level page. This is done to avoid
          *       creation of sub-page-list.
          */
         cl_page_list_for_each_safe(page, tmp, qin) {
                 struct osc_async_page *oap;
-                struct obd_export     *exp;
 
                 /* Top level IO. */
                 io = page->cp_owner;
                 LASSERT(io != NULL);
 
-                opg = osc_cl_page_osc(page);
-                oap = &opg->ops_oap;
-                osc = cl2osc(opg->ops_cl.cpl_obj);
-                exp = osc_export(osc);
+               opg = osc_cl_page_osc(page, osc);
+               oap = &opg->ops_oap;
+               LASSERT(osc == oap->oap_obj);
 
-                if (priority > CRP_NORMAL) {
-                        spin_lock(&oap->oap_lock);
-                        oap->oap_async_flags |= ASYNC_HP;
-                        spin_unlock(&oap->oap_lock);
-                }
-                /*
-                 * This can be checked without cli->cl_loi_list_lock, because
-                 * ->oap_*_item are always manipulated when the page is owned.
-                 */
-                if (!list_empty(&oap->oap_urgent_item) ||
-                    !list_empty(&oap->oap_rpc_item)) {
+               if (!list_empty(&oap->oap_pending_item) ||
+                   !list_empty(&oap->oap_rpc_item)) {
+                       CDEBUG(D_CACHE, "Busy oap %p page %p for submit.\n",
+                              oap, opg);
                         result = -EBUSY;
                         break;
                 }
 
-                if (osc0 == NULL) { /* first iteration */
-                        cli = &exp->exp_obd->u.cli;
-                        osc0 = osc;
-                } else /* check that all pages are against the same object
-                        * (for now) */
-                        LASSERT(osc == osc0);
-                if (queued++ == 0)
-                        client_obd_list_lock(&cli->cl_loi_list_lock);
                 result = cl_page_prep(env, io, page, crt);
-                if (result == 0) {
-                        cl_page_list_move(qout, qin, page);
-                        if (list_empty(&oap->oap_pending_item)) {
-                                osc_io_submit_page(env, cl2osc_io(env, ios),
-                                                   opg, crt);
-                        } else {
-                                result = osc_set_async_flags_base(cli,
-                                                                  osc->oo_oinfo,
-                                                                  oap,
-                                                                  OSC_FLAGS);
-                                /*
-                                 * bug 18881: we can't just break out here when
-                                 * error occurrs after cl_page_prep has been
-                                 * called against the page. The correct
-                                 * way is to call page's completion routine,
-                                 * as in osc_oap_interrupted.  For simplicity,
-                                 * we just force osc_set_async_flags_base() to
-                                 * not return error.
-                                 */
-                                LASSERT(result == 0);
-                        }
-                        opg->ops_submit_time = cfs_time_current();
-                } else {
+               if (result != 0) {
                         LASSERT(result < 0);
                         if (result != -EALREADY)
                                 break;
@@ -205,44 +203,47 @@ static int osc_io_submit(const struct lu_env *env,
                          * is not dirty.
                          */
                         result = 0;
+                       continue;
                 }
-                /*
-                 * Don't keep client_obd_list_lock() for too long.
-                 *
-                 * XXX client_obd_list lock has to be unlocked periodically to
-                 * avoid soft-lockups that tend to happen otherwise (see bug
-                 * 16651). On the other hand, osc_io_submit_page() queues a
-                 * page with ASYNC_URGENT flag and so all pages queued up
-                 * until this point are sent out immediately by
-                 * osc_io_unplug() resulting in sub-optimal RPCs (sub-optimal
-                 * RPCs only happen during `warm up' phase when less than
-                 * cl_max_rpcs_in_flight RPCs are in flight). To balance these
-                 * conflicting requirements, one might unplug once enough
-                 * pages to form a large RPC were queued (i.e., use
-                 * cli->cl_max_pages_per_rpc as OSC_QUEUE_GRAIN, see
-                 * lop_makes_rpc()), or ignore soft-lockup issue altogether.
-                 *
-                 * XXX lock_need_resched() should be used here, but it is not
-                 * available in the older of supported kernels.
-                 */
-                if (queued > OSC_QUEUE_GRAIN || cfs_need_resched()) {
-                        queued = 0;
-                        osc_io_unplug(env, osc, cli);
-                        cfs_cond_resched();
-                }
-        }
-
-        LASSERT(ergo(result == 0, cli != NULL));
-        LASSERT(ergo(result == 0, osc == osc0));
 
-        if (queued > 0)
-                osc_io_unplug(env, osc, cli);
-        CDEBUG(D_INFO, "%i/%i %i\n", qin->pl_nr, qout->pl_nr, result);
-        return qout->pl_nr > 0 ? 0 : result;
+               spin_lock(&oap->oap_lock);
+               oap->oap_async_flags = ASYNC_URGENT|ASYNC_READY;
+               oap->oap_async_flags |= ASYNC_COUNT_STABLE;
+               spin_unlock(&oap->oap_lock);
+
+               osc_page_submit(env, opg, crt, brw_flags);
+               list_add_tail(&oap->oap_pending_item, &list);
+
+               if (page->cp_sync_io != NULL)
+                       cl_page_list_move(qout, qin, page);
+               else /* async IO */
+                       cl_page_list_del(env, qin, page);
+
+               if (++queued == max_pages) {
+                       queued = 0;
+                       result = osc_queue_sync_pages(env, osc, &list, cmd,
+                                                     brw_flags);
+                       if (result < 0)
+                               break;
+               }
+       }
+
+       if (queued > 0)
+               result = osc_queue_sync_pages(env, osc, &list, cmd, brw_flags);
+
+       CDEBUG(D_INFO, "%d/%d %d\n", qin->pl_nr, qout->pl_nr, result);
+       return qout->pl_nr > 0 ? 0 : result;
 }
 
+/**
+ * This is called when a page is accessed within file in a way that creates
+ * new page, if one were missing (i.e., if there were a hole at that place in
+ * the file, or accessed page is beyond the current file size).
+ *
+ * Expand stripe KMS if necessary.
+ */
 static void osc_page_touch_at(const struct lu_env *env,
-                              struct cl_object *obj, pgoff_t idx, unsigned to)
+                             struct cl_object *obj, pgoff_t idx, size_t to)
 {
         struct lov_oinfo  *loi  = cl2osc(obj)->oo_oinfo;
         struct cl_attr    *attr = &osc_env_info(env)->oti_attr;
@@ -264,182 +265,219 @@ static void osc_page_touch_at(const struct lu_env *env,
                kms > loi->loi_kms ? "" : "not ", loi->loi_kms, kms,
                loi->loi_lvb.lvb_size);
 
-        valid = 0;
-        if (kms > loi->loi_kms) {
-                attr->cat_kms = kms;
-                valid |= CAT_KMS;
-        }
-        if (kms > loi->loi_lvb.lvb_size) {
-                attr->cat_size = kms;
-                valid |= CAT_SIZE;
-        }
-        cl_object_attr_set(env, obj, attr, valid);
-        cl_object_attr_unlock(obj);
+       attr->cat_mtime = attr->cat_ctime = LTIME_S(CFS_CURRENT_TIME);
+       valid = CAT_MTIME | CAT_CTIME;
+       if (kms > loi->loi_kms) {
+               attr->cat_kms = kms;
+               valid |= CAT_KMS;
+       }
+       if (kms > loi->loi_lvb.lvb_size) {
+               attr->cat_size = kms;
+               valid |= CAT_SIZE;
+       }
+       cl_object_attr_update(env, obj, attr, valid);
+       cl_object_attr_unlock(obj);
 }
 
-/**
- * This is called when a page is accessed within file in a way that creates
- * new page, if one were missing (i.e., if there were a hole at that place in
- * the file, or accessed page is beyond the current file size). Examples:
- * ->commit_write() and ->nopage() methods.
- *
- * Expand stripe KMS if necessary.
- */
-static void osc_page_touch(const struct lu_env *env,
-                           struct osc_page *opage, unsigned to)
+static int osc_io_commit_async(const struct lu_env *env,
+                               const struct cl_io_slice *ios,
+                               struct cl_page_list *qin, int from, int to,
+                               cl_commit_cbt cb)
 {
-        struct cl_page    *page = opage->ops_cl.cpl_page;
-        struct cl_object  *obj  = opage->ops_cl.cpl_obj;
-
-        osc_page_touch_at(env, obj, page->cp_index, to);
+       struct cl_io    *io = ios->cis_io;
+       struct osc_io   *oio = cl2osc_io(env, ios);
+       struct osc_object *osc = cl2osc(ios->cis_obj);
+       struct cl_page  *page;
+       struct cl_page  *last_page;
+       struct osc_page *opg;
+       int result = 0;
+       ENTRY;
+
+       LASSERT(qin->pl_nr > 0);
+
+       /* Handle partial page cases */
+       last_page = cl_page_list_last(qin);
+       if (oio->oi_lockless) {
+               page = cl_page_list_first(qin);
+               if (page == last_page) {
+                       cl_page_clip(env, page, from, to);
+               } else {
+                       if (from != 0)
+                               cl_page_clip(env, page, from, PAGE_SIZE);
+                       if (to != PAGE_SIZE)
+                               cl_page_clip(env, last_page, 0, to);
+               }
+       }
+
+       while (qin->pl_nr > 0) {
+               struct osc_async_page *oap;
+
+               page = cl_page_list_first(qin);
+               opg = osc_cl_page_osc(page, osc);
+               oap = &opg->ops_oap;
+
+               if (!list_empty(&oap->oap_rpc_item)) {
+                       CDEBUG(D_CACHE, "Busy oap %p page %p for submit.\n",
+                              oap, opg);
+                       result = -EBUSY;
+                       break;
+               }
+
+               /* The page may be already in dirty cache. */
+               if (list_empty(&oap->oap_pending_item)) {
+                       result = osc_page_cache_add(env, &opg->ops_cl, io);
+                       if (result != 0)
+                               break;
+               }
+
+               osc_page_touch_at(env, osc2cl(osc), osc_index(opg),
+                                 page == last_page ? to : PAGE_SIZE);
+
+               cl_page_list_del(env, qin, page);
+
+               (*cb)(env, io, page);
+               /* Can't access page any more. Page can be in transfer and
+                * complete at any time. */
+       }
+
+       /* for sync write, kernel will wait for this page to be flushed before
+        * osc_io_end() is called, so release it earlier.
+        * for mkwrite(), it's known there are no further pages. */
+       if (cl_io_is_sync_write(io) && oio->oi_active != NULL) {
+               osc_extent_release(env, oio->oi_active);
+               oio->oi_active = NULL;
+       }
+
+       CDEBUG(D_INFO, "%d %d\n", qin->pl_nr, result);
+       RETURN(result);
 }
 
-/**
- * Implements cl_io_operations::cio_prepare_write() method for osc layer.
- *
- * \retval -EIO transfer initiated against this osc will most likely fail
- * \retval 0    transfer initiated against this osc will most likely succeed.
- *
- * The reason for this check is to immediately return an error to the caller
- * in the case of a deactivated import. Note, that import can be deactivated
- * later, while pages, dirtied by this IO, are still in the cache, but this is
- * irrelevant, because that would still return an error to the application (if
- * it does fsync), but many applications don't do fsync because of performance
- * issues, and we wanted to return an -EIO at write time to notify the
- * application.
- */
-static int osc_io_prepare_write(const struct lu_env *env,
-                                const struct cl_io_slice *ios,
-                                const struct cl_page_slice *slice,
-                                unsigned from, unsigned to)
+static int osc_io_rw_iter_init(const struct lu_env *env,
+                               const struct cl_io_slice *ios)
 {
-        struct osc_device *dev = lu2osc_dev(slice->cpl_obj->co_lu.lo_dev);
-        struct obd_import *imp = class_exp2cliimp(dev->od_exp);
-
-        ENTRY;
-
-        /*
-         * This implements OBD_BRW_CHECK logic from old client.
-         */
-
-        RETURN(imp == NULL || imp->imp_invalid ? -EIO : 0);
+       struct cl_io *io = ios->cis_io;
+       struct osc_io *oio = osc_env_io(env);
+       struct osc_object *osc = cl2osc(ios->cis_obj);
+       struct client_obd *cli = osc_cli(osc);
+       unsigned long c;
+       unsigned long npages;
+       unsigned long max_pages;
+       ENTRY;
+
+       if (cl_io_is_append(io))
+               RETURN(0);
+
+       npages = io->u.ci_rw.crw_count >> PAGE_CACHE_SHIFT;
+       if (io->u.ci_rw.crw_pos & ~PAGE_MASK)
+               ++npages;
+
+       max_pages = cli->cl_max_pages_per_rpc * cli->cl_max_rpcs_in_flight;
+       if (npages > max_pages)
+               npages = max_pages;
+
+       c = atomic_long_read(cli->cl_lru_left);
+       if (c < npages && osc_lru_reclaim(cli) > 0)
+               c = atomic_long_read(cli->cl_lru_left);
+       while (c >= npages) {
+               if (c == atomic_long_cmpxchg(cli->cl_lru_left, c, c - npages)) {
+                       oio->oi_lru_reserved = npages;
+                       break;
+               }
+               c = atomic_long_read(cli->cl_lru_left);
+       }
+
+       RETURN(0);
 }
 
-static int osc_io_commit_write(const struct lu_env *env,
-                               const struct cl_io_slice *ios,
-                               const struct cl_page_slice *slice,
-                               unsigned from, unsigned to)
+static void osc_io_rw_iter_fini(const struct lu_env *env,
+                               const struct cl_io_slice *ios)
 {
-        struct osc_page       *opg = cl2osc_page(slice);
-        struct osc_object     *obj = cl2osc(opg->ops_cl.cpl_obj);
-        struct osc_async_page *oap = &opg->ops_oap;
-        ENTRY;
-
-        LASSERT(to > 0);
-        /*
-         * XXX instead of calling osc_page_touch() here and in
-         * osc_io_fault_start() it might be more logical to introduce
-         * cl_page_touch() method, that generic cl_io_commit_write() and page
-         * fault code calls.
-         */
-        osc_page_touch(env, cl2osc_page(slice), to);
-        if (!client_is_remote(osc_export(obj)) &&
-            cfs_capable(CFS_CAP_SYS_RESOURCE))
-                oap->oap_brw_flags |= OBD_BRW_NOQUOTA;
-
-        RETURN(0);
+       struct osc_io *oio = osc_env_io(env);
+       struct osc_object *osc = cl2osc(ios->cis_obj);
+       struct client_obd *cli = osc_cli(osc);
+
+       if (oio->oi_lru_reserved > 0) {
+               atomic_long_add(oio->oi_lru_reserved, cli->cl_lru_left);
+               oio->oi_lru_reserved = 0;
+       }
+       oio->oi_write_osclock = NULL;
 }
 
 static int osc_io_fault_start(const struct lu_env *env,
-                              const struct cl_io_slice *ios)
+                             const struct cl_io_slice *ios)
 {
-        struct cl_io       *io;
-        struct cl_fault_io *fio;
-
-        ENTRY;
-
-        io  = ios->cis_io;
-        fio = &io->u.ci_fault;
-        CDEBUG(D_INFO, "%lu %i %i\n",
-               fio->ft_index, fio->ft_writable, fio->ft_nob);
-        /*
-         * If mapping is writeable, adjust kms to cover this page,
-         * but do not extend kms beyond actual file size.
-         * See bug 10919.
-         */
-        if (fio->ft_writable)
-                osc_page_touch_at(env, ios->cis_obj,
-                                  fio->ft_index, fio->ft_nob);
-        RETURN(0);
+       struct cl_io       *io;
+       struct cl_fault_io *fio;
+       ENTRY;
+
+       io  = ios->cis_io;
+       fio = &io->u.ci_fault;
+       CDEBUG(D_INFO, "%lu %d %zu\n",
+               fio->ft_index, fio->ft_writable, fio->ft_nob);
+       /*
+        * If mapping is writeable, adjust kms to cover this page,
+        * but do not extend kms beyond actual file size.
+        * See bug 10919.
+        */
+       if (fio->ft_writable)
+               osc_page_touch_at(env, ios->cis_obj,
+                                 fio->ft_index, fio->ft_nob);
+       RETURN(0);
 }
 
-static int osc_punch_upcall(void *a, int rc)
+static int osc_async_upcall(void *a, int rc)
 {
-        struct osc_punch_cbargs *args = a;
+       struct osc_async_cbargs *args = a;
 
         args->opc_rc = rc;
-        complete(&args->opc_sync);
+       complete(&args->opc_sync);
         return 0;
 }
 
-#ifdef __KERNEL__
 /**
  * Checks that there are no pages being written in the extent being truncated.
  */
+static int trunc_check_cb(const struct lu_env *env, struct cl_io *io,
+                         struct osc_page *ops, void *cbdata)
+{
+       struct cl_page *page = ops->ops_cl.cpl_page;
+       struct osc_async_page *oap;
+       __u64 start = *(__u64 *)cbdata;
+
+       oap = &ops->ops_oap;
+       if (oap->oap_cmd & OBD_BRW_WRITE &&
+           !list_empty(&oap->oap_pending_item))
+               CL_PAGE_DEBUG(D_ERROR, env, page, "exists " LPU64 "/%s.\n",
+                               start, current->comm);
+
+       if (PageLocked(page->cp_vmpage))
+               CDEBUG(D_CACHE, "page %p index %lu locked for %d.\n",
+                      ops, osc_index(ops), oap->oap_cmd & OBD_BRW_RWMASK);
+
+       return CLP_GANG_OKAY;
+}
+
 static void osc_trunc_check(const struct lu_env *env, struct cl_io *io,
-                            struct osc_io *oio, size_t size)
+                           struct osc_io *oio, __u64 size)
 {
-        struct osc_page     *cp;
-        struct osc_object   *obj;
-        struct cl_object    *clob;
-        struct cl_page      *page;
-        struct cl_page_list *list;
-        int                  partial;
-        pgoff_t              start;
+       struct cl_object *clob;
+       int     partial;
+       pgoff_t start;
 
         clob    = oio->oi_cl.cis_obj;
-        obj     = cl2osc(clob);
         start   = cl_index(clob, size);
         partial = cl_offset(clob, start) < size;
-        list    = &osc_env_info(env)->oti_plist;
 
         /*
          * Complain if there are pages in the truncated region.
-         *
-         * XXX this is quite expensive check.
          */
-        cl_page_list_init(list);
-        cl_page_gang_lookup(env, clob, io, start + partial, CL_PAGE_EOF, list);
-
-        cl_page_list_for_each(page, list)
-                CL_PAGE_DEBUG(D_ERROR, env, page, "exists %lu\n", start);
-
-        cl_page_list_disown(env, io, list);
-        cl_page_list_fini(env, list);
-
-        spin_lock(&obj->oo_seatbelt);
-        list_for_each_entry(cp, &obj->oo_inflight[CRT_WRITE], ops_inflight) {
-                page = cp->ops_cl.cpl_page;
-                if (page->cp_index >= start + partial) {
-                        cfs_task_t *submitter;
-
-                        submitter = cp->ops_submitter;
-                        /*
-                         * XXX Linux specific debugging stuff.
-                         */
-                        CL_PAGE_DEBUG(D_ERROR, env, page, "%s/%i %lu\n",
-                                      submitter->comm, submitter->pid, start);
-                        libcfs_debug_dumpstack(submitter);
-                }
-        }
-        spin_unlock(&obj->oo_seatbelt);
+       osc_page_gang_lookup(env, io, cl2osc(clob),
+                               start + partial, CL_PAGE_EOF,
+                               trunc_check_cb, (void *)&size);
 }
-#else /* __KERNEL__ */
-# define osc_trunc_check(env, io, oio, size) do {;} while (0)
-#endif
 
-static int osc_io_trunc_start(const struct lu_env *env,
-                              const struct cl_io_slice *slice)
+static int osc_io_setattr_start(const struct lu_env *env,
+                                const struct cl_io_slice *slice)
 {
         struct cl_io            *io     = slice->cis_io;
         struct osc_io           *oio    = cl2osc_io(env, slice);
@@ -447,134 +485,415 @@ static int osc_io_trunc_start(const struct lu_env *env,
         struct lov_oinfo        *loi    = cl2osc(obj)->oo_oinfo;
         struct cl_attr          *attr   = &osc_env_info(env)->oti_attr;
         struct obdo             *oa     = &oio->oi_oa;
-        struct osc_punch_cbargs *cbargs = &oio->oi_punch_cbarg;
-        struct obd_capa         *capa;
-        loff_t                   size   = io->u.ci_truncate.tr_size;
-        int                      result = 0;
-
-
-        memset(oa, 0, sizeof(*oa));
-
-        osc_trunc_check(env, io, oio, size);
-
-        if (oio->oi_lockless == 0) {
-                cl_object_attr_lock(obj);
-                result = cl_object_attr_get(env, obj, attr);
-                if (result == 0) {
-                        attr->cat_size = attr->cat_kms = size;
-                        result = cl_object_attr_set(env, obj, attr,
-                                                    CAT_SIZE|CAT_KMS);
+       struct osc_async_cbargs *cbargs = &oio->oi_cbarg;
+       __u64                    size   = io->u.ci_setattr.sa_attr.lvb_size;
+       unsigned int             ia_valid = io->u.ci_setattr.sa_valid;
+       int                      result = 0;
+       struct obd_info          oinfo = { { { 0 } } };
+
+       /* truncate cache dirty pages first */
+       if (cl_io_is_trunc(io))
+               result = osc_cache_truncate_start(env, oio, cl2osc(obj), size);
+
+       if (result == 0 && oio->oi_lockless == 0) {
+               cl_object_attr_lock(obj);
+               result = cl_object_attr_get(env, obj, attr);
+               if (result == 0) {
+                       struct ost_lvb *lvb = &io->u.ci_setattr.sa_attr;
+                       unsigned int cl_valid = 0;
+
+                       if (ia_valid & ATTR_SIZE) {
+                               attr->cat_size = attr->cat_kms = size;
+                               cl_valid = (CAT_SIZE | CAT_KMS);
+                       }
+                       if (ia_valid & ATTR_MTIME_SET) {
+                               attr->cat_mtime = lvb->lvb_mtime;
+                               cl_valid |= CAT_MTIME;
+                       }
+                       if (ia_valid & ATTR_ATIME_SET) {
+                               attr->cat_atime = lvb->lvb_atime;
+                               cl_valid |= CAT_ATIME;
+                       }
+                       if (ia_valid & ATTR_CTIME_SET) {
+                               attr->cat_ctime = lvb->lvb_ctime;
+                               cl_valid |= CAT_CTIME;
+                       }
+                       result = cl_object_attr_update(env, obj, attr,
+                                                      cl_valid);
+               }
+               cl_object_attr_unlock(obj);
+       }
+       memset(oa, 0, sizeof(*oa));
+       if (result == 0) {
+               oa->o_oi = loi->loi_oi;
+               obdo_set_parent_fid(oa, io->u.ci_setattr.sa_parent_fid);
+               oa->o_stripe_idx = io->u.ci_setattr.sa_stripe_index;
+               oa->o_valid |= OBD_MD_FLID | OBD_MD_FLGROUP;
+               if (ia_valid & ATTR_CTIME) {
+                       oa->o_valid |= OBD_MD_FLCTIME;
+                       oa->o_ctime = attr->cat_ctime;
+               }
+               if (ia_valid & ATTR_ATIME) {
+                       oa->o_valid |= OBD_MD_FLATIME;
+                       oa->o_atime = attr->cat_atime;
+               }
+               if (ia_valid & ATTR_MTIME) {
+                       oa->o_valid |= OBD_MD_FLMTIME;
+                       oa->o_mtime = attr->cat_mtime;
+               }
+                if (ia_valid & ATTR_SIZE) {
+                        oa->o_size = size;
+                        oa->o_blocks = OBD_OBJECT_EOF;
+                        oa->o_valid |= OBD_MD_FLSIZE | OBD_MD_FLBLOCKS;
+
+                        if (oio->oi_lockless) {
+                                oa->o_flags = OBD_FL_SRVLOCK;
+                                oa->o_valid |= OBD_MD_FLFLAGS;
+                        }
+                } else {
+                        LASSERT(oio->oi_lockless == 0);
                 }
-                cl_object_attr_unlock(obj);
-        }
 
-        if (result == 0) {
-                oa->o_id = loi->loi_id;
-                oa->o_gr = loi->loi_gr;
-                oa->o_mtime = attr->cat_mtime;
-                oa->o_atime = attr->cat_atime;
-                oa->o_ctime = attr->cat_ctime;
-                oa->o_valid = OBD_MD_FLID | OBD_MD_FLGROUP | OBD_MD_FLATIME |
-                        OBD_MD_FLCTIME | OBD_MD_FLMTIME;
-                if (oio->oi_lockless) {
-                        oa->o_flags = OBD_FL_TRUNCLOCK;
-                        oa->o_valid |= OBD_MD_FLFLAGS;
-                }
-                oa->o_size = size;
-                oa->o_blocks = OBD_OBJECT_EOF;
-                oa->o_valid |= OBD_MD_FLSIZE | OBD_MD_FLBLOCKS;
-
-                capa = io->u.ci_truncate.tr_capa;
-                init_completion(&cbargs->opc_sync);
-                result = osc_punch_base(osc_export(cl2osc(obj)), oa, capa,
-                                        osc_punch_upcall, cbargs, PTLRPCD_SET);
+               if (ia_valid & ATTR_ATTR_FLAG) {
+                       oa->o_flags = io->u.ci_setattr.sa_attr_flags;
+                       oa->o_valid |= OBD_MD_FLFLAGS;
+               }
+
+                oinfo.oi_oa = oa;
+                oinfo.oi_capa = io->u.ci_setattr.sa_capa;
+               init_completion(&cbargs->opc_sync);
+
+                if (ia_valid & ATTR_SIZE)
+                        result = osc_punch_base(osc_export(cl2osc(obj)),
+                                               &oinfo, osc_async_upcall,
+                                                cbargs, PTLRPCD_SET);
+                else
+                       result = osc_setattr_async(osc_export(cl2osc(obj)),
+                                                  &oinfo,
+                                                  osc_async_upcall,
+                                                  cbargs, PTLRPCD_SET);
+               cbargs->opc_rpc_sent = result == 0;
         }
         return result;
 }
 
-static void osc_io_trunc_end(const struct lu_env *env,
-                             const struct cl_io_slice *slice)
+static void osc_io_setattr_end(const struct lu_env *env,
+                               const struct cl_io_slice *slice)
 {
-        struct cl_io            *io     = slice->cis_io;
-        struct osc_io           *oio    = cl2osc_io(env, slice);
-        struct osc_punch_cbargs *cbargs = &oio->oi_punch_cbarg;
-        struct obdo             *oa     = &oio->oi_oa;
-        int result;
-
-        wait_for_completion(&cbargs->opc_sync);
+       struct cl_io     *io  = slice->cis_io;
+       struct osc_io    *oio = cl2osc_io(env, slice);
+       struct cl_object *obj = slice->cis_obj;
+       struct osc_async_cbargs *cbargs = &oio->oi_cbarg;
+        int result = 0;
 
-        result = io->ci_result = cbargs->opc_rc;
+       if (cbargs->opc_rpc_sent) {
+               wait_for_completion(&cbargs->opc_sync);
+               result = io->ci_result = cbargs->opc_rc;
+       }
         if (result == 0) {
-                struct cl_object *obj = slice->cis_obj;
-                if (oio->oi_lockless == 0) {
-                        struct cl_attr *attr = &osc_env_info(env)->oti_attr;
-                        int valid = 0;
-
-                        /* Update kms & size */
-                        if (oa->o_valid & OBD_MD_FLSIZE) {
-                                attr->cat_size = oa->o_size;
-                                attr->cat_kms  = oa->o_size;
-                                valid |= CAT_KMS|CAT_SIZE;
-                        }
-                        if (oa->o_valid & OBD_MD_FLBLOCKS) {
-                                attr->cat_blocks = oa->o_blocks;
-                                valid |= CAT_BLOCKS;
-                        }
-                        if (oa->o_valid & OBD_MD_FLMTIME) {
-                                attr->cat_mtime = oa->o_mtime;
-                                valid |= CAT_MTIME;
-                        }
-                        if (oa->o_valid & OBD_MD_FLCTIME) {
-                                attr->cat_ctime = oa->o_ctime;
-                                valid |= CAT_CTIME;
-                        }
-                        if (oa->o_valid & OBD_MD_FLATIME) {
-                                attr->cat_atime = oa->o_atime;
-                                valid |= CAT_ATIME;
-                        }
-                        cl_object_attr_lock(obj);
-                        result = cl_object_attr_set(env, obj, attr, valid);
-                        cl_object_attr_unlock(obj);
-                } else {  /* lockless truncate */
+                if (oio->oi_lockless) {
+                        /* lockless truncate */
                         struct osc_device *osd = lu2osc_dev(obj->co_lu.lo_dev);
+
+                        LASSERT(cl_io_is_trunc(io));
                         /* XXX: Need a lock. */
                         osd->od_stats.os_lockless_truncates++;
                 }
         }
 
-        /* return result; */
+       if (cl_io_is_trunc(io)) {
+               __u64 size = io->u.ci_setattr.sa_attr.lvb_size;
+               osc_trunc_check(env, io, oio, size);
+               if (oio->oi_trunc != NULL) {
+                       osc_cache_truncate_end(env, oio, cl2osc(obj));
+                       oio->oi_trunc = NULL;
+               }
+       }
+}
+
+struct osc_data_version_args {
+       struct osc_io *dva_oio;
+};
+
+static int
+osc_data_version_interpret(const struct lu_env *env, struct ptlrpc_request *req,
+                          void *arg, int rc)
+{
+       struct osc_data_version_args *dva = arg;
+       struct osc_io *oio = dva->dva_oio;
+       const struct ost_body *body;
+
+       ENTRY;
+       if (rc < 0)
+               GOTO(out, rc);
+
+       body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
+       if (body == NULL)
+               GOTO(out, rc = -EPROTO);
+
+       lustre_get_wire_obdo(&req->rq_import->imp_connect_data, &oio->oi_oa,
+                            &body->oa);
+       EXIT;
+out:
+       oio->oi_cbarg.opc_rc = rc;
+       complete(&oio->oi_cbarg.opc_sync);
+
+       return 0;
+}
+
+static int osc_io_data_version_start(const struct lu_env *env,
+                                    const struct cl_io_slice *slice)
+{
+       struct cl_data_version_io *dv   = &slice->cis_io->u.ci_data_version;
+       struct osc_io           *oio    = cl2osc_io(env, slice);
+       struct obdo             *oa     = &oio->oi_oa;
+       struct osc_async_cbargs *cbargs = &oio->oi_cbarg;
+       struct osc_object       *obj    = cl2osc(slice->cis_obj);
+       struct lov_oinfo        *loi    = obj->oo_oinfo;
+       struct obd_export       *exp    = osc_export(obj);
+       struct ptlrpc_request   *req;
+       struct ost_body         *body;
+       struct osc_data_version_args *dva;
+       int rc;
+
+       ENTRY;
+       memset(oa, 0, sizeof(*oa));
+       oa->o_oi = loi->loi_oi;
+       oa->o_valid = OBD_MD_FLID | OBD_MD_FLGROUP;
+
+       if (dv->dv_flags & (LL_DV_RD_FLUSH | LL_DV_WR_FLUSH)) {
+               oa->o_valid |= OBD_MD_FLFLAGS;
+               oa->o_flags |= OBD_FL_SRVLOCK;
+               if (dv->dv_flags & LL_DV_WR_FLUSH)
+                       oa->o_flags |= OBD_FL_FLUSH;
+       }
+
+       init_completion(&cbargs->opc_sync);
+
+       req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_GETATTR);
+       if (req == NULL)
+               RETURN(-ENOMEM);
+
+       rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_GETATTR);
+       if (rc < 0) {
+               ptlrpc_request_free(req);
+               RETURN(rc);
+       }
+
+       body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
+       lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa, oa);
+
+       ptlrpc_request_set_replen(req);
+       req->rq_interpret_reply = osc_data_version_interpret;
+       CLASSERT(sizeof(*dva) <= sizeof(req->rq_async_args));
+       dva = ptlrpc_req_async_args(req);
+       dva->dva_oio = oio;
+
+       ptlrpcd_add_req(req, PDL_POLICY_ROUND, -1);
+
+       RETURN(0);
+}
+
+static void osc_io_data_version_end(const struct lu_env *env,
+                                   const struct cl_io_slice *slice)
+{
+       struct cl_data_version_io *dv = &slice->cis_io->u.ci_data_version;
+       struct osc_io           *oio    = cl2osc_io(env, slice);
+       struct osc_async_cbargs *cbargs = &oio->oi_cbarg;
+
+       ENTRY;
+       wait_for_completion(&cbargs->opc_sync);
+
+       if (cbargs->opc_rc != 0) {
+               slice->cis_io->ci_result = cbargs->opc_rc;
+       } else if (!(oio->oi_oa.o_valid & OBD_MD_FLDATAVERSION)) {
+               slice->cis_io->ci_result = -EOPNOTSUPP;
+       } else {
+               dv->dv_data_version = oio->oi_oa.o_data_version;
+               slice->cis_io->ci_result = 0;
+       }
+
+       EXIT;
+}
+
+static int osc_io_read_start(const struct lu_env *env,
+                             const struct cl_io_slice *slice)
+{
+       struct cl_object *obj  = slice->cis_obj;
+       struct cl_attr   *attr = &osc_env_info(env)->oti_attr;
+       int rc = 0;
+       ENTRY;
+
+       if (!slice->cis_io->ci_noatime) {
+               cl_object_attr_lock(obj);
+               attr->cat_atime = LTIME_S(CFS_CURRENT_TIME);
+               rc = cl_object_attr_update(env, obj, attr, CAT_ATIME);
+               cl_object_attr_unlock(obj);
+       }
+
+       RETURN(rc);
+}
+
+static int osc_io_write_start(const struct lu_env *env,
+                              const struct cl_io_slice *slice)
+{
+       struct cl_object *obj   = slice->cis_obj;
+       struct cl_attr   *attr  = &osc_env_info(env)->oti_attr;
+       int rc = 0;
+       ENTRY;
+
+       OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_DELAY_SETTIME, 1);
+       cl_object_attr_lock(obj);
+       attr->cat_mtime = attr->cat_ctime = LTIME_S(CFS_CURRENT_TIME);
+       rc = cl_object_attr_update(env, obj, attr, CAT_MTIME | CAT_CTIME);
+       cl_object_attr_unlock(obj);
+
+       RETURN(rc);
+}
+
+static int osc_fsync_ost(const struct lu_env *env, struct osc_object *obj,
+                        struct cl_fsync_io *fio)
+{
+       struct osc_io    *oio   = osc_env_io(env);
+       struct obdo      *oa    = &oio->oi_oa;
+       struct obd_info  *oinfo = &oio->oi_info;
+       struct lov_oinfo *loi   = obj->oo_oinfo;
+       struct osc_async_cbargs *cbargs = &oio->oi_cbarg;
+       int rc = 0;
+       ENTRY;
+
+       memset(oa, 0, sizeof(*oa));
+       oa->o_oi = loi->loi_oi;
+       oa->o_valid = OBD_MD_FLID | OBD_MD_FLGROUP;
+
+       /* reload size and blocks for start and end of sync range */
+       oa->o_size = fio->fi_start;
+       oa->o_blocks = fio->fi_end;
+       oa->o_valid |= OBD_MD_FLSIZE | OBD_MD_FLBLOCKS;
+
+       obdo_set_parent_fid(oa, fio->fi_fid);
+
+       memset(oinfo, 0, sizeof(*oinfo));
+       oinfo->oi_oa = oa;
+       oinfo->oi_capa = fio->fi_capa;
+       init_completion(&cbargs->opc_sync);
+
+       rc = osc_sync_base(osc_export(obj), oinfo, osc_async_upcall, cbargs,
+                          PTLRPCD_SET);
+       RETURN(rc);
+}
+
+static int osc_io_fsync_start(const struct lu_env *env,
+                             const struct cl_io_slice *slice)
+{
+       struct cl_io       *io  = slice->cis_io;
+       struct cl_fsync_io *fio = &io->u.ci_fsync;
+       struct cl_object   *obj = slice->cis_obj;
+       struct osc_object  *osc = cl2osc(obj);
+       pgoff_t start  = cl_index(obj, fio->fi_start);
+       pgoff_t end    = cl_index(obj, fio->fi_end);
+       int     result = 0;
+       ENTRY;
+
+       if (fio->fi_end == OBD_OBJECT_EOF)
+               end = CL_PAGE_EOF;
+
+       result = osc_cache_writeback_range(env, osc, start, end, 0,
+                                          fio->fi_mode == CL_FSYNC_DISCARD);
+       if (result > 0) {
+               fio->fi_nr_written += result;
+               result = 0;
+       }
+       if (fio->fi_mode == CL_FSYNC_ALL) {
+               int rc;
+
+               /* we have to wait for writeback to finish before we can
+                * send OST_SYNC RPC. This is bad because it causes extents
+                * to be written osc by osc. However, we usually start
+                * writeback before CL_FSYNC_ALL so this won't have any real
+                * problem. */
+               rc = osc_cache_wait_range(env, osc, start, end);
+               if (result == 0)
+                       result = rc;
+               rc = osc_fsync_ost(env, osc, fio);
+               if (result == 0)
+                       result = rc;
+       }
+
+       RETURN(result);
+}
+
+static void osc_io_fsync_end(const struct lu_env *env,
+                            const struct cl_io_slice *slice)
+{
+       struct cl_fsync_io *fio = &slice->cis_io->u.ci_fsync;
+       struct cl_object   *obj = slice->cis_obj;
+       pgoff_t start = cl_index(obj, fio->fi_start);
+       pgoff_t end   = cl_index(obj, fio->fi_end);
+       int result = 0;
+
+       if (fio->fi_mode == CL_FSYNC_LOCAL) {
+               result = osc_cache_wait_range(env, cl2osc(obj), start, end);
+       } else if (fio->fi_mode == CL_FSYNC_ALL) {
+               struct osc_io           *oio    = cl2osc_io(env, slice);
+               struct osc_async_cbargs *cbargs = &oio->oi_cbarg;
+
+               wait_for_completion(&cbargs->opc_sync);
+               if (result == 0)
+                       result = cbargs->opc_rc;
+       }
+       slice->cis_io->ci_result = result;
+}
+
+static void osc_io_end(const struct lu_env *env,
+                      const struct cl_io_slice *slice)
+{
+       struct osc_io *oio = cl2osc_io(env, slice);
+
+       if (oio->oi_active) {
+               osc_extent_release(env, oio->oi_active);
+               oio->oi_active = NULL;
+       }
 }
 
 static const struct cl_io_operations osc_io_ops = {
-        .op = {
-                [CIT_READ] = {
-                        .cio_fini   = osc_io_fini
-                },
-                [CIT_WRITE] = {
-                        .cio_fini   = osc_io_fini
-                },
-                [CIT_TRUNC] = {
-                        .cio_start  = osc_io_trunc_start,
-                        .cio_end    = osc_io_trunc_end
-                },
-                [CIT_FAULT] = {
-                        .cio_fini   = osc_io_fini,
-                        .cio_start  = osc_io_fault_start
-                },
-                [CIT_MISC] = {
-                        .cio_fini   = osc_io_fini
-                }
-        },
-        .req_op = {
-                 [CRT_READ] = {
-                         .cio_submit    = osc_io_submit
-                 },
-                 [CRT_WRITE] = {
-                         .cio_submit    = osc_io_submit
-                 }
-         },
-        .cio_prepare_write = osc_io_prepare_write,
-        .cio_commit_write  = osc_io_commit_write
+       .op = {
+               [CIT_READ] = {
+                       .cio_start  = osc_io_read_start,
+                       .cio_fini   = osc_io_fini
+               },
+               [CIT_WRITE] = {
+                       .cio_iter_init = osc_io_rw_iter_init,
+                       .cio_iter_fini = osc_io_rw_iter_fini,
+                       .cio_start  = osc_io_write_start,
+                       .cio_end    = osc_io_end,
+                       .cio_fini   = osc_io_fini
+               },
+               [CIT_SETATTR] = {
+                       .cio_start  = osc_io_setattr_start,
+                       .cio_end    = osc_io_setattr_end
+               },
+               [CIT_DATA_VERSION] = {
+                       .cio_start  = osc_io_data_version_start,
+                       .cio_end    = osc_io_data_version_end,
+               },
+               [CIT_FAULT] = {
+                       .cio_start  = osc_io_fault_start,
+                       .cio_end    = osc_io_end,
+                       .cio_fini   = osc_io_fini
+               },
+               [CIT_FSYNC] = {
+                       .cio_start  = osc_io_fsync_start,
+                       .cio_end    = osc_io_fsync_end,
+                       .cio_fini   = osc_io_fini
+               },
+               [CIT_MISC] = {
+                       .cio_fini   = osc_io_fini
+               }
+       },
+       .cio_read_ahead             = osc_io_read_ahead,
+       .cio_submit                 = osc_io_submit,
+       .cio_commit_async           = osc_io_commit_async
 };
 
 /*****************************************************************************
@@ -600,50 +919,78 @@ static void osc_req_completion(const struct lu_env *env,
 
 /**
  * Implementation of struct cl_req_operations::cro_attr_set() for osc
- * layer. osc is responsible for struct obdo::o_id and struct obdo::o_gr
+ * layer. osc is responsible for struct obdo::o_id and struct obdo::o_seq
  * fields.
  */
 static void osc_req_attr_set(const struct lu_env *env,
-                             const struct cl_req_slice *slice,
-                             const struct cl_object *obj,
-                             struct cl_req_attr *attr, obd_valid flags)
+                            const struct cl_req_slice *slice,
+                            const struct cl_object *obj,
+                            struct cl_req_attr *attr, u64 flags)
 {
-        struct lov_oinfo *oinfo;
-        struct cl_req    *clerq;
-        struct cl_page   *apage; /* _some_ page in @clerq */
-        struct cl_lock   *lock;  /* _some_ lock protecting @apage */
-        struct osc_lock  *olck;
-        struct osc_page  *opg;
-        struct obdo      *oa;
-
-        oa = attr->cra_oa;
-        oinfo = cl2osc(obj)->oo_oinfo;
-        if (flags & OBD_MD_FLID) {
-                oa->o_id = oinfo->loi_id;
-                oa->o_valid |= OBD_MD_FLID;
-        }
-        if (flags & OBD_MD_FLGROUP) {
-                oa->o_gr = oinfo->loi_gr;
-                oa->o_valid |= OBD_MD_FLGROUP;
-        }
-        if (flags & OBD_MD_FLHANDLE) {
-                clerq = slice->crs_req;
-                LASSERT(!list_empty(&clerq->crq_pages));
-                apage = container_of(clerq->crq_pages.next,
-                                     struct cl_page, cp_flight);
-                opg = osc_cl_page_osc(apage);
-                apage = opg->ops_cl.cpl_page; /* now apage is a sub-page */
-                lock = cl_lock_at_page(env, apage->cp_obj, apage, NULL, 1, 1);
-                LASSERT(lock != NULL);
-                olck = osc_lock_at(lock);
-                LASSERT(olck != NULL);
-                /* check for lockless io. */
-                if (olck->ols_lock != NULL) {
-                        oa->o_handle = olck->ols_lock->l_remote_handle;
-                        oa->o_valid |= OBD_MD_FLHANDLE;
-                }
-                cl_lock_put(env, lock);
-        }
+       struct lov_oinfo *oinfo;
+       struct cl_req    *clerq;
+       struct cl_page   *apage; /* _some_ page in @clerq */
+       struct ldlm_lock *lock;  /* _some_ lock protecting @apage */
+       struct osc_page  *opg;
+       struct obdo      *oa;
+       struct ost_lvb   *lvb;
+
+       oinfo   = cl2osc(obj)->oo_oinfo;
+       lvb     = &oinfo->loi_lvb;
+       oa      = attr->cra_oa;
+
+       if ((flags & OBD_MD_FLMTIME) != 0) {
+               oa->o_mtime = lvb->lvb_mtime;
+               oa->o_valid |= OBD_MD_FLMTIME;
+       }
+       if ((flags & OBD_MD_FLATIME) != 0) {
+               oa->o_atime = lvb->lvb_atime;
+               oa->o_valid |= OBD_MD_FLATIME;
+       }
+       if ((flags & OBD_MD_FLCTIME) != 0) {
+               oa->o_ctime = lvb->lvb_ctime;
+               oa->o_valid |= OBD_MD_FLCTIME;
+       }
+       if (flags & OBD_MD_FLGROUP) {
+               ostid_set_seq(&oa->o_oi, ostid_seq(&oinfo->loi_oi));
+               oa->o_valid |= OBD_MD_FLGROUP;
+       }
+       if (flags & OBD_MD_FLID) {
+               ostid_set_id(&oa->o_oi, ostid_id(&oinfo->loi_oi));
+               oa->o_valid |= OBD_MD_FLID;
+       }
+       if (flags & OBD_MD_FLHANDLE) {
+               clerq = slice->crs_req;
+               LASSERT(!list_empty(&clerq->crq_pages));
+               apage = container_of(clerq->crq_pages.next,
+                                    struct cl_page, cp_flight);
+               opg = osc_cl_page_osc(apage, NULL);
+               lock = osc_dlmlock_at_pgoff(env, cl2osc(obj), osc_index(opg),
+                               OSC_DAP_FL_TEST_LOCK | OSC_DAP_FL_CANCELING);
+               if (lock == NULL && !opg->ops_srvlock) {
+                       struct ldlm_resource *res;
+                       struct ldlm_res_id *resname;
+
+                       CL_PAGE_DEBUG(D_ERROR, env, apage, "uncovered page!\n");
+
+                       resname = &osc_env_info(env)->oti_resname;
+                       ostid_build_res_name(&oinfo->loi_oi, resname);
+                       res = ldlm_resource_get(
+                               osc_export(cl2osc(obj))->exp_obd->obd_namespace,
+                               NULL, resname, LDLM_EXTENT, 0);
+                       ldlm_resource_dump(D_ERROR, res);
+
+                       libcfs_debug_dumpstack(NULL);
+                       LBUG();
+               }
+
+               /* check for lockless io. */
+               if (lock != NULL) {
+                       oa->o_handle = lock->l_remote_handle;
+                       oa->o_valid |= OBD_MD_FLHANDLE;
+                       LDLM_LOCK_PUT(lock);
+               }
+       }
 }
 
 static const struct cl_req_operations osc_req_ops = {
@@ -664,18 +1011,18 @@ int osc_io_init(const struct lu_env *env,
 }
 
 int osc_req_init(const struct lu_env *env, struct cl_device *dev,
-                 struct cl_req *req)
+                struct cl_req *req)
 {
-        struct osc_req *or;
-        int result;
-
-        OBD_SLAB_ALLOC_PTR_GFP(or, osc_req_kmem, CFS_ALLOC_IO);
-        if (or != NULL) {
-                cl_req_slice_add(req, &or->or_cl, dev, &osc_req_ops);
-                result = 0;
-        } else
-                result = -ENOMEM;
-        return result;
+       struct osc_req *or;
+       int result;
+
+       OBD_SLAB_ALLOC_PTR_GFP(or, osc_req_kmem, GFP_NOFS);
+       if (or != NULL) {
+               cl_req_slice_add(req, &or->or_cl, dev, &osc_req_ops);
+               result = 0;
+       } else
+               result = -ENOMEM;
+       return result;
 }
 
 /** @} osc */
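
For context, the version returned through CIT_DATA_VERSION is what `lfs data_version` and HSM tooling consume. A hedged userspace sketch, assuming liblustreapi's llapi_get_data_version() wrapper around the LL_IOC_DATA_VERSION ioctl (which ultimately reaches osc_io_data_version_start() above); the LL_DV_* flush flags are the same values tested in the kernel code:

/* Hypothetical usage sketch; error handling trimmed. */
#include <stdio.h>
#include <lustre/lustreapi.h>

static int print_data_version(int fd)
{
        __u64 dv = 0;
        int rc;

        /* LL_DV_RD_FLUSH forces dirty data to be flushed so the returned
         * version reflects what the OSTs currently hold. */
        rc = llapi_get_data_version(fd, &dv, LL_DV_RD_FLUSH);
        if (rc == 0)
                printf("data_version: %llu\n", (unsigned long long)dv);

        return rc;
}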