Whamcloud - gitweb
LU-1303 osp: OSP logging functionality
author     Mikhail Pershin <tappro@whamcloud.com>
           Mon, 24 Sep 2012 09:54:54 +0000 (13:54 +0400)
committer  Oleg Drokin <green@whamcloud.com>
           Fri, 28 Sep 2012 18:11:07 +0000 (14:11 -0400)
OSP writes unlink and setattr llogs and tracks the corresponding commits
on the OST side, then cancels the llog records locally

Add missing obd_device methods and import event handling

Signed-off-by: Mikhail Pershin <tappro@whamcloud.com>
Change-Id: I63ecc975e483a7f9caac9554cc15c3a4fb4a6e6d
Reviewed-on: http://review.whamcloud.com/4095
Reviewed-by: wangdi <di.wang@whamcloud.com>
Tested-by: Hudson
Reviewed-by: Alex Zhuravlev <bzzz@whamcloud.com>
Tested-by: Maloo <whamcloud.maloo@gmail.com>
lustre/include/lustre_log.h
lustre/obdclass/llog_cat.c
lustre/osp/Makefile.in
lustre/osp/osp_dev.c
lustre/osp/osp_internal.h
lustre/osp/osp_object.c
lustre/osp/osp_precreate.c
lustre/osp/osp_sync.c [new file with mode: 0644]

index 850d0e7..8b5b98b 100644 (file)
@@ -185,6 +185,12 @@ struct llog_process_cat_args {
 };
 
 int llog_cat_close(const struct lu_env *env, struct llog_handle *cathandle);
+int llog_cat_add_rec(const struct lu_env *env, struct llog_handle *cathandle,
+                    struct llog_rec_hdr *rec, struct llog_cookie *reccookie,
+                    void *buf, struct thandle *th);
+int llog_cat_declare_add_rec(const struct lu_env *env,
+                            struct llog_handle *cathandle,
+                            struct llog_rec_hdr *rec, struct thandle *th);
 int llog_cat_add(const struct lu_env *env, struct llog_handle *cathandle,
                 struct llog_rec_hdr *rec, struct llog_cookie *reccookie,
                 void *buf);
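
For context, the two new entry points pair up in the usual declare/execute transaction pattern of the dt API: reservation before dt_trans_start(), the actual write under the started handle. A minimal sketch of a caller, assuming an existing dt device `dev`, an open catalog `cathandle`, and a prepared record header `hdr` (all caller-side names here are illustrative, not from this patch):

        struct llog_cookie cookie;
        struct thandle *th;
        int rc;

        th = dt_trans_create(env, dev);
        if (IS_ERR(th))
                return PTR_ERR(th);

        /* declare phase: reserve llog space inside the transaction */
        rc = llog_cat_declare_add_rec(env, cathandle, &hdr, th);
        if (rc == 0)
                rc = dt_trans_start(env, dev, th);
        /* execute phase: write the record under the same handle */
        if (rc == 0)
                rc = llog_cat_add_rec(env, cathandle, &hdr, &cookie,
                                      NULL, th);
        dt_trans_stop(env, dev, th);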
index 0f9df2e..399293c 100644 (file)
@@ -638,7 +638,7 @@ int llog_cat_process(const struct lu_env *env, struct llog_handle *cat_llh,
                     llog_cb_t cb, void *data, int startcat, int startidx)
 {
        return llog_cat_process_or_fork(env, cat_llh, cb, data, startcat,
-                                       startidx, 0);
+                                       startidx, false);
 }
 EXPORT_SYMBOL(llog_cat_process);
 
@@ -877,7 +877,10 @@ int llog_cat_init_and_process(const struct lu_env *env,
                RETURN(rc);
 
        rc = llog_process(env, llh, cat_cancel_cb, NULL, NULL);
-       RETURN(rc);
+       if (rc)
+               CERROR("%s: llog_process() with cat_cancel_cb failed: rc = "
+                      "%d\n", llh->lgh_ctxt->loc_obd->obd_name, rc);
+       RETURN(0);
 }
 EXPORT_SYMBOL(llog_cat_init_and_process);
 
index 298a313..788d5a4 100644 (file)
@@ -1,5 +1,5 @@
 MODULES := osp
-osp-objs := osp_dev.o osp_object.o osp_precreate.o lproc_osp.o
+osp-objs := osp_dev.o osp_object.o osp_precreate.o osp_sync.o lproc_osp.o
 
 EXTRA_DIST = $(osp-objs:.o=.c) osp_internal.h
 
index 4303ed0..7866264 100644 (file)
@@ -222,6 +222,9 @@ static int osp_shutdown(const struct lu_env *env, struct osp_device *d)
        /* stop precreate thread */
        osp_precreate_fini(d);
 
+       /* stop sync thread */
+       osp_sync_fini(d);
+
        RETURN(rc);
 }
 
@@ -286,6 +289,49 @@ const struct lu_device_operations osp_lu_ops = {
        .ldo_recovery_complete  = osp_recovery_complete,
 };
 
+/**
+ * Provides statfs data from the corresponding OST.
+ */
+static int osp_statfs(const struct lu_env *env, struct dt_device *dev,
+                     struct obd_statfs *sfs)
+{
+       struct osp_device *d = dt2osp_dev(dev);
+
+       ENTRY;
+
+       if (unlikely(d->opd_imp_active == 0)) {
+               /*
+                * in case of an inactive OST we return zeroes
+                * so that the caller can tell this device
+                * is unusable for new objects
+                *
+                * XXX: shouldn't we take a normal statfs and fill
+                * just a few specific fields with zeroes?
+                */
+               memset(sfs, 0, sizeof(*sfs));
+               sfs->os_bsize = 4096;
+               RETURN(0);
+       }
+
+       /* return recently updated data */
+       *sfs = d->opd_statfs;
+
+       /*
+        * the layer above osp (usually lod) can use os_ffree to estimate
+        * how many objects are available for immediate creation
+        */
+       cfs_spin_lock(&d->opd_pre_lock);
+       sfs->os_ffree = d->opd_pre_last_created - d->opd_pre_next;
+       cfs_spin_unlock(&d->opd_pre_lock);
+
+       CDEBUG(D_OTHER, "%s: "LPU64" blocks, "LPU64" free, "LPU64" avail, "
+              LPU64" files, "LPU64" free files\n", d->opd_obd->obd_name,
+              sfs->os_blocks, sfs->os_bfree, sfs->os_bavail,
+              sfs->os_files, sfs->os_ffree);
+       RETURN(0);
+}
+
 static int osp_sync(const struct lu_env *env, struct dt_device *dev)
 {
        ENTRY;
@@ -298,6 +344,7 @@ static int osp_sync(const struct lu_env *env, struct dt_device *dev)
 }
 
 static const struct dt_device_operations osp_dt_ops = {
+       .dt_statfs      = osp_statfs,
        .dt_sync        = osp_sync,
 };
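
On the consumer side, the os_ffree estimate filled in by osp_statfs() lets an upper layer decide whether this OST can supply objects right away. A hedged sketch of such a check through the generic dt_statfs() wrapper (the decision values are illustrative, not mandated by this patch):

        struct obd_statfs sfs;
        int rc;

        rc = dt_statfs(env, &osp->opd_dt_dev, &sfs);
        if (rc != 0)
                return rc;
        if (sfs.os_blocks == 0)
                return -ENODEV; /* zeroed statfs: import is inactive */
        if (sfs.os_ffree == 0)
                return -ENOSPC; /* no objects ready for immediate use */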
 
@@ -466,6 +513,14 @@ static int osp_init0(const struct lu_env *env, struct osp_device *m,
                GOTO(out_last_used, rc);
 
        /*
+        * Initialize the synchronization mechanism that takes care of
+        * propagating changes to the OST in a near-transactional manner
+        */
+       rc = osp_sync_init(env, m);
+       if (rc)
+               GOTO(out_precreat, rc);
+
+       /*
         * Initiate connect to OST
         */
        ll_generate_random_uuid(uuid);
@@ -481,6 +536,9 @@ static int osp_init0(const struct lu_env *env, struct osp_device *m,
        RETURN(0);
 
 out:
+       /* stop sync thread */
+       osp_sync_fini(m);
+out_precreat:
        /* stop precreate thread */
        osp_precreate_fini(m);
 out_last_used:
@@ -665,6 +723,166 @@ static int osp_obd_disconnect(struct obd_export *exp)
        RETURN(rc);
 }
 
+/*
+ * lprocfs helpers still use OBD API, let's keep obd_statfs() support
+ */
+static int osp_obd_statfs(const struct lu_env *env, struct obd_export *exp,
+                         struct obd_statfs *osfs, __u64 max_age, __u32 flags)
+{
+       struct obd_statfs       *msfs;
+       struct ptlrpc_request   *req;
+       struct obd_import       *imp = NULL;
+       int                      rc;
+
+       ENTRY;
+
+       /* Since the request might also come from lprocfs, we need to
+        * sync this with client_disconnect_export() (Bug 15684) */
+       cfs_down_read(&exp->exp_obd->u.cli.cl_sem);
+       if (exp->exp_obd->u.cli.cl_import)
+               imp = class_import_get(exp->exp_obd->u.cli.cl_import);
+       cfs_up_read(&exp->exp_obd->u.cli.cl_sem);
+       if (!imp)
+               RETURN(-ENODEV);
+
+       /* We could possibly pass max_age in the request (as an absolute
+        * timestamp or a "seconds.usec ago") so the target can avoid doing
+        * extra calls into the filesystem if that isn't necessary (e.g.
+        * during mount that would help a bit).  Having relative timestamps
+        * is not so great if request processing is slow, while absolute
+        * timestamps are not ideal because they need time synchronization. */
+       req = ptlrpc_request_alloc(imp, &RQF_OST_STATFS);
+
+       class_import_put(imp);
+
+       if (req == NULL)
+               RETURN(-ENOMEM);
+
+       rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_STATFS);
+       if (rc) {
+               ptlrpc_request_free(req);
+               RETURN(rc);
+       }
+       ptlrpc_request_set_replen(req);
+       req->rq_request_portal = OST_CREATE_PORTAL;
+       ptlrpc_at_set_req_timeout(req);
+
+       if (flags & OBD_STATFS_NODELAY) {
+               /* procfs requests don't want to wait for statfs,
+                * to avoid a deadlock */
+               req->rq_no_resend = 1;
+               req->rq_no_delay = 1;
+       }
+
+       rc = ptlrpc_queue_wait(req);
+       if (rc)
+               GOTO(out, rc);
+
+       msfs = req_capsule_server_get(&req->rq_pill, &RMF_OBD_STATFS);
+       if (msfs == NULL)
+               GOTO(out, rc = -EPROTO);
+
+       *osfs = *msfs;
+
+       EXIT;
+out:
+       ptlrpc_req_finished(req);
+       return rc;
+}
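
The OBD_STATFS_NODELAY branch above exists for procfs readers, which must not block behind a stuck import. A hedged sketch of such a caller (the max_age expression follows the usual obd_statfs convention; the consumer shown is illustrative):

        struct obd_statfs osfs;
        int rc;

        /* fail fast instead of resending while the import is down */
        rc = obd_statfs(env, exp, &osfs,
                        cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS),
                        OBD_STATFS_NODELAY);
        if (rc == 0)
                CDEBUG(D_INFO, "free blocks: "LPU64"\n", osfs.os_bfree);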
+
+static int osp_import_event(struct obd_device *obd, struct obd_import *imp,
+                           enum obd_import_event event)
+{
+       struct osp_device *d = lu2osp_dev(obd->obd_lu_dev);
+
+       switch (event) {
+       case IMP_EVENT_DISCON:
+               d->opd_got_disconnected = 1;
+               d->opd_imp_connected = 0;
+               osp_pre_update_status(d, -ENODEV);
+               cfs_waitq_signal(&d->opd_pre_waitq);
+               CDEBUG(D_HA, "got disconnected\n");
+               break;
+       case IMP_EVENT_INACTIVE:
+               d->opd_imp_active = 0;
+               osp_pre_update_status(d, -ENODEV);
+               cfs_waitq_signal(&d->opd_pre_waitq);
+               CDEBUG(D_HA, "got inactive\n");
+               break;
+       case IMP_EVENT_ACTIVE:
+               d->opd_imp_active = 1;
+               if (d->opd_got_disconnected)
+                       d->opd_new_connection = 1;
+               d->opd_imp_connected = 1;
+               d->opd_imp_seen_connected = 1;
+               cfs_waitq_signal(&d->opd_pre_waitq);
+               __osp_sync_check_for_work(d);
+               CDEBUG(D_HA, "got connected\n");
+               break;
+       default:
+               CERROR("%s: unsupported import event: %#x\n",
+                      obd->obd_name, event);
+       }
+       return 0;
+}
+
+static int osp_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
+                        void *karg, void *uarg)
+{
+       struct obd_device       *obd = exp->exp_obd;
+       struct osp_device       *d;
+       struct obd_ioctl_data   *data = karg;
+       int                      rc = 0;
+
+       ENTRY;
+
+       LASSERT(obd->obd_lu_dev);
+       d = lu2osp_dev(obd->obd_lu_dev);
+       LASSERT(d->opd_dt_dev.dd_ops == &osp_dt_ops);
+
+       if (!cfs_try_module_get(THIS_MODULE)) {
+               CERROR("%s: can't get module. Is it alive?", obd->obd_name);
+               return -EINVAL;
+       }
+
+       switch (cmd) {
+       case OBD_IOC_CLIENT_RECOVER:
+               rc = ptlrpc_recover_import(obd->u.cli.cl_import,
+                                          data->ioc_inlbuf1, 0);
+               if (rc > 0)
+                       rc = 0;
+               break;
+       case IOC_OSC_SET_ACTIVE:
+               rc = ptlrpc_set_import_active(obd->u.cli.cl_import,
+                                             data->ioc_offset);
+               break;
+       case OBD_IOC_PING_TARGET:
+               rc = ptlrpc_obd_ping(obd);
+               break;
+       default:
+               CERROR("%s: unrecognized ioctl %#x by %s\n", obd->obd_name,
+                      cmd, cfs_curproc_comm());
+               rc = -ENOTTY;
+       }
+       cfs_module_put(THIS_MODULE);
+       return rc;
+}
+
+static int osp_obd_health_check(const struct lu_env *env,
+                               struct obd_device *obd)
+{
+       struct osp_device *d = lu2osp_dev(obd->obd_lu_dev);
+
+       ENTRY;
+
+       /*
+        * 1.8/2.0 behaviour is that an OST that has been connected at
+        * least once is considered "healthy", and one "healthy" OST is
+        * enough to allow Lustre clients to connect to the MDS
+        */
+       LASSERT(d);
+       RETURN(!d->opd_imp_seen_connected);
+}
+
 /* context key constructor/destructor: mdt_key_init, mdt_key_fini */
 LU_KEY_INIT_FINI(osp, struct osp_thread_info);
 static void osp_key_exit(const struct lu_context *ctx,
@@ -719,6 +937,10 @@ static struct obd_ops osp_obd_device_ops = {
        .o_reconnect    = osp_reconnect,
        .o_connect      = osp_obd_connect,
        .o_disconnect   = osp_obd_disconnect,
+       .o_health_check = osp_obd_health_check,
+       .o_import_event = osp_import_event,
+       .o_iocontrol    = osp_iocontrol,
+       .o_statfs       = osp_obd_statfs,
 };
 
 static int __init osp_mod_init(void)
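
osp_obd_health_check() above reports healthy (returns 0) once the import has ever been seen connected, matching the 1.8/2.0 convention that one healthy OST is enough for clients to mount. A hedged sketch of the generic dispatch it plugs into (helper name illustrative; only the o_health_check method itself comes from this patch):

        /* unhealthy if the registered method reports non-zero;
         * devices without a method are assumed healthy */
        static int health_check_sketch(const struct lu_env *env,
                                       struct obd_device *obd)
        {
                if (obd->obd_type->typ_dt_ops->o_health_check == NULL)
                        return 0;
                return obd->obd_type->typ_dt_ops->o_health_check(env, obd);
        }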
index f76679a..7a82bca 100644 (file)
 #include <dt_object.h>
 #include <lustre_fid.h>
 
+/*
+ * Infrastructure to support tracking of last committed llog record
+ */
+struct osp_id_tracker {
+       cfs_spinlock_t           otr_lock;
+       __u32                    otr_next_id;
+       __u32                    otr_committed_id;
+       /* the callback is registered once per diskfs -- that's the point */
+       struct dt_txn_callback   otr_tx_cb;
+       /* single node can run many clusters */
+       cfs_list_t               otr_wakeup_list;
+       cfs_list_t               otr_list;
+       /* underlying shared device */
+       struct dt_device        *otr_dev;
+       /* how many users of this tracker */
+       cfs_atomic_t             otr_refcount;
+};
+
 struct osp_device {
        struct dt_device                 opd_dt_dev;
        /* corresponded OST index */
@@ -106,6 +124,41 @@ struct osp_device {
        int                              opd_pre_grow_slow;
 
        /*
+        * OST synchronization
+        */
+       cfs_spinlock_t                   opd_syn_lock;
+       /* unique generation, to recognize start of new records in the llog */
+       struct llog_gen                  opd_syn_generation;
+       /* number of changes to sync, used to wake up sync thread */
+       unsigned long                    opd_syn_changes;
+       /* processing of changes from previous mount is done? */
+       int                              opd_syn_prev_done;
+       /* found records */
+       struct ptlrpc_thread             opd_syn_thread;
+       cfs_waitq_t                      opd_syn_waitq;
+       /* list of remotely committed rpc */
+       cfs_list_t                       opd_syn_committed_there;
+       /* number of changes being under sync */
+       int                              opd_syn_sync_in_progress;
+       /* number of RPCs in flight - flow control */
+       int                              opd_syn_rpc_in_flight;
+       int                              opd_syn_max_rpc_in_flight;
+       /* number of RPC in processing (including non-committed by OST) */
+       int                              opd_syn_rpc_in_progress;
+       int                              opd_syn_max_rpc_in_progress;
+       /* osd api's commit cb control structure */
+       struct dt_txn_callback           opd_syn_txn_cb;
+       /* last used change number -- semantically similar to transno */
+       unsigned long                    opd_syn_last_used_id;
+       /* last committed change number -- semantically similar to
+        * last_committed */
+       unsigned long                    opd_syn_last_committed_id;
+       /* last processed (taken from llog) id */
+       unsigned long                    opd_syn_last_processed_id;
+       struct osp_id_tracker           *opd_syn_tracker;
+       cfs_list_t                       opd_syn_ontrack;
+
+       /*
         * statfs related fields: OSP maintains it on its own
         */
        struct obd_statfs                opd_statfs;
@@ -124,7 +177,8 @@ extern cfs_mem_cache_t *osp_object_kmem;
 struct osp_object {
        struct lu_object_header  opo_header;
        struct dt_object         opo_obj;
-       int                      opo_reserved;
+       int                      opo_reserved:1,
+                                opo_new:1;
 };
 
 extern struct lu_object_operations osp_lu_obj_ops;
@@ -136,6 +190,14 @@ struct osp_thread_info {
        struct ost_id            osi_oi;
        obd_id                   osi_id;
        loff_t                   osi_off;
+       union {
+               struct llog_rec_hdr             osi_hdr;
+               struct llog_unlink64_rec        osi_unlink;
+               struct llog_setattr64_rec       osi_setattr;
+               struct llog_gen_rec             osi_gen;
+       };
+       struct llog_cookie       osi_cookie;
+       struct llog_catid        osi_cid;
 };
 
 static inline void osp_objid_buf_prep(struct osp_thread_info *osi,
@@ -243,20 +305,13 @@ void osp_statfs_need_now(struct osp_device *d);
 void lprocfs_osp_init_vars(struct lprocfs_static_vars *lvars);
 
 /* osp_sync.c */
-/* functions below will be replaced by full versions with osp_sync.c code */
-static inline
 int osp_sync_declare_add(const struct lu_env *env, struct osp_object *o,
-                        llog_op_type type, struct thandle *th)
-{
-       return 0;
-}
-
-static inline
+                        llog_op_type type, struct thandle *th);
 int osp_sync_add(const struct lu_env *env, struct osp_object *o,
                 llog_op_type type, struct thandle *th,
-                const struct lu_attr *attr)
-{
-       return 0;
-}
+                const struct lu_attr *attr);
+int osp_sync_init(const struct lu_env *env, struct osp_device *d);
+int osp_sync_fini(struct osp_device *d);
+void __osp_sync_check_for_work(struct osp_device *d);
 
 #endif
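
The osp_id_tracker declared above numbers every change and watches local commits; a hedged sketch of the intended flow (helper names illustrative, field names from the struct):

        /* hand out the next monotonic change number for this device */
        static __u32 tracker_next_id_sketch(struct osp_id_tracker *otr)
        {
                __u32 id;

                cfs_spin_lock(&otr->otr_lock);
                id = ++otr->otr_next_id;
                cfs_spin_unlock(&otr->otr_lock);
                return id;
        }

        /* commit callback: all ids up to the largest one used by the
         * committed transaction are durable now */
        static void tracker_commit_sketch(struct osp_id_tracker *otr,
                                          __u32 committed)
        {
                cfs_spin_lock(&otr->otr_lock);
                if (committed > otr->otr_committed_id)
                        otr->otr_committed_id = committed;
                cfs_spin_unlock(&otr->otr_lock);
        }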
index 89bbf9f..a0489f1 100644 (file)
@@ -93,7 +93,7 @@ static int osp_declare_attr_set(const struct lu_env *env, struct dt_object *dt,
         *
         * 2) send synchronous truncate RPC with just assigned id
         */
-       LASSERT(attr != NULL);
+       LASSERT(attr);
        if (attr->la_valid & LA_SIZE && attr->la_size > 0) {
                LASSERT(!dt_object_exists(dt));
                osp_object_assign_id(env, d, o);
@@ -102,6 +102,11 @@ static int osp_declare_attr_set(const struct lu_env *env, struct dt_object *dt,
                        RETURN(rc);
        }
 
+       if (o->opo_new) {
+               /* no need to log new objects being created */
+               RETURN(0);
+       }
+
        if (!(attr->la_valid & (LA_UID | LA_GID)))
                RETURN(0);
 
@@ -126,6 +131,15 @@ static int osp_attr_set(const struct lu_env *env, struct dt_object *dt,
        if (!(attr->la_valid & (LA_UID | LA_GID)))
                RETURN(0);
 
+       /* a new object: the very first ->attr_set()
+        * initializing attributes needs no logging;
+        * all subsequent ones are subject to
+        * logging and synchronization with the OST */
+       if (o->opo_new) {
+               o->opo_new = 0;
+               RETURN(0);
+       }
+
        /*
         * once transaction is committed put proper command on
         * the queue going to our OST
@@ -252,6 +266,10 @@ static int osp_object_create(const struct lu_env *env, struct dt_object *dt,
                }
        }
 
+       /* a new object: the very first ->attr_set()
+        * initializing attributes needs no logging */
+       o->opo_new = 1;
+
        osp_objid_buf_prep(osi, d, d->opd_index);
        rc = dt_record_write(env, d->opd_last_used_file, &osi->osi_lb,
                             &osi->osi_off, th);
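
Together with the osp_internal.h changes, the attr_set path now follows the standard declare/execute split: osp_declare_attr_set() calls osp_sync_declare_add() to reserve llog space, and osp_attr_set() calls osp_sync_add() once the transaction runs, with opo_new short-circuiting both for just-created objects. A condensed sketch of the sequence for a UID/GID change (assuming a caller-owned transaction th; illustrative, not verbatim from the patch):

        rc = osp_sync_declare_add(env, o, MDS_SETATTR64_REC, th);
        if (rc)
                return rc;
        rc = dt_trans_start(env, dev, th);      /* llog space reserved */
        if (rc)
                return rc;
        /* writes the record; cancelled once the OST commits the change */
        rc = osp_sync_add(env, o, MDS_SETATTR64_REC, th, attr);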
index 49ea453..dfcb2a2 100644 (file)
@@ -358,8 +358,10 @@ static int osp_precreate_cleanup_orphans(struct osp_device *d)
        req->rq_no_resend = req->rq_no_delay = 1;
 
        rc = ptlrpc_queue_wait(req);
-       if (rc)
+       if (rc) {
+               ptlrpc_set_import_active(imp, 0);
                GOTO(out_req, rc);
+       }
 
        body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
        if (body == NULL)
@@ -425,6 +427,9 @@ void osp_pre_update_status(struct osp_device *d, int rc)
                                       d->opd_obd->obd_name, msfs->os_blocks,
                                       msfs->os_bfree, used, msfs->os_bavail,
                                       d->opd_pre_status, rc);
+                       CDEBUG(D_INFO,
+                              "non-commited changes: %lu, in progress: %u\n",
+                              d->opd_syn_changes, d->opd_syn_rpc_in_progress);
                } else if (old == -ENOSPC) {
                        d->opd_pre_status = 0;
                        d->opd_pre_grow_slow = 0;
@@ -551,8 +556,9 @@ static int osp_precreate_ready_condition(struct osp_device *d)
        if (d->opd_pre_next + d->opd_pre_reserved < d->opd_pre_last_created)
                return 1;
 
-       /* ready if OST reported no space */
-       if (d->opd_pre_status != 0)
+       /* ready if OST reported no space and no destroys in progress */
+       if (d->opd_syn_changes + d->opd_syn_rpc_in_progress == 0 &&
+           d->opd_pre_status != 0)
                return 1;
 
        return 0;
@@ -563,9 +569,11 @@ static int osp_precreate_timeout_condition(void *data)
        struct osp_device *d = data;
 
        LCONSOLE_WARN("%s: slow creates, last="LPU64", next="LPU64", "
-                     "reserved="LPU64", status=%d\n",
+                     "reserved="LPU64", syn_changes=%lu, "
+                     "syn_rpc_in_progress=%d, status=%d\n",
                      d->opd_obd->obd_name, d->opd_pre_last_created,
                      d->opd_pre_next, d->opd_pre_reserved,
+                     d->opd_syn_changes, d->opd_syn_rpc_in_progress,
                      d->opd_pre_status);
 
        return 0;
@@ -635,6 +643,28 @@ int osp_precreate_reserve(const struct lu_env *env, struct osp_device *d)
                }
                cfs_spin_unlock(&d->opd_pre_lock);
 
+               /*
+                * all precreated objects have been used and the no-space
+                * status leaves us no chance to succeed very soon;
+                * but if there are destroys in progress, we should
+                * wait till they are done - some space might be released
+                */
+               if (unlikely(rc == -ENOSPC)) {
+                       if (d->opd_syn_changes) {
+                               /* force local commit to release space */
+                               dt_commit_async(env, d->opd_storage);
+                       }
+                       if (d->opd_syn_rpc_in_progress) {
+                               /* just wait till destroys are done */
+                               /* see l_wait_event() a few lines below */
+                       }
+                       if (d->opd_syn_changes +
+                           d->opd_syn_rpc_in_progress == 0) {
+                               /* no hope for free space */
+                               break;
+                       }
+               }
+
                /* XXX: don't wake up if precreation is in progress */
                cfs_waitq_signal(&d->opd_pre_waitq);
 
diff --git a/lustre/osp/osp_sync.c b/lustre/osp/osp_sync.c
new file mode 100644 (file)
index 0000000..2f9cbd2
--- /dev/null
@@ -0,0 +1,1224 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Intel, Inc.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lustre/osp/osp_sync.c
+ *
+ * Lustre OST Proxy Device
+ *
+ * Author: Alex Zhuravlev <alexey.zhuravlev@intel.com>
+ * Author: Mikhail Pershin <mike.pershin@intel.com>
+ */
+
+#ifndef EXPORT_SYMTAB
+# define EXPORT_SYMTAB
+#endif
+#define DEBUG_SUBSYSTEM S_MDS
+
+#include <lustre_log.h>
+#include "osp_internal.h"
+
+static int osp_sync_id_traction_init(struct osp_device *d);
+static void osp_sync_id_traction_fini(struct osp_device *d);
+static __u32 osp_sync_id_get(struct osp_device *d, __u32 id);
+static void osp_sync_remove_from_tracker(struct osp_device *d);
+
+/*
+ * this is the component of OSP implementing synchronization between MDS and
+ * OST: it llogs all interesting changes (currently uid/gid changes and
+ * object destroys) atomically, then makes sure the changes hit OST storage
+ *
+ * we have 4 queues of work:
+ *
+ * the first queue is the llog itself; once read, a change is stored in the
+ * 2nd queue in the form of an RPC (but the RPC isn't fired yet).
+ *
+ * the second queue (opd_syn_waiting_for_commit) holds changes awaiting local
+ * commit. once a change is committed locally it migrates onto the 3rd queue.
+ *
+ * the third queue (opd_syn_committed_here) holds changes committed locally,
+ * but not sent to the OST (as the pipe can be full). once the pipe becomes
+ * non-full we take a change from the queue and fire the corresponding RPC.
+ *
+ * once an RPC is reported committed by the OST (using the regular
+ * last_committed mechanism) the change jumps into the 4th queue
+ * (opd_syn_committed_there); now we can cancel the corresponding llog
+ * record and release the RPC
+ *
+ * opd_syn_changes is the number of unread llog records (to be processed).
+ * notice this number doesn't include llog records from previous boots.
+ * with OSP_SYN_THRESHOLD we try to batch processing a bit (TO BE IMPLEMENTED)
+ *
+ * opd_syn_rpc_in_progress is the number of requests in queues 2-4.
+ * we control this with OSP_MAX_IN_PROGRESS so that OSP doesn't consume
+ * too much memory -- how to deal with thousands of OSTs? batching could help?
+ *
+ * opd_syn_rpc_in_flight is the number of RPCs in flight.
+ * we control this with OSP_MAX_IN_FLIGHT
+ */
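
A hedged restatement of how these counters gate the pipeline (the helper is illustrative; it is simply the inverse of the osp_sync_low_in_*() checks defined below):

        /* the pipe is full when either per-stage limit is reached;
         * further llog records stay unread until RPCs drain */
        static int osp_sync_pipe_full_sketch(struct osp_device *d)
        {
                return d->opd_syn_rpc_in_flight >=
                                d->opd_syn_max_rpc_in_flight ||
                       d->opd_syn_rpc_in_progress >=
                                d->opd_syn_max_rpc_in_progress;
        }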
+
+/* XXX: do math to learn reasonable threshold
+ * should it be ~ number of changes fitting bulk? */
+
+#define OSP_SYN_THRESHOLD      10
+#define OSP_MAX_IN_FLIGHT      8
+#define OSP_MAX_IN_PROGRESS    4096
+
+#define OSP_JOB_MAGIC          0x26112005
+
+static inline int osp_sync_running(struct osp_device *d)
+{
+       return !!(d->opd_syn_thread.t_flags & SVC_RUNNING);
+}
+
+static inline int osp_sync_stopped(struct osp_device *d)
+{
+       return !!(d->opd_syn_thread.t_flags & SVC_STOPPED);
+}
+
+static inline int osp_sync_has_new_job(struct osp_device *d)
+{
+       return ((d->opd_syn_last_processed_id < d->opd_syn_last_used_id) &&
+               (d->opd_syn_last_processed_id < d->opd_syn_last_committed_id))
+               || (d->opd_syn_prev_done == 0);
+}
+
+static inline int osp_sync_low_in_progress(struct osp_device *d)
+{
+       return d->opd_syn_rpc_in_progress < d->opd_syn_max_rpc_in_progress;
+}
+
+static inline int osp_sync_low_in_flight(struct osp_device *d)
+{
+       return d->opd_syn_rpc_in_flight < d->opd_syn_max_rpc_in_flight;
+}
+
+static inline int osp_sync_has_work(struct osp_device *d)
+{
+       /* has new/old changes and low in-progress? */
+       if (osp_sync_has_new_job(d) && osp_sync_low_in_progress(d) &&
+           osp_sync_low_in_flight(d) && d->opd_imp_connected)
+               return 1;
+
+       /* has remotely committed? */
+       if (!cfs_list_empty(&d->opd_syn_committed_there))
+               return 1;
+
+       return 0;
+}
+
+#define osp_sync_check_for_work(d)                      \
+{                                                       \
+       if (osp_sync_has_work(d)) {                     \
+               cfs_waitq_signal(&d->opd_syn_waitq);    \
+       }                                               \
+}
+
+void __osp_sync_check_for_work(struct osp_device *d)
+{
+       osp_sync_check_for_work(d);
+}
+
+static inline int osp_sync_can_process_new(struct osp_device *d,
+                                          struct llog_rec_hdr *rec)
+{
+       LASSERT(d);
+
+       if (!osp_sync_low_in_progress(d))
+               return 0;
+       if (!osp_sync_low_in_flight(d))
+               return 0;
+       if (!d->opd_imp_connected)
+               return 0;
+       if (d->opd_syn_prev_done == 0)
+               return 1;
+       if (d->opd_syn_changes == 0)
+               return 0;
+       if (rec == NULL || rec->lrh_id <= d->opd_syn_last_committed_id)
+               return 1;
+       return 0;
+}
+
+int osp_sync_declare_add(const struct lu_env *env, struct osp_object *o,
+                        llog_op_type type, struct thandle *th)
+{
+       struct osp_thread_info  *osi = osp_env_info(env);
+       struct osp_device       *d = lu2osp_dev(o->opo_obj.do_lu.lo_dev);
+       struct llog_ctxt        *ctxt;
+       int                      rc;
+
+       ENTRY;
+
+       /* it's a layering violation, to access internals of th,
+        * but we can do this as a sanity check, for a while */
+       LASSERT(th->th_dev == d->opd_storage);
+
+       switch (type) {
+       case MDS_UNLINK64_REC:
+               osi->osi_hdr.lrh_len = sizeof(struct llog_unlink64_rec);
+               break;
+       case MDS_SETATTR64_REC:
+               osi->osi_hdr.lrh_len = sizeof(struct llog_setattr64_rec);
+               break;
+       default:
+               LBUG();
+       }
+
+       /* we want ->dt_trans_start() to allocate per-thandle structure */
+       th->th_tags |= LCT_OSP_THREAD;
+
+       ctxt = llog_get_context(d->opd_obd, LLOG_MDS_OST_ORIG_CTXT);
+       LASSERT(ctxt);
+
+       rc = llog_declare_add(env, ctxt->loc_handle, &osi->osi_hdr, th);
+       llog_ctxt_put(ctxt);
+
+       RETURN(rc);
+}
+
+static int osp_sync_add_rec(const struct lu_env *env, struct osp_device *d,
+                           const struct lu_fid *fid, llog_op_type type,
+                           int count, struct thandle *th,
+                           const struct lu_attr *attr)
+{
+       struct osp_thread_info  *osi = osp_env_info(env);
+       struct llog_ctxt        *ctxt;
+       struct osp_txn_info     *txn;
+       int                      rc;
+
+       ENTRY;
+
+       /* it's a layering violation, to access internals of th,
+        * but we can do this as a sanity check, for a while */
+       LASSERT(th->th_dev == d->opd_storage);
+
+       switch (type) {
+       case MDS_UNLINK64_REC:
+               osi->osi_hdr.lrh_len = sizeof(osi->osi_unlink);
+               osi->osi_hdr.lrh_type = MDS_UNLINK64_REC;
+               osi->osi_unlink.lur_fid  = *fid;
+               osi->osi_unlink.lur_count = count;
+               break;
+       case MDS_SETATTR64_REC:
+               rc = fid_ostid_pack(fid, &osi->osi_oi);
+               LASSERT(rc == 0);
+               osi->osi_hdr.lrh_len = sizeof(osi->osi_setattr);
+               osi->osi_hdr.lrh_type = MDS_SETATTR64_REC;
+               osi->osi_setattr.lsr_oid  = osi->osi_oi.oi_id;
+               osi->osi_setattr.lsr_oseq = osi->osi_oi.oi_seq;
+               LASSERT(attr);
+               osi->osi_setattr.lsr_uid = attr->la_uid;
+               osi->osi_setattr.lsr_gid = attr->la_gid;
+               break;
+       default:
+               LBUG();
+       }
+
+       txn = osp_txn_info(&th->th_ctx);
+       LASSERT(txn);
+
+       txn->oti_current_id = osp_sync_id_get(d, txn->oti_current_id);
+       osi->osi_hdr.lrh_id = txn->oti_current_id;
+
+       ctxt = llog_get_context(d->opd_obd, LLOG_MDS_OST_ORIG_CTXT);
+       if (ctxt == NULL)
+               RETURN(-ENOMEM);
+       rc = llog_add(env, ctxt->loc_handle, &osi->osi_hdr, &osi->osi_cookie,
+                     NULL, th);
+       llog_ctxt_put(ctxt);
+
+       CDEBUG(D_OTHER, "%s: new record %lu:%lu:%lu/%lu: %d\n",
+              d->opd_obd->obd_name,
+              (unsigned long) osi->osi_cookie.lgc_lgl.lgl_oid,
+              (unsigned long) osi->osi_cookie.lgc_lgl.lgl_oseq,
+              (unsigned long) osi->osi_cookie.lgc_lgl.lgl_ogen,
+              (unsigned long) osi->osi_cookie.lgc_index, rc);
+
+       if (rc > 0)
+               rc = 0;
+
+       if (likely(rc == 0)) {
+               cfs_spin_lock(&d->opd_syn_lock);
+               d->opd_syn_changes++;
+               cfs_spin_unlock(&d->opd_syn_lock);
+       }
+
+       RETURN(rc);
+}
+
+int osp_sync_add(const struct lu_env *env, struct osp_object *o,
+                llog_op_type type, struct thandle *th,
+                const struct lu_attr *attr)
+{
+       return osp_sync_add_rec(env, lu2osp_dev(o->opo_obj.do_lu.lo_dev),
+                               lu_object_fid(&o->opo_obj.do_lu), type, 1,
+                               th, attr);
+}
+
+int osp_sync_gap(const struct lu_env *env, struct osp_device *d,
+                struct lu_fid *fid, int lost, struct thandle *th)
+{
+       return osp_sync_add_rec(env, d, fid, MDS_UNLINK64_REC, lost, th, NULL);
+}
+
+/*
+ * it's quite obvious we can't maintain all the structures in memory:
+ * while the OST is down, the MDS can be processing thousands and
+ * thousands of unlinks filling persistent llogs and their in-core
+ * representation
+ *
+ * this doesn't scale at all. so we need basically the following:
+ * a) destroy/setattr append llog records
+ * b) once llog has grown to X records, we process first Y committed records
+ *
+ *  once record R is found via llog_process(), it becomes committed after any
+ *  subsequent commit callback (at the most)
+ */
+
+/*
+ * called for each atomic on-disk change (not once per transaction batch)
+ * and goes over the list
+ * XXX: should be optimized?
+ */
+
+/**
+ * called for each RPC reported committed
+ */
+static void osp_sync_request_commit_cb(struct ptlrpc_request *req)
+{
+       struct osp_device *d = req->rq_cb_data;
+
+       CDEBUG(D_HA, "commit req %p, transno "LPU64"\n", req, req->rq_transno);
+
+       if (unlikely(req->rq_transno == 0))
+               return;
+
+       /* XXX: what if request isn't committed for very long? */
+       LASSERT(d);
+       LASSERT(req->rq_svc_thread == (void *) OSP_JOB_MAGIC);
+       LASSERT(cfs_list_empty(&req->rq_exp_list));
+
+       ptlrpc_request_addref(req);
+
+       cfs_spin_lock(&d->opd_syn_lock);
+       cfs_list_add(&req->rq_exp_list, &d->opd_syn_committed_there);
+       cfs_spin_unlock(&d->opd_syn_lock);
+
+       /* XXX: some batching wouldn't hurt */
+       cfs_waitq_signal(&d->opd_syn_waitq);
+}
+
+static int osp_sync_interpret(const struct lu_env *env,
+                             struct ptlrpc_request *req, void *aa, int rc)
+{
+       struct osp_device *d = req->rq_cb_data;
+
+       /* XXX: error handling here */
+       if (req->rq_svc_thread != (void *) OSP_JOB_MAGIC)
+               DEBUG_REQ(D_ERROR, req, "bad magic %p\n", req->rq_svc_thread);
+       LASSERT(req->rq_svc_thread == (void *) OSP_JOB_MAGIC);
+       LASSERT(d);
+
+       CDEBUG(D_HA, "reply req %p/%d, rc %d, transno %u\n", req,
+              cfs_atomic_read(&req->rq_refcount),
+              rc, (unsigned) req->rq_transno);
+       LASSERT(rc || req->rq_transno);
+
+       if (rc == -ENOENT) {
+               /*
+                * we tried to destroy the object or update attributes,
+                * but the object doesn't exist anymore - cancel the llog
+                * record
+                */
+               LASSERT(req->rq_transno == 0);
+               LASSERT(cfs_list_empty(&req->rq_exp_list));
+
+               ptlrpc_request_addref(req);
+
+               cfs_spin_lock(&d->opd_syn_lock);
+               cfs_list_add(&req->rq_exp_list, &d->opd_syn_committed_there);
+               cfs_spin_unlock(&d->opd_syn_lock);
+
+               cfs_waitq_signal(&d->opd_syn_waitq);
+       } else if (rc) {
+               struct obd_import *imp = req->rq_import;
+               /*
+                * an error happened, we'll try to repeat on the next boot?
+                */
+               LASSERTF(req->rq_transno == 0 ||
+                        req->rq_import_generation < imp->imp_generation,
+                        "transno "LPU64", rc %d, gen: req %d, imp %d\n",
+                        req->rq_transno, rc, req->rq_import_generation,
+                        imp->imp_generation);
+               LASSERT(d->opd_syn_rpc_in_progress > 0);
+               if (req->rq_transno == 0) {
+                       /* this is the last time we see the request
+                        * if transno is not zero, then commit cb
+                        * will be called at some point */
+                       cfs_spin_lock(&d->opd_syn_lock);
+                       d->opd_syn_rpc_in_progress--;
+                       cfs_spin_unlock(&d->opd_syn_lock);
+               }
+
+               cfs_waitq_signal(&d->opd_syn_waitq);
+       } else if (unlikely(d->opd_pre_status == -ENOSPC)) {
+               /*
+                * if current status is -ENOSPC (lack of free space on OST)
+                * then we should poll OST immediately once object destroy
+                * is replied
+                */
+               osp_statfs_need_now(d);
+       }
+
+       LASSERT(d->opd_syn_rpc_in_flight > 0);
+       cfs_spin_lock(&d->opd_syn_lock);
+       d->opd_syn_rpc_in_flight--;
+       cfs_spin_unlock(&d->opd_syn_lock);
+       CDEBUG(D_OTHER, "%s: %d in flight, %d in progress\n",
+              d->opd_obd->obd_name, d->opd_syn_rpc_in_flight,
+              d->opd_syn_rpc_in_progress);
+
+       osp_sync_check_for_work(d);
+
+       return 0;
+}
+
+/*
+ * the function walks through the list of locally committed changes
+ * and sends them as RPCs until the pipe is full
+ */
+static void osp_sync_send_new_rpc(struct osp_device *d,
+                                 struct ptlrpc_request *req)
+{
+       LASSERT(d->opd_syn_rpc_in_flight <= d->opd_syn_max_rpc_in_flight);
+       LASSERT(req->rq_svc_thread == (void *) OSP_JOB_MAGIC);
+
+       ptlrpcd_add_req(req, PDL_POLICY_ROUND, -1);
+}
+
+static struct ptlrpc_request *osp_sync_new_job(struct osp_device *d,
+                                              struct llog_handle *llh,
+                                              struct llog_rec_hdr *h,
+                                              ost_cmd_t op,
+                                              const struct req_format *format)
+{
+       struct ptlrpc_request   *req;
+       struct ost_body         *body;
+       struct obd_import       *imp;
+       int                      rc;
+
+       /* Prepare the request */
+       imp = d->opd_obd->u.cli.cl_import;
+       LASSERT(imp);
+       req = ptlrpc_request_alloc(imp, format);
+       if (req == NULL)
+               RETURN(ERR_PTR(-ENOMEM));
+
+       rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, op);
+       if (rc) {
+               ptlrpc_req_finished(req);
+               return ERR_PTR(rc);
+       }
+
+       /*
+        * this is a trick: to save on memory allocations we put cookie
+        * into the request, but don't set the corresponding flag in o_valid
+        * so that OST doesn't interpret this cookie. once the request
+        * is committed on OST we take cookie from the request and cancel
+        */
+       body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
+       LASSERT(body);
+       body->oa.o_lcookie.lgc_lgl = llh->lgh_id;
+       body->oa.o_lcookie.lgc_subsys = LLOG_MDS_OST_ORIG_CTXT;
+       body->oa.o_lcookie.lgc_index = h->lrh_index;
+       CFS_INIT_LIST_HEAD(&req->rq_exp_list);
+       req->rq_svc_thread = (void *) OSP_JOB_MAGIC;
+
+       req->rq_interpret_reply = osp_sync_interpret;
+       req->rq_commit_cb = osp_sync_request_commit_cb;
+       req->rq_cb_data = d;
+
+       ptlrpc_request_set_replen(req);
+
+       return req;
+}
+
+static int osp_sync_new_setattr_job(struct osp_device *d,
+                                   struct llog_handle *llh,
+                                   struct llog_rec_hdr *h)
+{
+       struct llog_setattr64_rec       *rec = (struct llog_setattr64_rec *)h;
+       struct ptlrpc_request           *req;
+       struct ost_body                 *body;
+
+       ENTRY;
+       LASSERT(h->lrh_type == MDS_SETATTR64_REC);
+
+       req = osp_sync_new_job(d, llh, h, OST_SETATTR, &RQF_OST_SETATTR);
+       if (IS_ERR(req))
+               RETURN(PTR_ERR(req));
+
+       body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
+       LASSERT(body);
+       body->oa.o_id  = rec->lsr_oid;
+       body->oa.o_seq = rec->lsr_oseq;
+       body->oa.o_uid = rec->lsr_uid;
+       body->oa.o_gid = rec->lsr_gid;
+       body->oa.o_valid = OBD_MD_FLGROUP | OBD_MD_FLID |
+                          OBD_MD_FLUID | OBD_MD_FLGID;
+
+       osp_sync_send_new_rpc(d, req);
+       RETURN(0);
+}
+
+/* Old records may be in old format, so we handle that too */
+static int osp_sync_new_unlink_job(struct osp_device *d,
+                                  struct llog_handle *llh,
+                                  struct llog_rec_hdr *h)
+{
+       struct llog_unlink_rec  *rec = (struct llog_unlink_rec *)h;
+       struct ptlrpc_request   *req;
+       struct ost_body         *body;
+
+       ENTRY;
+       LASSERT(h->lrh_type == MDS_UNLINK_REC);
+
+       req = osp_sync_new_job(d, llh, h, OST_DESTROY, &RQF_OST_DESTROY);
+       if (IS_ERR(req))
+               RETURN(PTR_ERR(req));
+
+       body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
+       LASSERT(body);
+       body->oa.o_id  = rec->lur_oid;
+       body->oa.o_seq = rec->lur_oseq;
+       body->oa.o_misc = rec->lur_count;
+       body->oa.o_valid = OBD_MD_FLGROUP | OBD_MD_FLID;
+       if (rec->lur_count)
+               body->oa.o_valid |= OBD_MD_FLOBJCOUNT;
+
+       osp_sync_send_new_rpc(d, req);
+       RETURN(0);
+}
+
+static int osp_sync_new_unlink64_job(struct osp_device *d,
+                                    struct llog_handle *llh,
+                                    struct llog_rec_hdr *h)
+{
+       struct llog_unlink64_rec        *rec = (struct llog_unlink64_rec *)h;
+       struct ptlrpc_request           *req;
+       struct ost_body                 *body;
+       int                              rc;
+
+       ENTRY;
+       LASSERT(h->lrh_type == MDS_UNLINK64_REC);
+
+       req = osp_sync_new_job(d, llh, h, OST_DESTROY, &RQF_OST_DESTROY);
+       if (IS_ERR(req))
+               RETURN(PTR_ERR(req));
+
+       body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
+       if (body == NULL)
+               RETURN(-EFAULT);
+       rc = fid_ostid_pack(&rec->lur_fid, &body->oa.o_oi);
+       if (rc < 0)
+               RETURN(rc);
+       body->oa.o_misc = rec->lur_count;
+       body->oa.o_valid = OBD_MD_FLGROUP | OBD_MD_FLID | OBD_MD_FLOBJCOUNT;
+
+       osp_sync_send_new_rpc(d, req);
+       RETURN(0);
+}
+
+static int osp_sync_process_record(const struct lu_env *env,
+                                  struct osp_device *d,
+                                  struct llog_handle *llh,
+                                  struct llog_rec_hdr *rec)
+{
+       struct llog_cookie       cookie;
+       int                      rc = 0;
+
+       cookie.lgc_lgl = llh->lgh_id;
+       cookie.lgc_subsys = LLOG_MDS_OST_ORIG_CTXT;
+       cookie.lgc_index = rec->lrh_index;
+
+       if (unlikely(rec->lrh_type == LLOG_GEN_REC)) {
+               struct llog_gen_rec *gen = (struct llog_gen_rec *)rec;
+
+               /* we're waiting for the record generated by this instance */
+               LASSERT(d->opd_syn_prev_done == 0);
+               if (!memcmp(&d->opd_syn_generation, &gen->lgr_gen,
+                           sizeof(gen->lgr_gen))) {
+                       CDEBUG(D_HA, "processed all old entries\n");
+                       d->opd_syn_prev_done = 1;
+               }
+
+               /* cancel any generation record */
+               rc = llog_cat_cancel_records(env, llh->u.phd.phd_cat_handle,
+                                            1, &cookie);
+
+               return rc;
+       }
+
+       /*
+        * now we prepare and fill requests to OST, put them on the queue
+        * and fire after next commit callback
+        */
+
+       /* notice we increment the counters before sending the RPC, to stay
+        * consistent with the RPC interpret callback which may fire very
+        * quickly */
+       cfs_spin_lock(&d->opd_syn_lock);
+       d->opd_syn_rpc_in_flight++;
+       d->opd_syn_rpc_in_progress++;
+       cfs_spin_unlock(&d->opd_syn_lock);
+
+       switch (rec->lrh_type) {
+       /* case MDS_UNLINK_REC is kept for compatibility */
+       case MDS_UNLINK_REC:
+               rc = osp_sync_new_unlink_job(d, llh, rec);
+               break;
+       case MDS_UNLINK64_REC:
+               rc = osp_sync_new_unlink64_job(d, llh, rec);
+               break;
+       case MDS_SETATTR64_REC:
+               rc = osp_sync_new_setattr_job(d, llh, rec);
+               break;
+       default:
+               CERROR("unknown record type: %x\n", rec->lrh_type);
+                      rc = -EINVAL;
+                      break;
+       }
+
+       if (likely(rc == 0)) {
+               cfs_spin_lock(&d->opd_syn_lock);
+               if (d->opd_syn_prev_done) {
+                       LASSERT(d->opd_syn_changes > 0);
+                       LASSERT(rec->lrh_id <= d->opd_syn_last_committed_id);
+                       /*
+                        * NOTE: it's possible to meet the same id if
+                        * the OST stores several stripes of the same file
+                        */
+                       if (rec->lrh_id > d->opd_syn_last_processed_id)
+                               d->opd_syn_last_processed_id = rec->lrh_id;
+
+                       d->opd_syn_changes--;
+               }
+               CDEBUG(D_OTHER, "%s: %d in flight, %d in progress\n",
+                      d->opd_obd->obd_name, d->opd_syn_rpc_in_flight,
+                      d->opd_syn_rpc_in_progress);
+               cfs_spin_unlock(&d->opd_syn_lock);
+       } else {
+               cfs_spin_lock(&d->opd_syn_lock);
+               d->opd_syn_rpc_in_flight--;
+               d->opd_syn_rpc_in_progress--;
+               cfs_spin_unlock(&d->opd_syn_lock);
+       }
+
+       CDEBUG(D_HA, "found record %x, %d, idx %u, id %u: %d\n",
+              rec->lrh_type, rec->lrh_len, rec->lrh_index, rec->lrh_id, rc);
+       return rc;
+}
+
+static void osp_sync_process_committed(const struct lu_env *env,
+                                      struct osp_device *d)
+{
+       struct obd_device       *obd = d->opd_obd;
+       struct obd_import       *imp = obd->u.cli.cl_import;
+       struct ost_body         *body;
+       struct ptlrpc_request   *req, *tmp;
+       struct llog_ctxt        *ctxt;
+       struct llog_handle      *llh;
+       cfs_list_t               list;
+       int                      rc, done = 0;
+
+       ENTRY;
+
+       if (cfs_list_empty(&d->opd_syn_committed_there))
+               return;
+
+       /*
+        * if current status is -ENOSPC (lack of free space on OST)
+        * then we should poll OST immediately once object destroy
+        * is committed.
+        * notice: we do this upon commit as well because some backends
+        * (like DMU) do not release space right away.
+        */
+       if (unlikely(d->opd_pre_status == -ENOSPC))
+               osp_statfs_need_now(d);
+
+       /*
+        * now cancel them all
+        * XXX: can we improve this using some batching?
+        *      with batch RPC that'll happen automatically?
+        * XXX: can we store ctxt in lod_device and save few cycles ?
+        */
+       ctxt = llog_get_context(obd, LLOG_MDS_OST_ORIG_CTXT);
+       LASSERT(ctxt);
+
+       llh = ctxt->loc_handle;
+       LASSERT(llh);
+
+       CFS_INIT_LIST_HEAD(&list);
+       cfs_spin_lock(&d->opd_syn_lock);
+       cfs_list_splice(&d->opd_syn_committed_there, &list);
+       CFS_INIT_LIST_HEAD(&d->opd_syn_committed_there);
+       cfs_spin_unlock(&d->opd_syn_lock);
+
+       cfs_list_for_each_entry_safe(req, tmp, &list, rq_exp_list) {
+               LASSERT(req->rq_svc_thread == (void *) OSP_JOB_MAGIC);
+               cfs_list_del_init(&req->rq_exp_list);
+
+               body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
+               LASSERT(body);
+
+               /* the import can be closing, thus all commit cb's have
+                * been called; we can check the commit status directly */
+               if (req->rq_transno <= imp->imp_peer_committed_transno) {
+                       rc = llog_cat_cancel_records(env, llh, 1,
+                                                    &body->oa.o_lcookie);
+                       if (rc)
+                               CERROR("%s: can't cancel record: %d\n",
+                                      obd->obd_name, rc);
+               } else {
+                       DEBUG_REQ(D_HA, req, "not committed");
+               }
+
+               ptlrpc_req_finished(req);
+               done++;
+       }
+
+       llog_ctxt_put(ctxt);
+
+       LASSERT(d->opd_syn_rpc_in_progress >= done);
+       cfs_spin_lock(&d->opd_syn_lock);
+       d->opd_syn_rpc_in_progress -= done;
+       cfs_spin_unlock(&d->opd_syn_lock);
+       CDEBUG(D_OTHER, "%s: %d in flight, %d in progress\n",
+              d->opd_obd->obd_name, d->opd_syn_rpc_in_flight,
+              d->opd_syn_rpc_in_progress);
+
+       osp_sync_check_for_work(d);
+
+       EXIT;
+}
+
+/*
+ * this is where most of queues processing happens
+ */
+static int osp_sync_process_queues(const struct lu_env *env,
+                                  struct llog_handle *llh,
+                                  struct llog_rec_hdr *rec,
+                                  void *data)
+{
+       struct osp_device       *d = data;
+       int                      rc;
+
+       do {
+               struct l_wait_info lwi = { 0 };
+
+               if (!osp_sync_running(d)) {
+                       CDEBUG(D_HA, "stop llog processing\n");
+                       return LLOG_PROC_BREAK;
+               }
+
+               /* process requests committed by OST */
+               osp_sync_process_committed(env, d);
+
+               /* if there are changes to be processed and we have
+                * the resources for this ... do it now */
+               if (osp_sync_can_process_new(d, rec)) {
+                       if (llh == NULL) {
+                               /* ask llog for another record */
+                               CDEBUG(D_HA, "%lu changes, %u in progress, %u in flight\n",
+                                      d->opd_syn_changes,
+                                      d->opd_syn_rpc_in_progress,
+                                      d->opd_syn_rpc_in_flight);
+                               return 0;
+                       }
+
+                       /*
+                        * try to send, in case of disconnection, suspend
+                        * processing till we can send this request
+                        */
+                       do {
+                               rc = osp_sync_process_record(env, d, llh, rec);
+                               /*
+                                * XXX: probably different handling is needed
+                                * for some bugs, like immediate exit or if
+                                * OSP gets inactive
+                                */
+                               if (rc) {
+                                       CERROR("can't send: %d\n", rc);
+                                       l_wait_event(d->opd_syn_waitq,
+                                                    !osp_sync_running(d) ||
+                                                    osp_sync_has_work(d),
+                                                    &lwi);
+                               }
+                       } while (rc != 0 && osp_sync_running(d));
+
+                       llh = NULL;
+                       rec = NULL;
+               }
+
+               if (d->opd_syn_last_processed_id == d->opd_syn_last_used_id)
+                       osp_sync_remove_from_tracker(d);
+
+               l_wait_event(d->opd_syn_waitq,
+                            !osp_sync_running(d) ||
+                            osp_sync_can_process_new(d, rec) ||
+                            !cfs_list_empty(&d->opd_syn_committed_there),
+                            &lwi);
+       } while (1);
+}
+
+/*
+ * this thread runs the llog_cat_process() scanner calling our callback
+ * to process llog records. in the callback we implement a tricky
+ * state machine, as we don't want to start scanning the llog again
+ * and again, and we also don't want to process too many records and
+ * send too many RPCs at a time. so, depending on the current load
+ * (number of changes being synced to OST) the callback can suspend,
+ * awaiting new conditions like syncs completing.
+ *
+ * in order to process llog records left over from previous boots and
+ * to allow llog_process_thread() to find something (otherwise it'd
+ * just exit immediately) we add a special GENERATION record on each
+ * boot.
+ */
+static int osp_sync_thread(void *_arg)
+{
+       struct osp_device       *d = _arg;
+       struct ptlrpc_thread    *thread = &d->opd_syn_thread;
+       struct l_wait_info       lwi = { 0 };
+       struct llog_ctxt        *ctxt;
+       struct obd_device       *obd = d->opd_obd;
+       struct llog_handle      *llh;
+       struct lu_env            env;
+       int                      rc;
+       char                     pname[16];
+
+       ENTRY;
+
+       rc = lu_env_init(&env, LCT_LOCAL);
+       if (rc) {
+               CERROR("%s: can't initialize env: rc = %d\n",
+                      obd->obd_name, rc);
+               RETURN(rc);
+       }
+
+       sprintf(pname, "osp-syn-%u", d->opd_index);
+       cfs_daemonize(pname);
+
+       cfs_spin_lock(&d->opd_syn_lock);
+       thread->t_flags = SVC_RUNNING;
+       cfs_spin_unlock(&d->opd_syn_lock);
+       cfs_waitq_signal(&thread->t_ctl_waitq);
+
+       ctxt = llog_get_context(obd, LLOG_MDS_OST_ORIG_CTXT);
+       if (ctxt == NULL) {
+               CERROR("can't get appropriate context\n");
+               GOTO(out, rc = -EINVAL);
+       }
+
+       llh = ctxt->loc_handle;
+       if (llh == NULL) {
+               CERROR("can't get llh\n");
+               llog_ctxt_put(ctxt);
+               GOTO(out, rc = -EINVAL);
+       }
+
+       rc = llog_cat_process(&env, llh, osp_sync_process_queues, d, 0, 0);
+       LASSERTF(rc == 0 || rc == LLOG_PROC_BREAK,
+                "%lu changes, %u in progress, %u in flight: %d\n",
+                d->opd_syn_changes, d->opd_syn_rpc_in_progress,
+                d->opd_syn_rpc_in_flight, rc);
+
+       /* we don't expect llog_process_thread() to exit till umount */
+       LASSERTF(thread->t_flags != SVC_RUNNING,
+                "%lu changes, %u in progress, %u in flight\n",
+                d->opd_syn_changes, d->opd_syn_rpc_in_progress,
+                d->opd_syn_rpc_in_flight);
+
+       osp_sync_process_committed(&env, d);
+
+       llog_cat_close(&env, llh);
+       rc = llog_cleanup(&env, ctxt);
+       if (rc)
+               CERROR("can't cleanup llog: %d\n", rc);
+out:
+       thread->t_flags = SVC_STOPPED;
+
+       /*
+        * there might be a race between the osp sync thread sending RPCs
+        * and import invalidation. this can result in RPCs still sitting
+        * in ptlrpcd till this point. for safety reasons let's wait till
+        * they are done
+        */
+       l_wait_event(d->opd_syn_waitq, d->opd_syn_rpc_in_flight == 0, &lwi);
+
+       cfs_waitq_signal(&thread->t_ctl_waitq);
+       LASSERTF(d->opd_syn_rpc_in_progress == 0,
+                "%s: %d %d %sempty\n",
+                d->opd_obd->obd_name, d->opd_syn_rpc_in_progress,
+                d->opd_syn_rpc_in_flight,
+                cfs_list_empty(&d->opd_syn_committed_there) ? "" : "!");
+
+       lu_env_fini(&env);
+
+       RETURN(0);
+}
+
+static struct llog_operations osp_mds_ost_orig_logops;
+
+static int osp_sync_llog_init(const struct lu_env *env, struct osp_device *d)
+{
+       struct osp_thread_info *osi = osp_env_info(env);
+       struct llog_handle     *lgh;
+       struct obd_device      *obd = d->opd_obd;
+       struct llog_ctxt       *ctxt;
+       int                     rc;
+
+       ENTRY;
+
+       LASSERT(obd);
+
+       /*
+        * open llog corresponding to our OST
+        */
+       OBD_SET_CTXT_MAGIC(&obd->obd_lvfs_ctxt);
+       obd->obd_lvfs_ctxt.dt = d->opd_storage;
+
+       rc = llog_osd_get_cat_list(env, d->opd_storage, d->opd_index, 1,
+                                  &osi->osi_cid);
+       if (rc) {
+               CERROR("%s: can't get id from catalogs: rc = %d\n",
+                      obd->obd_name, rc);
+               RETURN(rc);
+       }
+
+       CDEBUG(D_INFO, "%s: Init llog for %d - catid "LPX64"/"LPX64":%x\n",
+              obd->obd_name, d->opd_index, osi->osi_cid.lci_logid.lgl_oid,
+              osi->osi_cid.lci_logid.lgl_oseq,
+              osi->osi_cid.lci_logid.lgl_ogen);
+
+       osp_mds_ost_orig_logops = llog_osd_ops;
+       rc = llog_setup(env, obd, &obd->obd_olg, LLOG_MDS_OST_ORIG_CTXT, obd,
+                       &osp_mds_ost_orig_logops);
+       if (rc)
+               RETURN(rc);
+
+       ctxt = llog_get_context(obd, LLOG_MDS_OST_ORIG_CTXT);
+       LASSERT(ctxt);
+
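+       /*
+        * open the catalog recorded earlier; if none was recorded, or it
+        * has gone missing, create a new one and store its id below
+        */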
+       if (likely(osi->osi_cid.lci_logid.lgl_oid != 0)) {
+               rc = llog_open(env, ctxt, &lgh, &osi->osi_cid.lci_logid, NULL,
+                              LLOG_OPEN_EXISTS);
+               /* re-create llog if it is missing */
+               if (rc == -ENOENT)
+                       osi->osi_cid.lci_logid.lgl_oid = 0;
+               else if (rc < 0)
+                       GOTO(out_cleanup, rc);
+       }
+
+       if (unlikely(osi->osi_cid.lci_logid.lgl_oid == 0)) {
+               rc = llog_open_create(env, ctxt, &lgh, NULL, NULL);
+               if (rc < 0)
+                       GOTO(out_cleanup, rc);
+               osi->osi_cid.lci_logid = lgh->lgh_id;
+       }
+
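+       /*
+        * redirect llog_add()/llog_declare_add() on this handle to the
+        * catalog-aware llog_cat_add_rec()/llog_cat_declare_add_rec()
+        */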
+       ctxt->loc_handle = lgh;
+       lgh->lgh_logops->lop_add = llog_cat_add_rec;
+       lgh->lgh_logops->lop_declare_add = llog_cat_declare_add_rec;
+
+       rc = llog_cat_init_and_process(env, lgh);
+       if (rc)
+               GOTO(out_close, rc);
+
+       rc = llog_osd_put_cat_list(env, d->opd_storage, d->opd_index, 1,
+                                  &osi->osi_cid);
+       if (rc)
+               GOTO(out_close, rc);
+
+       /*
+        * put a generation mark in the llog; all records older than
+        * this mark will be processed unconditionally
+        */
+       d->opd_syn_generation.mnt_cnt = cfs_time_current();
+       d->opd_syn_generation.conn_cnt = cfs_time_current();
+
+       osi->osi_hdr.lrh_type = LLOG_GEN_REC;
+       osi->osi_hdr.lrh_len = sizeof(osi->osi_gen);
+
+       memcpy(&osi->osi_gen.lgr_gen, &d->opd_syn_generation,
+              sizeof(osi->osi_gen.lgr_gen));
+
+       rc = llog_cat_add(env, lgh, &osi->osi_gen.lgr_hdr, &osi->osi_cookie,
+                         NULL);
+       if (rc < 0)
+               GOTO(out_close, rc);
+       llog_ctxt_put(ctxt);
+       RETURN(0);
+out_close:
+       llog_cat_close(env, lgh);
+out_cleanup:
+       llog_cleanup(env, ctxt);
+       RETURN(rc);
+}
+
+static void osp_sync_llog_fini(const struct lu_env *env, struct osp_device *d)
+{
+       struct llog_ctxt *ctxt;
+
+       ctxt = llog_get_context(d->opd_obd, LLOG_MDS_OST_ORIG_CTXT);
+       llog_cat_close(env, ctxt->loc_handle);
+       llog_cleanup(env, ctxt);
+}
+
+/*
+ * initializes the sync component of OSP: sets up the llog that stores
+ * changes and starts the osp-syn-<index> synchronization thread
+ */
+int osp_sync_init(const struct lu_env *env, struct osp_device *d)
+{
+       struct l_wait_info       lwi = { 0 };
+       int                      rc;
+
+       ENTRY;
+
+       rc = osp_sync_id_traction_init(d);
+       if (rc)
+               RETURN(rc);
+
+       /*
+        * initialize the llog that stores changes
+        */
+       rc = osp_sync_llog_init(env, d);
+       if (rc) {
+               CERROR("%s: can't initialize llog: rc = %d\n",
+                      d->opd_obd->obd_name, rc);
+               GOTO(err_id, rc);
+       }
+
+       /*
+        * Start synchronization thread
+        */
+       d->opd_syn_max_rpc_in_flight = OSP_MAX_IN_FLIGHT;
+       d->opd_syn_max_rpc_in_progress = OSP_MAX_IN_PROGRESS;
+       cfs_spin_lock_init(&d->opd_syn_lock);
+       cfs_waitq_init(&d->opd_syn_waitq);
+       cfs_waitq_init(&d->opd_syn_thread.t_ctl_waitq);
+       CFS_INIT_LIST_HEAD(&d->opd_syn_committed_there);
+
+       rc = cfs_create_thread(osp_sync_thread, d, 0);
+       if (rc < 0) {
+               CERROR("%s: can't start sync thread: rc = %d\n",
+                      d->opd_obd->obd_name, rc);
+               GOTO(err_llog, rc);
+       }
+
+       l_wait_event(d->opd_syn_thread.t_ctl_waitq,
+                    osp_sync_running(d) || osp_sync_stopped(d), &lwi);
+
+       RETURN(0);
+err_llog:
+       osp_sync_llog_fini(env, d);
+err_id:
+       osp_sync_id_traction_fini(d);
+       return rc;
+}
+
+int osp_sync_fini(struct osp_device *d)
+{
+       struct ptlrpc_thread *thread = &d->opd_syn_thread;
+
+       ENTRY;
+
+       thread->t_flags = SVC_STOPPING;
+       cfs_waitq_signal(&d->opd_syn_waitq);
+       cfs_wait_event(thread->t_ctl_waitq, thread->t_flags & SVC_STOPPED);
+
+       /*
+        * unregister transaction callbacks only when sync thread
+        * has finished operations with llog
+        */
+       osp_sync_id_traction_fini(d);
+
+       RETURN(0);
+}
+
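+/*
+ * the id tracker below assigns monotonically growing ids to llog
+ * records and maps local transaction commits back to those ids; one
+ * tracker instance is shared by all OSP devices on the same storage
+ */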
+static CFS_DEFINE_MUTEX(osp_id_tracker_sem);
+static CFS_LIST_HEAD(osp_id_tracker_list);
+
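+/*
+ * transaction commit callback: advance the tracker's committed id and
+ * wake every OSP device registered on the wakeup list
+ */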
+static void osp_sync_tracker_commit_cb(struct thandle *th, void *cookie)
+{
+       struct osp_id_tracker   *tr = cookie;
+       struct osp_device       *d;
+       struct osp_txn_info     *txn;
+
+       LASSERT(tr);
+
+       txn = osp_txn_info(&th->th_ctx);
+       if (txn == NULL || txn->oti_current_id < tr->otr_committed_id)
+               return;
+
+       cfs_spin_lock(&tr->otr_lock);
+       if (likely(txn->oti_current_id > tr->otr_committed_id)) {
+               CDEBUG(D_OTHER, "committed: %u -> %u\n",
+                      tr->otr_committed_id, txn->oti_current_id);
+               tr->otr_committed_id = txn->oti_current_id;
+
+               cfs_list_for_each_entry(d, &tr->otr_wakeup_list,
+                                       opd_syn_ontrack) {
+                       d->opd_syn_last_committed_id = tr->otr_committed_id;
+                       cfs_waitq_signal(&d->opd_syn_waitq);
+               }
+       }
+       cfs_spin_unlock(&tr->otr_lock);
+}
+
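+/*
+ * attaches the device to the tracker for its storage, creating the
+ * tracker and registering its commit callback on first use
+ */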
+static int osp_sync_id_traction_init(struct osp_device *d)
+{
+       struct osp_id_tracker   *tr, *found = NULL;
+       int                      rc = 0;
+
+       LASSERT(d);
+       LASSERT(d->opd_storage);
+       LASSERT(d->opd_syn_tracker == NULL);
+       CFS_INIT_LIST_HEAD(&d->opd_syn_ontrack);
+
+       cfs_mutex_lock(&osp_id_tracker_sem);
+       cfs_list_for_each_entry(tr, &osp_id_tracker_list, otr_list) {
+               if (tr->otr_dev == d->opd_storage) {
+                       LASSERT(cfs_atomic_read(&tr->otr_refcount));
+                       cfs_atomic_inc(&tr->otr_refcount);
+                       d->opd_syn_tracker = tr;
+                       found = tr;
+                       break;
+               }
+       }
+
+       if (found == NULL) {
+               rc = -ENOMEM;
+               OBD_ALLOC_PTR(tr);
+               if (tr) {
+                       d->opd_syn_tracker = tr;
+                       cfs_spin_lock_init(&tr->otr_lock);
+                       tr->otr_dev = d->opd_storage;
+                       tr->otr_next_id = 1;
+                       tr->otr_committed_id = 0;
+                       cfs_atomic_set(&tr->otr_refcount, 1);
+                       CFS_INIT_LIST_HEAD(&tr->otr_wakeup_list);
+                       cfs_list_add(&tr->otr_list, &osp_id_tracker_list);
+                       tr->otr_tx_cb.dtc_txn_commit =
+                                               osp_sync_tracker_commit_cb;
+                       tr->otr_tx_cb.dtc_cookie = tr;
+                       tr->otr_tx_cb.dtc_tag = LCT_MD_THREAD;
+                       dt_txn_callback_add(d->opd_storage, &tr->otr_tx_cb);
+                       rc = 0;
+               }
+       }
+       cfs_mutex_unlock(&osp_id_tracker_sem);
+
+       return rc;
+}
+
+static void osp_sync_id_traction_fini(struct osp_device *d)
+{
+       struct osp_id_tracker *tr;
+
+       ENTRY;
+
+       LASSERT(d);
+       tr = d->opd_syn_tracker;
+       if (tr == NULL) {
+               EXIT;
+               return;
+       }
+
+       osp_sync_remove_from_tracker(d);
+
+       cfs_mutex_lock(&osp_id_tracker_sem);
+       if (cfs_atomic_dec_and_test(&tr->otr_refcount)) {
+               dt_txn_callback_del(d->opd_storage, &tr->otr_tx_cb);
+               LASSERT(cfs_list_empty(&tr->otr_wakeup_list));
+               cfs_list_del(&tr->otr_list);
+               OBD_FREE_PTR(tr);
+               d->opd_syn_tracker = NULL;
+       }
+       cfs_mutex_unlock(&osp_id_tracker_sem);
+
+       EXIT;
+}
+
+/*
+ * generates an id from the tracker: id == 0 requests a new one,
+ * otherwise the given id is re-registered with the tracker
+ */
+static __u32 osp_sync_id_get(struct osp_device *d, __u32 id)
+{
+       struct osp_id_tracker *tr;
+
+       tr = d->opd_syn_tracker;
+       LASSERT(tr);
+
+       /* XXX: we could improve this by introducing per-cpu preallocated ids */
+       cfs_spin_lock(&tr->otr_lock);
+       if (unlikely(tr->otr_next_id <= d->opd_syn_last_used_id)) {
+               cfs_spin_unlock(&tr->otr_lock);
+               CERROR("%s: next %u, last synced %lu\n",
+                      d->opd_obd->obd_name, tr->otr_next_id,
+                      d->opd_syn_last_used_id);
+               LBUG();
+       }
+
+       if (id == 0)
+               id = tr->otr_next_id++;
+       if (id > d->opd_syn_last_used_id)
+               d->opd_syn_last_used_id = id;
+       if (cfs_list_empty(&d->opd_syn_ontrack))
+               cfs_list_add(&d->opd_syn_ontrack, &tr->otr_wakeup_list);
+       cfs_spin_unlock(&tr->otr_lock);
+       CDEBUG(D_OTHER, "new id %u\n", (unsigned) id);
+
+       return id;
+}
+
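+/*
+ * removes the device from the tracker's wakeup list, so commit
+ * callbacks stop touching it
+ */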
+static void osp_sync_remove_from_tracker(struct osp_device *d)
+{
+       struct osp_id_tracker *tr;
+
+       tr = d->opd_syn_tracker;
+       LASSERT(tr);
+
+       if (cfs_list_empty(&d->opd_syn_ontrack))
+               return;
+
+       cfs_spin_lock(&tr->otr_lock);
+       cfs_list_del_init(&d->opd_syn_ontrack);
+       cfs_spin_unlock(&tr->otr_lock);
+}
+