* Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*
- * Copyright (c) 2012, 2013, Intel Corporation.
+ * Copyright (c) 2012, 2015, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
#define DEBUG_SUBSYSTEM S_MDS
+#include <linux/kthread.h>
#include <lustre_log.h>
+#include <lustre_update.h>
#include "osp_internal.h"
static int osp_sync_id_traction_init(struct osp_device *d);
static void osp_sync_id_traction_fini(struct osp_device *d);
-static __u32 osp_sync_id_get(struct osp_device *d, __u32 id);
+static __u64 osp_sync_id_get(struct osp_device *d, __u64 id);
static void osp_sync_remove_from_tracker(struct osp_device *d);
/*
#define OSP_JOB_MAGIC 0x26112005
+struct osp_job_req_args {
+ /** bytes reserved for ptlrpc_replay_req() */
+ struct ptlrpc_replay_async_args jra_raa;
+ /** linkage on opd_syn_committed_there once the target commits */
+ struct list_head jra_committed_link;
+ /** linkage on opd_syn_inflight_list while the RPC is in flight */
+ struct list_head jra_inflight_link;
+ /** sanity marker, must be OSP_JOB_MAGIC (checked in the callbacks) */
+ __u32 jra_magic;
+};
+
static inline int osp_sync_running(struct osp_device *d)
{
return !!(d->opd_syn_thread.t_flags & SVC_RUNNING);
}
+/**
+ * Check status: whether OSP thread has stopped
+ *
+ * \param[in] d OSP device
+ *
+ * \retval 0 still running
+ * \retval 1 stopped
+ */
static inline int osp_sync_stopped(struct osp_device *d)
{
return !!(d->opd_syn_thread.t_flags & SVC_STOPPED);
}
+/**
+ * Check for new changes to sync
+ *
+ * \param[in] d OSP device
+ *
+ * \retval 1 there are changes
+ * \retval 0 there are no changes
+ */
static inline int osp_sync_has_new_job(struct osp_device *d)
{
return ((d->opd_syn_last_processed_id < d->opd_syn_last_used_id) &&
|| (d->opd_syn_prev_done == 0);
}
+/**
+ * Check for a conflict with an in-flight request.
+ *
+ * Extracts the target OST object id from the llog record and walks the
+ * list of in-flight sync RPCs looking for one addressed to the same
+ * object, so that changes to a single object are not reordered.
+ *
+ * \param[in] d	OSP device
+ * \param[in] h	llog record to check (may be NULL)
+ *
+ * \retval 1	an in-flight request targets the same object
+ * \retval 0	no conflict
+ */
+static inline int osp_sync_inflight_conflict(struct osp_device *d,
+ struct llog_rec_hdr *h)
+{
+ struct osp_job_req_args *jra;
+ struct ost_id ostid;
+ int conflict = 0;
+
+ /* generation records carry no object id; empty list means no RPCs */
+ if (h == NULL || h->lrh_type == LLOG_GEN_REC ||
+ list_empty(&d->opd_syn_inflight_list))
+ return conflict;
+
+ memset(&ostid, 0, sizeof(ostid));
+ switch (h->lrh_type) {
+ case MDS_UNLINK_REC:
+ ostid_set_seq(&ostid, ((struct llog_unlink_rec *)h)->lur_oseq);
+ ostid_set_id(&ostid, ((struct llog_unlink_rec *)h)->lur_oid);
+ break;
+ case MDS_UNLINK64_REC:
+ fid_to_ostid(&((struct llog_unlink64_rec *)h)->lur_fid, &ostid);
+ break;
+ case MDS_SETATTR64_REC:
+ ostid = ((struct llog_setattr64_rec *)h)->lsr_oi;
+ break;
+ default:
+ LBUG();
+ }
+
+ spin_lock(&d->opd_syn_lock);
+ list_for_each_entry(jra, &d->opd_syn_inflight_list, jra_inflight_link) {
+ struct ptlrpc_request *req;
+ struct ost_body *body;
+
+ LASSERT(jra->jra_magic == OSP_JOB_MAGIC);
+
+ /* jra is embedded in rq_async_args; recover the request */
+ req = container_of((void *)jra, struct ptlrpc_request,
+ rq_async_args);
+ body = req_capsule_client_get(&req->rq_pill,
+ &RMF_OST_BODY);
+ LASSERT(body);
+
+ if (memcmp(&ostid, &body->oa.o_oi, sizeof(ostid)) == 0) {
+ conflict = 1;
+ break;
+ }
+ }
+ spin_unlock(&d->opd_syn_lock);
+
+ return conflict;
+}
+
static inline int osp_sync_low_in_progress(struct osp_device *d)
{
- return d->opd_syn_rpc_in_progress < d->opd_syn_max_rpc_in_progress;
+ return atomic_read(&d->opd_syn_rpc_in_progress) <
+ d->opd_syn_max_rpc_in_progress;
}
+/**
+ * Check for room in the network pipe to OST
+ *
+ * \param[in] d OSP device
+ *
+ * \retval 1 there is room
+ * \retval 0 no room, the pipe is full
+ */
static inline int osp_sync_low_in_flight(struct osp_device *d)
{
- return d->opd_syn_rpc_in_flight < d->opd_syn_max_rpc_in_flight;
+ return atomic_read(&d->opd_syn_rpc_in_flight) <
+ d->opd_syn_max_rpc_in_flight;
}
+/**
+ * Wake up check for the main sync thread
+ *
+ * \param[in] d OSP device
+ *
+ * \retval 1 time to wake up
+ * \retval 0 no need to wake up
+ */
static inline int osp_sync_has_work(struct osp_device *d)
{
/* has new/old changes and low in-progress? */
return 1;
/* has remotely committed? */
- if (!cfs_list_empty(&d->opd_syn_committed_there))
+ if (!list_empty(&d->opd_syn_committed_there))
return 1;
return 0;
#define osp_sync_check_for_work(d) \
{ \
if (osp_sync_has_work(d)) { \
- cfs_waitq_signal(&d->opd_syn_waitq); \
+ wake_up(&d->opd_syn_waitq); \
} \
}
osp_sync_check_for_work(d);
}
+/**
+ * Reconstruct the full 64-bit id of a llog record.
+ *
+ * Only the low 32 bits of the sync id fit in lrh_id, so the high bits
+ * are recovered from opd_syn_last_committed_id; when the committed id's
+ * low part has already wrapped past lrh_id, step one 2^32 period back.
+ *
+ * \param[in] d	OSP device
+ * \param[in] rec	llog record carrying the truncated id
+ *
+ * \retval	the 64-bit id corresponding to the record
+ */
+static inline __u64 osp_sync_correct_id(struct osp_device *d,
+ struct llog_rec_hdr *rec)
+{
+ /*
+ * llog use cyclic store with 32 bit lrh_id
+ * so overflow lrh_id is possible. Range between
+ * last_processed and last_committed is less than
+ * 64745 ^ 2 and less than 2^32 - 1
+ */
+ __u64 correct_id = d->opd_syn_last_committed_id;
+
+ if ((correct_id & 0xffffffffULL) < rec->lrh_id)
+ correct_id -= 0x100000000ULL;
+
+ correct_id &= ~0xffffffffULL;
+ correct_id |= rec->lrh_id;
+
+ return correct_id;
+}
+/**
+ * Check and return ready-for-new status.
+ *
+ * The thread processing llog record uses this function to check whether
+ * it's time to take another record and process it. The number of conditions
+ * must be met: the connection should be ready, RPCs in flight not exceeding
+ * the limit, the record is committed locally, etc (see the lines below).
+ *
+ * \param[in] d OSP device
+ * \param[in] rec next llog record to process
+ *
+ * \retval 0 not ready
+ * \retval 1 ready
+ */
static inline int osp_sync_can_process_new(struct osp_device *d,
struct llog_rec_hdr *rec)
{
LASSERT(d);
+ /* a sync barrier is active: hold off issuing new RPCs */
+ if (unlikely(atomic_read(&d->opd_syn_barrier) > 0))
+ return 0;
+ /* don't race an in-flight RPC touching the same object */
+ if (unlikely(osp_sync_inflight_conflict(d, rec)))
+ return 0;
if (!osp_sync_low_in_progress(d))
return 0;
if (!osp_sync_low_in_flight(d))
return 0;
if (d->opd_syn_prev_done == 0)
return 1;
- if (d->opd_syn_changes == 0)
+ if (atomic_read(&d->opd_syn_changes) == 0)
return 0;
- if (rec == NULL || rec->lrh_id <= d->opd_syn_last_committed_id)
+ if (rec == NULL ||
+ osp_sync_correct_id(d, rec) <= d->opd_syn_last_committed_id)
return 1;
return 0;
}
+/**
+ * Declare intention to add a new change.
+ *
+ * With regard to OSD API, we have to declare any changes ahead. In this
+ * case we declare an intention to add a llog record representing the
+ * change on the local storage.
+ *
+ * \param[in] env LU environment provided by the caller
+ * \param[in] o OSP object
+ * \param[in] type type of change: MDS_UNLINK64_REC or MDS_SETATTR64_REC
+ * \param[in] th transaction handle (local)
+ *
+ * \retval 0 on success
+ * \retval negative negated errno on error
+ */
int osp_sync_declare_add(const struct lu_env *env, struct osp_object *o,
llog_op_type type, struct thandle *th)
{
struct osp_thread_info *osi = osp_env_info(env);
struct osp_device *d = lu2osp_dev(o->opo_obj.do_lu.lo_dev);
struct llog_ctxt *ctxt;
+ struct thandle *storage_th;
int rc;
ENTRY;
/* it's a layering violation, to access internals of th,
* but we can do this as a sanity check, for a while */
- LASSERT(th->th_dev == d->opd_storage);
+ LASSERT(th->th_top != NULL);
+ storage_th = thandle_get_sub_by_dt(env, th->th_top, d->opd_storage);
+ if (IS_ERR(storage_th))
+ RETURN(PTR_ERR(storage_th));
switch (type) {
case MDS_UNLINK64_REC:
}
/* we want ->dt_trans_start() to allocate per-thandle structure */
- th->th_tags |= LCT_OSP_THREAD;
+ storage_th->th_tags |= LCT_OSP_THREAD;
ctxt = llog_get_context(d->opd_obd, LLOG_MDS_OST_ORIG_CTXT);
LASSERT(ctxt);
- rc = llog_declare_add(env, ctxt->loc_handle, &osi->osi_hdr, th);
+ rc = llog_declare_add(env, ctxt->loc_handle, &osi->osi_hdr,
+ storage_th);
llog_ctxt_put(ctxt);
RETURN(rc);
}
+/**
+ * Generate a llog record for a given change.
+ *
+ * Generates a llog record for the change passed. The change can be of two
+ * types: unlink and setattr. The record gets an ID which later will be
+ * used to track commit status of the change. For unlink changes, the caller
+ * can supply a starting FID and the count of the objects to destroy. For
+ * setattr the caller should apply attributes to apply.
+ *
+ *
+ * \param[in] env LU environment provided by the caller
+ * \param[in] d OSP device
+ * \param[in] fid fid of the object the change should be applied to
+ * \param[in] type type of change: MDS_UNLINK64_REC or MDS_SETATTR64_REC
+ * \param[in] count count of objects to destroy
+ * \param[in] th transaction handle (local)
+ * \param[in] attr attributes for setattr
+ *
+ * \retval 0 on success
+ * \retval negative negated errno on error
+ */
static int osp_sync_add_rec(const struct lu_env *env, struct osp_device *d,
const struct lu_fid *fid, llog_op_type type,
int count, struct thandle *th,
struct osp_thread_info *osi = osp_env_info(env);
struct llog_ctxt *ctxt;
struct osp_txn_info *txn;
+ struct thandle *storage_th;
int rc;
ENTRY;
/* it's a layering violation, to access internals of th,
* but we can do this as a sanity check, for a while */
- LASSERT(th->th_dev == d->opd_storage);
+ LASSERT(th->th_top != NULL);
+ storage_th = thandle_get_sub_by_dt(env, th->th_top, d->opd_storage);
+ if (IS_ERR(storage_th))
+ RETURN(PTR_ERR(storage_th));
switch (type) {
case MDS_UNLINK64_REC:
LASSERT(attr);
osi->osi_setattr.lsr_uid = attr->la_uid;
osi->osi_setattr.lsr_gid = attr->la_gid;
+ osi->osi_setattr.lsr_valid =
+ ((attr->la_valid & LA_UID) ? OBD_MD_FLUID : 0) |
+ ((attr->la_valid & LA_GID) ? OBD_MD_FLGID : 0);
break;
default:
LBUG();
}
- txn = osp_txn_info(&th->th_ctx);
+ txn = osp_txn_info(&storage_th->th_ctx);
LASSERT(txn);
txn->oti_current_id = osp_sync_id_get(d, txn->oti_current_id);
- osi->osi_hdr.lrh_id = txn->oti_current_id;
-
+ osi->osi_hdr.lrh_id = (txn->oti_current_id & 0xffffffffULL);
ctxt = llog_get_context(d->opd_obd, LLOG_MDS_OST_ORIG_CTXT);
if (ctxt == NULL)
RETURN(-ENOMEM);
+
rc = llog_add(env, ctxt->loc_handle, &osi->osi_hdr, &osi->osi_cookie,
- NULL, th);
+ storage_th);
llog_ctxt_put(ctxt);
- CDEBUG(D_OTHER, "%s: new record "DOSTID":%lu/%lu: %d\n",
- d->opd_obd->obd_name, POSTID(&osi->osi_cookie.lgc_lgl.lgl_oi),
- (unsigned long) osi->osi_cookie.lgc_lgl.lgl_ogen,
- (unsigned long) osi->osi_cookie.lgc_index, rc);
-
- if (rc > 0)
- rc = 0;
-
- if (likely(rc == 0)) {
- spin_lock(&d->opd_syn_lock);
- d->opd_syn_changes++;
- spin_unlock(&d->opd_syn_lock);
+ if (likely(rc >= 0)) {
+ CDEBUG(D_OTHER, "%s: new record "DOSTID":%lu/%lu: %d\n",
+ d->opd_obd->obd_name,
+ POSTID(&osi->osi_cookie.lgc_lgl.lgl_oi),
+ (unsigned long)osi->osi_cookie.lgc_lgl.lgl_ogen,
+ (unsigned long)osi->osi_cookie.lgc_index, rc);
+ atomic_inc(&d->opd_syn_changes);
}
-
- RETURN(rc);
+ /* return 0 always here, error case just cause no llog record */
+ RETURN(0);
}
int osp_sync_add(const struct lu_env *env, struct osp_object *o,
}
int osp_sync_gap(const struct lu_env *env, struct osp_device *d,
- struct lu_fid *fid, int lost, struct thandle *th)
+ struct lu_fid *fid, int lost, struct thandle *th)
{
return osp_sync_add_rec(env, d, fid, MDS_UNLINK64_REC, lost, th, NULL);
}
* subsequent commit callback (at the most)
*/
-/*
- * called for each atomic on-disk change (not once per transaction batch)
- * and goes over the list
- * XXX: should be optimized?
- */
-
/**
- * called for each RPC reported committed
+ * ptlrpc commit callback.
+ *
+ * The callback is called by PTLRPC when a RPC is reported committed by the
+ * target (OST). We register the callback for the every RPC applying a change
+ * from the llog. This way we know then the llog records can be cancelled.
+ * Notice the callback can be called when OSP is finishing. We can detect this
+ * checking that actual transno in the request is less or equal of known
+ * committed transno (see osp_sync_process_committed() for the details).
+ * XXX: this is pretty expensive and can be improved later using batching.
+ *
+ * \param[in] req request
*/
static void osp_sync_request_commit_cb(struct ptlrpc_request *req)
{
struct osp_device *d = req->rq_cb_data;
+ struct osp_job_req_args *jra;
CDEBUG(D_HA, "commit req %p, transno "LPU64"\n", req, req->rq_transno);
/* do not do any opd_dyn_rpc_* accounting here
* it's done in osp_sync_interpret sooner or later */
-
LASSERT(d);
- LASSERT(req->rq_svc_thread == (void *) OSP_JOB_MAGIC);
- LASSERT(cfs_list_empty(&req->rq_exp_list));
+
+ jra = ptlrpc_req_async_args(req);
+ LASSERT(jra->jra_magic == OSP_JOB_MAGIC);
+ LASSERT(list_empty(&jra->jra_committed_link));
ptlrpc_request_addref(req);
spin_lock(&d->opd_syn_lock);
- cfs_list_add(&req->rq_exp_list, &d->opd_syn_committed_there);
+ list_add(&jra->jra_committed_link, &d->opd_syn_committed_there);
spin_unlock(&d->opd_syn_lock);
/* XXX: some batching wouldn't hurt */
- cfs_waitq_signal(&d->opd_syn_waitq);
+ wake_up(&d->opd_syn_waitq);
}
+/**
+ * RPC interpretation callback.
+ *
+ * The callback is called by ptlrpc when RPC is replied. Now we have to decide
+ * whether we should:
+ * - put request on a special list to wait until it's committed by the target,
+ * if the request is successful
+ * - schedule llog record cancel if no target object is found
+ * - try later (essentially after reboot) in case of unexpected error
+ *
+ * \param[in] env LU environment provided by the caller
+ * \param[in] req request replied
+ * \param[in] aa callback data
+ * \param[in] rc result of RPC
+ *
+ * \retval 0 always
+ */
static int osp_sync_interpret(const struct lu_env *env,
struct ptlrpc_request *req, void *aa, int rc)
{
struct osp_device *d = req->rq_cb_data;
+ struct osp_job_req_args *jra = aa;
- if (req->rq_svc_thread != (void *) OSP_JOB_MAGIC)
- DEBUG_REQ(D_ERROR, req, "bad magic %p\n", req->rq_svc_thread);
- LASSERT(req->rq_svc_thread == (void *) OSP_JOB_MAGIC);
+ if (jra->jra_magic != OSP_JOB_MAGIC) {
+ DEBUG_REQ(D_ERROR, req, "bad magic %u\n", jra->jra_magic);
+ LBUG();
+ }
LASSERT(d);
CDEBUG(D_HA, "reply req %p/%d, rc %d, transno %u\n", req,
- cfs_atomic_read(&req->rq_refcount),
+ atomic_read(&req->rq_refcount),
rc, (unsigned) req->rq_transno);
LASSERT(rc || req->rq_transno);
* but object doesn't exist anymore - cancell llog record
*/
LASSERT(req->rq_transno == 0);
- LASSERT(cfs_list_empty(&req->rq_exp_list));
+ LASSERT(list_empty(&jra->jra_committed_link));
ptlrpc_request_addref(req);
spin_lock(&d->opd_syn_lock);
- cfs_list_add(&req->rq_exp_list, &d->opd_syn_committed_there);
+ list_add(&jra->jra_committed_link, &d->opd_syn_committed_there);
spin_unlock(&d->opd_syn_lock);
- cfs_waitq_signal(&d->opd_syn_waitq);
+ wake_up(&d->opd_syn_waitq);
} else if (rc) {
struct obd_import *imp = req->rq_import;
/*
/* this is the last time we see the request
* if transno is not zero, then commit cb
* will be called at some point */
- LASSERT(d->opd_syn_rpc_in_progress > 0);
- spin_lock(&d->opd_syn_lock);
- d->opd_syn_rpc_in_progress--;
- spin_unlock(&d->opd_syn_lock);
+ LASSERT(atomic_read(&d->opd_syn_rpc_in_progress) > 0);
+ atomic_dec(&d->opd_syn_rpc_in_progress);
}
- cfs_waitq_signal(&d->opd_syn_waitq);
- } else if (unlikely(d->opd_pre_status == -ENOSPC)) {
+ wake_up(&d->opd_syn_waitq);
+ } else if (d->opd_pre != NULL &&
+ unlikely(d->opd_pre_status == -ENOSPC)) {
/*
* if current status is -ENOSPC (lack of free space on OST)
* then we should poll OST immediately once object destroy
osp_statfs_need_now(d);
}
- LASSERT(d->opd_syn_rpc_in_flight > 0);
spin_lock(&d->opd_syn_lock);
- d->opd_syn_rpc_in_flight--;
+ list_del_init(&jra->jra_inflight_link);
spin_unlock(&d->opd_syn_lock);
+ LASSERT(atomic_read(&d->opd_syn_rpc_in_flight) > 0);
+ atomic_dec(&d->opd_syn_rpc_in_flight);
+ if (unlikely(atomic_read(&d->opd_syn_barrier) > 0))
+ wake_up(&d->opd_syn_barrier_waitq);
CDEBUG(D_OTHER, "%s: %d in flight, %d in progress\n",
- d->opd_obd->obd_name, d->opd_syn_rpc_in_flight,
- d->opd_syn_rpc_in_progress);
+ d->opd_obd->obd_name, atomic_read(&d->opd_syn_rpc_in_flight),
+ atomic_read(&d->opd_syn_rpc_in_progress));
osp_sync_check_for_work(d);
}
/*
- * the function walks through list of committed locally changes
- * and send them to RPC until the pipe is full
+ * Add request to ptlrpc queue.
+ *
+ * This is just a tiny helper function to put the request on the sending list
+ *
+ * \param[in] d OSP device
+ * \param[in] req request
*/
static void osp_sync_send_new_rpc(struct osp_device *d,
struct ptlrpc_request *req)
{
- LASSERT(d->opd_syn_rpc_in_flight <= d->opd_syn_max_rpc_in_flight);
- LASSERT(req->rq_svc_thread == (void *) OSP_JOB_MAGIC);
+ struct osp_job_req_args *jra;
- ptlrpcd_add_req(req, PDL_POLICY_ROUND, -1);
+ LASSERT(atomic_read(&d->opd_syn_rpc_in_flight) <=
+ d->opd_syn_max_rpc_in_flight);
+
+ jra = ptlrpc_req_async_args(req);
+ jra->jra_magic = OSP_JOB_MAGIC;
+ INIT_LIST_HEAD(&jra->jra_committed_link);
+ spin_lock(&d->opd_syn_lock);
+ list_add_tail(&jra->jra_inflight_link, &d->opd_syn_inflight_list);
+ spin_unlock(&d->opd_syn_lock);
+
+ ptlrpcd_add_req(req);
}
+
+/**
+ * Allocate and prepare RPC for a new change.
+ *
+ * The function allocates and initializes an RPC which will be sent soon to
+ * apply the change to the target OST. The request is initialized from the
+ * llog record passed. Notice only the fields common to all type of changes
+ * are initialized.
+ *
+ * \param[in] d OSP device
+ * \param[in] llh llog handle where the record is stored
+ * \param[in] h llog record
+ * \param[in] op type of the change
+ * \param[in] format request format to be used
+ *
+ * \retval pointer new request on success
+ * \retval ERR_PTR(errno) on error
+ */
static struct ptlrpc_request *osp_sync_new_job(struct osp_device *d,
struct llog_handle *llh,
struct llog_rec_hdr *h,
/* Prepare the request */
imp = d->opd_obd->u.cli.cl_import;
LASSERT(imp);
+
+ if (OBD_FAIL_CHECK(OBD_FAIL_OSP_CHECK_ENOMEM))
+ RETURN(ERR_PTR(-ENOMEM));
+
req = ptlrpc_request_alloc(imp, format);
if (req == NULL)
RETURN(ERR_PTR(-ENOMEM));
body->oa.o_lcookie.lgc_lgl = llh->lgh_id;
body->oa.o_lcookie.lgc_subsys = LLOG_MDS_OST_ORIG_CTXT;
body->oa.o_lcookie.lgc_index = h->lrh_index;
- CFS_INIT_LIST_HEAD(&req->rq_exp_list);
- req->rq_svc_thread = (void *) OSP_JOB_MAGIC;
req->rq_interpret_reply = osp_sync_interpret;
req->rq_commit_cb = osp_sync_request_commit_cb;
return req;
}
+/**
+ * Generate a request for setattr change.
+ *
+ * The function prepares a new RPC, initializes it with setattr specific
+ * bits and send the RPC.
+ *
+ * \param[in] d OSP device
+ * \param[in] llh llog handle where the record is stored
+ * \param[in] h llog record
+ *
+ * \retval 0 on success
+ * \retval 1 on invalid record
+ * \retval negative negated errno on error
+ */
static int osp_sync_new_setattr_job(struct osp_device *d,
struct llog_handle *llh,
struct llog_rec_hdr *h)
ENTRY;
LASSERT(h->lrh_type == MDS_SETATTR64_REC);
+ if (OBD_FAIL_CHECK(OBD_FAIL_OSP_CHECK_INVALID_REC))
+ RETURN(1);
+ /* lsr_valid can only be 0 or have OBD_MD_{FLUID,FLGID} set,
+ * so no bits other than these should be set. */
+ if ((rec->lsr_valid & ~(OBD_MD_FLUID | OBD_MD_FLGID)) != 0) {
+ CERROR("%s: invalid setattr record, lsr_valid:"LPU64"\n",
+ d->opd_obd->obd_name, rec->lsr_valid);
+ /* return 1 on invalid record */
+ RETURN(1);
+ }
+
req = osp_sync_new_job(d, llh, h, OST_SETATTR, &RQF_OST_SETATTR);
if (IS_ERR(req))
RETURN(PTR_ERR(req));
body->oa.o_oi = rec->lsr_oi;
body->oa.o_uid = rec->lsr_uid;
body->oa.o_gid = rec->lsr_gid;
- body->oa.o_valid = OBD_MD_FLGROUP | OBD_MD_FLID |
- OBD_MD_FLUID | OBD_MD_FLGID;
+ body->oa.o_valid = OBD_MD_FLGROUP | OBD_MD_FLID;
+ /* old setattr record (prior 2.6.0) doesn't have 'valid' stored,
+ * we assume that both UID and GID are valid in that case. */
+ if (rec->lsr_valid == 0)
+ body->oa.o_valid |= (OBD_MD_FLUID | OBD_MD_FLGID);
+ else
+ body->oa.o_valid |= rec->lsr_valid;
osp_sync_send_new_rpc(d, req);
RETURN(0);
}
-/* Old records may be in old format, so we handle that too */
+/**
+ * Generate a request for unlink change.
+ *
+ * The function prepares a new RPC, initializes it with unlink(destroy)
+ * specific bits and sends the RPC. The function is used to handle
+ * llog_unlink_rec which were used in the older versions of Lustre.
+ * Current version uses llog_unlink_rec64.
+ *
+ * \param[in] d OSP device
+ * \param[in] llh llog handle where the record is stored
+ * \param[in] h llog record
+ *
+ * \retval 0 on success
+ * \retval negative negated errno on error
+ */
static int osp_sync_new_unlink_job(struct osp_device *d,
struct llog_handle *llh,
struct llog_rec_hdr *h)
RETURN(0);
}
+/**
+ * Generate a request for unlink change.
+ *
+ * The function prepares a new RPC, initializes it with unlink(destroy)
+ * specific bits and sends the RPC. Depending on the target (MDT or OST)
+ * two different protocols are used. For MDT we use OUT (basically OSD API
+ * updates transferred via a network). For OST we still use the old
+ * protocol (OBD?), originally for compatibility. Later we can start to
+ * use OUT for OST as well, this will allow batching and better code
+ * unification.
+ *
+ * \param[in] d OSP device
+ * \param[in] llh llog handle where the record is stored
+ * \param[in] h llog record
+ *
+ * \retval 0 on success
+ * \retval negative negated errno on error
+ */
static int osp_sync_new_unlink64_job(struct osp_device *d,
struct llog_handle *llh,
struct llog_rec_hdr *h)
{
struct llog_unlink64_rec *rec = (struct llog_unlink64_rec *)h;
- struct ptlrpc_request *req;
+ struct ptlrpc_request *req = NULL;
struct ost_body *body;
int rc;
ENTRY;
LASSERT(h->lrh_type == MDS_UNLINK64_REC);
-
- req = osp_sync_new_job(d, llh, h, OST_DESTROY, &RQF_OST_DESTROY);
+ req = osp_sync_new_job(d, llh, h, OST_DESTROY,
+ &RQF_OST_DESTROY);
if (IS_ERR(req))
RETURN(PTR_ERR(req));
if (rc < 0)
RETURN(rc);
body->oa.o_misc = rec->lur_count;
- body->oa.o_valid = OBD_MD_FLGROUP | OBD_MD_FLID | OBD_MD_FLOBJCOUNT;
-
+ body->oa.o_valid = OBD_MD_FLGROUP | OBD_MD_FLID |
+ OBD_MD_FLOBJCOUNT;
osp_sync_send_new_rpc(d, req);
RETURN(0);
}
-static int osp_sync_process_record(const struct lu_env *env,
- struct osp_device *d,
- struct llog_handle *llh,
- struct llog_rec_hdr *rec)
+/**
+ * Process llog records.
+ *
+ * This function is called to process the llog records committed locally.
+ * In the recovery model used by OSP we can apply a change to a remote
+ * target once corresponding transaction (like posix unlink) is committed
+ * locally so can't revert.
+ * Depending on the llog record type, a given handler is called that is
+ * responsible for preparing and sending the RPC to apply the change.
+ * Special record type LLOG_GEN_REC marking a reboot is cancelled right away.
+ *
+ * \param[in] env LU environment provided by the caller
+ * \param[in] d OSP device
+ * \param[in] llh llog handle where the record is stored
+ * \param[in] rec llog record
+ */
+static void osp_sync_process_record(const struct lu_env *env,
+ struct osp_device *d,
+ struct llog_handle *llh,
+ struct llog_rec_hdr *rec)
{
+ struct llog_handle *cathandle = llh->u.phd.phd_cat_handle;
struct llog_cookie cookie;
int rc = 0;
+ ENTRY;
+
cookie.lgc_lgl = llh->lgh_id;
cookie.lgc_subsys = LLOG_MDS_OST_ORIG_CTXT;
cookie.lgc_index = rec->lrh_index;
}
/* cancel any generation record */
- rc = llog_cat_cancel_records(env, llh->u.phd.phd_cat_handle,
- 1, &cookie);
+ rc = llog_cat_cancel_records(env, cathandle, 1, &cookie);
- return rc;
+ RETURN_EXIT;
}
/*
/* notice we increment counters before sending RPC, to be consistent
* in RPC interpret callback which may happen very quickly */
- spin_lock(&d->opd_syn_lock);
- d->opd_syn_rpc_in_flight++;
- d->opd_syn_rpc_in_progress++;
- spin_unlock(&d->opd_syn_lock);
+ atomic_inc(&d->opd_syn_rpc_in_flight);
+ atomic_inc(&d->opd_syn_rpc_in_progress);
switch (rec->lrh_type) {
/* case MDS_UNLINK_REC is kept for compatibility */
rc = osp_sync_new_setattr_job(d, llh, rec);
break;
default:
- CERROR("unknown record type: %x\n", rec->lrh_type);
- rc = -EINVAL;
- break;
+ CERROR("%s: unknown record type: %x\n", d->opd_obd->obd_name,
+ rec->lrh_type);
+ /* treat "unknown record type" as "invalid" */
+ rc = 1;
+ break;
}
- if (likely(rc == 0)) {
- spin_lock(&d->opd_syn_lock);
- if (d->opd_syn_prev_done) {
- LASSERT(d->opd_syn_changes > 0);
- LASSERT(rec->lrh_id <= d->opd_syn_last_committed_id);
- /*
- * NOTE: it's possible to meet same id if
- * OST stores few stripes of same file
- */
- if (rec->lrh_id > d->opd_syn_last_processed_id)
- d->opd_syn_last_processed_id = rec->lrh_id;
-
- d->opd_syn_changes--;
+ /* For all kinds of records, not matter successful or not,
+ * we should decrease changes and bump last_processed_id.
+ */
+ if (d->opd_syn_prev_done) {
+ __u64 correct_id = osp_sync_correct_id(d, rec);
+ LASSERT(atomic_read(&d->opd_syn_changes) > 0);
+ LASSERT(correct_id <= d->opd_syn_last_committed_id);
+ /* NOTE: it's possible to meet same id if
+ * OST stores few stripes of same file
+ */
+ while (1) {
+ /* another thread may be trying to set new value */
+ rmb();
+ if (correct_id > d->opd_syn_last_processed_id) {
+ d->opd_syn_last_processed_id = correct_id;
+ wake_up(&d->opd_syn_barrier_waitq);
+ } else
+ break;
}
- CDEBUG(D_OTHER, "%s: %d in flight, %d in progress\n",
- d->opd_obd->obd_name, d->opd_syn_rpc_in_flight,
- d->opd_syn_rpc_in_progress);
- spin_unlock(&d->opd_syn_lock);
- } else {
- spin_lock(&d->opd_syn_lock);
- d->opd_syn_rpc_in_flight--;
- d->opd_syn_rpc_in_progress--;
- spin_unlock(&d->opd_syn_lock);
+ atomic_dec(&d->opd_syn_changes);
+ }
+ if (rc != 0) {
+ atomic_dec(&d->opd_syn_rpc_in_flight);
+ atomic_dec(&d->opd_syn_rpc_in_progress);
}
- CDEBUG(D_HA, "found record %x, %d, idx %u, id %u: %d\n",
- rec->lrh_type, rec->lrh_len, rec->lrh_index, rec->lrh_id, rc);
- return rc;
+ CDEBUG(D_OTHER, "%s: %d in flight, %d in progress\n",
+ d->opd_obd->obd_name, atomic_read(&d->opd_syn_rpc_in_flight),
+ atomic_read(&d->opd_syn_rpc_in_progress));
+
+ /* Delete the invalid record */
+ if (rc == 1) {
+ rc = llog_cat_cancel_records(env, cathandle, 1, &cookie);
+ if (rc != 0)
+ CERROR("%s: can't delete invalid record: "
+ "fid = "DFID", rec_id = %u, rc = %d\n",
+ d->opd_obd->obd_name,
+ PFID(lu_object_fid(&cathandle->lgh_obj->do_lu)),
+ rec->lrh_id, rc);
+ }
+
+ CDEBUG(D_HA, "found record %x, %d, idx %u, id %u\n",
+ rec->lrh_type, rec->lrh_len, rec->lrh_index, rec->lrh_id);
+
+ RETURN_EXIT;
}
+/**
+ * Cancel llog records for the committed changes.
+ *
+ * The function walks through the list of the committed RPCs and cancels
+ * corresponding llog records. see osp_sync_request_commit_cb() for the
+ * details.
+ *
+ * \param[in] env LU environment provided by the caller
+ * \param[in] d OSP device
+ */
static void osp_sync_process_committed(const struct lu_env *env,
struct osp_device *d)
{
struct obd_device *obd = d->opd_obd;
struct obd_import *imp = obd->u.cli.cl_import;
struct ost_body *body;
- struct ptlrpc_request *req, *tmp;
+ struct ptlrpc_request *req;
struct llog_ctxt *ctxt;
struct llog_handle *llh;
- cfs_list_t list;
+ struct list_head list;
int rc, done = 0;
ENTRY;
- if (cfs_list_empty(&d->opd_syn_committed_there))
+ if (list_empty(&d->opd_syn_committed_there))
return;
/*
* notice: we do this upon commit as well because some backends
* (like DMU) do not release space right away.
*/
- if (unlikely(d->opd_pre_status == -ENOSPC))
+ if (d->opd_pre != NULL && unlikely(d->opd_pre_status == -ENOSPC))
osp_statfs_need_now(d);
/*
llh = ctxt->loc_handle;
LASSERT(llh);
- CFS_INIT_LIST_HEAD(&list);
+ INIT_LIST_HEAD(&list);
spin_lock(&d->opd_syn_lock);
- cfs_list_splice(&d->opd_syn_committed_there, &list);
- CFS_INIT_LIST_HEAD(&d->opd_syn_committed_there);
+ list_splice(&d->opd_syn_committed_there, &list);
+ INIT_LIST_HEAD(&d->opd_syn_committed_there);
spin_unlock(&d->opd_syn_lock);
- cfs_list_for_each_entry_safe(req, tmp, &list, rq_exp_list) {
- LASSERT(req->rq_svc_thread == (void *) OSP_JOB_MAGIC);
- cfs_list_del_init(&req->rq_exp_list);
+ while (!list_empty(&list)) {
+ struct osp_job_req_args *jra;
- body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
- LASSERT(body);
+ jra = list_entry(list.next, struct osp_job_req_args,
+ jra_committed_link);
+ LASSERT(jra->jra_magic == OSP_JOB_MAGIC);
+ list_del_init(&jra->jra_committed_link);
+ req = container_of((void *)jra, struct ptlrpc_request,
+ rq_async_args);
+ body = req_capsule_client_get(&req->rq_pill,
+ &RMF_OST_BODY);
+ LASSERT(body);
/* import can be closing, thus all commit cb's are
* called we can check committness directly */
- if (req->rq_transno <= imp->imp_peer_committed_transno) {
+ if (req->rq_import_generation == imp->imp_generation) {
rc = llog_cat_cancel_records(env, llh, 1,
&body->oa.o_lcookie);
if (rc)
CERROR("%s: can't cancel record: %d\n",
obd->obd_name, rc);
} else {
- DEBUG_REQ(D_HA, req, "not committed");
+ DEBUG_REQ(D_OTHER, req, "imp_committed = "LPU64,
+ imp->imp_peer_committed_transno);
}
-
ptlrpc_req_finished(req);
done++;
}
llog_ctxt_put(ctxt);
- LASSERT(d->opd_syn_rpc_in_progress >= done);
- spin_lock(&d->opd_syn_lock);
- d->opd_syn_rpc_in_progress -= done;
- spin_unlock(&d->opd_syn_lock);
+ LASSERT(atomic_read(&d->opd_syn_rpc_in_progress) >= done);
+ atomic_sub(done, &d->opd_syn_rpc_in_progress);
CDEBUG(D_OTHER, "%s: %d in flight, %d in progress\n",
- d->opd_obd->obd_name, d->opd_syn_rpc_in_flight,
- d->opd_syn_rpc_in_progress);
+ d->opd_obd->obd_name, atomic_read(&d->opd_syn_rpc_in_flight),
+ atomic_read(&d->opd_syn_rpc_in_progress));
osp_sync_check_for_work(d);
/* wake up the thread if requested to stop:
* it might be waiting for in-progress to complete */
if (unlikely(osp_sync_running(d) == 0))
- cfs_waitq_signal(&d->opd_syn_waitq);
+ wake_up(&d->opd_syn_waitq);
EXIT;
}
-/*
- * this is where most of queues processing happens
+/**
+ * The core of the syncing mechanism.
+ *
+ * This is a callback called by the llog processing function. Essentially it
+ * suspends llog processing until there is a record to process (it's supposed
+ * to be committed locally). The function handles RPCs committed by the target
+ * and cancels corresponding llog records.
+ *
+ * \param[in] env LU environment provided by the caller
+ * \param[in] llh llog handle we're processing
+ * \param[in] rec current llog record
+ * \param[in] data callback data containing a pointer to the device
+ *
+ * \retval 0 to ask the caller (llog_process()) to continue
+ * \retval LLOG_PROC_BREAK to ask the caller to break
*/
static int osp_sync_process_queues(const struct lu_env *env,
struct llog_handle *llh,
void *data)
{
struct osp_device *d = data;
- int rc;
do {
struct l_wait_info lwi = { 0 };
if (osp_sync_can_process_new(d, rec)) {
if (llh == NULL) {
/* ask llog for another record */
- CDEBUG(D_HA, "%lu changes, %u in progress, %u in flight\n",
- d->opd_syn_changes,
- d->opd_syn_rpc_in_progress,
- d->opd_syn_rpc_in_flight);
+ CDEBUG(D_HA, "%u changes, %u in progress,"
+ " %u in flight\n",
+ atomic_read(&d->opd_syn_changes),
+ atomic_read(&d->opd_syn_rpc_in_progress),
+ atomic_read(&d->opd_syn_rpc_in_flight));
return 0;
}
-
- /*
- * try to send, in case of disconnection, suspend
- * processing till we can send this request
- */
- do {
- rc = osp_sync_process_record(env, d, llh, rec);
- /*
- * XXX: probably different handling is needed
- * for some bugs, like immediate exit or if
- * OSP gets inactive
- */
- if (rc) {
- CERROR("can't send: %d\n", rc);
- l_wait_event(d->opd_syn_waitq,
- !osp_sync_running(d) ||
- osp_sync_has_work(d),
- &lwi);
- }
- } while (rc != 0 && osp_sync_running(d));
-
+ osp_sync_process_record(env, d, llh, rec);
llh = NULL;
rec = NULL;
}
l_wait_event(d->opd_syn_waitq,
!osp_sync_running(d) ||
osp_sync_can_process_new(d, rec) ||
- !cfs_list_empty(&d->opd_syn_committed_there),
+ !list_empty(&d->opd_syn_committed_there),
&lwi);
} while (1);
}
-/*
- * this thread runs llog_cat_process() scanner calling our callback
+/**
+ * OSP sync thread.
+ *
+ * This thread runs llog_cat_process() scanner calling our callback
* to process llog records. in the callback we implement tricky
* state machine as we don't want to start scanning of the llog again
* and again, also we don't want to process too many records and send
* being synced to OST) the callback can suspend awaiting for some
* new conditions, like syncs completed.
*
- * in order to process llog records left by previous boots and to allow
+ * In order to process llog records left by previous boots and to allow
* llog_process_thread() to find something (otherwise it'd just exit
* immediately) we add a special GENERATATION record on each boot.
+ *
+ * \param[in] _arg a pointer to thread's arguments
+ *
+ * \retval 0 on success
+ * \retval negative negated errno on error
*/
static int osp_sync_thread(void *_arg)
{
spin_lock(&d->opd_syn_lock);
thread->t_flags = SVC_RUNNING;
spin_unlock(&d->opd_syn_lock);
- cfs_waitq_signal(&thread->t_ctl_waitq);
+ wake_up(&thread->t_ctl_waitq);
ctxt = llog_get_context(obd, LLOG_MDS_OST_ORIG_CTXT);
if (ctxt == NULL) {
rc = llog_cat_process(&env, llh, osp_sync_process_queues, d, 0, 0);
LASSERTF(rc == 0 || rc == LLOG_PROC_BREAK,
- "%lu changes, %u in progress, %u in flight: %d\n",
- d->opd_syn_changes, d->opd_syn_rpc_in_progress,
- d->opd_syn_rpc_in_flight, rc);
+ "%u changes, %u in progress, %u in flight: %d\n",
+ atomic_read(&d->opd_syn_changes),
+ atomic_read(&d->opd_syn_rpc_in_progress),
+ atomic_read(&d->opd_syn_rpc_in_flight), rc);
/* we don't expect llog_process_thread() to exit till umount */
LASSERTF(thread->t_flags != SVC_RUNNING,
- "%lu changes, %u in progress, %u in flight\n",
- d->opd_syn_changes, d->opd_syn_rpc_in_progress,
- d->opd_syn_rpc_in_flight);
+ "%u changes, %u in progress, %u in flight\n",
+ atomic_read(&d->opd_syn_changes),
+ atomic_read(&d->opd_syn_rpc_in_progress),
+ atomic_read(&d->opd_syn_rpc_in_flight));
/* wait till all the requests are completed */
count = 0;
- while (d->opd_syn_rpc_in_progress > 0) {
+ while (atomic_read(&d->opd_syn_rpc_in_progress) > 0) {
osp_sync_process_committed(&env, d);
lwi = LWI_TIMEOUT(cfs_time_seconds(5), NULL, NULL);
rc = l_wait_event(d->opd_syn_waitq,
- d->opd_syn_rpc_in_progress == 0,
+ atomic_read(&d->opd_syn_rpc_in_progress) == 0,
&lwi);
if (rc == -ETIMEDOUT)
count++;
LASSERTF(count < 10, "%s: %d %d %sempty\n",
- d->opd_obd->obd_name, d->opd_syn_rpc_in_progress,
- d->opd_syn_rpc_in_flight,
- cfs_list_empty(&d->opd_syn_committed_there) ? "" :"!");
+ d->opd_obd->obd_name,
+ atomic_read(&d->opd_syn_rpc_in_progress),
+ atomic_read(&d->opd_syn_rpc_in_flight),
+ list_empty(&d->opd_syn_committed_there) ? "" : "!");
}
if (rc)
CERROR("can't cleanup llog: %d\n", rc);
out:
- LASSERTF(d->opd_syn_rpc_in_progress == 0,
+ LASSERTF(atomic_read(&d->opd_syn_rpc_in_progress) == 0,
"%s: %d %d %sempty\n",
- d->opd_obd->obd_name, d->opd_syn_rpc_in_progress,
- d->opd_syn_rpc_in_flight,
- cfs_list_empty(&d->opd_syn_committed_there) ? "" : "!");
+ d->opd_obd->obd_name, atomic_read(&d->opd_syn_rpc_in_progress),
+ atomic_read(&d->opd_syn_rpc_in_flight),
+ list_empty(&d->opd_syn_committed_there) ? "" : "!");
thread->t_flags = SVC_STOPPED;
- cfs_waitq_signal(&thread->t_ctl_waitq);
+ wake_up(&thread->t_ctl_waitq);
lu_env_fini(&env);
RETURN(0);
}
+/**
+ * Initialize llog.
+ *
+ * Initializes the llog. The specific llog to be used depends on the type of
+ * the target OSP represents (OST or MDT). The function appends a new llog
+ * record to mark the place where the records associated with this boot
+ * start.
+ *
+ * \param[in] env LU environment provided by the caller
+ * \param[in] d OSP device
+ *
+ * \retval 0 on success
+ * \retval negative negated errno on error
+ */
static int osp_sync_llog_init(const struct lu_env *env, struct osp_device *d)
{
- struct osp_thread_info *osi = osp_env_info(env);
- struct llog_handle *lgh = NULL;
- struct obd_device *obd = d->opd_obd;
- struct llog_ctxt *ctxt;
- int rc;
+ struct osp_thread_info *osi = osp_env_info(env);
+ struct lu_fid *fid = &osi->osi_fid;
+ struct llog_handle *lgh = NULL;
+ struct obd_device *obd = d->opd_obd;
+ struct llog_ctxt *ctxt;
+ int rc;
ENTRY;
OBD_SET_CTXT_MAGIC(&obd->obd_lvfs_ctxt);
obd->obd_lvfs_ctxt.dt = d->opd_storage;
+ lu_local_obj_fid(fid, LLOG_CATALOGS_OID);
+
rc = llog_osd_get_cat_list(env, d->opd_storage, d->opd_index, 1,
- &osi->osi_cid);
- if (rc) {
- CERROR("%s: can't get id from catalogs: rc = %d\n",
- obd->obd_name, rc);
- RETURN(rc);
+ &osi->osi_cid, fid);
+ if (rc < 0) {
+ if (rc != -EFAULT) {
+ CERROR("%s: can't get id from catalogs: rc = %d\n",
+ obd->obd_name, rc);
+ RETURN(rc);
+ }
+
+ /* After sparse OST indices is supported, the CATALOG file
+ * may become a sparse file that results in failure on
+ * reading. Skip this error as the llog will be created
+ * later */
+ memset(&osi->osi_cid, 0, sizeof(osi->osi_cid));
+ rc = 0;
}
CDEBUG(D_INFO, "%s: Init llog for %d - catid "DOSTID":%x\n",
POSTID(&osi->osi_cid.lci_logid.lgl_oi),
osi->osi_cid.lci_logid.lgl_ogen);
- rc = llog_setup(env, obd, &obd->obd_olg, LLOG_MDS_OST_ORIG_CTXT, obd,
+ rc = llog_setup(env, obd, &obd->obd_olg, LLOG_MDS_OST_ORIG_CTXT,
+ d->opd_storage->dd_lu_dev.ld_obd,
&osp_mds_ost_orig_logops);
if (rc)
RETURN(rc);
LASSERT(lgh != NULL);
ctxt->loc_handle = lgh;
- rc = llog_cat_init_and_process(env, lgh);
+ rc = llog_init_handle(env, lgh, LLOG_F_IS_CAT, NULL);
if (rc)
GOTO(out_close, rc);
rc = llog_osd_put_cat_list(env, d->opd_storage, d->opd_index, 1,
- &osi->osi_cid);
+ &osi->osi_cid, fid);
if (rc)
GOTO(out_close, rc);
memcpy(&osi->osi_gen.lgr_gen, &d->opd_syn_generation,
sizeof(osi->osi_gen.lgr_gen));
- rc = llog_cat_add(env, lgh, &osi->osi_gen.lgr_hdr, &osi->osi_cookie,
- NULL);
+ rc = llog_cat_add(env, lgh, &osi->osi_gen.lgr_hdr, &osi->osi_cookie);
if (rc < 0)
GOTO(out_close, rc);
llog_ctxt_put(ctxt);
RETURN(rc);
}
+/**
+ * Cleanup llog used for syncing.
+ *
+ * Closes and cleans up the llog. The function is called when the device is
+ * shutting down.
+ *
+ * \param[in] env LU environment provided by the caller
+ * \param[in] d OSP device
+ */
static void osp_sync_llog_fini(const struct lu_env *env, struct osp_device *d)
{
struct llog_ctxt *ctxt;
llog_cleanup(env, ctxt);
}
-/*
- * initializes sync component of OSP
+/**
+ * Initialization of the sync component of OSP.
+ *
+ * Initializes the llog and starts a new thread to handle the changes to
+ * the remote target (OST or MDT).
+ *
+ * \param[in] env LU environment provided by the caller
+ * \param[in] d OSP device
+ *
+ * \retval 0 on success
+ * \retval negative negated errno on error
*/
int osp_sync_init(const struct lu_env *env, struct osp_device *d)
{
struct l_wait_info lwi = { 0 };
+ struct task_struct *task;
int rc;
ENTRY;
d->opd_syn_max_rpc_in_flight = OSP_MAX_IN_FLIGHT;
d->opd_syn_max_rpc_in_progress = OSP_MAX_IN_PROGRESS;
spin_lock_init(&d->opd_syn_lock);
- cfs_waitq_init(&d->opd_syn_waitq);
- cfs_waitq_init(&d->opd_syn_thread.t_ctl_waitq);
- CFS_INIT_LIST_HEAD(&d->opd_syn_committed_there);
-
- rc = PTR_ERR(kthread_run(osp_sync_thread, d,
- "osp-syn-%u", d->opd_index));
- if (IS_ERR_VALUE(rc)) {
- CERROR("%s: can't start sync thread: rc = %d\n",
+ init_waitqueue_head(&d->opd_syn_waitq);
+ init_waitqueue_head(&d->opd_syn_barrier_waitq);
+ init_waitqueue_head(&d->opd_syn_thread.t_ctl_waitq);
+ INIT_LIST_HEAD(&d->opd_syn_inflight_list);
+ INIT_LIST_HEAD(&d->opd_syn_committed_there);
+
+ task = kthread_run(osp_sync_thread, d, "osp-syn-%u-%u",
+ d->opd_index, d->opd_group);
+ if (IS_ERR(task)) {
+ rc = PTR_ERR(task);
+ CERROR("%s: cannot start sync thread: rc = %d\n",
d->opd_obd->obd_name, rc);
GOTO(err_llog, rc);
}
return rc;
}
+/**
+ * Stop the syncing thread.
+ *
+ * Asks the syncing thread to stop and waits until it has stopped.
+ *
+ * \param[in] d OSP device
+ *
+ * \retval 0
+ */
int osp_sync_fini(struct osp_device *d)
{
struct ptlrpc_thread *thread = &d->opd_syn_thread;
ENTRY;
thread->t_flags = SVC_STOPPING;
- cfs_waitq_signal(&d->opd_syn_waitq);
- cfs_wait_event(thread->t_ctl_waitq, thread->t_flags & SVC_STOPPED);
+ wake_up(&d->opd_syn_waitq);
+ wait_event(thread->t_ctl_waitq, thread->t_flags & SVC_STOPPED);
/*
* unregister transaction callbacks only when sync thread
}
static DEFINE_MUTEX(osp_id_tracker_sem);
-static CFS_LIST_HEAD(osp_id_tracker_list);
+static struct list_head osp_id_tracker_list =
+ LIST_HEAD_INIT(osp_id_tracker_list);
+/**
+ * OSD commit callback.
+ *
+ * The function is used as a local OSD commit callback to track the highest
+ * committed llog record id. See osp_sync_id_traction_init() for the details.
+ *
+ * \param[in] th local transaction handle committed
+ * \param[in] cookie commit callback data (our private structure)
+ */
static void osp_sync_tracker_commit_cb(struct thandle *th, void *cookie)
{
struct osp_id_tracker *tr = cookie;
spin_lock(&tr->otr_lock);
if (likely(txn->oti_current_id > tr->otr_committed_id)) {
- CDEBUG(D_OTHER, "committed: %u -> %u\n",
+ CDEBUG(D_OTHER, "committed: "LPU64" -> "LPU64"\n",
tr->otr_committed_id, txn->oti_current_id);
tr->otr_committed_id = txn->oti_current_id;
- cfs_list_for_each_entry(d, &tr->otr_wakeup_list,
- opd_syn_ontrack) {
+ list_for_each_entry(d, &tr->otr_wakeup_list,
+ opd_syn_ontrack) {
d->opd_syn_last_committed_id = tr->otr_committed_id;
- cfs_waitq_signal(&d->opd_syn_waitq);
+ wake_up(&d->opd_syn_waitq);
}
}
spin_unlock(&tr->otr_lock);
}
+/**
+ * Initialize commit tracking mechanism.
+ *
+ * Some setups may have thousands of OSTs and each will be represented by OSP.
+ * Meaning an order of magnitude more changes to apply every second. In order
+ * to keep the number of commit callbacks low this mechanism was introduced.
+ * The mechanism is very similar to transno used by the MDT service: it's a
+ * single ID stream which can be assigned by any OSP to its llog records. The
+ * tricky part is that the ID is stored in per-transaction data and re-used by
+ * all the OSPs involved in that transaction. Then all these OSPs are woken up
+ * using a single OSD commit callback.
+ *
+ * The function initializes the data used by the tracker described above.
+ * A single tracker per OSD device is created.
+ *
+ * \param[in] d OSP device
+ *
+ * \retval 0 on success
+ * \retval negative negated errno on error
+ */
static int osp_sync_id_traction_init(struct osp_device *d)
{
struct osp_id_tracker *tr, *found = NULL;
LASSERT(d);
LASSERT(d->opd_storage);
LASSERT(d->opd_syn_tracker == NULL);
- CFS_INIT_LIST_HEAD(&d->opd_syn_ontrack);
+ INIT_LIST_HEAD(&d->opd_syn_ontrack);
mutex_lock(&osp_id_tracker_sem);
- cfs_list_for_each_entry(tr, &osp_id_tracker_list, otr_list) {
+ list_for_each_entry(tr, &osp_id_tracker_list, otr_list) {
if (tr->otr_dev == d->opd_storage) {
- LASSERT(cfs_atomic_read(&tr->otr_refcount));
- cfs_atomic_inc(&tr->otr_refcount);
+ LASSERT(atomic_read(&tr->otr_refcount));
+ atomic_inc(&tr->otr_refcount);
d->opd_syn_tracker = tr;
found = tr;
break;
tr->otr_dev = d->opd_storage;
tr->otr_next_id = 1;
tr->otr_committed_id = 0;
- cfs_atomic_set(&tr->otr_refcount, 1);
- CFS_INIT_LIST_HEAD(&tr->otr_wakeup_list);
- cfs_list_add(&tr->otr_list, &osp_id_tracker_list);
+ atomic_set(&tr->otr_refcount, 1);
+ INIT_LIST_HEAD(&tr->otr_wakeup_list);
+ list_add(&tr->otr_list, &osp_id_tracker_list);
tr->otr_tx_cb.dtc_txn_commit =
osp_sync_tracker_commit_cb;
tr->otr_tx_cb.dtc_cookie = tr;
return rc;
}
+/**
+ * Release commit tracker.
+ *
+ * Decreases the refcount on the tracker used by the given OSP device \a d.
+ * If no more users left, then the tracker is released.
+ *
+ * \param[in] d OSP device
+ */
static void osp_sync_id_traction_fini(struct osp_device *d)
{
struct osp_id_tracker *tr;
osp_sync_remove_from_tracker(d);
mutex_lock(&osp_id_tracker_sem);
- if (cfs_atomic_dec_and_test(&tr->otr_refcount)) {
+ if (atomic_dec_and_test(&tr->otr_refcount)) {
dt_txn_callback_del(d->opd_storage, &tr->otr_tx_cb);
- LASSERT(cfs_list_empty(&tr->otr_wakeup_list));
- cfs_list_del(&tr->otr_list);
+ LASSERT(list_empty(&tr->otr_wakeup_list));
+ list_del(&tr->otr_list);
OBD_FREE_PTR(tr);
d->opd_syn_tracker = NULL;
}
EXIT;
}
-/*
- * generates id for the tracker
+/**
+ * Generate a new ID on a tracker.
+ *
+ * Generates a new ID using the tracker associated with the given OSP device
+ * \a d, if the given ID \a id is non-zero. Unconditionally adds the OSP
+ * the wakeup list, so OSP won't miss when a transaction using the ID is
+ * committed.
+ *
+ * \param[in] d OSP device
+ * \param[in] id 0 or ID generated previously
+ *
+ * \retval ID the caller should use
*/
-static __u32 osp_sync_id_get(struct osp_device *d, __u32 id)
+static __u64 osp_sync_id_get(struct osp_device *d, __u64 id)
{
struct osp_id_tracker *tr;
/* XXX: we can improve this introducing per-cpu preallocated ids? */
spin_lock(&tr->otr_lock);
+ if (OBD_FAIL_CHECK(OBD_FAIL_MDS_TRACK_OVERFLOW))
+ tr->otr_next_id = 0xfffffff0;
+
if (unlikely(tr->otr_next_id <= d->opd_syn_last_used_id)) {
spin_unlock(&tr->otr_lock);
- CERROR("%s: next %u, last synced %lu\n",
+ CERROR("%s: next "LPU64", last synced "LPU64"\n",
d->opd_obd->obd_name, tr->otr_next_id,
d->opd_syn_last_used_id);
LBUG();
id = tr->otr_next_id++;
if (id > d->opd_syn_last_used_id)
d->opd_syn_last_used_id = id;
- if (cfs_list_empty(&d->opd_syn_ontrack))
- cfs_list_add(&d->opd_syn_ontrack, &tr->otr_wakeup_list);
+ if (list_empty(&d->opd_syn_ontrack))
+ list_add(&d->opd_syn_ontrack, &tr->otr_wakeup_list);
spin_unlock(&tr->otr_lock);
- CDEBUG(D_OTHER, "new id %u\n", (unsigned) id);
+ CDEBUG(D_OTHER, "new id "LPU64"\n", id);
return id;
}
+/**
+ * Stop propagating commit status to the OSP.
+ *
+ * If the OSP does not have any llog records it is waiting to commit, then
+ * it is possible to unsubscribe from wakeups from the tracker using this
+ * method.
+ *
+ * \param[in] d OSP device that no longer needs to be woken up
+ */
static void osp_sync_remove_from_tracker(struct osp_device *d)
{
struct osp_id_tracker *tr;
tr = d->opd_syn_tracker;
LASSERT(tr);
- if (cfs_list_empty(&d->opd_syn_ontrack))
+ if (list_empty(&d->opd_syn_ontrack))
return;
spin_lock(&tr->otr_lock);
- cfs_list_del_init(&d->opd_syn_ontrack);
+ list_del_init(&d->opd_syn_ontrack);
spin_unlock(&tr->otr_lock);
}