* Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*
- * Copyright (c) 2011, 2012, Intel, Inc.
+ * Copyright (c) 2012, 2013, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
* Author: Mikhail Pershin <mike.pershin@intel.com>
*/
-#ifndef EXPORT_SYMTAB
-# define EXPORT_SYMTAB
-#endif
#define DEBUG_SUBSYSTEM S_MDS
#include <lustre_log.h>
return 1;
/* has remotely committed? */
- if (!cfs_list_empty(&d->opd_syn_committed_there))
+ if (!list_empty(&d->opd_syn_committed_there))
return 1;
return 0;
#define osp_sync_check_for_work(d) \
{ \
if (osp_sync_has_work(d)) { \
- cfs_waitq_signal(&d->opd_syn_waitq); \
+ wake_up(&d->opd_syn_waitq); \
} \
}
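/*
 * For context, the waiter this macro signals is the sync thread
 * blocking on opd_syn_waitq.  A minimal sketch of that pairing, using
 * the stock wait_event() in place of Lustre's l_wait_event() (the
 * predicates are the ones from this patch; the loop itself is
 * illustrative, not the real thread body):
 */
static int example_sync_loop(struct osp_device *d)
{
	while (osp_sync_running(d)) {
		/* osp_sync_check_for_work()'s wake_up() ends this sleep */
		wait_event(d->opd_syn_waitq,
			   !osp_sync_running(d) || osp_sync_has_work(d));
		/* ... process new llog records / committed requests ... */
	}
	return 0;
}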
{
LASSERT(d);
+ if (unlikely(atomic_read(&d->opd_syn_barrier) > 0))
+ return 0;
if (!osp_sync_low_in_progress(d))
return 0;
if (!osp_sync_low_in_flight(d))
osi->osi_unlink.lur_count = count;
break;
case MDS_SETATTR64_REC:
- rc = fid_ostid_pack(fid, &osi->osi_oi);
+ rc = fid_to_ostid(fid, &osi->osi_oi);
LASSERT(rc == 0);
osi->osi_hdr.lrh_len = sizeof(osi->osi_setattr);
osi->osi_hdr.lrh_type = MDS_SETATTR64_REC;
- osi->osi_setattr.lsr_oid = osi->osi_oi.oi_id;
- osi->osi_setattr.lsr_oseq = osi->osi_oi.oi_seq;
+ osi->osi_setattr.lsr_oi = osi->osi_oi;
LASSERT(attr);
osi->osi_setattr.lsr_uid = attr->la_uid;
osi->osi_setattr.lsr_gid = attr->la_gid;
+ osi->osi_setattr.lsr_valid = attr->la_valid;
break;
default:
LBUG();
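/*
 * For reference, the record filled above has roughly the layout below
 * after this change: lsr_oi replaces the old lsr_oid/lsr_oseq pair and
 * lsr_valid takes the former padding slot.  Treat the exact field list
 * as an assumption about lustre_idl.h of this era, not a quote:
 */
struct llog_setattr64_rec {
	struct llog_rec_hdr	lsr_hdr;
	struct ost_id		lsr_oi;		/* was lsr_oid + lsr_oseq */
	__u32			lsr_uid;
	__u32			lsr_uid_h;
	__u32			lsr_gid;
	__u32			lsr_gid_h;
	__u64			lsr_valid;	/* was padding */
	struct llog_rec_tail	lsr_tail;
};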
if (ctxt == NULL)
RETURN(-ENOMEM);
rc = llog_add(env, ctxt->loc_handle, &osi->osi_hdr, &osi->osi_cookie,
- NULL, th);
+ th);
llog_ctxt_put(ctxt);
- CDEBUG(D_OTHER, "%s: new record %lu:%lu:%lu/%lu: %d\n",
- d->opd_obd->obd_name,
- (unsigned long) osi->osi_cookie.lgc_lgl.lgl_oid,
- (unsigned long) osi->osi_cookie.lgc_lgl.lgl_oseq,
+ CDEBUG(D_OTHER, "%s: new record "DOSTID":%lu/%lu: %d\n",
+ d->opd_obd->obd_name, POSTID(&osi->osi_cookie.lgc_lgl.lgl_oi),
(unsigned long) osi->osi_cookie.lgc_lgl.lgl_ogen,
(unsigned long) osi->osi_cookie.lgc_index, rc);
rc = 0;
if (likely(rc == 0)) {
- cfs_spin_lock(&d->opd_syn_lock);
+ spin_lock(&d->opd_syn_lock);
d->opd_syn_changes++;
- cfs_spin_unlock(&d->opd_syn_lock);
+ spin_unlock(&d->opd_syn_lock);
}
RETURN(rc);
static void osp_sync_request_commit_cb(struct ptlrpc_request *req)
{
struct osp_device *d = req->rq_cb_data;
- struct obd_import *imp = req->rq_import;
CDEBUG(D_HA, "commit req %p, transno "LPU64"\n", req, req->rq_transno);
if (unlikely(req->rq_transno == 0))
return;
- if (unlikely(req->rq_transno > imp->imp_peer_committed_transno)) {
- /* this request was aborted by the shutdown procedure,
- * not committed by the peer. we should preserve llog
- * record */
- cfs_spin_lock(&d->opd_syn_lock);
- d->opd_syn_rpc_in_progress--;
- cfs_spin_unlock(&d->opd_syn_lock);
- cfs_waitq_signal(&d->opd_syn_waitq);
- return;
- }
+ /* do not do any opd_syn_rpc_* accounting here
+ * it's done in osp_sync_interpret sooner or later */
- /* XXX: what if request isn't committed for very long? */
LASSERT(d);
LASSERT(req->rq_svc_thread == (void *) OSP_JOB_MAGIC);
- LASSERT(cfs_list_empty(&req->rq_exp_list));
+ LASSERT(list_empty(&req->rq_exp_list));
ptlrpc_request_addref(req);
- cfs_spin_lock(&d->opd_syn_lock);
- cfs_list_add(&req->rq_exp_list, &d->opd_syn_committed_there);
- cfs_spin_unlock(&d->opd_syn_lock);
+ spin_lock(&d->opd_syn_lock);
+ list_add(&req->rq_exp_list, &d->opd_syn_committed_there);
+ spin_unlock(&d->opd_syn_lock);
/* XXX: some batching wouldn't hurt */
- cfs_waitq_signal(&d->opd_syn_waitq);
+ wake_up(&d->opd_syn_waitq);
}
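/*
 * This callback only runs if it was wired onto the job; the patch does
 * that in osp_prep_unlink_update_req() below (and osp_sync_new_job()
 * does the same for the OST case).  Sketch of the wiring, with the
 * three hooks side by side:
 */
req->rq_svc_thread = (void *)OSP_JOB_MAGIC;	/* job marker checked above */
req->rq_interpret_reply = osp_sync_interpret;	/* reply/error path */
req->rq_commit_cb = osp_sync_request_commit_cb;	/* commit path */
req->rq_cb_data = d;				/* back-pointer to the device */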
static int osp_sync_interpret(const struct lu_env *env,
{
struct osp_device *d = req->rq_cb_data;
- /* XXX: error handling here */
if (req->rq_svc_thread != (void *) OSP_JOB_MAGIC)
DEBUG_REQ(D_ERROR, req, "bad magic %p\n", req->rq_svc_thread);
LASSERT(req->rq_svc_thread == (void *) OSP_JOB_MAGIC);
LASSERT(d);
CDEBUG(D_HA, "reply req %p/%d, rc %d, transno %u\n", req,
- cfs_atomic_read(&req->rq_refcount),
+ atomic_read(&req->rq_refcount),
rc, (unsigned) req->rq_transno);
LASSERT(rc || req->rq_transno);
* but object doesn't exist anymore - cancel llog record
*/
LASSERT(req->rq_transno == 0);
- LASSERT(cfs_list_empty(&req->rq_exp_list));
+ LASSERT(list_empty(&req->rq_exp_list));
ptlrpc_request_addref(req);
- cfs_spin_lock(&d->opd_syn_lock);
- cfs_list_add(&req->rq_exp_list, &d->opd_syn_committed_there);
- cfs_spin_unlock(&d->opd_syn_lock);
+ spin_lock(&d->opd_syn_lock);
+ list_add(&req->rq_exp_list, &d->opd_syn_committed_there);
+ spin_unlock(&d->opd_syn_lock);
- cfs_waitq_signal(&d->opd_syn_waitq);
+ wake_up(&d->opd_syn_waitq);
} else if (rc) {
struct obd_import *imp = req->rq_import;
/*
"transno "LPU64", rc %d, gen: req %d, imp %d\n",
req->rq_transno, rc, req->rq_import_generation,
imp->imp_generation);
- LASSERT(d->opd_syn_rpc_in_progress > 0);
if (req->rq_transno == 0) {
/* this is the last time we see the request
* if transno is not zero, then commit cb
* will be called at some point */
- cfs_spin_lock(&d->opd_syn_lock);
+ LASSERT(d->opd_syn_rpc_in_progress > 0);
+ spin_lock(&d->opd_syn_lock);
d->opd_syn_rpc_in_progress--;
- cfs_spin_unlock(&d->opd_syn_lock);
+ spin_unlock(&d->opd_syn_lock);
}
- cfs_waitq_signal(&d->opd_syn_waitq);
- } else if (unlikely(d->opd_pre_status == -ENOSPC)) {
+ wake_up(&d->opd_syn_waitq);
+ } else if (d->opd_pre != NULL &&
+ unlikely(d->opd_pre_status == -ENOSPC)) {
/*
* if current status is -ENOSPC (lack of free space on OST)
* then we should poll OST immediately once object destroy
}
LASSERT(d->opd_syn_rpc_in_flight > 0);
- cfs_spin_lock(&d->opd_syn_lock);
+ spin_lock(&d->opd_syn_lock);
d->opd_syn_rpc_in_flight--;
- cfs_spin_unlock(&d->opd_syn_lock);
+ spin_unlock(&d->opd_syn_lock);
+ if (unlikely(atomic_read(&d->opd_syn_barrier) > 0))
+ wake_up(&d->opd_syn_barrier_waitq);
CDEBUG(D_OTHER, "%s: %d in flight, %d in progress\n",
d->opd_obd->obd_name, d->opd_syn_rpc_in_flight,
d->opd_syn_rpc_in_progress);
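/*
 * The opd_syn_barrier / opd_syn_barrier_waitq pair added here implies a
 * flush-style caller that is not part of this hunk: raise the barrier
 * so osp_sync_can_process_new() stops admitting work, then wait for
 * in-flight RPCs to drain.  A hypothetical sketch of that pairing (all
 * of it an assumption about the intended caller):
 */
static void example_sync_barrier(struct osp_device *d)
{
	atomic_inc(&d->opd_syn_barrier);	/* block new RPCs */
	wait_event(d->opd_syn_barrier_waitq,
		   d->opd_syn_rpc_in_flight == 0);	/* drained */
	atomic_dec(&d->opd_syn_barrier);
	wake_up(&d->opd_syn_waitq);		/* let the thread resume */
}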
body->oa.o_lcookie.lgc_lgl = llh->lgh_id;
body->oa.o_lcookie.lgc_subsys = LLOG_MDS_OST_ORIG_CTXT;
body->oa.o_lcookie.lgc_index = h->lrh_index;
- CFS_INIT_LIST_HEAD(&req->rq_exp_list);
+ INIT_LIST_HEAD(&req->rq_exp_list);
req->rq_svc_thread = (void *) OSP_JOB_MAGIC;
req->rq_interpret_reply = osp_sync_interpret;
ENTRY;
LASSERT(h->lrh_type == MDS_SETATTR64_REC);
+ /* lsr_valid can only be 0 or LA_UID/GID set */
+ if ((rec->lsr_valid & ~(LA_UID | LA_GID)) != 0) {
+ CERROR("%s: invalid setattr record, lsr_valid:"LPU64"\n",
+ d->opd_obd->obd_name, rec->lsr_valid);
+ RETURN(-EINVAL);
+ }
+
req = osp_sync_new_job(d, llh, h, OST_SETATTR, &RQF_OST_SETATTR);
if (IS_ERR(req))
RETURN(PTR_ERR(req));
body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
LASSERT(body);
- body->oa.o_id = rec->lsr_oid;
- body->oa.o_seq = rec->lsr_oseq;
+ body->oa.o_oi = rec->lsr_oi;
body->oa.o_uid = rec->lsr_uid;
body->oa.o_gid = rec->lsr_gid;
- body->oa.o_valid = OBD_MD_FLGROUP | OBD_MD_FLID |
- OBD_MD_FLUID | OBD_MD_FLGID;
+ body->oa.o_valid = OBD_MD_FLGROUP | OBD_MD_FLID;
+ /* an old setattr record (prior to 2.6.0) doesn't have 'valid' stored;
+ * we assume that both UID and GID are valid in that case. */
+ if (rec->lsr_valid == 0) {
+ body->oa.o_valid |= (OBD_MD_FLUID | OBD_MD_FLGID);
+ } else {
+ if (rec->lsr_valid & LA_UID)
+ body->oa.o_valid |= OBD_MD_FLUID;
+ if (rec->lsr_valid & LA_GID)
+ body->oa.o_valid |= OBD_MD_FLGID;
+ }
osp_sync_send_new_rpc(d, req);
RETURN(0);
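/*
 * The compatibility mapping above is easiest to see with concrete
 * cases.  A small host-side check of the same logic (the flag values
 * are illustrative stand-ins, not the real Lustre constants):
 */
#include <assert.h>
#include <stdint.h>

#define LA_UID		0x1	/* illustrative values only */
#define LA_GID		0x2
#define OBD_MD_FLUID	0x10
#define OBD_MD_FLGID	0x20

static uint64_t map_valid(uint64_t lsr_valid)
{
	uint64_t o_valid = 0;

	if (lsr_valid == 0)	/* pre-2.6.0 record: trust both fields */
		return OBD_MD_FLUID | OBD_MD_FLGID;
	if (lsr_valid & LA_UID)
		o_valid |= OBD_MD_FLUID;
	if (lsr_valid & LA_GID)
		o_valid |= OBD_MD_FLGID;
	return o_valid;
}

int main(void)
{
	assert(map_valid(0) == (OBD_MD_FLUID | OBD_MD_FLGID));
	assert(map_valid(LA_UID) == OBD_MD_FLUID);
	assert(map_valid(LA_UID | LA_GID) == (OBD_MD_FLUID | OBD_MD_FLGID));
	return 0;
}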
body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
LASSERT(body);
- body->oa.o_id = rec->lur_oid;
- body->oa.o_seq = rec->lur_oseq;
+ ostid_set_seq(&body->oa.o_oi, rec->lur_oseq);
+ ostid_set_id(&body->oa.o_oi, rec->lur_oid);
body->oa.o_misc = rec->lur_count;
body->oa.o_valid = OBD_MD_FLGROUP | OBD_MD_FLID;
if (rec->lur_count)
RETURN(0);
}
-static int osp_sync_new_unlink64_job(struct osp_device *d,
+static int osp_prep_unlink_update_req(const struct lu_env *env,
+ struct osp_device *osp,
+ struct llog_handle *llh,
+ struct llog_rec_hdr *h,
+ struct ptlrpc_request **reqp)
+{
+ struct llog_unlink64_rec *rec = (struct llog_unlink64_rec *)h;
+ struct dt_update_request *update = NULL;
+ struct ptlrpc_request *req;
+ const char *buf;
+ struct llog_cookie lcookie;
+ int size;
+ int rc;
+ ENTRY;
+
+ update = out_create_update_req(&osp->opd_dt_dev);
+ if (IS_ERR(update))
+ RETURN(PTR_ERR(update));
+
+ /* This can only happen when unlinking a slave directory, so
+ * decrease the refs for ".." and "." */
+ rc = out_insert_update(env, update, OUT_REF_DEL, &rec->lur_fid, 0,
+ NULL, NULL);
+ if (rc != 0)
+ GOTO(out, rc);
+
+ rc = out_insert_update(env, update, OUT_REF_DEL, &rec->lur_fid, 0,
+ NULL, NULL);
+ if (rc != 0)
+ GOTO(out, rc);
+
+ lcookie.lgc_lgl = llh->lgh_id;
+ lcookie.lgc_subsys = LLOG_MDS_OST_ORIG_CTXT;
+ lcookie.lgc_index = h->lrh_index;
+ size = sizeof(lcookie);
+ buf = (const char *)&lcookie;
+
+ rc = out_insert_update(env, update, OUT_DESTROY, &rec->lur_fid, 1,
+ &size, &buf);
+ if (rc != 0)
+ GOTO(out, rc);
+
+ rc = out_prep_update_req(env, osp->opd_obd->u.cli.cl_import,
+ update->dur_req, &req);
+ if (rc != 0)
+ GOTO(out, rc);
+
+ INIT_LIST_HEAD(&req->rq_exp_list);
+ req->rq_svc_thread = (void *)OSP_JOB_MAGIC;
+
+ req->rq_interpret_reply = osp_sync_interpret;
+ req->rq_commit_cb = osp_sync_request_commit_cb;
+ req->rq_cb_data = osp;
+
+ ptlrpc_request_set_replen(req);
+ *reqp = req;
+out:
+ if (update != NULL)
+ out_destroy_update_req(update);
+
+ RETURN(rc);
+}
+
+static int osp_sync_new_unlink64_job(const struct lu_env *env,
+ struct osp_device *d,
struct llog_handle *llh,
struct llog_rec_hdr *h)
{
struct llog_unlink64_rec *rec = (struct llog_unlink64_rec *)h;
- struct ptlrpc_request *req;
+ struct ptlrpc_request *req = NULL;
struct ost_body *body;
int rc;
ENTRY;
LASSERT(h->lrh_type == MDS_UNLINK64_REC);
- req = osp_sync_new_job(d, llh, h, OST_DESTROY, &RQF_OST_DESTROY);
- if (IS_ERR(req))
- RETURN(PTR_ERR(req));
-
- body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
- if (body == NULL)
- RETURN(-EFAULT);
- rc = fid_ostid_pack(&rec->lur_fid, &body->oa.o_oi);
- if (rc < 0)
- RETURN(rc);
- body->oa.o_misc = rec->lur_count;
- body->oa.o_valid = OBD_MD_FLGROUP | OBD_MD_FLID | OBD_MD_FLOBJCOUNT;
+ if (d->opd_connect_mdt) {
+ rc = osp_prep_unlink_update_req(env, d, llh, h, &req);
+ if (rc != 0)
+ RETURN(rc);
+ } else {
+ req = osp_sync_new_job(d, llh, h, OST_DESTROY,
+ &RQF_OST_DESTROY);
+ if (IS_ERR(req))
+ RETURN(PTR_ERR(req));
+ body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
+ if (body == NULL)
+ RETURN(-EFAULT);
+ rc = fid_to_ostid(&rec->lur_fid, &body->oa.o_oi);
+ if (rc < 0)
+ RETURN(rc);
+ body->oa.o_misc = rec->lur_count;
+ body->oa.o_valid = OBD_MD_FLGROUP | OBD_MD_FLID |
+ OBD_MD_FLOBJCOUNT;
+ }
osp_sync_send_new_rpc(d, req);
RETURN(0);
}
/* notice we increment counters before sending the RPC, to stay
* consistent with the RPC interpret callback, which may run very quickly */
- cfs_spin_lock(&d->opd_syn_lock);
+ spin_lock(&d->opd_syn_lock);
d->opd_syn_rpc_in_flight++;
d->opd_syn_rpc_in_progress++;
- cfs_spin_unlock(&d->opd_syn_lock);
+ spin_unlock(&d->opd_syn_lock);
switch (rec->lrh_type) {
/* case MDS_UNLINK_REC is kept for compatibility */
rc = osp_sync_new_unlink_job(d, llh, rec);
break;
case MDS_UNLINK64_REC:
- rc = osp_sync_new_unlink64_job(d, llh, rec);
+ rc = osp_sync_new_unlink64_job(env, d, llh, rec);
break;
case MDS_SETATTR64_REC:
rc = osp_sync_new_setattr_job(d, llh, rec);
break;
default:
- CERROR("unknown record type: %x\n", rec->lrh_type);
- rc = -EINVAL;
- break;
+ CERROR("%s: unknown record type: %x\n", d->opd_obd->obd_name,
+ rec->lrh_type);
+ /* we should continue processing */
+ return 0;
}
if (likely(rc == 0)) {
- cfs_spin_lock(&d->opd_syn_lock);
+ spin_lock(&d->opd_syn_lock);
if (d->opd_syn_prev_done) {
LASSERT(d->opd_syn_changes > 0);
LASSERT(rec->lrh_id <= d->opd_syn_last_committed_id);
* NOTE: it's possible to meet the same id if the
* OST stores several stripes of the same file
*/
- if (rec->lrh_id > d->opd_syn_last_processed_id)
+ if (rec->lrh_id > d->opd_syn_last_processed_id) {
d->opd_syn_last_processed_id = rec->lrh_id;
+ wake_up(&d->opd_syn_barrier_waitq);
+ }
d->opd_syn_changes--;
}
CDEBUG(D_OTHER, "%s: %d in flight, %d in progress\n",
d->opd_obd->obd_name, d->opd_syn_rpc_in_flight,
d->opd_syn_rpc_in_progress);
- cfs_spin_unlock(&d->opd_syn_lock);
+ spin_unlock(&d->opd_syn_lock);
} else {
- cfs_spin_lock(&d->opd_syn_lock);
+ spin_lock(&d->opd_syn_lock);
d->opd_syn_rpc_in_flight--;
d->opd_syn_rpc_in_progress--;
- cfs_spin_unlock(&d->opd_syn_lock);
+ spin_unlock(&d->opd_syn_lock);
}
CDEBUG(D_HA, "found record %x, %d, idx %u, id %u: %d\n",
struct ptlrpc_request *req, *tmp;
struct llog_ctxt *ctxt;
struct llog_handle *llh;
- cfs_list_t list;
+ struct list_head list;
int rc, done = 0;
ENTRY;
- if (cfs_list_empty(&d->opd_syn_committed_there))
+ if (list_empty(&d->opd_syn_committed_there))
return;
/*
* notice: we do this upon commit as well because some backends
* (like DMU) do not release space right away.
*/
- if (unlikely(d->opd_pre_status == -ENOSPC))
+ if (d->opd_pre != NULL && unlikely(d->opd_pre_status == -ENOSPC))
osp_statfs_need_now(d);
/*
llh = ctxt->loc_handle;
LASSERT(llh);
- CFS_INIT_LIST_HEAD(&list);
- cfs_spin_lock(&d->opd_syn_lock);
- cfs_list_splice(&d->opd_syn_committed_there, &list);
- CFS_INIT_LIST_HEAD(&d->opd_syn_committed_there);
- cfs_spin_unlock(&d->opd_syn_lock);
+ INIT_LIST_HEAD(&list);
+ spin_lock(&d->opd_syn_lock);
+ list_splice(&d->opd_syn_committed_there, &list);
+ INIT_LIST_HEAD(&d->opd_syn_committed_there);
+ spin_unlock(&d->opd_syn_lock);
- cfs_list_for_each_entry_safe(req, tmp, &list, rq_exp_list) {
- LASSERT(req->rq_svc_thread == (void *) OSP_JOB_MAGIC);
- cfs_list_del_init(&req->rq_exp_list);
-
- body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
- LASSERT(body);
+ list_for_each_entry_safe(req, tmp, &list, rq_exp_list) {
+ struct llog_cookie *lcookie = NULL;
+ LASSERT(req->rq_svc_thread == (void *) OSP_JOB_MAGIC);
+ list_del_init(&req->rq_exp_list);
+
+ if (d->opd_connect_mdt) {
+ struct object_update_request *ureq;
+ struct object_update *update;
+ ureq = req_capsule_client_get(&req->rq_pill,
+ &RMF_OUT_UPDATE);
+ LASSERT(ureq != NULL &&
+ ureq->ourq_magic == UPDATE_REQUEST_MAGIC);
+
+ /* the 1st and 2nd updates decref "." and "..";
+ * the 3rd is the destroy, where the log cookie is stored.
+ * See osp_prep_unlink_update_req */
+ update = object_update_request_get(ureq, 2, NULL);
+ LASSERT(update != NULL);
+ lcookie = object_update_param_get(update, 0, NULL);
+ LASSERT(lcookie != NULL);
+ } else {
+ body = req_capsule_client_get(&req->rq_pill,
+ &RMF_OST_BODY);
+ LASSERT(body);
+ lcookie = &body->oa.o_lcookie;
+ }
/* the import may be closing; all commit cb's have been
* called by then, so we can check committedness directly */
if (req->rq_transno <= imp->imp_peer_committed_transno) {
- rc = llog_cat_cancel_records(env, llh, 1,
- &body->oa.o_lcookie);
+ rc = llog_cat_cancel_records(env, llh, 1, lcookie);
if (rc)
CERROR("%s: can't cancel record: %d\n",
obd->obd_name, rc);
llog_ctxt_put(ctxt);
LASSERT(d->opd_syn_rpc_in_progress >= done);
- cfs_spin_lock(&d->opd_syn_lock);
+ spin_lock(&d->opd_syn_lock);
d->opd_syn_rpc_in_progress -= done;
- cfs_spin_unlock(&d->opd_syn_lock);
+ spin_unlock(&d->opd_syn_lock);
CDEBUG(D_OTHER, "%s: %d in flight, %d in progress\n",
d->opd_obd->obd_name, d->opd_syn_rpc_in_flight,
d->opd_syn_rpc_in_progress);
osp_sync_check_for_work(d);
+ /* wake up the thread if requested to stop:
+ * it might be waiting for in-progress requests to complete */
+ if (unlikely(osp_sync_running(d) == 0))
+ wake_up(&d->opd_syn_waitq);
+
EXIT;
}
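/*
 * The drain idiom used above (move the shared list aside under the
 * lock, then walk it unlocked) in its generic form; note that
 * list_splice_init() folds the list_splice() + INIT_LIST_HEAD() pair
 * from this function into one call.  A minimal standalone sketch:
 */
#include <linux/list.h>
#include <linux/spinlock.h>

static void drain_list(spinlock_t *lock, struct list_head *shared)
{
	struct list_head local;

	INIT_LIST_HEAD(&local);
	spin_lock(lock);
	list_splice_init(shared, &local);	/* leaves 'shared' empty */
	spin_unlock(lock);

	/* walk 'local' without the lock; entries may be freed while
	 * iterating, hence list_for_each_entry_safe() in the real code */
}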
l_wait_event(d->opd_syn_waitq,
!osp_sync_running(d) ||
osp_sync_can_process_new(d, rec) ||
- !cfs_list_empty(&d->opd_syn_committed_there),
+ !list_empty(&d->opd_syn_committed_there),
&lwi);
} while (1);
}
struct obd_device *obd = d->opd_obd;
struct llog_handle *llh;
struct lu_env env;
- int rc;
- char pname[16];
+ int rc, count;
ENTRY;
RETURN(rc);
}
- sprintf(pname, "osp-syn-%u\n", d->opd_index);
- cfs_daemonize(pname);
-
- cfs_spin_lock(&d->opd_syn_lock);
+ spin_lock(&d->opd_syn_lock);
thread->t_flags = SVC_RUNNING;
- cfs_spin_unlock(&d->opd_syn_lock);
- cfs_waitq_signal(&thread->t_ctl_waitq);
+ spin_unlock(&d->opd_syn_lock);
+ wake_up(&thread->t_ctl_waitq);
ctxt = llog_get_context(obd, LLOG_MDS_OST_ORIG_CTXT);
if (ctxt == NULL) {
d->opd_syn_rpc_in_flight);
/* wait till all the requests are completed */
+ count = 0;
while (d->opd_syn_rpc_in_progress > 0) {
osp_sync_process_committed(&env, d);
- l_wait_event(d->opd_syn_waitq,
- d->opd_syn_rpc_in_progress == 0,
- &lwi);
+
+ lwi = LWI_TIMEOUT(cfs_time_seconds(5), NULL, NULL);
+ rc = l_wait_event(d->opd_syn_waitq,
+ d->opd_syn_rpc_in_progress == 0,
+ &lwi);
+ if (rc == -ETIMEDOUT)
+ count++;
+ LASSERTF(count < 10, "%s: %d %d %sempty\n",
+ d->opd_obd->obd_name, d->opd_syn_rpc_in_progress,
+ d->opd_syn_rpc_in_flight,
+ list_empty(&d->opd_syn_committed_there) ? "" : "!");
}
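/*
 * The same bounded-drain loop expressed with stock kernel primitives
 * (a sketch only; the patch uses Lustre's l_wait_event()/LWI_TIMEOUT
 * wrappers and LASSERTF instead):
 */
int rounds = 0;

while (d->opd_syn_rpc_in_progress > 0) {
	osp_sync_process_committed(&env, d);
	/* wait_event_timeout() returns 0 when the 5s timeout expires
	 * with the condition still false */
	if (wait_event_timeout(d->opd_syn_waitq,
			       d->opd_syn_rpc_in_progress == 0,
			       5 * HZ) == 0)
		BUG_ON(++rounds >= 10);	/* mirrors the LASSERTF above */
}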
llog_cat_close(&env, llh);
if (rc)
CERROR("can't cleanup llog: %d\n", rc);
out:
- thread->t_flags = SVC_STOPPED;
-
- cfs_waitq_signal(&thread->t_ctl_waitq);
LASSERTF(d->opd_syn_rpc_in_progress == 0,
"%s: %d %d %sempty\n",
d->opd_obd->obd_name, d->opd_syn_rpc_in_progress,
d->opd_syn_rpc_in_flight,
- cfs_list_empty(&d->opd_syn_committed_there) ? "" : "!");
+ list_empty(&d->opd_syn_committed_there) ? "" : "!");
+
+ thread->t_flags = SVC_STOPPED;
+
+ wake_up(&thread->t_ctl_waitq);
lu_env_fini(&env);
RETURN(0);
}
-static struct llog_operations osp_mds_ost_orig_logops;
-
static int osp_sync_llog_init(const struct lu_env *env, struct osp_device *d)
{
- struct osp_thread_info *osi = osp_env_info(env);
- struct llog_handle *lgh;
- struct obd_device *obd = d->opd_obd;
- struct llog_ctxt *ctxt;
- int rc;
+ struct osp_thread_info *osi = osp_env_info(env);
+ struct lu_fid *fid = &osi->osi_fid;
+ struct llog_handle *lgh = NULL;
+ struct obd_device *obd = d->opd_obd;
+ struct llog_ctxt *ctxt;
+ int rc;
ENTRY;
OBD_SET_CTXT_MAGIC(&obd->obd_lvfs_ctxt);
obd->obd_lvfs_ctxt.dt = d->opd_storage;
+ if (d->opd_connect_mdt)
+ lu_local_obj_fid(fid, SLAVE_LLOG_CATALOGS_OID);
+ else
+ lu_local_obj_fid(fid, LLOG_CATALOGS_OID);
+
rc = llog_osd_get_cat_list(env, d->opd_storage, d->opd_index, 1,
- &osi->osi_cid);
+ &osi->osi_cid, fid);
if (rc) {
CERROR("%s: can't get id from catalogs: rc = %d\n",
obd->obd_name, rc);
RETURN(rc);
}
- CDEBUG(D_INFO, "%s: Init llog for %d - catid "LPX64"/"LPX64":%x\n",
- obd->obd_name, d->opd_index, osi->osi_cid.lci_logid.lgl_oid,
- osi->osi_cid.lci_logid.lgl_oseq,
+ CDEBUG(D_INFO, "%s: Init llog for %d - catid "DOSTID":%x\n",
+ obd->obd_name, d->opd_index,
+ POSTID(&osi->osi_cid.lci_logid.lgl_oi),
osi->osi_cid.lci_logid.lgl_ogen);
- osp_mds_ost_orig_logops = llog_osd_ops;
rc = llog_setup(env, obd, &obd->obd_olg, LLOG_MDS_OST_ORIG_CTXT, obd,
&osp_mds_ost_orig_logops);
if (rc)
ctxt = llog_get_context(obd, LLOG_MDS_OST_ORIG_CTXT);
LASSERT(ctxt);
- if (likely(osi->osi_cid.lci_logid.lgl_oid != 0)) {
+ if (likely(logid_id(&osi->osi_cid.lci_logid) != 0)) {
rc = llog_open(env, ctxt, &lgh, &osi->osi_cid.lci_logid, NULL,
LLOG_OPEN_EXISTS);
/* re-create llog if it is missing */
if (rc == -ENOENT)
- osi->osi_cid.lci_logid.lgl_oid = 0;
+ logid_set_id(&osi->osi_cid.lci_logid, 0);
else if (rc < 0)
GOTO(out_cleanup, rc);
}
- if (unlikely(osi->osi_cid.lci_logid.lgl_oid == 0)) {
+ if (unlikely(logid_id(&osi->osi_cid.lci_logid) == 0)) {
rc = llog_open_create(env, ctxt, &lgh, NULL, NULL);
if (rc < 0)
GOTO(out_cleanup, rc);
osi->osi_cid.lci_logid = lgh->lgh_id;
}
+ LASSERT(lgh != NULL);
ctxt->loc_handle = lgh;
- lgh->lgh_logops->lop_add = llog_cat_add_rec;
- lgh->lgh_logops->lop_declare_add = llog_cat_declare_add_rec;
rc = llog_cat_init_and_process(env, lgh);
if (rc)
GOTO(out_close, rc);
rc = llog_osd_put_cat_list(env, d->opd_storage, d->opd_index, 1,
- &osi->osi_cid);
+ &osi->osi_cid, fid);
if (rc)
GOTO(out_close, rc);
memcpy(&osi->osi_gen.lgr_gen, &d->opd_syn_generation,
sizeof(osi->osi_gen.lgr_gen));
- rc = llog_cat_add(env, lgh, &osi->osi_gen.lgr_hdr, &osi->osi_cookie,
- NULL);
+ rc = llog_cat_add(env, lgh, &osi->osi_gen.lgr_hdr, &osi->osi_cookie);
if (rc < 0)
GOTO(out_close, rc);
llog_ctxt_put(ctxt);
struct llog_ctxt *ctxt;
ctxt = llog_get_context(d->opd_obd, LLOG_MDS_OST_ORIG_CTXT);
- llog_cat_close(env, ctxt->loc_handle);
+ if (ctxt != NULL)
+ llog_cat_close(env, ctxt->loc_handle);
llog_cleanup(env, ctxt);
}
int osp_sync_init(const struct lu_env *env, struct osp_device *d)
{
struct l_wait_info lwi = { 0 };
+ struct task_struct *task;
int rc;
ENTRY;
*/
d->opd_syn_max_rpc_in_flight = OSP_MAX_IN_FLIGHT;
d->opd_syn_max_rpc_in_progress = OSP_MAX_IN_PROGRESS;
- cfs_spin_lock_init(&d->opd_syn_lock);
- cfs_waitq_init(&d->opd_syn_waitq);
- cfs_waitq_init(&d->opd_syn_thread.t_ctl_waitq);
- CFS_INIT_LIST_HEAD(&d->opd_syn_committed_there);
-
- rc = cfs_create_thread(osp_sync_thread, d, 0);
- if (rc < 0) {
- CERROR("%s: can't start sync thread: rc = %d\n",
+ spin_lock_init(&d->opd_syn_lock);
+ init_waitqueue_head(&d->opd_syn_waitq);
+ init_waitqueue_head(&d->opd_syn_barrier_waitq);
+ init_waitqueue_head(&d->opd_syn_thread.t_ctl_waitq);
+ INIT_LIST_HEAD(&d->opd_syn_committed_there);
+
+ task = kthread_run(osp_sync_thread, d, "osp-syn-%u-%u",
+ d->opd_index, d->opd_group);
+ if (IS_ERR(task)) {
+ rc = PTR_ERR(task);
+ CERROR("%s: cannot start sync thread: rc = %d\n",
d->opd_obd->obd_name, rc);
GOTO(err_llog, rc);
}
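/*
 * The move from cfs_create_thread()+cfs_daemonize() to kthread_run()
 * also changes the error convention: failure now comes back as an
 * ERR_PTR in the task pointer rather than a negative pid, and the
 * thread name is formatted by kthread_run() itself (which is why the
 * sprintf/cfs_daemonize pair was dropped).  The new idiom in
 * standalone form:
 */
#include <linux/kthread.h>
#include <linux/err.h>

static int worker_fn(void *data)
{
	/* ... main loop; exits when asked to stop ... */
	return 0;
}

static int start_worker(void *data, unsigned int idx, unsigned int grp)
{
	struct task_struct *task;

	task = kthread_run(worker_fn, data, "osp-syn-%u-%u", idx, grp);
	if (IS_ERR(task))
		return PTR_ERR(task);	/* no negative-pid check */
	return 0;
}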
ENTRY;
thread->t_flags = SVC_STOPPING;
- cfs_waitq_signal(&d->opd_syn_waitq);
- cfs_wait_event(thread->t_ctl_waitq, thread->t_flags & SVC_STOPPED);
+ wake_up(&d->opd_syn_waitq);
+ wait_event(thread->t_ctl_waitq, thread->t_flags & SVC_STOPPED);
/*
* unregister transaction callbacks only when sync thread
RETURN(0);
}
-static CFS_DEFINE_MUTEX(osp_id_tracker_sem);
-static CFS_LIST_HEAD(osp_id_tracker_list);
+static DEFINE_MUTEX(osp_id_tracker_sem);
+static struct list_head osp_id_tracker_list =
+ LIST_HEAD_INIT(osp_id_tracker_list);
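/*
 * Equivalently, the static head could be declared with the LIST_HEAD()
 * macro, which expands to the same definition as the two lines above:
 */
static LIST_HEAD(osp_id_tracker_list);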
static void osp_sync_tracker_commit_cb(struct thandle *th, void *cookie)
{
if (txn == NULL || txn->oti_current_id < tr->otr_committed_id)
return;
- cfs_spin_lock(&tr->otr_lock);
+ spin_lock(&tr->otr_lock);
if (likely(txn->oti_current_id > tr->otr_committed_id)) {
CDEBUG(D_OTHER, "committed: %u -> %u\n",
tr->otr_committed_id, txn->oti_current_id);
tr->otr_committed_id = txn->oti_current_id;
- cfs_list_for_each_entry(d, &tr->otr_wakeup_list,
- opd_syn_ontrack) {
+ list_for_each_entry(d, &tr->otr_wakeup_list,
+ opd_syn_ontrack) {
d->opd_syn_last_committed_id = tr->otr_committed_id;
- cfs_waitq_signal(&d->opd_syn_waitq);
+ wake_up(&d->opd_syn_waitq);
}
}
- cfs_spin_unlock(&tr->otr_lock);
+ spin_unlock(&tr->otr_lock);
}
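/*
 * The callback above publishes otr_committed_id into each registered
 * device and signals opd_syn_waitq.  An illustrative waiter for
 * "record id has committed" would pair with it like this (a sketch,
 * not code from this patch; the real thread compares lrh_id against
 * opd_syn_last_committed_id the same way):
 */
static void example_wait_id_committed(struct osp_device *d, __u32 id)
{
	wait_event(d->opd_syn_waitq,
		   d->opd_syn_last_committed_id >= id);
}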
static int osp_sync_id_traction_init(struct osp_device *d)
LASSERT(d);
LASSERT(d->opd_storage);
LASSERT(d->opd_syn_tracker == NULL);
- CFS_INIT_LIST_HEAD(&d->opd_syn_ontrack);
+ INIT_LIST_HEAD(&d->opd_syn_ontrack);
- cfs_mutex_lock(&osp_id_tracker_sem);
- cfs_list_for_each_entry(tr, &osp_id_tracker_list, otr_list) {
+ mutex_lock(&osp_id_tracker_sem);
+ list_for_each_entry(tr, &osp_id_tracker_list, otr_list) {
if (tr->otr_dev == d->opd_storage) {
- LASSERT(cfs_atomic_read(&tr->otr_refcount));
- cfs_atomic_inc(&tr->otr_refcount);
+ LASSERT(atomic_read(&tr->otr_refcount));
+ atomic_inc(&tr->otr_refcount);
d->opd_syn_tracker = tr;
found = tr;
break;
OBD_ALLOC_PTR(tr);
if (tr) {
d->opd_syn_tracker = tr;
- cfs_spin_lock_init(&tr->otr_lock);
+ spin_lock_init(&tr->otr_lock);
tr->otr_dev = d->opd_storage;
tr->otr_next_id = 1;
tr->otr_committed_id = 0;
- cfs_atomic_set(&tr->otr_refcount, 1);
- CFS_INIT_LIST_HEAD(&tr->otr_wakeup_list);
- cfs_list_add(&tr->otr_list, &osp_id_tracker_list);
+ atomic_set(&tr->otr_refcount, 1);
+ INIT_LIST_HEAD(&tr->otr_wakeup_list);
+ list_add(&tr->otr_list, &osp_id_tracker_list);
tr->otr_tx_cb.dtc_txn_commit =
osp_sync_tracker_commit_cb;
tr->otr_tx_cb.dtc_cookie = tr;
rc = 0;
}
}
- cfs_mutex_unlock(&osp_id_tracker_sem);
+ mutex_unlock(&osp_id_tracker_sem);
return rc;
}
osp_sync_remove_from_tracker(d);
- cfs_mutex_lock(&osp_id_tracker_sem);
- if (cfs_atomic_dec_and_test(&tr->otr_refcount)) {
+ mutex_lock(&osp_id_tracker_sem);
+ if (atomic_dec_and_test(&tr->otr_refcount)) {
dt_txn_callback_del(d->opd_storage, &tr->otr_tx_cb);
- LASSERT(cfs_list_empty(&tr->otr_wakeup_list));
- cfs_list_del(&tr->otr_list);
+ LASSERT(list_empty(&tr->otr_wakeup_list));
+ list_del(&tr->otr_list);
OBD_FREE_PTR(tr);
d->opd_syn_tracker = NULL;
}
- cfs_mutex_unlock(&osp_id_tracker_sem);
+ mutex_unlock(&osp_id_tracker_sem);
EXIT;
}
LASSERT(tr);
/* XXX: we can improve this introducing per-cpu preallocated ids? */
- cfs_spin_lock(&tr->otr_lock);
+ spin_lock(&tr->otr_lock);
if (unlikely(tr->otr_next_id <= d->opd_syn_last_used_id)) {
- cfs_spin_unlock(&tr->otr_lock);
+ spin_unlock(&tr->otr_lock);
CERROR("%s: next %u, last synced %lu\n",
d->opd_obd->obd_name, tr->otr_next_id,
d->opd_syn_last_used_id);
id = tr->otr_next_id++;
if (id > d->opd_syn_last_used_id)
d->opd_syn_last_used_id = id;
- if (cfs_list_empty(&d->opd_syn_ontrack))
- cfs_list_add(&d->opd_syn_ontrack, &tr->otr_wakeup_list);
- cfs_spin_unlock(&tr->otr_lock);
+ if (list_empty(&d->opd_syn_ontrack))
+ list_add(&d->opd_syn_ontrack, &tr->otr_wakeup_list);
+ spin_unlock(&tr->otr_lock);
CDEBUG(D_OTHER, "new id %u\n", (unsigned) id);
return id;
tr = d->opd_syn_tracker;
LASSERT(tr);
- if (cfs_list_empty(&d->opd_syn_ontrack))
+ if (list_empty(&d->opd_syn_ontrack))
return;
- cfs_spin_lock(&tr->otr_lock);
- cfs_list_del_init(&d->opd_syn_ontrack);
- cfs_spin_unlock(&tr->otr_lock);
+ spin_lock(&tr->otr_lock);
+ list_del_init(&d->opd_syn_ontrack);
+ spin_unlock(&tr->otr_lock);
}