* Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*
- * Copyright (c) 2011, Whamcloud, Inc.
+ * Copyright (c) 2011, 2013, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
/********************** config llog list **********************/
static CFS_LIST_HEAD(config_llog_list);
-static cfs_spinlock_t config_list_lock = CFS_SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(config_list_lock);
/* Take a reference to a config log */
static int config_log_get(struct config_llog_data *cld)
/* spinlock to make sure no item with 0 refcount in the list */
if (cfs_atomic_dec_and_lock(&cld->cld_refcount, &config_list_lock)) {
cfs_list_del(&cld->cld_list_chain);
- cfs_spin_unlock(&config_list_lock);
+ spin_unlock(&config_list_lock);
CDEBUG(D_MGC, "dropping config log %s\n", cld->cld_logname);
LASSERT(logname != NULL);
instance = cfg ? cfg->cfg_instance : NULL;
- cfs_spin_lock(&config_list_lock);
+ spin_lock(&config_list_lock);
cfs_list_for_each_entry(cld, &config_llog_list, cld_list_chain) {
/* check if instance equals */
if (instance != cld->cld_cfg.cfg_instance)
cfs_atomic_inc(&found->cld_refcount);
LASSERT(found->cld_stopping == 0 || cld_is_sptlrpc(found) == 0);
}
- cfs_spin_unlock(&config_list_lock);
- RETURN(found);
+ spin_unlock(&config_list_lock);
+ RETURN(found);
}
static
strcpy(cld->cld_logname, logname);
if (cfg)
cld->cld_cfg = *cfg;
- cfs_mutex_init(&cld->cld_lock);
+ else
+ cld->cld_cfg.cfg_callback = class_config_llog_handler;
+ mutex_init(&cld->cld_lock);
cld->cld_cfg.cfg_last_idx = 0;
cld->cld_cfg.cfg_flags = 0;
cld->cld_cfg.cfg_sb = sb;
rc = mgc_logname2resid(logname, &cld->cld_resid, type);
- cfs_spin_lock(&config_list_lock);
- cfs_list_add(&cld->cld_list_chain, &config_llog_list);
- cfs_spin_unlock(&config_list_lock);
+ spin_lock(&config_list_lock);
+ cfs_list_add(&cld->cld_list_chain, &config_llog_list);
+ spin_unlock(&config_list_lock);
if (rc) {
config_log_put(cld);
if (cld_is_sptlrpc(cld)) {
rc = mgc_process_log(obd, cld);
- if (rc)
+ if (rc && rc != -ENOENT)
CERROR("failed processing sptlrpc log: %d\n", rc);
}
struct config_llog_data *cld;
char logname[32];
- if ((lsi->lsi_flags & LSI_SERVER) && !IS_MDT(lsi->lsi_ldd))
+ if (IS_OST(lsi))
return NULL;
+ /* for osp-on-ost, see lustre_start_osp() */
+ if (IS_MDT(lsi) && lcfg.cfg_instance)
+ return NULL;
+
/* we have to use different llog for clients and mdts for cmd
* where only clients are notified if one of cmd server restarts */
LASSERT(strlen(fsname) < sizeof(logname) / 2);
strcpy(logname, fsname);
- if (lsi->lsi_flags & LSI_SERVER) { /* mdt */
+ if (IS_SERVER(lsi)) { /* mdt */
LASSERT(lcfg.cfg_instance == NULL);
lcfg.cfg_instance = sb;
strcat(logname, "-mdtir");
RETURN(0);
}
-CFS_DEFINE_MUTEX(llog_process_lock);
+DEFINE_MUTEX(llog_process_lock);
/** Stop watching for updates on this log.
*/
if (cld == NULL)
RETURN(-ENOENT);
- cfs_mutex_lock(&cld->cld_lock);
+ mutex_lock(&cld->cld_lock);
/*
* if cld_stopping is set, it means we didn't start the log thus
* not owning the start ref. this can happen after previous umount:
* calling start_log.
*/
if (unlikely(cld->cld_stopping)) {
- cfs_mutex_unlock(&cld->cld_lock);
+ mutex_unlock(&cld->cld_lock);
/* drop the ref from the find */
config_log_put(cld);
RETURN(rc);
cld_recover = cld->cld_recover;
cld->cld_recover = NULL;
- cfs_mutex_unlock(&cld->cld_lock);
+ mutex_unlock(&cld->cld_lock);
- if (cld_recover) {
- cfs_mutex_lock(&cld_recover->cld_lock);
- cld_recover->cld_stopping = 1;
- cfs_mutex_unlock(&cld_recover->cld_lock);
- config_log_put(cld_recover);
- }
+ if (cld_recover) {
+ mutex_lock(&cld_recover->cld_lock);
+ cld_recover->cld_stopping = 1;
+ mutex_unlock(&cld_recover->cld_lock);
+ config_log_put(cld_recover);
+ }
- cfs_spin_lock(&config_list_lock);
- cld_sptlrpc = cld->cld_sptlrpc;
- cld->cld_sptlrpc = NULL;
- cfs_spin_unlock(&config_list_lock);
+ spin_lock(&config_list_lock);
+ cld_sptlrpc = cld->cld_sptlrpc;
+ cld->cld_sptlrpc = NULL;
+ spin_unlock(&config_list_lock);
if (cld_sptlrpc)
config_log_put(cld_sptlrpc);
ENTRY;
rc = snprintf(page, count, "imperative_recovery: %s\n",
- OCD_HAS_FLAG(ocd, IMP_RECOV) ? "ON" : "OFF");
+ OCD_HAS_FLAG(ocd, IMP_RECOV) ? "ENABLED" : "DISABLED");
rc += snprintf(page + rc, count - rc, "client_state:\n");
- cfs_spin_lock(&config_list_lock);
- cfs_list_for_each_entry(cld, &config_llog_list, cld_list_chain) {
- if (cld->cld_recover == NULL)
- continue;
- rc += snprintf(page + rc, count - rc,
- " - { client: %s, nidtbl_version: %u }\n",
- cld->cld_logname,
- cld->cld_recover->cld_cfg.cfg_last_idx);
- }
- cfs_spin_unlock(&config_list_lock);
-
- RETURN(rc);
+ spin_lock(&config_list_lock);
+ cfs_list_for_each_entry(cld, &config_llog_list, cld_list_chain) {
+ if (cld->cld_recover == NULL)
+ continue;
+ rc += snprintf(page + rc, count - rc,
+ " - { client: %s, nidtbl_version: %u }\n",
+ cld->cld_logname,
+ cld->cld_recover->cld_cfg.cfg_last_idx);
+ }
+ spin_unlock(&config_list_lock);
+
+ RETURN(rc);
}
/* reenqueue any lost locks */
#define RQ_STOP 0x8
static int rq_state = 0;
static cfs_waitq_t rq_waitq;
-static CFS_DECLARE_COMPLETION(rq_exit);
+static DECLARE_COMPLETION(rq_exit);
static void do_requeue(struct config_llog_data *cld)
{
/* Do not run mgc_process_log on a disconnected export or an
export which is being disconnected. Take the client
semaphore to make the check non-racy. */
- cfs_down_read(&cld->cld_mgcexp->exp_obd->u.cli.cl_sem);
+ down_read(&cld->cld_mgcexp->exp_obd->u.cli.cl_sem);
if (cld->cld_mgcexp->exp_obd->u.cli.cl_conn_count != 0) {
CDEBUG(D_MGC, "updating log %s\n", cld->cld_logname);
mgc_process_log(cld->cld_mgcexp->exp_obd, cld);
CDEBUG(D_MGC, "disconnecting, won't update log %s\n",
cld->cld_logname);
}
- cfs_up_read(&cld->cld_mgcexp->exp_obd->u.cli.cl_sem);
+ up_read(&cld->cld_mgcexp->exp_obd->u.cli.cl_sem);
EXIT;
}
static int mgc_requeue_thread(void *data)
{
- char name[] = "ll_cfg_requeue";
int rc = 0;
ENTRY;
- cfs_daemonize(name);
-
CDEBUG(D_MGC, "Starting requeue thread\n");
/* Keep trying failed locks periodically */
- cfs_spin_lock(&config_list_lock);
- rq_state |= RQ_RUNNING;
- while (1) {
- struct l_wait_info lwi;
- struct config_llog_data *cld, *cld_prev;
- int rand = cfs_rand() & MGC_TIMEOUT_RAND_CENTISEC;
- int stopped = !!(rq_state & RQ_STOP);
- int to;
-
- /* Any new or requeued lostlocks will change the state */
- rq_state &= ~(RQ_NOW | RQ_LATER);
- cfs_spin_unlock(&config_list_lock);
+ spin_lock(&config_list_lock);
+ rq_state |= RQ_RUNNING;
+ while (1) {
+ struct l_wait_info lwi;
+ struct config_llog_data *cld, *cld_prev;
+ int rand = cfs_rand() & MGC_TIMEOUT_RAND_CENTISEC;
+ int stopped = !!(rq_state & RQ_STOP);
+ int to;
+
+ /* Any new or requeued lostlocks will change the state */
+ rq_state &= ~(RQ_NOW | RQ_LATER);
+ spin_unlock(&config_list_lock);
/* Always wait a few seconds to allow the server who
caused the lock revocation to finish its setup, plus some
*/
cld_prev = NULL;
- cfs_spin_lock(&config_list_lock);
- cfs_list_for_each_entry(cld, &config_llog_list,
- cld_list_chain) {
- if (!cld->cld_lostlock)
- continue;
+ spin_lock(&config_list_lock);
+ cfs_list_for_each_entry(cld, &config_llog_list,
+ cld_list_chain) {
+ if (!cld->cld_lostlock)
+ continue;
- cfs_spin_unlock(&config_list_lock);
+ spin_unlock(&config_list_lock);
LASSERT(cfs_atomic_read(&cld->cld_refcount) > 0);
if (likely(!stopped))
do_requeue(cld);
- cfs_spin_lock(&config_list_lock);
- }
- cfs_spin_unlock(&config_list_lock);
- if (cld_prev)
- config_log_put(cld_prev);
-
- /* break after scanning the list so that we can drop
- * refcount to losing lock clds */
- if (unlikely(stopped)) {
- cfs_spin_lock(&config_list_lock);
- break;
- }
-
- /* Wait a bit to see if anyone else needs a requeue */
- lwi = (struct l_wait_info) { 0 };
- l_wait_event(rq_waitq, rq_state & (RQ_NOW | RQ_STOP),
- &lwi);
- cfs_spin_lock(&config_list_lock);
- }
- /* spinlock and while guarantee RQ_NOW and RQ_LATER are not set */
- rq_state &= ~RQ_RUNNING;
- cfs_spin_unlock(&config_list_lock);
-
- cfs_complete(&rq_exit);
-
- CDEBUG(D_MGC, "Ending requeue thread\n");
- RETURN(rc);
+ spin_lock(&config_list_lock);
+ }
+ spin_unlock(&config_list_lock);
+ if (cld_prev)
+ config_log_put(cld_prev);
+
+ /* break after scanning the list so that we can drop
+ * refcount to losing lock clds */
+ if (unlikely(stopped)) {
+ spin_lock(&config_list_lock);
+ break;
+ }
+
+ /* Wait a bit to see if anyone else needs a requeue */
+ lwi = (struct l_wait_info) { 0 };
+ l_wait_event(rq_waitq, rq_state & (RQ_NOW | RQ_STOP),
+ &lwi);
+ spin_lock(&config_list_lock);
+ }
+ /* spinlock and while guarantee RQ_NOW and RQ_LATER are not set */
+ rq_state &= ~RQ_RUNNING;
+ spin_unlock(&config_list_lock);
+
+ complete(&rq_exit);
+
+ CDEBUG(D_MGC, "Ending requeue thread\n");
+ RETURN(rc);
}
/* Add a cld to the list to requeue. Start the requeue thread if needed.
cld->cld_stopping, rq_state);
LASSERT(cfs_atomic_read(&cld->cld_refcount) > 0);
- cfs_mutex_lock(&cld->cld_lock);
- if (cld->cld_stopping || cld->cld_lostlock) {
- cfs_mutex_unlock(&cld->cld_lock);
- RETURN_EXIT;
- }
- /* this refcount will be released in mgc_requeue_thread. */
- config_log_get(cld);
- cld->cld_lostlock = 1;
- cfs_mutex_unlock(&cld->cld_lock);
-
- /* Hold lock for rq_state */
- cfs_spin_lock(&config_list_lock);
- if (rq_state & RQ_STOP) {
- cfs_spin_unlock(&config_list_lock);
- cld->cld_lostlock = 0;
- config_log_put(cld);
- } else {
- rq_state |= RQ_NOW;
- cfs_spin_unlock(&config_list_lock);
- cfs_waitq_signal(&rq_waitq);
- }
- EXIT;
+ mutex_lock(&cld->cld_lock);
+ if (cld->cld_stopping || cld->cld_lostlock) {
+ mutex_unlock(&cld->cld_lock);
+ RETURN_EXIT;
+ }
+ /* this refcount will be released in mgc_requeue_thread. */
+ config_log_get(cld);
+ cld->cld_lostlock = 1;
+ mutex_unlock(&cld->cld_lock);
+
+ /* Hold lock for rq_state */
+ spin_lock(&config_list_lock);
+ if (rq_state & RQ_STOP) {
+ spin_unlock(&config_list_lock);
+ cld->cld_lostlock = 0;
+ config_log_put(cld);
+ } else {
+ rq_state |= RQ_NOW;
+ spin_unlock(&config_list_lock);
+ cfs_waitq_signal(&rq_waitq);
+ }
+ EXIT;
}
/********************** class fns **********************/
LASSERT(lsi->lsi_srv_mnt == mnt);
/* The mgc fs exclusion sem. Only one fs can be setup at a time. */
- cfs_down(&cli->cl_mgc_sem);
+ down(&cli->cl_mgc_sem);
cfs_cleanup_group_info();
- obd->obd_fsops = fsfilt_get_ops(MT_STR(lsi->lsi_ldd));
+ obd->obd_fsops = fsfilt_get_ops(lsi->lsi_fstype);
if (IS_ERR(obd->obd_fsops)) {
- cfs_up(&cli->cl_mgc_sem);
- CERROR("No fstype %s rc=%ld\n", MT_STR(lsi->lsi_ldd),
- PTR_ERR(obd->obd_fsops));
+ up(&cli->cl_mgc_sem);
+ CERROR("%s: No fstype %s: rc = %ld\n", obd->obd_name,
+ lsi->lsi_fstype, PTR_ERR(obd->obd_fsops));
RETURN(PTR_ERR(obd->obd_fsops));
}
fsfilt_put_ops(obd->obd_fsops);
obd->obd_fsops = NULL;
cli->cl_mgc_vfsmnt = NULL;
- cfs_up(&cli->cl_mgc_sem);
+ up(&cli->cl_mgc_sem);
RETURN(err);
}
if (obd->obd_fsops)
fsfilt_put_ops(obd->obd_fsops);
- cfs_up(&cli->cl_mgc_sem);
+ up(&cli->cl_mgc_sem);
RETURN(rc);
}
if (cfs_atomic_dec_and_test(&mgc_count)) {
int running;
/* stop requeue thread */
- cfs_spin_lock(&config_list_lock);
- running = rq_state & RQ_RUNNING;
- if (running)
- rq_state |= RQ_STOP;
- cfs_spin_unlock(&config_list_lock);
- if (running) {
- cfs_waitq_signal(&rq_waitq);
- cfs_wait_for_completion(&rq_exit);
+ spin_lock(&config_list_lock);
+ running = rq_state & RQ_RUNNING;
+ if (running)
+ rq_state |= RQ_STOP;
+ spin_unlock(&config_list_lock);
+ if (running) {
+ cfs_waitq_signal(&rq_waitq);
+ wait_for_completion(&rq_exit);
}
}
obd_cleanup_client_import(obd);
sptlrpc_lprocfs_cliobd_attach(obd);
if (cfs_atomic_inc_return(&mgc_count) == 1) {
- rq_state = 0;
- cfs_waitq_init(&rq_waitq);
-
- /* start requeue thread */
- rc = cfs_create_thread(mgc_requeue_thread, NULL,
- CFS_DAEMON_FLAGS);
- if (rc < 0) {
- CERROR("%s: Cannot start requeue thread (%d),"
- "no more log updates!\n",
- obd->obd_name, rc);
- GOTO(err_cleanup, rc);
- }
- /* rc is the pid of mgc_requeue_thread. */
- rc = 0;
+ rq_state = 0;
+ cfs_waitq_init(&rq_waitq);
+
+ /* start requeue thread */
+ rc = PTR_ERR(kthread_run(mgc_requeue_thread, NULL,
+ "ll_cfg_requeue"));
+ if (IS_ERR_VALUE(rc)) {
+ CERROR("%s: Cannot start requeue thread (%d),"
+ "no more log updates!\n",
+ obd->obd_name, rc);
+ GOTO(err_cleanup, rc);
+ }
+ /* rc holds PTR_ERR() of the task (an error code), never a valid pointer;
+ * on success it is reset to 0 below. */
+ rc = 0;
}
RETURN(rc);
/* mgs wants the lock, give it up... */
LDLM_DEBUG(lock, "MGC blocking CB");
ldlm_lock2handle(lock, &lockh);
- rc = ldlm_cli_cancel(&lockh);
- break;
- case LDLM_CB_CANCELING:
- /* We've given up the lock, prepare ourselves to update. */
- LDLM_DEBUG(lock, "MGC cancel CB");
+ rc = ldlm_cli_cancel(&lockh, LCF_ASYNC);
+ break;
+ case LDLM_CB_CANCELING:
+ /* We've given up the lock, prepare ourselves to update. */
+ LDLM_DEBUG(lock, "MGC cancel CB");
- CDEBUG(D_MGC, "Lock res "LPX64" (%.8s)\n",
- lock->l_resource->lr_name.name[0],
- (char *)&lock->l_resource->lr_name.name[0]);
+ CDEBUG(D_MGC, "Lock res "DLDLMRES" (%.8s)\n",
+ PLDLMRES(lock->l_resource),
+ (char *)&lock->l_resource->lr_name.name[0]);
if (!cld) {
CDEBUG(D_INFO, "missing data, won't requeue\n");
/* Take a config lock so we can get cancel notifications */
static int mgc_enqueue(struct obd_export *exp, struct lov_stripe_md *lsm,
__u32 type, ldlm_policy_data_t *policy, __u32 mode,
- int *flags, void *bl_cb, void *cp_cb, void *gl_cb,
- void *data, __u32 lvb_len, void *lvb_swabber,
- struct lustre_handle *lockh)
+ __u64 *flags, void *bl_cb, void *cp_cb, void *gl_cb,
+ void *data, __u32 lvb_len, void *lvb_swabber,
+ struct lustre_handle *lockh)
{
- struct config_llog_data *cld = (struct config_llog_data *)data;
- struct ldlm_enqueue_info einfo = { type, mode, mgc_blocking_ast,
- ldlm_completion_ast, NULL, NULL, NULL };
- struct ptlrpc_request *req;
- int short_limit = cld_is_sptlrpc(cld);
- int rc;
- ENTRY;
+ struct config_llog_data *cld = (struct config_llog_data *)data;
+ struct ldlm_enqueue_info einfo = {
+ .ei_type = type,
+ .ei_mode = mode,
+ .ei_cb_bl = mgc_blocking_ast,
+ .ei_cb_cp = ldlm_completion_ast,
+ };
+ struct ptlrpc_request *req;
+ int short_limit = cld_is_sptlrpc(cld);
+ int rc;
+ ENTRY;
CDEBUG(D_MGC, "Enqueue for %s (res "LPX64")\n", cld->cld_logname,
cld->cld_resid.name[0]);
LDLM_ENQUEUE);
if (req == NULL)
RETURN(-ENOMEM);
+
+ req_capsule_set_size(&req->rq_pill, &RMF_DLM_LVB, RCL_SERVER, 0);
ptlrpc_request_set_replen(req);
/* check if this is server or client */
if (cld->cld_cfg.cfg_sb) {
struct lustre_sb_info *lsi = s2lsi(cld->cld_cfg.cfg_sb);
- if (lsi && (lsi->lsi_flags & LSI_SERVER))
+ if (lsi && IS_SERVER(lsi))
short_limit = 1;
}
/* Limit how long we will wait for the enqueue to complete */
req->rq_delay_limit = short_limit ? 5 : MGC_ENQUEUE_LIMIT;
rc = ldlm_cli_enqueue(exp, &req, &einfo, &cld->cld_resid, NULL, flags,
- NULL, 0, lockh, 0);
+ NULL, 0, LVB_T_NONE, lockh, 0);
/* A failed enqueue should still call the mgc_blocking_ast,
where it will be requeued if needed ("grant failed"). */
ptlrpc_req_finished(req);
static void mgc_notify_active(struct obd_device *unused)
{
- /* wakeup mgc_requeue_thread to requeue mgc lock */
- cfs_spin_lock(&config_list_lock);
- rq_state |= RQ_NOW;
- cfs_spin_unlock(&config_list_lock);
- cfs_waitq_signal(&rq_waitq);
+ /* wakeup mgc_requeue_thread to requeue mgc lock */
+ spin_lock(&config_list_lock);
+ rq_state |= RQ_NOW;
+ spin_unlock(&config_list_lock);
+ cfs_waitq_signal(&rq_waitq);
- /* TODO: Help the MGS rebuild nidtbl. -jay */
+ /* TODO: Help the MGS rebuild nidtbl. -jay */
}
/* Send target_reg message to MGS */
switch (event) {
case IMP_EVENT_DISCON:
/* MGC imports should not wait for recovery */
+ if (OCD_HAS_FLAG(&imp->imp_connect_data, IMP_RECOV))
+ ptlrpc_pinger_ir_down();
break;
case IMP_EVENT_INACTIVE:
break;
ldlm_namespace_cleanup(ns, LDLM_FL_LOCAL_ONLY);
break;
}
- case IMP_EVENT_ACTIVE:
- LCONSOLE_WARN("%s: Reactivating import\n", obd->obd_name);
- /* Clearing obd_no_recov allows us to continue pinging */
- obd->obd_no_recov = 0;
- mgc_notify_active(obd);
- break;
+ case IMP_EVENT_ACTIVE:
+ CDEBUG(D_INFO, "%s: Reactivating import\n", obd->obd_name);
+ /* Clearing obd_no_recov allows us to continue pinging */
+ obd->obd_no_recov = 0;
+ mgc_notify_active(obd);
+ if (OCD_HAS_FLAG(&imp->imp_connect_data, IMP_RECOV))
+ ptlrpc_pinger_ir_up();
+ break;
case IMP_EVENT_OCD:
break;
case IMP_EVENT_DEACTIVATE:
LASSERT(olg == &obd->obd_olg);
- rc = llog_setup(obd, olg, LLOG_CONFIG_ORIG_CTXT, tgt, 0, NULL,
- &llog_lvfs_ops);
- if (rc)
- RETURN(rc);
+#ifdef HAVE_LDISKFS_OSD
+ rc = llog_setup(NULL, obd, olg, LLOG_CONFIG_ORIG_CTXT, tgt,
+ &llog_lvfs_ops);
+ if (rc)
+ RETURN(rc);
+#endif
- rc = llog_setup(obd, olg, LLOG_CONFIG_REPL_CTXT, tgt, 0, NULL,
- &llog_client_ops);
- if (rc == 0) {
- ctxt = llog_get_context(obd, LLOG_CONFIG_REPL_CTXT);
- if (!ctxt) {
- ctxt = llog_get_context(obd, LLOG_CONFIG_ORIG_CTXT);
- if (ctxt)
- llog_cleanup(ctxt);
- RETURN(-ENODEV);
- }
- llog_initiator_connect(ctxt);
- llog_ctxt_put(ctxt);
- } else {
- ctxt = llog_get_context(obd, LLOG_CONFIG_ORIG_CTXT);
- if (ctxt)
- llog_cleanup(ctxt);
- }
+ rc = llog_setup(NULL, obd, olg, LLOG_CONFIG_REPL_CTXT, tgt,
+ &llog_client_ops);
+ if (rc)
+ GOTO(out, rc);
+ ctxt = llog_group_get_ctxt(olg, LLOG_CONFIG_REPL_CTXT);
+ if (!ctxt)
+ GOTO(out, rc = -ENODEV);
+
+ llog_initiator_connect(ctxt);
+ llog_ctxt_put(ctxt);
+
+ RETURN(0);
+out:
+ ctxt = llog_get_context(obd, LLOG_CONFIG_ORIG_CTXT);
+ if (ctxt)
+ llog_cleanup(NULL, ctxt);
RETURN(rc);
}
static int mgc_llog_finish(struct obd_device *obd, int count)
{
- struct llog_ctxt *ctxt;
- int rc = 0, rc2 = 0;
- ENTRY;
-
- ctxt = llog_get_context(obd, LLOG_CONFIG_REPL_CTXT);
- if (ctxt)
- rc = llog_cleanup(ctxt);
+ struct llog_ctxt *ctxt;
- ctxt = llog_get_context(obd, LLOG_CONFIG_ORIG_CTXT);
- if (ctxt)
- rc2 = llog_cleanup(ctxt);
+ ENTRY;
- if (!rc)
- rc = rc2;
+ ctxt = llog_get_context(obd, LLOG_CONFIG_REPL_CTXT);
+ if (ctxt)
+ llog_cleanup(NULL, ctxt);
- RETURN(rc);
+ ctxt = llog_get_context(obd, LLOG_CONFIG_ORIG_CTXT);
+ if (ctxt)
+ llog_cleanup(NULL, ctxt);
+ RETURN(0);
}
enum {
- CONFIG_READ_NRPAGES_INIT = 1 << (20 - CFS_PAGE_SHIFT),
+ CONFIG_READ_NRPAGES_INIT = 1 << (20 - PAGE_CACHE_SHIFT),
CONFIG_READ_NRPAGES = 4
};
static int mgc_apply_recover_logs(struct obd_device *mgc,
- struct config_llog_data *cld,
- __u64 max_version,
- void *data, int datalen, int need_swab)
+ struct config_llog_data *cld,
+ __u64 max_version,
+ void *data, int datalen, bool mne_swab)
{
struct config_llog_instance *cfg = &cld->cld_cfg;
struct lustre_sb_info *lsi = s2lsi(cfg->cfg_sb);
LASSERT(cfg->cfg_instance != NULL);
LASSERT(cfg->cfg_sb == cfg->cfg_instance);
- OBD_ALLOC(inst, CFS_PAGE_SIZE);
- if (inst == NULL)
- RETURN(-ENOMEM);
+ OBD_ALLOC(inst, PAGE_CACHE_SIZE);
+ if (inst == NULL)
+ RETURN(-ENOMEM);
- if (!(lsi->lsi_flags & LSI_SERVER)) {
- pos = sprintf(inst, "%p", cfg->cfg_instance);
+ if (!IS_SERVER(lsi)) {
+ pos = snprintf(inst, PAGE_CACHE_SIZE, "%p", cfg->cfg_instance);
+ if (pos >= PAGE_CACHE_SIZE) {
+ OBD_FREE(inst, PAGE_CACHE_SIZE);
+ RETURN(-E2BIG);
+ }
} else {
- LASSERT(IS_MDT(lsi->lsi_ldd));
- pos = sprintf(inst, "MDT%04x", lsi->lsi_ldd->ldd_svindex);
+ LASSERT(IS_MDT(lsi));
+ rc = server_name2svname(lsi->lsi_svname, inst, NULL,
+ PAGE_CACHE_SIZE);
+ if (rc) {
+ OBD_FREE(inst, PAGE_CACHE_SIZE);
+ RETURN(-EINVAL);
+ }
+ pos = strlen(inst);
}
++pos;
buf = inst + pos;
- bufsz = CFS_PAGE_SIZE - pos;
+ bufsz = PAGE_CACHE_SIZE - pos;
while (datalen > 0) {
int entry_len = sizeof(*entry);
if (datalen < entry_len) /* must have entry_len at least */
break;
- if (need_swab)
- lustre_swab_mgs_nidtbl_entry(entry);
- LASSERT(entry->mne_length <= CFS_PAGE_SIZE);
- if (entry->mne_length < entry_len)
- break;
+ /* Keep this swab for normal mixed endian handling. LU-1644 */
+ if (mne_swab)
+ lustre_swab_mgs_nidtbl_entry(entry);
+ if (entry->mne_length > PAGE_CACHE_SIZE) {
+ CERROR("MNE too large (%u)\n", entry->mne_length);
+ break;
+ }
+
+ if (entry->mne_length < entry_len)
+ break;
off += entry->mne_length;
datalen -= entry->mne_length;
if (obd == NULL) {
CDEBUG(D_INFO, "mgc %s: cannot find obdname %s\n",
mgc->obd_name, obdname);
-
+ rc = 0;
/* this is a safe race, when the ost is starting up...*/
continue;
}
pos += sprintf(params, "%s.import=%s", cname, "connection=");
uuid = buf + pos;
+ down_read(&obd->u.cli.cl_sem);
+ if (obd->u.cli.cl_import == NULL) {
+ /* client does not connect to the OST yet */
+ up_read(&obd->u.cli.cl_sem);
+ rc = 0;
+ continue;
+ }
+
/* TODO: iterate all nids to find one */
/* find uuid by nid */
rc = client_import_find_conn(obd->u.cli.cl_import,
entry->u.nids[0],
(struct obd_uuid *)uuid);
+ up_read(&obd->u.cli.cl_sem);
if (rc < 0) {
CERROR("mgc: cannot find uuid by nid %s\n",
libcfs_nid2str(entry->u.nids[0]));
/* continue, even one with error */
}
- OBD_FREE(inst, CFS_PAGE_SIZE);
+ OBD_FREE(inst, PAGE_CACHE_SIZE);
RETURN(rc);
}
struct mgs_config_body *body;
struct mgs_config_res *res;
struct ptlrpc_bulk_desc *desc;
- cfs_page_t **pages;
+ struct page **pages;
int nrpages;
bool eof = true;
+ bool mne_swab = false;
int i;
int ealen;
int rc;
GOTO(out, rc = -ENOMEM);
for (i = 0; i < nrpages; i++) {
- pages[i] = cfs_alloc_page(CFS_ALLOC_STD);
+ pages[i] = alloc_page(GFP_IOFS);
if (pages[i] == NULL)
GOTO(out, rc = -ENOMEM);
}
again:
LASSERT(cld_is_recover(cld));
- LASSERT(cfs_mutex_is_locked(&cld->cld_lock));
+ LASSERT(mutex_is_locked(&cld->cld_lock));
req = ptlrpc_request_alloc(class_exp2cliimp(cld->cld_mgcexp),
&RQF_MGS_CONFIG_READ);
if (req == NULL)
body = req_capsule_client_get(&req->rq_pill, &RMF_MGS_CONFIG_BODY);
LASSERT(body != NULL);
LASSERT(sizeof(body->mcb_name) > strlen(cld->cld_logname));
- strncpy(body->mcb_name, cld->cld_logname, sizeof(body->mcb_name));
+ if (strlcpy(body->mcb_name, cld->cld_logname, sizeof(body->mcb_name))
+ >= sizeof(body->mcb_name))
+ GOTO(out, rc = -E2BIG);
body->mcb_offset = cfg->cfg_last_idx + 1;
body->mcb_type = cld->cld_type;
- body->mcb_bits = CFS_PAGE_SHIFT;
+ body->mcb_bits = PAGE_CACHE_SHIFT;
body->mcb_units = nrpages;
- /* allocate bulk transfer descriptor */
- desc = ptlrpc_prep_bulk_imp(req, nrpages, BULK_PUT_SINK,
- MGS_BULK_PORTAL);
- if (desc == NULL)
- GOTO(out, rc = -ENOMEM);
+ /* allocate bulk transfer descriptor */
+ desc = ptlrpc_prep_bulk_imp(req, nrpages, 1, BULK_PUT_SINK,
+ MGS_BULK_PORTAL);
+ if (desc == NULL)
+ GOTO(out, rc = -ENOMEM);
- for (i = 0; i < nrpages; i++)
- ptlrpc_prep_bulk_page(desc, pages[i], 0, CFS_PAGE_SIZE);
+ for (i = 0; i < nrpages; i++)
+ ptlrpc_prep_bulk_page_pin(desc, pages[i], 0, PAGE_CACHE_SIZE);
ptlrpc_request_set_replen(req);
rc = ptlrpc_queue_wait(req);
if (ealen < 0)
GOTO(out, rc = ealen);
- if (ealen > nrpages << CFS_PAGE_SHIFT)
+ if (ealen > nrpages << PAGE_CACHE_SHIFT)
GOTO(out, rc = -EINVAL);
if (ealen == 0) { /* no logs transferred */
GOTO(out, rc);
}
+ mne_swab = !!ptlrpc_rep_need_swab(req);
+#if LUSTRE_VERSION_CODE < OBD_OCD_VERSION(3, 2, 50, 0)
+ /* This import flag means the server did an extra swab of IR MNE
+ * records (fixed in LU-1252), reverse it here if needed. LU-1644 */
+ if (unlikely(req->rq_import->imp_need_mne_swab))
+ mne_swab = !mne_swab;
+#else
+#warning "LU-1644: Remove old OBD_CONNECT_MNE_SWAB fixup and imp_need_mne_swab"
+#endif
+
for (i = 0; i < nrpages && ealen > 0; i++) {
int rc2;
void *ptr;
- ptr = cfs_kmap(pages[i]);
- rc2 = mgc_apply_recover_logs(obd, cld, res->mcr_offset, ptr,
- min_t(int, ealen, CFS_PAGE_SIZE),
- ptlrpc_rep_need_swab(req));
- cfs_kunmap(pages[i]);
- if (rc2 < 0) {
- CWARN("Process recover log %s error %d\n",
- cld->cld_logname, rc2);
- break;
+ ptr = kmap(pages[i]);
+ rc2 = mgc_apply_recover_logs(obd, cld, res->mcr_offset, ptr,
+ min_t(int, ealen, PAGE_CACHE_SIZE),
+ mne_swab);
+ kunmap(pages[i]);
+ if (rc2 < 0) {
+ CWARN("Process recover log %s error %d\n",
+ cld->cld_logname, rc2);
+ break;
}
- ealen -= CFS_PAGE_SIZE;
+ ealen -= PAGE_CACHE_SIZE;
}
out:
if (rc == 0 && !eof)
goto again;
- if (pages) {
- for (i = 0; i < nrpages; i++) {
- if (pages[i] == NULL)
- break;
- cfs_free_page(pages[i]);
- }
- OBD_FREE(pages, sizeof(*pages) * nrpages);
- }
- return rc;
+ if (pages) {
+ for (i = 0; i < nrpages; i++) {
+ if (pages[i] == NULL)
+ break;
+ __free_page(pages[i]);
+ }
+ OBD_FREE(pages, sizeof(*pages) * nrpages);
+ }
+ return rc;
}
+#ifdef HAVE_LDISKFS_OSD
+
+/*
+ * XXX: mgc_copy_llog() does not support osd-based llogs yet
+ */
+
/* identical to mgs_log_is_empty */
static int mgc_llog_is_empty(struct obd_device *obd, struct llog_ctxt *ctxt,
- char *name)
+ char *name)
{
- struct lvfs_run_ctxt saved;
- struct llog_handle *llh;
- int rc = 0;
-
- push_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
- rc = llog_create(ctxt, &llh, NULL, name);
- if (rc == 0) {
- llog_init_handle(llh, LLOG_F_IS_PLAIN, NULL);
- rc = llog_get_size(llh);
- llog_close(llh);
- }
- pop_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
- /* header is record 1 */
- return(rc <= 1);
-}
-
-static int mgc_copy_handler(struct llog_handle *llh, struct llog_rec_hdr *rec,
- void *data)
-{
- struct llog_rec_hdr local_rec = *rec;
- struct llog_handle *local_llh = (struct llog_handle *)data;
- char *cfg_buf = (char*) (rec + 1);
- struct lustre_cfg *lcfg;
- int rc = 0;
- ENTRY;
-
- /* Append all records */
- local_rec.lrh_len -= sizeof(*rec) + sizeof(struct llog_rec_tail);
- rc = llog_write_rec(local_llh, &local_rec, NULL, 0,
- (void *)cfg_buf, -1);
-
- lcfg = (struct lustre_cfg *)cfg_buf;
- CDEBUG(D_INFO, "idx=%d, rc=%d, len=%d, cmd %x %s %s\n",
- rec->lrh_index, rc, rec->lrh_len, lcfg->lcfg_command,
- lustre_cfg_string(lcfg, 0), lustre_cfg_string(lcfg, 1));
-
- RETURN(rc);
+ struct lvfs_run_ctxt saved;
+ struct llog_handle *llh;
+ int rc = 0;
+
+ push_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
+ rc = llog_open(NULL, ctxt, &llh, NULL, name, LLOG_OPEN_EXISTS);
+ if (rc == 0) {
+ rc = llog_init_handle(NULL, llh, LLOG_F_IS_PLAIN, NULL);
+ if (rc == 0)
+ rc = llog_get_size(llh);
+ llog_close(NULL, llh);
+ } else if (rc == -ENOENT) {
+ rc = 0;
+ }
+ pop_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
+ /* header is record 1 */
+ return (rc <= 1);
}
/* Copy a remote log locally */
sprintf(temp_log, "%sT", logname);
/* Make sure there's no old temp log */
- rc = llog_create(lctxt, &local_llh, NULL, temp_log);
- if (rc)
- GOTO(out, rc);
- rc = llog_init_handle(local_llh, LLOG_F_IS_PLAIN, NULL);
- if (rc)
- GOTO(out, rc);
- rc = llog_destroy(local_llh);
- llog_free_handle(local_llh);
- if (rc)
- GOTO(out, rc);
-
- /* open local log */
- rc = llog_create(lctxt, &local_llh, NULL, temp_log);
- if (rc)
- GOTO(out, rc);
-
- /* set the log header uuid for fun */
- OBD_ALLOC_PTR(uuid);
- obd_str2uuid(uuid, logname);
- rc = llog_init_handle(local_llh, LLOG_F_IS_PLAIN, uuid);
- OBD_FREE_PTR(uuid);
- if (rc)
- GOTO(out_closel, rc);
-
- /* open remote log */
- rc = llog_create(rctxt, &remote_llh, NULL, logname);
- if (rc)
- GOTO(out_closel, rc);
- rc = llog_init_handle(remote_llh, LLOG_F_IS_PLAIN, NULL);
- if (rc)
- GOTO(out_closer, rc);
-
- /* Copy remote log */
- rc = llog_process(remote_llh, mgc_copy_handler,(void *)local_llh, NULL);
+ rc = llog_erase(NULL, lctxt, NULL, temp_log);
+ if (rc < 0 && rc != -ENOENT)
+ GOTO(out, rc);
+
+ /* open local log */
+ rc = llog_open_create(NULL, lctxt, &local_llh, NULL, temp_log);
+ if (rc)
+ GOTO(out, rc);
+
+ /* set the log header uuid for fun */
+ OBD_ALLOC_PTR(uuid);
+ obd_str2uuid(uuid, logname);
+ rc = llog_init_handle(NULL, local_llh, LLOG_F_IS_PLAIN, uuid);
+ OBD_FREE_PTR(uuid);
+ if (rc)
+ GOTO(out_closel, rc);
+
+ /* open remote log */
+ rc = llog_open(NULL, rctxt, &remote_llh, NULL, logname,
+ LLOG_OPEN_EXISTS);
+ if (rc < 0) {
+ if (rc == -ENOENT)
+ rc = 0;
+ GOTO(out_closel, rc);
+ }
+
+ rc = llog_init_handle(NULL, remote_llh, LLOG_F_IS_PLAIN, NULL);
+ if (rc)
+ GOTO(out_closer, rc);
+
+ /* Copy remote log */
+ rc = llog_process(NULL, remote_llh, llog_copy_handler,
+ (void *)local_llh, NULL);
out_closer:
- rc2 = llog_close(remote_llh);
- if (!rc)
- rc = rc2;
+ rc2 = llog_close(NULL, remote_llh);
+ if (!rc)
+ rc = rc2;
out_closel:
- rc2 = llog_close(local_llh);
+ rc2 = llog_close(NULL, local_llh);
if (!rc)
rc = rc2;
/* We've copied the remote log to the local temp log, now
replace the old local log with the temp log. */
- if (!rc) {
+ if (rc == 0) {
struct client_obd *cli = &obd->u.cli;
+
LASSERT(cli);
LASSERT(cli->cl_mgc_configs_dir);
rc = lustre_rename(cli->cl_mgc_configs_dir, cli->cl_mgc_vfsmnt,
OBD_FREE(temp_log, strlen(logname) + 1);
RETURN(rc);
}
+#endif
/* local_only means it cannot get remote llogs */
static int mgc_process_cfg_log(struct obd_device *mgc,
int local_only)
{
struct llog_ctxt *ctxt, *lctxt = NULL;
+#ifdef HAVE_LDISKFS_OSD
struct client_obd *cli = &mgc->u.cli;
+#endif
struct lvfs_run_ctxt *saved_ctxt;
struct lustre_sb_info *lsi = NULL;
int rc = 0, must_pop = 0;
ENTRY;
LASSERT(cld);
- LASSERT(cfs_mutex_is_locked(&cld->cld_lock));
+ LASSERT(mutex_is_locked(&cld->cld_lock));
/*
* local copy of sptlrpc log is controlled elsewhere, don't try to
lctxt = llog_get_context(mgc, LLOG_CONFIG_ORIG_CTXT);
+#ifdef HAVE_LDISKFS_OSD
+ /*
+ * XXX: at the moment mgc_copy_llog() works with lvfs-based llogs
+ */
/* Copy the setup log locally if we can. Don't mess around if we're
running an MGS though (logs are already local). */
- if (lctxt && lsi && (lsi->lsi_flags & LSI_SERVER) &&
+ if (lctxt && lsi && IS_SERVER(lsi) &&
(lsi->lsi_srv_mnt == cli->cl_mgc_vfsmnt) &&
- !IS_MGS(lsi->lsi_ldd)) {
+ !IS_MGS(lsi) && lsi->lsi_srv_mnt) {
push_ctxt(saved_ctxt, &mgc->obd_lvfs_ctxt, NULL);
must_pop++;
if (!local_only)
llog_ctxt_put(ctxt);
ctxt = lctxt;
lctxt = NULL;
- } else if (local_only) { /* no local log at client side */
+ } else
+#endif
+ if (local_only) { /* no local log at client side */
GOTO(out_pop, rc = -EIO);
}
/* logname and instance info should be the same, so use our
copy of the instance for the update. The cfg_last_idx will
be updated here. */
- rc = class_config_parse_llog(ctxt, cld->cld_logname, &cld->cld_cfg);
+ rc = class_config_parse_llog(NULL, ctxt, cld->cld_logname,
+ &cld->cld_cfg);
EXIT;
out_pop:
int mgc_process_log(struct obd_device *mgc, struct config_llog_data *cld)
{
struct lustre_handle lockh = { 0 };
- int rc = 0, rcl, flags = LDLM_FL_NO_LRU;
+ __u64 flags = LDLM_FL_NO_LRU;
+ int rc = 0, rcl;
ENTRY;
LASSERT(cld);
sounds like badness. It actually might be fine, as long as
we're not trying to update from the same log
simultaneously (in which case we should use a per-log sem.) */
- cfs_mutex_lock(&cld->cld_lock);
- if (cld->cld_stopping) {
- cfs_mutex_unlock(&cld->cld_lock);
+ mutex_lock(&cld->cld_lock);
+ if (cld->cld_stopping) {
+ mutex_unlock(&cld->cld_lock);
RETURN(0);
}
CDEBUG(D_MGC, "%s: configuration from log '%s' %sed (%d).\n",
mgc->obd_name, cld->cld_logname, rc ? "fail" : "succeed", rc);
- cfs_mutex_unlock(&cld->cld_lock);
+ mutex_unlock(&cld->cld_lock);
/* Now drop the lock so MGS can revoke it */
if (!rcl) {