* Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*
- * Copyright (c) 2011, Whamcloud, Inc.
+ * Copyright (c) 2011, 2013, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
/* spinlock to make sure no item with 0 refcount in the list */
if (cfs_atomic_dec_and_lock(&cld->cld_refcount, &config_list_lock)) {
cfs_list_del(&cld->cld_list_chain);
- cfs_spin_unlock(&config_list_lock);
+ spin_unlock(&config_list_lock);
CDEBUG(D_MGC, "dropping config log %s\n", cld->cld_logname);
LASSERT(logname != NULL);
instance = cfg ? cfg->cfg_instance : NULL;
- cfs_spin_lock(&config_list_lock);
+ spin_lock(&config_list_lock);
cfs_list_for_each_entry(cld, &config_llog_list, cld_list_chain) {
/* check if instance equals */
if (instance != cld->cld_cfg.cfg_instance)
cfs_atomic_inc(&found->cld_refcount);
LASSERT(found->cld_stopping == 0 || cld_is_sptlrpc(found) == 0);
}
- cfs_spin_unlock(&config_list_lock);
- RETURN(found);
+ spin_unlock(&config_list_lock);
+ RETURN(found);
}
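
The teardown above relies on the kernel's atomic_dec_and_lock(): the spinlock is taken only when the count actually reaches zero, so anyone walking the list under the lock can never observe an entry whose refcount has already dropped to zero. A minimal sketch of the idiom, with hypothetical type and lock names:

#include <linux/atomic.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct item {
	atomic_t		it_ref;
	struct list_head	it_chain;
};

static LIST_HEAD(item_list);
static DEFINE_SPINLOCK(item_list_lock);

static void item_put(struct item *it)
{
	/* returns true, with the lock held, only when it_ref hit zero */
	if (atomic_dec_and_lock(&it->it_ref, &item_list_lock)) {
		list_del(&it->it_chain);
		spin_unlock(&item_list_lock);
		kfree(it);
	}
}
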
static
cld->cld_cfg = *cfg;
else
cld->cld_cfg.cfg_callback = class_config_llog_handler;
- cfs_mutex_init(&cld->cld_lock);
+ mutex_init(&cld->cld_lock);
cld->cld_cfg.cfg_last_idx = 0;
cld->cld_cfg.cfg_flags = 0;
cld->cld_cfg.cfg_sb = sb;
rc = mgc_logname2resid(logname, &cld->cld_resid, type);
- cfs_spin_lock(&config_list_lock);
- cfs_list_add(&cld->cld_list_chain, &config_llog_list);
- cfs_spin_unlock(&config_list_lock);
+ spin_lock(&config_list_lock);
+ cfs_list_add(&cld->cld_list_chain, &config_llog_list);
+ spin_unlock(&config_list_lock);
if (rc) {
config_log_put(cld);
if (cld_is_sptlrpc(cld)) {
rc = mgc_process_log(obd, cld);
- if (rc)
+ if (rc && rc != -ENOENT)
CERROR("failed processing sptlrpc log: %d\n", rc);
}
RETURN(0);
}
-CFS_DEFINE_MUTEX(llog_process_lock);
+DEFINE_MUTEX(llog_process_lock);
/** Stop watching for updates on this log.
*/
if (cld == NULL)
RETURN(-ENOENT);
- cfs_mutex_lock(&cld->cld_lock);
+ mutex_lock(&cld->cld_lock);
/*
* if cld_stopping is set, it means we didn't start the log thus
* not owning the start ref. this can happen after previous umount:
* calling start_log.
*/
if (unlikely(cld->cld_stopping)) {
- cfs_mutex_unlock(&cld->cld_lock);
+ mutex_unlock(&cld->cld_lock);
/* drop the ref from the find */
config_log_put(cld);
RETURN(rc);
cld_recover = cld->cld_recover;
cld->cld_recover = NULL;
- cfs_mutex_unlock(&cld->cld_lock);
+ mutex_unlock(&cld->cld_lock);
- if (cld_recover) {
- cfs_mutex_lock(&cld_recover->cld_lock);
- cld_recover->cld_stopping = 1;
- cfs_mutex_unlock(&cld_recover->cld_lock);
- config_log_put(cld_recover);
- }
+ if (cld_recover) {
+ mutex_lock(&cld_recover->cld_lock);
+ cld_recover->cld_stopping = 1;
+ mutex_unlock(&cld_recover->cld_lock);
+ config_log_put(cld_recover);
+ }
- cfs_spin_lock(&config_list_lock);
- cld_sptlrpc = cld->cld_sptlrpc;
- cld->cld_sptlrpc = NULL;
- cfs_spin_unlock(&config_list_lock);
+ spin_lock(&config_list_lock);
+ cld_sptlrpc = cld->cld_sptlrpc;
+ cld->cld_sptlrpc = NULL;
+ spin_unlock(&config_list_lock);
if (cld_sptlrpc)
config_log_put(cld_sptlrpc);
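
The cld_sptlrpc handoff above is the usual detach-under-lock pattern: steal and clear the shared pointer while holding the spinlock, then release the reference only after the lock is dropped, since the put path may sleep. The shape of the pattern, with made-up names:

#include <linux/slab.h>
#include <linux/spinlock.h>

struct thing;
static struct thing *shared_slot;
static DEFINE_SPINLOCK(slot_lock);

static void thing_put(struct thing *t)
{
	kfree(t);		/* stand-in; the real put may sleep */
}

static void drop_shared_slot(void)
{
	struct thing *t;

	spin_lock(&slot_lock);
	t = shared_slot;	/* steal the pointer... */
	shared_slot = NULL;	/* ...so nobody else can use or free it */
	spin_unlock(&slot_lock);

	if (t)
		thing_put(t);	/* safe to sleep here, lock is dropped */
}
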
OCD_HAS_FLAG(ocd, IMP_RECOV) ? "ENABLED" : "DISABLED");
rc += snprintf(page + rc, count - rc, "client_state:\n");
- cfs_spin_lock(&config_list_lock);
- cfs_list_for_each_entry(cld, &config_llog_list, cld_list_chain) {
- if (cld->cld_recover == NULL)
- continue;
- rc += snprintf(page + rc, count - rc,
- " - { client: %s, nidtbl_version: %u }\n",
- cld->cld_logname,
- cld->cld_recover->cld_cfg.cfg_last_idx);
- }
- cfs_spin_unlock(&config_list_lock);
+ spin_lock(&config_list_lock);
+ cfs_list_for_each_entry(cld, &config_llog_list, cld_list_chain) {
+ if (cld->cld_recover == NULL)
+ continue;
+ rc += snprintf(page + rc, count - rc,
+ " - { client: %s, nidtbl_version: %u }\n",
+ cld->cld_logname,
+ cld->cld_recover->cld_cfg.cfg_last_idx);
+ }
+ spin_unlock(&config_list_lock);
- RETURN(rc);
+ RETURN(rc);
}
/* reenqueue any lost locks */
#define RQ_STOP 0x8
static int rq_state = 0;
static cfs_waitq_t rq_waitq;
-static CFS_DECLARE_COMPLETION(rq_exit);
+static DECLARE_COMPLETION(rq_exit);
static void do_requeue(struct config_llog_data *cld)
{
/* Do not run mgc_process_log on a disconnected export or an
export which is being disconnected. Take the client
semaphore to make the check non-racy. */
- cfs_down_read(&cld->cld_mgcexp->exp_obd->u.cli.cl_sem);
+ down_read(&cld->cld_mgcexp->exp_obd->u.cli.cl_sem);
if (cld->cld_mgcexp->exp_obd->u.cli.cl_conn_count != 0) {
CDEBUG(D_MGC, "updating log %s\n", cld->cld_logname);
mgc_process_log(cld->cld_mgcexp->exp_obd, cld);
CDEBUG(D_MGC, "disconnecting, won't update log %s\n",
cld->cld_logname);
}
- cfs_up_read(&cld->cld_mgcexp->exp_obd->u.cli.cl_sem);
+ up_read(&cld->cld_mgcexp->exp_obd->u.cli.cl_sem);
EXIT;
}
static int mgc_requeue_thread(void *data)
{
- char name[] = "ll_cfg_requeue";
int rc = 0;
ENTRY;
- cfs_daemonize(name);
-
CDEBUG(D_MGC, "Starting requeue thread\n");
/* Keep trying failed locks periodically */
- cfs_spin_lock(&config_list_lock);
- rq_state |= RQ_RUNNING;
- while (1) {
- struct l_wait_info lwi;
- struct config_llog_data *cld, *cld_prev;
- int rand = cfs_rand() & MGC_TIMEOUT_RAND_CENTISEC;
- int stopped = !!(rq_state & RQ_STOP);
- int to;
-
- /* Any new or requeued lostlocks will change the state */
- rq_state &= ~(RQ_NOW | RQ_LATER);
- cfs_spin_unlock(&config_list_lock);
+ spin_lock(&config_list_lock);
+ rq_state |= RQ_RUNNING;
+ while (1) {
+ struct l_wait_info lwi;
+ struct config_llog_data *cld, *cld_prev;
+ int rand = cfs_rand() & MGC_TIMEOUT_RAND_CENTISEC;
+ int stopped = !!(rq_state & RQ_STOP);
+ int to;
+
+ /* Any new or requeued lostlocks will change the state */
+ rq_state &= ~(RQ_NOW | RQ_LATER);
+ spin_unlock(&config_list_lock);
/* Always wait a few seconds to allow the server who
caused the lock revocation to finish its setup, plus some
random so everyone doesn't try to reconnect at once. */
cld_prev = NULL;
- cfs_spin_lock(&config_list_lock);
- cfs_list_for_each_entry(cld, &config_llog_list,
- cld_list_chain) {
- if (!cld->cld_lostlock)
- continue;
+ spin_lock(&config_list_lock);
+ cfs_list_for_each_entry(cld, &config_llog_list,
+ cld_list_chain) {
+ if (!cld->cld_lostlock)
+ continue;
- cfs_spin_unlock(&config_list_lock);
+ spin_unlock(&config_list_lock);
LASSERT(cfs_atomic_read(&cld->cld_refcount) > 0);
if (likely(!stopped))
do_requeue(cld);
- cfs_spin_lock(&config_list_lock);
- }
- cfs_spin_unlock(&config_list_lock);
- if (cld_prev)
- config_log_put(cld_prev);
-
- /* break after scanning the list so that we can drop
- * refcount to losing lock clds */
- if (unlikely(stopped)) {
- cfs_spin_lock(&config_list_lock);
- break;
- }
+ spin_lock(&config_list_lock);
+ }
+ spin_unlock(&config_list_lock);
+ if (cld_prev)
+ config_log_put(cld_prev);
+
+ /* break after scanning the list so that we can drop
+ * refcount to losing lock clds */
+ if (unlikely(stopped)) {
+ spin_lock(&config_list_lock);
+ break;
+ }
- /* Wait a bit to see if anyone else needs a requeue */
- lwi = (struct l_wait_info) { 0 };
- l_wait_event(rq_waitq, rq_state & (RQ_NOW | RQ_STOP),
- &lwi);
- cfs_spin_lock(&config_list_lock);
- }
- /* spinlock and while guarantee RQ_NOW and RQ_LATER are not set */
- rq_state &= ~RQ_RUNNING;
- cfs_spin_unlock(&config_list_lock);
+ /* Wait a bit to see if anyone else needs a requeue */
+ lwi = (struct l_wait_info) { 0 };
+ l_wait_event(rq_waitq, rq_state & (RQ_NOW | RQ_STOP),
+ &lwi);
+ spin_lock(&config_list_lock);
+ }
+ /* spinlock and while guarantee RQ_NOW and RQ_LATER are not set */
+ rq_state &= ~RQ_RUNNING;
+ spin_unlock(&config_list_lock);
- cfs_complete(&rq_exit);
+ complete(&rq_exit);
- CDEBUG(D_MGC, "Ending requeue thread\n");
- RETURN(rc);
+ CDEBUG(D_MGC, "Ending requeue thread\n");
+ RETURN(rc);
}
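
The thread above implements a latch-and-clear flag protocol: producers set RQ_NOW under config_list_lock and signal the waitqueue, and the worker clears the request flags under the same lock before scanning, so a kick that arrives mid-scan re-arms RQ_NOW and the next wait returns immediately instead of losing the request. A condensed sketch using stock kernel primitives (wait_event() standing in for Lustre's l_wait_event()):

#include <linux/spinlock.h>
#include <linux/wait.h>

#define RQ_RUNNING	0x1
#define RQ_NOW		0x2
#define RQ_LATER	0x4
#define RQ_STOP		0x8

static unsigned int rq_flags;
static DEFINE_SPINLOCK(rq_flags_lock);
static DECLARE_WAIT_QUEUE_HEAD(rq_wq);

static void rq_kick(void)			/* producer side */
{
	spin_lock(&rq_flags_lock);
	rq_flags |= RQ_NOW;
	spin_unlock(&rq_flags_lock);
	wake_up(&rq_wq);
}

static int rq_worker(void *unused)		/* consumer side */
{
	spin_lock(&rq_flags_lock);
	rq_flags |= RQ_RUNNING;
	while (!(rq_flags & RQ_STOP)) {
		/* latch: clear requests before working, under the lock */
		rq_flags &= ~(RQ_NOW | RQ_LATER);
		spin_unlock(&rq_flags_lock);

		/* ... one requeue pass over the list goes here ... */

		wait_event(rq_wq, rq_flags & (RQ_NOW | RQ_STOP));
		spin_lock(&rq_flags_lock);
	}
	rq_flags &= ~RQ_RUNNING;
	spin_unlock(&rq_flags_lock);
	return 0;
}
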
/* Add a cld to the list to requeue. Start the requeue thread if needed.
cld->cld_stopping, rq_state);
LASSERT(cfs_atomic_read(&cld->cld_refcount) > 0);
- cfs_mutex_lock(&cld->cld_lock);
- if (cld->cld_stopping || cld->cld_lostlock) {
- cfs_mutex_unlock(&cld->cld_lock);
- RETURN_EXIT;
- }
- /* this refcount will be released in mgc_requeue_thread. */
- config_log_get(cld);
- cld->cld_lostlock = 1;
- cfs_mutex_unlock(&cld->cld_lock);
-
- /* Hold lock for rq_state */
- cfs_spin_lock(&config_list_lock);
- if (rq_state & RQ_STOP) {
- cfs_spin_unlock(&config_list_lock);
- cld->cld_lostlock = 0;
- config_log_put(cld);
- } else {
- rq_state |= RQ_NOW;
- cfs_spin_unlock(&config_list_lock);
- cfs_waitq_signal(&rq_waitq);
- }
- EXIT;
+ mutex_lock(&cld->cld_lock);
+ if (cld->cld_stopping || cld->cld_lostlock) {
+ mutex_unlock(&cld->cld_lock);
+ RETURN_EXIT;
+ }
+ /* this refcount will be released in mgc_requeue_thread. */
+ config_log_get(cld);
+ cld->cld_lostlock = 1;
+ mutex_unlock(&cld->cld_lock);
+
+ /* Hold lock for rq_state */
+ spin_lock(&config_list_lock);
+ if (rq_state & RQ_STOP) {
+ spin_unlock(&config_list_lock);
+ cld->cld_lostlock = 0;
+ config_log_put(cld);
+ } else {
+ rq_state |= RQ_NOW;
+ spin_unlock(&config_list_lock);
+ cfs_waitq_signal(&rq_waitq);
+ }
+ EXIT;
}
/********************** class fns **********************/
LASSERT(lsi->lsi_srv_mnt == mnt);
/* The mgc fs exclusion sem. Only one fs can be setup at a time. */
- cfs_down(&cli->cl_mgc_sem);
+ down(&cli->cl_mgc_sem);
cfs_cleanup_group_info();
obd->obd_fsops = fsfilt_get_ops(lsi->lsi_fstype);
if (IS_ERR(obd->obd_fsops)) {
- cfs_up(&cli->cl_mgc_sem);
- CERROR("No fstype %s rc=%ld\n", lsi->lsi_fstype,
- PTR_ERR(obd->obd_fsops));
+ up(&cli->cl_mgc_sem);
+ CERROR("%s: No fstype %s: rc = %ld\n", lsi->lsi_fstype,
+ obd->obd_name, PTR_ERR(obd->obd_fsops));
RETURN(PTR_ERR(obd->obd_fsops));
}
fsfilt_put_ops(obd->obd_fsops);
obd->obd_fsops = NULL;
cli->cl_mgc_vfsmnt = NULL;
- cfs_up(&cli->cl_mgc_sem);
+ up(&cli->cl_mgc_sem);
RETURN(err);
}
if (obd->obd_fsops)
fsfilt_put_ops(obd->obd_fsops);
- cfs_up(&cli->cl_mgc_sem);
+ up(&cli->cl_mgc_sem);
RETURN(rc);
}
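
cl_mgc_sem above is a counting semaphore used as a binary one to serialize filesystem setup; unlike a mutex, a semaphore may legitimately be released by a different task than the one that took it, which is one reason such code stays on down()/up() through this conversion rather than moving to mutex_lock(). A minimal sketch:

#include <linux/semaphore.h>

/* one mount/setup at a time; count starts at 1 */
static DEFINE_SEMAPHORE(mgc_fs_sem);

static int setup_fs_exclusive(int (*setup)(void))
{
	int rc;

	down(&mgc_fs_sem);	/* sleeps, uninterruptible */
	rc = setup();
	up(&mgc_fs_sem);	/* every exit path must pair with down() */
	return rc;
}
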
if (cfs_atomic_dec_and_test(&mgc_count)) {
int running;
/* stop requeue thread */
- cfs_spin_lock(&config_list_lock);
- running = rq_state & RQ_RUNNING;
- if (running)
- rq_state |= RQ_STOP;
- cfs_spin_unlock(&config_list_lock);
- if (running) {
- cfs_waitq_signal(&rq_waitq);
- cfs_wait_for_completion(&rq_exit);
+ spin_lock(&config_list_lock);
+ running = rq_state & RQ_RUNNING;
+ if (running)
+ rq_state |= RQ_STOP;
+ spin_unlock(&config_list_lock);
+ if (running) {
+ cfs_waitq_signal(&rq_waitq);
+ wait_for_completion(&rq_exit);
}
}
obd_cleanup_client_import(obd);
sptlrpc_lprocfs_cliobd_attach(obd);
if (cfs_atomic_inc_return(&mgc_count) == 1) {
- rq_state = 0;
- cfs_waitq_init(&rq_waitq);
-
- /* start requeue thread */
- rc = cfs_create_thread(mgc_requeue_thread, NULL,
- CFS_DAEMON_FLAGS);
- if (rc < 0) {
- CERROR("%s: Cannot start requeue thread (%d),"
- "no more log updates!\n",
- obd->obd_name, rc);
- GOTO(err_cleanup, rc);
- }
- /* rc is the pid of mgc_requeue_thread. */
- rc = 0;
+ rq_state = 0;
+ cfs_waitq_init(&rq_waitq);
+
+ /* start requeue thread */
+ rc = PTR_ERR(kthread_run(mgc_requeue_thread, NULL,
+ "ll_cfg_requeue"));
+ if (IS_ERR_VALUE(rc)) {
+ CERROR("%s: Cannot start requeue thread (%d),"
+ "no more log updates!\n",
+ obd->obd_name, rc);
+ GOTO(err_cleanup, rc);
+ }
+ /* on success rc holds PTR_ERR() of a valid task_struct; clear it */
+ rc = 0;
}
RETURN(rc);
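
The kthread_run() conversion above leans on the ERR_PTR convention: on failure kthread_run() returns an encoded error pointer, never NULL, so PTR_ERR()/IS_ERR_VALUE() can funnel the result into a single int rc the way the hunk does. The same idiom in isolation:

#include <linux/err.h>
#include <linux/kthread.h>

static int worker_fn(void *data)
{
	return 0;	/* thread body elided */
}

static int start_worker(void)
{
	struct task_struct *task;

	task = kthread_run(worker_fn, NULL, "my_worker");
	if (IS_ERR(task))	/* ERR_PTR(-ENOMEM) etc., never NULL */
		return PTR_ERR(task);
	return 0;
}
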
/* mgs wants the lock, give it up... */
LDLM_DEBUG(lock, "MGC blocking CB");
ldlm_lock2handle(lock, &lockh);
- rc = ldlm_cli_cancel(&lockh);
- break;
- case LDLM_CB_CANCELING:
- /* We've given up the lock, prepare ourselves to update. */
- LDLM_DEBUG(lock, "MGC cancel CB");
+ rc = ldlm_cli_cancel(&lockh, LCF_ASYNC);
+ break;
+ case LDLM_CB_CANCELING:
+ /* We've given up the lock, prepare ourselves to update. */
+ LDLM_DEBUG(lock, "MGC cancel CB");
- CDEBUG(D_MGC, "Lock res "LPX64" (%.8s)\n",
- lock->l_resource->lr_name.name[0],
- (char *)&lock->l_resource->lr_name.name[0]);
+ CDEBUG(D_MGC, "Lock res "DLDLMRES" (%.8s)\n",
+ PLDLMRES(lock->l_resource),
+ (char *)&lock->l_resource->lr_name.name[0]);
if (!cld) {
CDEBUG(D_INFO, "missing data, won't requeue\n");
void *data, __u32 lvb_len, void *lvb_swabber,
struct lustre_handle *lockh)
{
- struct config_llog_data *cld = (struct config_llog_data *)data;
- struct ldlm_enqueue_info einfo = { type, mode, mgc_blocking_ast,
- ldlm_completion_ast, NULL, NULL, NULL };
- struct ptlrpc_request *req;
- int short_limit = cld_is_sptlrpc(cld);
- int rc;
- ENTRY;
+ struct config_llog_data *cld = (struct config_llog_data *)data;
+ struct ldlm_enqueue_info einfo = {
+ .ei_type = type,
+ .ei_mode = mode,
+ .ei_cb_bl = mgc_blocking_ast,
+ .ei_cb_cp = ldlm_completion_ast,
+ };
+ struct ptlrpc_request *req;
+ int short_limit = cld_is_sptlrpc(cld);
+ int rc;
+ ENTRY;
CDEBUG(D_MGC, "Enqueue for %s (res "LPX64")\n", cld->cld_logname,
cld->cld_resid.name[0]);
LDLM_ENQUEUE);
if (req == NULL)
RETURN(-ENOMEM);
+
+ req_capsule_set_size(&req->rq_pill, &RMF_DLM_LVB, RCL_SERVER, 0);
ptlrpc_request_set_replen(req);
/* check if this is server or client */
/* Limit how long we will wait for the enqueue to complete */
req->rq_delay_limit = short_limit ? 5 : MGC_ENQUEUE_LIMIT;
rc = ldlm_cli_enqueue(exp, &req, &einfo, &cld->cld_resid, NULL, flags,
- NULL, 0, lockh, 0);
+ NULL, 0, LVB_T_NONE, lockh, 0);
/* A failed enqueue should still call the mgc_blocking_ast,
where it will be requeued if needed ("grant failed"). */
ptlrpc_req_finished(req);
static void mgc_notify_active(struct obd_device *unused)
{
- /* wakeup mgc_requeue_thread to requeue mgc lock */
- cfs_spin_lock(&config_list_lock);
- rq_state |= RQ_NOW;
- cfs_spin_unlock(&config_list_lock);
- cfs_waitq_signal(&rq_waitq);
+ /* wakeup mgc_requeue_thread to requeue mgc lock */
+ spin_lock(&config_list_lock);
+ rq_state |= RQ_NOW;
+ spin_unlock(&config_list_lock);
+ cfs_waitq_signal(&rq_waitq);
- /* TODO: Help the MGS rebuild nidtbl. -jay */
+ /* TODO: Help the MGS rebuild nidtbl. -jay */
}
/* Send target_reg message to MGS */
switch (event) {
case IMP_EVENT_DISCON:
/* MGC imports should not wait for recovery */
+ if (OCD_HAS_FLAG(&imp->imp_connect_data, IMP_RECOV))
+ ptlrpc_pinger_ir_down();
break;
case IMP_EVENT_INACTIVE:
break;
ldlm_namespace_cleanup(ns, LDLM_FL_LOCAL_ONLY);
break;
}
- case IMP_EVENT_ACTIVE:
- LCONSOLE_WARN("%s: Reactivating import\n", obd->obd_name);
- /* Clearing obd_no_recov allows us to continue pinging */
- obd->obd_no_recov = 0;
- mgc_notify_active(obd);
- break;
+ case IMP_EVENT_ACTIVE:
+ CDEBUG(D_INFO, "%s: Reactivating import\n", obd->obd_name);
+ /* Clearing obd_no_recov allows us to continue pinging */
+ obd->obd_no_recov = 0;
+ mgc_notify_active(obd);
+ if (OCD_HAS_FLAG(&imp->imp_connect_data, IMP_RECOV))
+ ptlrpc_pinger_ir_up();
+ break;
case IMP_EVENT_OCD:
break;
case IMP_EVENT_DEACTIVATE:
}
enum {
- CONFIG_READ_NRPAGES_INIT = 1 << (20 - CFS_PAGE_SHIFT),
+ CONFIG_READ_NRPAGES_INIT = 1 << (20 - PAGE_CACHE_SHIFT),
CONFIG_READ_NRPAGES = 4
};
LASSERT(cfg->cfg_instance != NULL);
LASSERT(cfg->cfg_sb == cfg->cfg_instance);
- OBD_ALLOC(inst, CFS_PAGE_SIZE);
- if (inst == NULL)
- RETURN(-ENOMEM);
+ OBD_ALLOC(inst, PAGE_CACHE_SIZE);
+ if (inst == NULL)
+ RETURN(-ENOMEM);
if (!IS_SERVER(lsi)) {
- pos = sprintf(inst, "%p", cfg->cfg_instance);
+ pos = snprintf(inst, PAGE_CACHE_SIZE, "%p", cfg->cfg_instance);
+ if (pos >= PAGE_CACHE_SIZE) {
+ OBD_FREE(inst, PAGE_CACHE_SIZE);
+ return -E2BIG;
+ }
} else {
LASSERT(IS_MDT(lsi));
- rc = server_name2svname(lsi->lsi_svname, inst, NULL);
- if (rc)
+ rc = server_name2svname(lsi->lsi_svname, inst, NULL,
+ PAGE_CACHE_SIZE);
+ if (rc) {
+ OBD_FREE(inst, PAGE_CACHE_SIZE);
RETURN(-EINVAL);
+ }
pos = strlen(inst);
}
++pos;
buf = inst + pos;
- bufsz = CFS_PAGE_SIZE - pos;
+ bufsz = PAGE_CACHE_SIZE - pos;
while (datalen > 0) {
int entry_len = sizeof(*entry);
/* Keep this swab for normal mixed endian handling. LU-1644 */
if (mne_swab)
lustre_swab_mgs_nidtbl_entry(entry);
- if (entry->mne_length > CFS_PAGE_SIZE) {
+ if (entry->mne_length > PAGE_CACHE_SIZE) {
CERROR("MNE too large (%u)\n", entry->mne_length);
break;
}
pos += sprintf(params, "%s.import=%s", cname, "connection=");
uuid = buf + pos;
+ down_read(&obd->u.cli.cl_sem);
+ if (obd->u.cli.cl_import == NULL) {
+ /* client has not connected to the OST yet */
+ up_read(&obd->u.cli.cl_sem);
+ rc = 0;
+ continue;
+ }
+
/* TODO: iterate all nids to find one */
/* find uuid by nid */
rc = client_import_find_conn(obd->u.cli.cl_import,
entry->u.nids[0],
(struct obd_uuid *)uuid);
+ up_read(&obd->u.cli.cl_sem);
if (rc < 0) {
CERROR("mgc: cannot find uuid by nid %s\n",
libcfs_nid2str(entry->u.nids[0]));
/* continue, even one with error */
}
- OBD_FREE(inst, CFS_PAGE_SIZE);
+ OBD_FREE(inst, PAGE_CACHE_SIZE);
RETURN(rc);
}
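
The cl_sem block added above guards a pointer that the disconnect path clears while holding the same rwsem for write: the NULL check and every use of cl_import must sit inside a single read-locked section, otherwise the import could be torn down between the check and the use. The shape of the pattern, with hypothetical names:

#include <linux/rwsem.h>

struct import;

struct client_state {
	struct rw_semaphore	cl_sem;
	struct import		*cl_import;	/* cleared under cl_sem (write) */
};

static int use_import(struct client_state *cli, int (*fn)(struct import *))
{
	int rc = 0;

	down_read(&cli->cl_sem);
	if (cli->cl_import != NULL)	/* check and use under one hold */
		rc = fn(cli->cl_import);
	up_read(&cli->cl_sem);
	return rc;
}
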
struct mgs_config_body *body;
struct mgs_config_res *res;
struct ptlrpc_bulk_desc *desc;
- cfs_page_t **pages;
+ struct page **pages;
int nrpages;
bool eof = true;
bool mne_swab = false;
GOTO(out, rc = -ENOMEM);
for (i = 0; i < nrpages; i++) {
- pages[i] = cfs_alloc_page(CFS_ALLOC_STD);
+ pages[i] = alloc_page(GFP_IOFS);
if (pages[i] == NULL)
GOTO(out, rc = -ENOMEM);
}
again:
LASSERT(cld_is_recover(cld));
- LASSERT(cfs_mutex_is_locked(&cld->cld_lock));
+ LASSERT(mutex_is_locked(&cld->cld_lock));
req = ptlrpc_request_alloc(class_exp2cliimp(cld->cld_mgcexp),
&RQF_MGS_CONFIG_READ);
if (req == NULL)
body = req_capsule_client_get(&req->rq_pill, &RMF_MGS_CONFIG_BODY);
LASSERT(body != NULL);
LASSERT(sizeof(body->mcb_name) > strlen(cld->cld_logname));
- strncpy(body->mcb_name, cld->cld_logname, sizeof(body->mcb_name));
+ if (strlcpy(body->mcb_name, cld->cld_logname, sizeof(body->mcb_name))
+ >= sizeof(body->mcb_name))
+ GOTO(out, rc = -E2BIG);
body->mcb_offset = cfg->cfg_last_idx + 1;
body->mcb_type = cld->cld_type;
- body->mcb_bits = CFS_PAGE_SHIFT;
+ body->mcb_bits = PAGE_CACHE_SHIFT;
body->mcb_units = nrpages;
- /* allocate bulk transfer descriptor */
- desc = ptlrpc_prep_bulk_imp(req, nrpages, BULK_PUT_SINK,
- MGS_BULK_PORTAL);
- if (desc == NULL)
- GOTO(out, rc = -ENOMEM);
+ /* allocate bulk transfer descriptor */
+ desc = ptlrpc_prep_bulk_imp(req, nrpages, 1, BULK_PUT_SINK,
+ MGS_BULK_PORTAL);
+ if (desc == NULL)
+ GOTO(out, rc = -ENOMEM);
- for (i = 0; i < nrpages; i++)
- ptlrpc_prep_bulk_page_pin(desc, pages[i], 0, CFS_PAGE_SIZE);
+ for (i = 0; i < nrpages; i++)
+ ptlrpc_prep_bulk_page_pin(desc, pages[i], 0, PAGE_CACHE_SIZE);
ptlrpc_request_set_replen(req);
rc = ptlrpc_queue_wait(req);
if (ealen < 0)
GOTO(out, rc = ealen);
- if (ealen > nrpages << CFS_PAGE_SHIFT)
+ if (ealen > nrpages << PAGE_CACHE_SHIFT)
GOTO(out, rc = -EINVAL);
if (ealen == 0) { /* no logs transferred */
int rc2;
void *ptr;
- ptr = cfs_kmap(pages[i]);
- rc2 = mgc_apply_recover_logs(obd, cld, res->mcr_offset, ptr,
- min_t(int, ealen, CFS_PAGE_SIZE),
+ ptr = kmap(pages[i]);
+ rc2 = mgc_apply_recover_logs(obd, cld, res->mcr_offset, ptr,
+ min_t(int, ealen, PAGE_CACHE_SIZE),
mne_swab);
- cfs_kunmap(pages[i]);
- if (rc2 < 0) {
- CWARN("Process recover log %s error %d\n",
- cld->cld_logname, rc2);
- break;
+ kunmap(pages[i]);
+ if (rc2 < 0) {
+ CWARN("Process recover log %s error %d\n",
+ cld->cld_logname, rc2);
+ break;
}
- ealen -= CFS_PAGE_SIZE;
+ ealen -= PAGE_CACHE_SIZE;
}
out:
if (rc == 0 && !eof)
goto again;
- if (pages) {
- for (i = 0; i < nrpages; i++) {
- if (pages[i] == NULL)
- break;
- cfs_free_page(pages[i]);
- }
- OBD_FREE(pages, sizeof(*pages) * nrpages);
- }
- return rc;
+ if (pages) {
+ for (i = 0; i < nrpages; i++) {
+ if (pages[i] == NULL)
+ break;
+ __free_page(pages[i]);
+ }
+ OBD_FREE(pages, sizeof(*pages) * nrpages);
+ }
+ return rc;
}
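
The strlcpy() conversion above (for mcb_name) exploits its return value: strlcpy() returns the full length of the source string, so a result >= the destination size is a reliable truncation test, which the code turns into -E2BIG instead of silently sending a clipped log name. A compact sketch of the check:

#include <linux/errno.h>
#include <linux/string.h>

/* copy src into a fixed-size field, refusing to truncate silently */
static int copy_logname(char *dst, size_t dstsz, const char *src)
{
	if (strlcpy(dst, src, dstsz) >= dstsz)
		return -E2BIG;	/* src did not fit, dst is truncated */
	return 0;
}
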
#ifdef HAVE_LDISKFS_OSD
push_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
rc = llog_open(NULL, ctxt, &llh, NULL, name, LLOG_OPEN_EXISTS);
if (rc == 0) {
- llog_init_handle(NULL, llh, LLOG_F_IS_PLAIN, NULL);
- rc = llog_get_size(llh);
+ rc = llog_init_handle(NULL, llh, LLOG_F_IS_PLAIN, NULL);
+ if (rc == 0)
+ rc = llog_get_size(llh);
llog_close(NULL, llh);
} else if (rc == -ENOENT) {
rc = 0;
return (rc <= 1);
}
-static int mgc_copy_handler(const struct lu_env *env, struct llog_handle *llh,
- struct llog_rec_hdr *rec, void *data)
-{
- struct llog_rec_hdr local_rec = *rec;
- struct llog_handle *local_llh = (struct llog_handle *)data;
- char *cfg_buf = (char*) (rec + 1);
- struct lustre_cfg *lcfg;
- int rc = 0;
- ENTRY;
-
- /* Append all records */
- local_rec.lrh_len -= sizeof(*rec) + sizeof(struct llog_rec_tail);
- rc = llog_write(env, local_llh, &local_rec, NULL, 0,
- (void *)cfg_buf, -1);
-
- lcfg = (struct lustre_cfg *)cfg_buf;
- CDEBUG(D_INFO, "idx=%d, rc=%d, len=%d, cmd %x %s %s\n",
- rec->lrh_index, rc, rec->lrh_len, lcfg->lcfg_command,
- lustre_cfg_string(lcfg, 0), lustre_cfg_string(lcfg, 1));
-
- RETURN(rc);
-}
-
/* Copy a remote log locally */
static int mgc_copy_llog(struct obd_device *obd, struct llog_ctxt *rctxt,
struct llog_ctxt *lctxt, char *logname)
GOTO(out_closer, rc);
/* Copy remote log */
- rc = llog_process(NULL, remote_llh, mgc_copy_handler,
+ rc = llog_process(NULL, remote_llh, llog_copy_handler,
(void *)local_llh, NULL);
out_closer:
ENTRY;
LASSERT(cld);
- LASSERT(cfs_mutex_is_locked(&cld->cld_lock));
+ LASSERT(mutex_is_locked(&cld->cld_lock));
/*
* local copy of sptlrpc log is controlled elsewhere, don't try to
sounds like badness. It actually might be fine, as long as
we're not trying to update from the same log
simultaneously (in which case we should use a per-log sem.) */
- cfs_mutex_lock(&cld->cld_lock);
- if (cld->cld_stopping) {
- cfs_mutex_unlock(&cld->cld_lock);
+ mutex_lock(&cld->cld_lock);
+ if (cld->cld_stopping) {
+ mutex_unlock(&cld->cld_lock);
RETURN(0);
}
CDEBUG(D_MGC, "%s: configuration from log '%s' %sed (%d).\n",
mgc->obd_name, cld->cld_logname, rc ? "fail" : "succeed", rc);
- cfs_mutex_unlock(&cld->cld_lock);
+ mutex_unlock(&cld->cld_lock);
/* Now drop the lock so MGS can revoke it */
if (!rcl) {