* GPL HEADER END
*/
/*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*/
/*
" in log "LPX64"\n", hdr->lrh_index, rec->cur_hdr.lrh_index,
rec->cur_id, rec->cur_endrec, llh->lgh_id.lgl_oid);
- spin_lock(&mdd->mdd_cl.mc_user_lock);
+ cfs_spin_lock(&mdd->mdd_cl.mc_user_lock);
mdd->mdd_cl.mc_lastuser = rec->cur_id;
- spin_unlock(&mdd->mdd_cl.mc_user_lock);
+ cfs_spin_unlock(&mdd->mdd_cl.mc_user_lock);
RETURN(LLOG_PROC_BREAK);
}
int rc;
mdd->mdd_cl.mc_index = 0;
- spin_lock_init(&mdd->mdd_cl.mc_lock);
- cfs_waitq_init(&mdd->mdd_cl.mc_waitq);
+ cfs_spin_lock_init(&mdd->mdd_cl.mc_lock);
mdd->mdd_cl.mc_starttime = cfs_time_current_64();
mdd->mdd_cl.mc_flags = 0; /* off by default */
mdd->mdd_cl.mc_mask = CHANGELOG_DEFMASK;
- spin_lock_init(&mdd->mdd_cl.mc_user_lock);
+ cfs_spin_lock_init(&mdd->mdd_cl.mc_user_lock);
mdd->mdd_cl.mc_lastuser = 0;
rc = mdd_changelog_llog_init(mdd);
mdd2obd_dev(mdd)->obd_name);
rc = -ESRCH;
} else {
- spin_lock(&mdd->mdd_cl.mc_lock);
+ cfs_spin_lock(&mdd->mdd_cl.mc_lock);
mdd->mdd_cl.mc_flags |= CLM_ON;
- spin_unlock(&mdd->mdd_cl.mc_lock);
+ cfs_spin_unlock(&mdd->mdd_cl.mc_lock);
rc = mdd_changelog_write_header(mdd, CLM_START);
}
} else if ((on == 0) && ((mdd->mdd_cl.mc_flags & CLM_ON) == CLM_ON)) {
LCONSOLE_INFO("%s: changelog off\n",mdd2obd_dev(mdd)->obd_name);
rc = mdd_changelog_write_header(mdd, CLM_FINI);
- spin_lock(&mdd->mdd_cl.mc_lock);
+ cfs_spin_lock(&mdd->mdd_cl.mc_lock);
mdd->mdd_cl.mc_flags &= ~CLM_ON;
- spin_unlock(&mdd->mdd_cl.mc_lock);
+ cfs_spin_unlock(&mdd->mdd_cl.mc_lock);
}
return rc;
}
struct llog_ctxt *ctxt;
int rc;
- if ((mdd->mdd_cl.mc_mask & (1 << rec->cr.cr_type)) == 0)
- return 0;
-
rec->cr_hdr.lrh_len = llog_data_len(sizeof(*rec) + rec->cr.cr_namelen);
/* llog_lvfs_write_rec sets the llog tail len */
rec->cr_hdr.lrh_type = CHANGELOG_REC;
rec->cr.cr_time = cl_time();
- spin_lock(&mdd->mdd_cl.mc_lock);
+ cfs_spin_lock(&mdd->mdd_cl.mc_lock);
/* NB: I suppose it's possible llog_add adds out of order wrt cr_index,
but as long as the MDD transactions are ordered correctly for e.g.
rename conflicts, I don't think this should matter. */
rec->cr.cr_index = ++mdd->mdd_cl.mc_index;
- spin_unlock(&mdd->mdd_cl.mc_lock);
+ cfs_spin_unlock(&mdd->mdd_cl.mc_lock);
ctxt = llog_get_context(obd, LLOG_CHANGELOG_ORIG_CTXT);
if (ctxt == NULL)
return -ENXIO;
rc = llog_add(ctxt, &rec->cr_hdr, NULL, NULL, 0);
llog_ctxt_put(ctxt);
- cfs_waitq_signal(&mdd->mdd_cl.mc_waitq);
-
return rc;
}
if (ctxt == NULL)
return -ENXIO;
- spin_lock(&mdd->mdd_cl.mc_lock);
+ cfs_spin_lock(&mdd->mdd_cl.mc_lock);
cur = (long long)mdd->mdd_cl.mc_index;
- spin_unlock(&mdd->mdd_cl.mc_lock);
+ cfs_spin_unlock(&mdd->mdd_cl.mc_lock);
if (endrec > cur)
endrec = cur;
/* Status and action flags */
rec->cr.cr_markerflags = mdd->mdd_cl.mc_flags | markerflags;
- rc = mdd_changelog_llog_write(mdd, rec, NULL);
+ rc = (mdd->mdd_cl.mc_mask & (1 << CL_MARK)) ?
+ mdd_changelog_llog_write(mdd, rec, NULL) : 0;
/* assume on or off event; reset repeat-access time */
- mdd->mdd_cl.mc_starttime = rec->cr.cr_time;
+ mdd->mdd_cl.mc_starttime = cfs_time_current_64();
OBD_FREE(rec, reclen);
RETURN(rc);
return -ENOSYS;
}
+/*
+ * Stub handler for the new moo_file_lock method on ".lustre" special
+ * objects: extent locking is not supported here, so always report
+ * -ENOSYS to the caller (env/obj/lmm/extent/lockh are ignored).
+ */
+static int dot_file_lock(const struct lu_env *env, struct md_object *obj,
+ struct lov_mds_md *lmm, struct ldlm_extent *extent,
+ struct lustre_handle *lockh)
+{
+ return -ENOSYS;
+}
+
+/*
+ * Matching stub for moo_file_unlock: unlock is likewise unsupported on
+ * ".lustre" objects, so always return -ENOSYS.
+ */
+static int dot_file_unlock(const struct lu_env *env, struct md_object *obj,
+ struct lov_mds_md *lmm, struct lustre_handle *lockh)
+{
+ return -ENOSYS;
+}
static struct md_object_operations mdd_dot_lustre_obj_ops = {
 .moo_permission = dot_lustre_mdd_permission,
 .moo_version_get = dot_lustre_mdd_version_get,
 .moo_version_set = dot_lustre_mdd_version_set,
 .moo_path = dot_lustre_mdd_path,
+ /* file_lock/file_unlock are wired to -ENOSYS stubs: locking is
+ * not supported on ".lustre" special objects. */
+ .moo_file_lock = dot_file_lock,
+ .moo_file_unlock = dot_file_unlock,
 };
return 0;
if (ma->ma_need & MA_LOV_DEF) {
- rc = mdd_get_default_md(mdd_obj, ma->ma_lmm,
- &ma->ma_lmm_size);
+ rc = mdd_get_default_md(mdd_obj, ma->ma_lmm);
if (rc > 0) {
+ ma->ma_lmm_size = rc;
ma->ma_valid |= MA_LOV;
rc = 0;
}
ENTRY;
LASSERT(mds->mds_lov_objids != NULL);
- rc = obd_set_info_async(mds->mds_osc_exp, strlen(KEY_NEXT_ID),
+ rc = obd_set_info_async(mds->mds_lov_exp, strlen(KEY_NEXT_ID),
KEY_NEXT_ID, mds->mds_lov_desc.ld_tgt_count,
mds->mds_lov_objids, NULL);
}
#endif
/* Call that with obd_recovering = 1 just to update objids */
- obd_notify(obd->u.mds.mds_osc_obd, NULL, (obd->obd_async_recov ?
+ obd_notify(obd->u.mds.mds_lov_obd, NULL, (obd->obd_async_recov ?
OBD_NOTIFY_SYNC_NONBLOCK : OBD_NOTIFY_SYNC), NULL);
/* Drop obd_recovering to 0 and call o_postrecov to recover mds_lov */
+ cfs_spin_lock(&obd->obd_dev_lock);
obd->obd_recovering = 0;
+ cfs_spin_unlock(&obd->obd_dev_lock);
obd->obd_type->typ_dt_ops->o_postrecov(obd);
/* XXX: orphans handling. */
* No permission check is needed.
*/
static int mdd_statfs(const struct lu_env *env, struct md_device *m,
- struct kstatfs *sfs)
+ cfs_kstatfs_t *sfs)
{
struct mdd_device *mdd = lu2mdd_dev(&m->md_lu_dev);
int rc;
int rc;
ENTRY;
+ /* need barrier for mds_capa_keys access. */
+ cfs_down_write(&mds->mds_notify_lock);
mds->mds_capa_keys = keys;
+ cfs_up_write(&mds->mds_notify_lock);
+
rc = mdd_child_ops(mdd)->dt_init_capa_ctxt(env, mdd->mdd_child, mode,
timeout, alg, keys);
RETURN(rc);
{
struct mds_capa_info info = { .uuid = NULL, .capa = key };
struct mdd_device *mdd = lu2mdd_dev(&m->md_lu_dev);
- struct obd_export *lov_exp = mdd2obd_dev(mdd)->u.mds.mds_osc_exp;
+ struct obd_export *lov_exp = mdd2obd_dev(mdd)->u.mds.mds_lov_exp;
int rc;
ENTRY;
struct lu_device *next = &m->mdd_child->dd_lu_dev;
ENTRY;
- LASSERT(atomic_read(&lu->ld_ref) == 0);
+ LASSERT(cfs_atomic_read(&lu->ld_ref) == 0);
md_device_fini(&m->mdd_md_dev);
OBD_FREE_PTR(m);
RETURN(next);
rec->cur_hdr.lrh_len = sizeof(*rec);
rec->cur_hdr.lrh_type = CHANGELOG_USER_REC;
- spin_lock(&mdd->mdd_cl.mc_user_lock);
+ cfs_spin_lock(&mdd->mdd_cl.mc_user_lock);
if (mdd->mdd_cl.mc_lastuser == (unsigned int)(-1)) {
- spin_unlock(&mdd->mdd_cl.mc_user_lock);
+ cfs_spin_unlock(&mdd->mdd_cl.mc_user_lock);
CERROR("Maximum number of changelog users exceeded!\n");
GOTO(out, rc = -EOVERFLOW);
}
*id = rec->cur_id = ++mdd->mdd_cl.mc_lastuser;
rec->cur_endrec = mdd->mdd_cl.mc_index;
- spin_unlock(&mdd->mdd_cl.mc_user_lock);
+ cfs_spin_unlock(&mdd->mdd_cl.mc_user_lock);
rc = llog_add(ctxt, &rec->cur_hdr, NULL, NULL, 0);
int rc;
ENTRY;
- CDEBUG(D_IOCTL, "Purge request: id=%d, endrec="LPD64"\n", id, endrec);
+ CDEBUG(D_IOCTL, "Purge request: id=%d, endrec=%lld\n", id, endrec);
data.mcud_id = id;
data.mcud_minid = 0;
data.mcud_minrec = 0;
data.mcud_usercount = 0;
data.mcud_endrec = endrec;
- spin_lock(&mdd->mdd_cl.mc_lock);
+ cfs_spin_lock(&mdd->mdd_cl.mc_lock);
endrec = mdd->mdd_cl.mc_index;
- spin_unlock(&mdd->mdd_cl.mc_lock);
+ cfs_spin_unlock(&mdd->mdd_cl.mc_lock);
if ((data.mcud_endrec == 0) ||
((data.mcud_endrec > endrec) &&
(data.mcud_endrec != MCUD_UNREGISTER)))