/** Client index in last_rcvd file */
int ted_lr_idx;
- /** nodemap this export is a member of */
+ /**
+ * ted_nodemap_lock is used to ensure that the nodemap is not destroyed
+ * between the time that ted_nodemap is checked for NULL, and a
+ * reference is taken. Modifications to ted_nodemap require that the
+ * active_config_lock and the nodemap(s)'s nm_member_list_lock be
+ * taken, as well as ted_nodemap_lock, so the export can be properly
+ * added to or removed from the nodemap's member list. When an export
+ * is added to a nodemap, a reference on that nodemap must be taken.
+ * That reference can be put only after ted_nodemap no longer refers to
+ * it.
+ */
+ spinlock_t ted_nodemap_lock;
struct lu_nodemap *ted_nodemap;
struct list_head ted_nodemap_member;
struct dt_object *obj);
void nm_config_file_deregister(const struct lu_env *env,
struct nm_config_file *ncf);
+struct lu_nodemap *nodemap_get_from_exp(struct obd_export *exp);
+void nodemap_putref(struct lu_nodemap *nodemap);
#endif /* _LUSTRE_NODEMAP_H */
void mdt_pack_attr2body(struct mdt_thread_info *info, struct mdt_body *b,
const struct lu_attr *attr, const struct lu_fid *fid)
{
- struct md_attr *ma = &info->mti_attr;
- struct obd_export *exp = info->mti_exp;
- struct lu_nodemap *nodemap = exp->exp_target_data.ted_nodemap;
+ struct md_attr *ma = &info->mti_attr;
+ struct obd_export *exp = info->mti_exp;
+ struct lu_nodemap *nodemap = NULL;
LASSERT(ma->ma_valid & MA_INODE);
b->mbo_nlink = attr->la_nlink;
b->mbo_valid |= OBD_MD_FLNLINK;
}
+ if (attr->la_valid & (LA_UID|LA_GID)) {
+ nodemap = nodemap_get_from_exp(exp);
+ if (IS_ERR(nodemap))
+ goto out;
+ }
if (attr->la_valid & LA_UID) {
b->mbo_uid = nodemap_map_id(nodemap, NODEMAP_UID,
NODEMAP_FS_TO_CLIENT,
attr->la_gid);
b->mbo_valid |= OBD_MD_FLGID;
}
+
b->mbo_mode = attr->la_mode;
if (attr->la_valid & LA_MODE)
b->mbo_valid |= OBD_MD_FLMODE;
if (fid != NULL && (b->mbo_valid & OBD_MD_FLSIZE))
CDEBUG(D_VFSTRACE, DFID": returning size %llu\n",
PFID(fid), (unsigned long long)b->mbo_size);
+
+out:
+ if (!IS_ERR_OR_NULL(nodemap))
+ nodemap_putref(nodemap);
}
static inline int mdt_body_has_lov(const struct lu_attr *la,
struct mdt_body *repbody;
struct lu_buf *buffer = &info->mti_buf;
struct obd_export *exp = info->mti_exp;
- struct lu_nodemap *nodemap = exp->exp_target_data.ted_nodemap;
int rc;
int is_root;
ENTRY;
}
#ifdef CONFIG_FS_POSIX_ACL
else if ((exp_connect_flags(req->rq_export) & OBD_CONNECT_ACL) &&
- (reqbody->mbo_valid & OBD_MD_FLACL))
+ (reqbody->mbo_valid & OBD_MD_FLACL)) {
+ struct lu_nodemap *nodemap = nodemap_get_from_exp(exp);
+ if (IS_ERR(nodemap))
+ RETURN(PTR_ERR(nodemap));
+
rc = mdt_pack_acl2body(info, repbody, o, nodemap);
+ nodemap_putref(nodemap);
+ }
#endif
out:
int id, rc;
struct mdt_device *mdt = mdt_exp2dev(exp);
struct lu_device *qmt = mdt->mdt_qmt_dev;
- struct lu_nodemap *nodemap = exp->exp_target_data.ted_nodemap;
+ struct lu_nodemap *nodemap;
ENTRY;
oqctl = req_capsule_client_get(pill, &RMF_OBD_QUOTACTL);
if (rc)
RETURN(err_serious(rc));
+ nodemap = nodemap_get_from_exp(exp);
+ if (IS_ERR(nodemap))
+ RETURN(PTR_ERR(nodemap));
+
switch (oqctl->qc_cmd) {
/* master quotactl */
case Q_SETINFO:
case Q_SETQUOTA:
if (!nodemap_can_setquota(nodemap))
- RETURN(-EPERM);
+ GOTO(out_nodemap, rc = -EPERM);
case Q_GETINFO:
case Q_GETQUOTA:
if (qmt == NULL)
- RETURN(-EOPNOTSUPP);
+ GOTO(out_nodemap, rc = -EOPNOTSUPP);
/* slave quotactl */
case Q_GETOINFO:
case Q_GETOQUOTA:
break;
default:
CERROR("Unsupported quotactl command: %d\n", oqctl->qc_cmd);
- RETURN(-EFAULT);
+ GOTO(out_nodemap, rc = -EFAULT);
}
/* map uid/gid for remote client */
if (unlikely(oqctl->qc_cmd != Q_GETQUOTA &&
oqctl->qc_cmd != Q_GETINFO))
- RETURN(-EPERM);
+ GOTO(out_nodemap, rc = -EPERM);
if (oqctl->qc_type == USRQUOTA)
id = lustre_idmap_lookup_uid(NULL, idmap, 0,
id = lustre_idmap_lookup_gid(NULL, idmap, 0,
oqctl->qc_id);
else
- RETURN(-EINVAL);
+ GOTO(out_nodemap, rc = -EINVAL);
if (id == CFS_IDMAP_NOTFOUND) {
CDEBUG(D_QUOTA, "no mapping for id %u\n", oqctl->qc_id);
- RETURN(-EACCES);
+ GOTO(out_nodemap, rc = -EACCES);
}
}
repoqc = req_capsule_server_get(pill, &RMF_OBD_QUOTACTL);
if (repoqc == NULL)
- RETURN(err_serious(-EFAULT));
+ GOTO(out_nodemap, rc = err_serious(-EFAULT));
if (oqctl->qc_id != id)
swap(oqctl->qc_id, id);
default:
CERROR("Unsupported quotactl command: %d\n", oqctl->qc_cmd);
- RETURN(-EFAULT);
+ GOTO(out_nodemap, rc = -EFAULT);
}
if (oqctl->qc_id != id)
swap(oqctl->qc_id, id);
*repoqc = *oqctl;
- RETURN(rc);
+
+ EXIT;
+
+out_nodemap:
+ nodemap_putref(nodemap);
+
+ return rc;
}
/** clone llog ctxt from child (mdd)
}
static int old_init_ucred_common(struct mdt_thread_info *info,
- bool drop_fs_cap)
+ struct lu_nodemap *nodemap,
+ bool drop_fs_cap)
{
struct lu_ucred *uc = mdt_ucred(info);
struct mdt_device *mdt = info->mti_mdt;
struct md_identity *identity = NULL;
- struct lu_nodemap *nodemap =
- info->mti_exp->exp_target_data.ted_nodemap;
-
- if (nodemap == NULL) {
- CDEBUG(D_SEC, "%s: cli %s/%p nodemap not set.\n",
- mdt2obd_dev(mdt)->obd_name,
- info->mti_exp->exp_client_uuid.uuid, info->mti_exp);
- RETURN(-EACCES);
- }
if (!is_identity_get_disabled(mdt->mdt_identity_cache)) {
identity = mdt_identity_get(mdt->mdt_identity_cache,
}
uc->uc_identity = identity;
- if (uc->uc_o_uid == nodemap->nm_squash_uid) {
+ if (nodemap && uc->uc_o_uid == nodemap->nm_squash_uid) {
uc->uc_fsuid = nodemap->nm_squash_uid;
uc->uc_fsgid = nodemap->nm_squash_gid;
uc->uc_cap = 0;
static int old_init_ucred(struct mdt_thread_info *info,
struct mdt_body *body, bool drop_fs_cap)
{
- struct lu_ucred *uc = mdt_ucred(info);
- struct lu_nodemap *nodemap =
- info->mti_exp->exp_target_data.ted_nodemap;
- int rc;
+ struct lu_ucred *uc = mdt_ucred(info);
+ struct lu_nodemap *nodemap;
+ int rc;
ENTRY;
+ nodemap = nodemap_get_from_exp(info->mti_exp);
+ if (IS_ERR(nodemap))
+ RETURN(PTR_ERR(nodemap));
+
body->mbo_uid = nodemap_map_id(nodemap, NODEMAP_UID,
NODEMAP_CLIENT_TO_FS, body->mbo_uid);
body->mbo_gid = nodemap_map_id(nodemap, NODEMAP_GID,
uc->uc_ginfo = NULL;
uc->uc_cap = body->mbo_capability;
- rc = old_init_ucred_common(info, drop_fs_cap);
+ rc = old_init_ucred_common(info, nodemap, drop_fs_cap);
+ nodemap_putref(nodemap);
RETURN(rc);
}
static int old_init_ucred_reint(struct mdt_thread_info *info)
{
- struct lu_ucred *uc = mdt_ucred(info);
- struct lu_nodemap *nodemap =
- info->mti_exp->exp_target_data.ted_nodemap;
- int rc;
+ struct lu_ucred *uc = mdt_ucred(info);
+ struct lu_nodemap *nodemap;
+ int rc;
ENTRY;
+ nodemap = nodemap_get_from_exp(info->mti_exp);
+ if (IS_ERR(nodemap))
+ RETURN(PTR_ERR(nodemap));
+
LASSERT(uc != NULL);
uc->uc_fsuid = nodemap_map_id(nodemap, NODEMAP_UID,
uc->uc_o_gid = uc->uc_o_fsgid = uc->uc_gid = uc->uc_fsgid;
uc->uc_ginfo = NULL;
- rc = old_init_ucred_common(info, true); /* drop_fs_cap = true */
+ rc = old_init_ucred_common(info, nodemap, true); /* drop_fs_cap=true */
+ nodemap_putref(nodemap);
RETURN(rc);
}
struct req_capsule *pill = info->mti_pill;
struct mdt_reint_record *rr = &info->mti_rr;
struct mdt_rec_setattr *rec;
- struct lu_nodemap *nodemap =
- info->mti_exp->exp_target_data.ted_nodemap;
+ struct lu_nodemap *nodemap;
ENTRY;
CLASSERT(sizeof(struct mdt_rec_setattr)== sizeof(struct mdt_rec_reint));
la->la_valid = mdt_attr_valid_xlate(rec->sa_valid, rr, ma);
la->la_mode = rec->sa_mode;
la->la_flags = rec->sa_attr_flags;
+
+ nodemap = nodemap_get_from_exp(info->mti_exp);
+ if (IS_ERR(nodemap))
+ RETURN(PTR_ERR(nodemap));
+
la->la_uid = nodemap_map_id(nodemap, NODEMAP_UID,
NODEMAP_CLIENT_TO_FS, rec->sa_uid);
la->la_gid = nodemap_map_id(nodemap, NODEMAP_GID,
NODEMAP_CLIENT_TO_FS, rec->sa_gid);
+ nodemap_putref(nodemap);
+
la->la_size = rec->sa_size;
la->la_blocks = rec->sa_blocks;
la->la_ctime = rec->sa_ctime;
}
}
#ifdef CONFIG_FS_POSIX_ACL
- else if (exp_connect_flags(exp) & OBD_CONNECT_ACL)
- rc = mdt_pack_acl2body(info, repbody, o,
- exp->exp_target_data.ted_nodemap);
+ else if (exp_connect_flags(exp) & OBD_CONNECT_ACL) {
+ struct lu_nodemap *nodemap = nodemap_get_from_exp(exp);
+ if (IS_ERR(nodemap))
+ RETURN(PTR_ERR(nodemap));
+
+ rc = mdt_pack_acl2body(info, repbody, o, nodemap);
+ nodemap_putref(nodemap);
+ if (rc)
+ RETURN(rc);
+ }
#endif
/*
struct mdt_object *obj;
struct md_object *child;
struct obd_export *exp = info->mti_exp;
- struct lu_nodemap *nodemap = exp->exp_target_data.ted_nodemap;
__u64 valid = attr->la_valid;
const char *xattr_name = rr->rr_name.ln_name;
int xattr_len = rr->rr_eadatalen;
} else if ((valid & OBD_MD_FLXATTR) &&
(strcmp(xattr_name, XATTR_NAME_ACL_ACCESS) == 0 ||
strcmp(xattr_name, XATTR_NAME_ACL_DEFAULT) == 0)) {
+ struct lu_nodemap *nodemap;
/* currently lustre limit acl access size */
if (xattr_len > LUSTRE_POSIX_ACL_MAX_SIZE)
GOTO(out, rc = -ERANGE);
+ nodemap = nodemap_get_from_exp(exp);
+ if (IS_ERR(nodemap))
+ GOTO(out, rc = PTR_ERR(nodemap));
+
rc = nodemap_map_acl(nodemap, rr->rr_eadata, xattr_len,
NODEMAP_CLIENT_TO_FS);
+ nodemap_putref(nodemap);
if (rc < 0)
GOTO(out, rc);
*/
static int ofd_quotactl(struct tgt_session_info *tsi)
{
- struct obd_quotactl *oqctl, *repoqc;
- struct lu_nodemap *nodemap =
- tsi->tsi_exp->exp_target_data.ted_nodemap;
- int id;
- int rc;
+ struct obd_quotactl *oqctl, *repoqc;
+ struct lu_nodemap *nodemap;
+ int id;
+ int rc;
ENTRY;
*repoqc = *oqctl;
+ nodemap = nodemap_get_from_exp(tsi->tsi_exp);
+ if (IS_ERR(nodemap))
+ RETURN(PTR_ERR(nodemap));
+
id = repoqc->qc_id;
if (oqctl->qc_type == USRQUOTA)
id = nodemap_map_id(nodemap, NODEMAP_UID,
NODEMAP_CLIENT_TO_FS,
repoqc->qc_id);
+ nodemap_putref(nodemap);
+
if (repoqc->qc_id != id)
swap(repoqc->qc_id, id);
/**
* Functions used for the cfs_hash
*/
-static void nodemap_getref(struct lu_nodemap *nodemap)
+/* Take a reference on a nodemap. No longer static: member-list code
+ * (nm_member_add, nodemap_del_member) and nodemap_get_from_exp() now
+ * pin nodemaps with this; declared in nodemap_internal.h.
+ */
+void nodemap_getref(struct lu_nodemap *nodemap)
{
atomic_inc(&nodemap->nm_refcount);
}
*/
void nodemap_putref(struct lu_nodemap *nodemap)
{
- LASSERT(nodemap != NULL);
+ /* tolerate NULL so cleanup paths can call this unconditionally
+ * (nodemap_get_from_exp() may legitimately return NULL)
+ */
+ if (!nodemap)
+ return;
+
LASSERT(atomic_read(&nodemap->nm_refcount) > 0);
if (atomic_dec_and_test(&nodemap->nm_refcount))
nodemap_destroy(nodemap);
}
+EXPORT_SYMBOL(nodemap_putref);
static __u32 nodemap_hashfn(struct cfs_hash *hash_body,
const void *key, unsigned mask)
*/
int nodemap_add_member(lnet_nid_t nid, struct obd_export *exp)
{
- struct lu_nodemap *nodemap;
+ struct lu_nodemap *nodemap;
int rc;
mutex_lock(&active_config_lock);
*/
void nodemap_del_member(struct obd_export *exp)
{
- struct lu_nodemap *nodemap = exp->exp_target_data.ted_nodemap;
+ struct lu_nodemap *nodemap;
+
+ ENTRY;
+
+ /* using ac lock to prevent nodemap reclassification while deleting */
+ mutex_lock(&active_config_lock);
+
+ /* Use of ted_nodemap is protected by active_config_lock. Take an
+ * extra reference so the nodemap cannot be destroyed while this
+ * function still holds active_config_lock.
+ */
+ nodemap = exp->exp_target_data.ted_nodemap;
+ if (nodemap == NULL)
+ goto out;
+ else
+ nodemap_getref(nodemap);
+
+ mutex_lock(&nodemap->nm_member_list_lock);
+ nm_member_del(nodemap, exp);
+ mutex_unlock(&nodemap->nm_member_list_lock);
+
+out:
+ mutex_unlock(&active_config_lock);
- if (nodemap != NULL)
- nm_member_del(nodemap, exp);
+ /* drop the extra reference taken above */
+ if (nodemap)
+ nodemap_putref(nodemap);
+
+ EXIT;
}
EXPORT_SYMBOL(nodemap_del_member);
EXPORT_SYMBOL(nodemap_del_idmap);
/**
+ * Get nodemap assigned to given export. Takes a reference on the nodemap.
+ *
+ * Note that this function may return either NULL, or an ERR_PTR()
+ * or a valid nodemap pointer. All of the functions accessing the
+ * returned nodemap can check IS_ERR(nodemap) to see if an error is
+ * returned. NULL is not considered an error, which is OK since this
+ * is a valid case when nodemaps are not in use. All nodemap handling
+ * functions must check for nodemap == NULL and do nothing, and the
+ * nodemap returned from this function should not be dereferenced.
+ *
+ * \param export export to get nodemap for
+ *
+ * \retval pointer to nodemap on success
+ * \retval NULL nodemap subsystem disabled
+ * \retval -EACCES export does not have nodemap assigned
+ */
+struct lu_nodemap *nodemap_get_from_exp(struct obd_export *exp)
+{
+ struct lu_nodemap *nodemap;
+
+ ENTRY;
+
+ /* NULL here means "nodemap subsystem disabled", not an error */
+ if (!nodemap_active)
+ RETURN(NULL);
+
+ /* ted_nodemap_lock keeps the nodemap from being destroyed between
+ * the NULL check and taking the reference (see the comment at the
+ * ted_nodemap_lock declaration)
+ */
+ spin_lock(&exp->exp_target_data.ted_nodemap_lock);
+ nodemap = exp->exp_target_data.ted_nodemap;
+ if (nodemap)
+ nodemap_getref(nodemap);
+ spin_unlock(&exp->exp_target_data.ted_nodemap_lock);
+
+ if (!nodemap) {
+ CDEBUG(D_INFO, "%s: nodemap null on export %s (at %s)\n",
+ exp->exp_obd->obd_name,
+ obd_uuid2str(&exp->exp_client_uuid),
+ obd_export_nid2str(exp));
+ RETURN(ERR_PTR(-EACCES));
+ }
+
+ RETURN(nodemap);
+}
+EXPORT_SYMBOL(nodemap_get_from_exp);
+
+/**
* mapping function for nodemap idmaps
*
* \param nodemap lu_nodemap structure defining nodemap
struct lu_idmap *idmap = NULL;
__u32 found_id;
+ ENTRY;
+
if (!nodemap_active)
goto out;
else
found_id = idmap->id_fs;
read_unlock(&nodemap->nm_idmap_lock);
- return found_id;
+ RETURN(found_id);
squash:
if (id_type == NODEMAP_UID)
- return nodemap->nm_squash_uid;
+ RETURN(nodemap->nm_squash_uid);
else
- return nodemap->nm_squash_gid;
+ RETURN(nodemap->nm_squash_gid);
out:
- return id;
+ RETURN(id);
}
EXPORT_SYMBOL(nodemap_map_id);
*/
bool nodemap_can_setquota(const struct lu_nodemap *nodemap)
{
- return !nodemap_active || nodemap->nmf_allow_root_access;
+ /* nodemap may now legitimately be NULL (nodemap_get_from_exp()
+ * returns NULL when the subsystem is inactive), so check before
+ * dereferencing
+ */
+ return !nodemap_active || (nodemap && nodemap->nmf_allow_root_access);
}
EXPORT_SYMBOL(nodemap_can_setquota);
*/
lprocfs_nodemap_remove(nodemap->nm_pde_data);
nodemap->nm_pde_data = NULL;
+
+ /* reclassify all member exports of this nodemap so they drop their refs */
+ down_read(&active_config->nmc_range_tree_lock);
+ nm_member_reclassify_nodemap(nodemap);
+ up_read(&active_config->nmc_range_tree_lock);
+
+ if (!list_empty(&nodemap->nm_member_list))
+ CWARN("nodemap_del failed to reclassify all members\n");
+
mutex_unlock(&active_config_lock);
nodemap_putref(nodemap);
#define DEFAULT_NODEMAP "default"
+/* Turn on proc debug interface to allow OSS and
+ * MDS nodes to configure nodemap independently of
+ * MGS (since the nodemap distribution is not written
+ * yet */
+#define NODEMAP_PROC_DEBUG 1
+
/* Default nobody uid and gid values */
#define NODEMAP_NOBODY_UID 99
struct rb_node *nm_rb_next_postorder(const struct rb_node *node);
struct rb_node *nm_rb_first_postorder(const struct rb_root *root);
+void nodemap_getref(struct lu_nodemap *nodemap);
void nodemap_putref(struct lu_nodemap *nodemap);
#define nm_rbtree_postorder_for_each_entry_safe(pos, n, \
#include <interval_tree.h>
#include "nodemap_internal.h"
-/* Turn on proc debug interface to allow OSS and
- * MDS nodes to configure nodemap independently of
- * MGS (since the nodemap distribution is not written
- * yet */
-#define NODEMAP_PROC_DEBUG 1
-
static LIST_HEAD(nodemap_pde_list);
/**
#define HASH_NODEMAP_MEMBER_CUR_BITS 3
#define HASH_NODEMAP_MEMBER_MAX_BITS 7
+
/**
- * Delete a member from a member list
+ * Delete an export from a nodemap's member list. Called after client
+ * disconnects, or during system shutdown.
+ *
+ * Requires active_config_lock and nodemap's nm_member_list_lock.
*
* \param nodemap nodemap containing list
* \param exp export member to delete
*/
void nm_member_del(struct lu_nodemap *nodemap, struct obd_export *exp)
{
- mutex_lock(&nodemap->nm_member_list_lock);
+ ENTRY;
+
+ /* all changes to ted_nodemap are made while holding
+ * active_config_lock, so the export's nodemap cannot have changed
+ */
+ LASSERT(exp->exp_target_data.ted_nodemap == nodemap);
+
+ /* list membership is protected by the caller-held
+ * nm_member_list_lock (locking moved out to the callers)
+ */
list_del_init(&exp->exp_target_data.ted_nodemap_member);
- mutex_unlock(&nodemap->nm_member_list_lock);
+ spin_lock(&exp->exp_target_data.ted_nodemap_lock);
exp->exp_target_data.ted_nodemap = NULL;
+ spin_unlock(&exp->exp_target_data.ted_nodemap_lock);
+
+ /* drop the ref formerly held by ted_nodemap */
+ nodemap_putref(nodemap);
+
+ /* drop the ref formerly held by ted_nodemap_member */
class_export_put(exp);
+
+ EXIT;
}
/**
* Delete a member list from a nodemap
*
+ * Requires active config lock.
+ *
* \param nodemap nodemap to remove the list from
*/
void nm_member_delete_list(struct lu_nodemap *nodemap)
mutex_lock(&nodemap->nm_member_list_lock);
list_for_each_entry_safe(exp, tmp, &nodemap->nm_member_list,
- exp_target_data.ted_nodemap_member) {
- exp->exp_target_data.ted_nodemap = NULL;
- list_del_init(&exp->exp_target_data.ted_nodemap_member);
- class_export_put(exp);
- }
+ exp_target_data.ted_nodemap_member)
+ nm_member_del(nodemap, exp);
mutex_unlock(&nodemap->nm_member_list_lock);
}
/**
* Add a member export to a nodemap
*
+ * Must be called under active_config_lock.
+ *
* \param nodemap nodemap to add to
* \param exp obd_export to add
* \retval -EEXIST export is already part of a different nodemap
*/
int nm_member_add(struct lu_nodemap *nodemap, struct obd_export *exp)
{
+ ENTRY;
+
if (exp == NULL) {
CWARN("attempted to add null export to nodemap %s\n",
nodemap->nm_name);
- return -EINVAL;
+ RETURN(-EINVAL);
}
+ mutex_lock(&nodemap->nm_member_list_lock);
if (exp->exp_target_data.ted_nodemap != NULL &&
!list_empty(&exp->exp_target_data.ted_nodemap_member)) {
+ mutex_unlock(&nodemap->nm_member_list_lock);
+
/* export is already member of nodemap */
if (exp->exp_target_data.ted_nodemap == nodemap)
- return 0;
+ RETURN(0);
/* possibly reconnecting while about to be reclassified */
CWARN("export %p %s already hashed, failed to add to "
nodemap->nm_name,
(exp->exp_target_data.ted_nodemap == NULL) ? "unknown" :
exp->exp_target_data.ted_nodemap->nm_name);
- return -EEXIST;
+ RETURN(-EEXIST);
}
class_export_get(exp);
+ nodemap_getref(nodemap);
+ /* ted_nodemap changes also require ac lock, member_list_lock */
+ spin_lock(&exp->exp_target_data.ted_nodemap_lock);
exp->exp_target_data.ted_nodemap = nodemap;
- mutex_lock(&nodemap->nm_member_list_lock);
+ spin_unlock(&exp->exp_target_data.ted_nodemap_lock);
list_add(&exp->exp_target_data.ted_nodemap_member,
&nodemap->nm_member_list);
mutex_unlock(&nodemap->nm_member_list_lock);
- return 0;
+ RETURN(0);
}
/**
struct obd_export *tmp;
struct lu_nodemap *new_nodemap;
+ ENTRY;
+
mutex_lock(&nodemap->nm_member_list_lock);
+
list_for_each_entry_safe(exp, tmp, &nodemap->nm_member_list,
exp_target_data.ted_nodemap_member) {
- struct ptlrpc_connection *conn = exp->exp_connection;
+ lnet_nid_t nid;
/* if no conn assigned to this exp, reconnect will reclassify */
- if (conn)
- /* nodemap_classify_nid requires nmc_range_tree_lock */
- new_nodemap = nodemap_classify_nid(conn->c_peer.nid);
- else
+ spin_lock(&exp->exp_lock);
+ if (exp->exp_connection) {
+ nid = exp->exp_connection->c_peer.nid;
+ } else {
+ spin_unlock(&exp->exp_lock);
continue;
+ }
+ spin_unlock(&exp->exp_lock);
+
+ /* nodemap_classify_nid requires nmc_range_tree_lock */
+ new_nodemap = nodemap_classify_nid(nid);
if (new_nodemap != nodemap) {
+ /* could deadlock if new_nodemap also reclassifying,
+ * active_config_lock serializes reclassifies
+ */
+ mutex_lock(&new_nodemap->nm_member_list_lock);
+
/* don't use member_del because ted_nodemap
- * should never be null
+ * should never be NULL with a live export
*/
list_del_init(&exp->exp_target_data.ted_nodemap_member);
+
+ /* keep the new_nodemap ref from classify */
+ spin_lock(&exp->exp_target_data.ted_nodemap_lock);
exp->exp_target_data.ted_nodemap = new_nodemap;
+ spin_unlock(&exp->exp_target_data.ted_nodemap_lock);
+ nodemap_putref(nodemap);
- /* could deadlock if new_nodemap also reclassifying */
- mutex_lock(&new_nodemap->nm_member_list_lock);
list_add(&exp->exp_target_data.ted_nodemap_member,
&new_nodemap->nm_member_list);
mutex_unlock(&new_nodemap->nm_member_list_lock);
nm_member_exp_revoke(exp);
+ } else {
+ nodemap_putref(new_nodemap);
}
-
- /* This put won't destroy new_nodemap because any nodemap_del
- * call done on new_nodemap blocks on our active_config_lock
- */
- nodemap_putref(new_nodemap);
}
mutex_unlock(&nodemap->nm_member_list_lock);
+
+ EXIT;
}
/**
/* acquires active config lock */
new_config = nodemap_config_alloc();
-
if (IS_ERR(new_config)) {
rc = PTR_ERR(new_config);
new_config = NULL;
if (rc)
RETURN(rc);
- nodemap = tsi->tsi_exp->exp_target_data.ted_nodemap;
+ nodemap = nodemap_get_from_exp(tsi->tsi_exp);
+ if (IS_ERR(nodemap))
+ RETURN(PTR_ERR(nodemap));
body->oa.o_uid = nodemap_map_id(nodemap, NODEMAP_UID,
NODEMAP_CLIENT_TO_FS,
body->oa.o_gid = nodemap_map_id(nodemap, NODEMAP_GID,
NODEMAP_CLIENT_TO_FS,
body->oa.o_gid);
+ nodemap_putref(nodemap);
tsi->tsi_ost_body = body;
tsi->tsi_fid = body->oa.o_oi.oi_fid;
ENTRY;
LASSERT(exp != exp->exp_obd->obd_self_export);
+ spin_lock_init(&exp->exp_target_data.ted_nodemap_lock);
+ INIT_LIST_HEAD(&exp->exp_target_data.ted_nodemap_member);
+
OBD_ALLOC_PTR(exp->exp_target_data.ted_lcd);
if (exp->exp_target_data.ted_lcd == NULL)
RETURN(-ENOMEM);
local qused_high=$((qused_orig + quota_fuzz))
local qused_low=$((qused_orig - quota_fuzz))
local testfile=$DIR/$tdir/$tfile
- chmod 777 $DIR/$tdir
$run_u dd if=/dev/zero of=$testfile bs=1M count=1 >& /dev/null
sync; sync_all_data || true
do_create_delete "$run_u" "$key"
done
+ # set test dir to 777 for quota test
+ do_facet mgs $LCTL nodemap_modify \
+ --name c$cli_i \
+ --property admin \
+ --value 1
+ do_servers_not_mgs $LCTL set_param \
+ nodemap.c$cli_i.admin_nodemap=1
+ do_node $client chmod 777 $DIR/$tdir ||
+ error unable to chmod 777 $DIR/$tdir
+ do_facet mgs $LCTL nodemap_modify \
+ --name c$cli_i \
+ --property admin \
+ --value $admin
+ do_servers_not_mgs $LCTL set_param \
+ nodemap.c$cli_i.admin_nodemap=$admin
+
# check quota
do_fops_quota_test "$run_u"
done