/* NID range list */
struct list_head nm_ranges;
/* lock for idmap red/black trees */
- rwlock_t nm_idmap_lock;
+ struct rw_semaphore nm_idmap_lock;
/* UID map keyed by local UID */
struct rb_root nm_fs_to_client_uidmap;
/* UID map keyed by remote UID */
nm_member_reclassify_nodemap(nodemap);
up_read(&active_config->nmc_range_tree_lock);
- write_lock(&nodemap->nm_idmap_lock);
+ down_write(&nodemap->nm_idmap_lock);
idmap_delete_tree(nodemap);
- write_unlock(&nodemap->nm_idmap_lock);
+ up_write(&nodemap->nm_idmap_lock);
mutex_unlock(&active_config_lock);
/**
* add an idmap to the proper nodemap trees
*
- * \param name name of nodemap
+ * \param nodemap nodemap to add idmap to
* \param id_type NODEMAP_UID or NODEMAP_GID
* \param map array[2] __u32 containing the map values
* map[0] is client id
* map[1] is the filesystem id
*
- * \retval 0 on success
+ * \retval 0 on success
+ * \retval < 0 if error occurs
*/
int nodemap_add_idmap_helper(struct lu_nodemap *nodemap,
enum nodemap_id_type id_type,
const __u32 map[2])
{
struct lu_idmap *idmap;
+ struct lu_idmap *temp;
int rc = 0;
idmap = idmap_create(map[0], map[1]);
if (idmap == NULL)
GOTO(out, rc = -ENOMEM);
- write_lock(&nodemap->nm_idmap_lock);
- idmap_insert(id_type, idmap, nodemap);
- write_unlock(&nodemap->nm_idmap_lock);
+ down_write(&nodemap->nm_idmap_lock);
+ temp = idmap_insert(id_type, idmap, nodemap);
+ /* If the new id_client or id_fs is matched, the old idmap and its
+ * index should be deleted according to its id_client before the new
+ * idmap is added again.
+ */
+ if (IS_ERR(temp))
+ GOTO(out_insert, rc = PTR_ERR(temp));
+ if (temp) {
+ __u32 del_map[2];
+
+ del_map[0] = temp->id_client;
+ idmap_delete(id_type, temp, nodemap);
+ rc = nodemap_idx_idmap_del(nodemap, id_type, del_map);
+ /* In case there is any corrupted idmap */
+ if (!rc || unlikely(rc == -ENOENT)) {
+ temp = idmap_insert(id_type, idmap, nodemap);
+ if (IS_ERR(temp))
+ rc = PTR_ERR(temp);
+ else if (!temp)
+ rc = 0;
+ else
+ rc = -EPERM;
+ }
+ }
+out_insert:
+ if (rc)
+ OBD_FREE_PTR(idmap);
+ up_write(&nodemap->nm_idmap_lock);
nm_member_revoke_locks(nodemap);
out:
struct lu_nodemap *nodemap = NULL;
int rc;
+ ENTRY;
+
mutex_lock(&active_config_lock);
nodemap = nodemap_lookup(name);
if (IS_ERR(nodemap)) {
nodemap_putref(nodemap);
out:
- return rc;
+ RETURN(rc);
}
EXPORT_SYMBOL(nodemap_add_idmap);
struct lu_idmap *idmap = NULL;
int rc = 0;
+ ENTRY;
+
mutex_lock(&active_config_lock);
nodemap = nodemap_lookup(name);
if (IS_ERR(nodemap)) {
if (is_default_nodemap(nodemap))
GOTO(out_putref, rc = -EINVAL);
- write_lock(&nodemap->nm_idmap_lock);
+ down_write(&nodemap->nm_idmap_lock);
idmap = idmap_search(nodemap, NODEMAP_CLIENT_TO_FS, id_type,
map[0]);
if (idmap == NULL) {
idmap_delete(id_type, idmap, nodemap);
rc = nodemap_idx_idmap_del(nodemap, id_type, map);
}
- write_unlock(&nodemap->nm_idmap_lock);
+ up_write(&nodemap->nm_idmap_lock);
out_putref:
mutex_unlock(&active_config_lock);
nodemap_putref(nodemap);
out:
- return rc;
+ RETURN(rc);
}
EXPORT_SYMBOL(nodemap_del_idmap);
* is, return 0. Otherwise, return the squash uid or gid.
*
* if the nodemap is configured to trusted the ids from the client system, just
- * return the passwd id without mapping.
+ * return the passed id without mapping.
*
* if by this point, we haven't returned and the nodemap in question is the
* default nodemap, return the squash uid or gid.
if (is_default_nodemap(nodemap))
goto squash;
- read_lock(&nodemap->nm_idmap_lock);
+ down_read(&nodemap->nm_idmap_lock);
idmap = idmap_search(nodemap, tree_type, id_type, id);
if (idmap == NULL) {
- read_unlock(&nodemap->nm_idmap_lock);
+ up_read(&nodemap->nm_idmap_lock);
goto squash;
}
found_id = idmap->id_client;
else
found_id = idmap->id_fs;
- read_unlock(&nodemap->nm_idmap_lock);
+ up_read(&nodemap->nm_idmap_lock);
RETURN(found_id);
squash:
*
* Creates an lu_nodemap structure and assigns sane default
* member values. If this is the default nodemap, the defaults
- * are the most restictive in xterms of mapping behavior. Otherwise
+ * are the most restrictive in terms of mapping behavior. Otherwise
* the default flags should be inherited from the default nodemap.
* The adds nodemap to nodemap_hash.
*
INIT_LIST_HEAD(&nodemap->nm_member_list);
mutex_init(&nodemap->nm_member_list_lock);
- rwlock_init(&nodemap->nm_idmap_lock);
+ init_rwsem(&nodemap->nm_idmap_lock);
nodemap->nm_fs_to_client_uidmap = RB_ROOT;
nodemap->nm_client_to_fs_uidmap = RB_ROOT;
nodemap->nm_fs_to_client_gidmap = RB_ROOT;
/**
* Insert idmap into the proper trees
*
- * \param node_type 0 for UID
- * 1 for GID
- * \param idmap lu_idmap structure to insert
- * \param nodemap nodemap to associate with the map
- *
- * \retval 0 on success
+ * \param id_type NODEMAP_UID or NODEMAP_GID
+ * \param idmap lu_idmap structure to insert
+ * \param nodemap nodemap to associate with the map
*
- * if the mapping exists, this function will delete it and replace
- * it with the new idmap structure
+ * \retval NULL on success
+ * \retval ERR_PTR(-EEXIST) if this idmap already exists
+ * \retval struct lu_idmap if only id_client or id_fs of this idmap
+ * is matched, return the matched idmap.
+ *			The caller will delete this old idmap and
+ *			its index before inserting the new idmap again.
*/
-void idmap_insert(enum nodemap_id_type id_type, struct lu_idmap *idmap,
- struct lu_nodemap *nodemap)
+struct lu_idmap *idmap_insert(enum nodemap_id_type id_type,
+ struct lu_idmap *idmap,
+ struct lu_nodemap *nodemap)
{
- struct lu_idmap *cur = NULL;
+ struct lu_idmap *fwd_cur = NULL;
+ struct lu_idmap *bck_cur = NULL;
struct rb_node *fwd_parent = NULL;
struct rb_node *bck_parent = NULL;
struct rb_node **fwd_node;
struct rb_node **bck_node;
struct rb_root *fwd_root;
struct rb_root *bck_root;
- bool replace = false;
+ bool fwd_found = false;
+ bool bck_found = false;
+
+ ENTRY;
/* for purposes in idmap client to fs is forward
* mapping, fs to client is backward mapping
*/
while (*fwd_node) {
fwd_parent = *fwd_node;
- cur = rb_entry(*fwd_node, struct lu_idmap,
- id_client_to_fs);
+ fwd_cur = rb_entry(*fwd_node, struct lu_idmap,
+ id_client_to_fs);
- if (idmap->id_client < cur->id_client) {
+ if (idmap->id_client < fwd_cur->id_client) {
fwd_node = &((*fwd_node)->rb_left);
- } else if (idmap->id_client > cur->id_client) {
+ } else if (idmap->id_client > fwd_cur->id_client) {
fwd_node = &((*fwd_node)->rb_right);
} else {
- replace = true;
+ fwd_found = true;
break;
}
}
- if (!replace) {
- while (*bck_node) {
- bck_parent = *bck_node;
- cur = rb_entry(*bck_node, struct lu_idmap,
- id_fs_to_client);
+ while (*bck_node) {
+ bck_parent = *bck_node;
+ bck_cur = rb_entry(*bck_node, struct lu_idmap,
+ id_fs_to_client);
- if (idmap->id_fs < cur->id_fs) {
- bck_node = &((*bck_node)->rb_left);
- } else if (idmap->id_fs > cur->id_fs) {
- bck_node = &((*bck_node)->rb_right);
- } else {
- replace = true;
- break;
- }
+ if (idmap->id_fs < bck_cur->id_fs) {
+ bck_node = &((*bck_node)->rb_left);
+ } else if (idmap->id_fs > bck_cur->id_fs) {
+ bck_node = &((*bck_node)->rb_right);
+ } else {
+ bck_found = true;
+ break;
}
}
- if (!replace) {
+ /* Already exists */
+ if (fwd_found && bck_found)
+ RETURN(ERR_PTR(-EEXIST));
+
+ /* Insert a new idmap */
+ if (!fwd_found && !bck_found) {
+ CDEBUG(D_INFO, "Insert a new idmap %d:%d\n",
+ idmap->id_client, idmap->id_fs);
rb_link_node(&idmap->id_client_to_fs, fwd_parent, fwd_node);
rb_insert_color(&idmap->id_client_to_fs, fwd_root);
rb_link_node(&idmap->id_fs_to_client, bck_parent, bck_node);
rb_insert_color(&idmap->id_fs_to_client, bck_root);
- } else {
- rb_replace_node(&cur->id_client_to_fs,
- &idmap->id_client_to_fs,
- fwd_root);
- rb_replace_node(&cur->id_fs_to_client,
- &idmap->id_fs_to_client,
- bck_root);
-
- idmap_destroy(cur);
+ RETURN(NULL);
}
+
+ /* Only id_client or id_fs is matched */
+ RETURN(fwd_found ? fwd_cur : bck_cur);
}
/**
struct rb_root *root = NULL;
struct lu_idmap *idmap;
+ ENTRY;
+
if (id_type == NODEMAP_UID && tree_type == NODEMAP_FS_TO_CLIENT)
root = &nodemap->nm_fs_to_client_uidmap;
else if (id_type == NODEMAP_UID && tree_type == NODEMAP_CLIENT_TO_FS)
else if (id > idmap->id_fs)
node = node->rb_right;
else
- return idmap;
+ RETURN(idmap);
}
} else {
while (node) {
else if (id > idmap->id_client)
node = node->rb_right;
else
- return idmap;
+ RETURN(idmap);
}
}
- return NULL;
+ RETURN(NULL);
}
/*
lnet_nid_t *end_nid);
void range_init_tree(void);
struct lu_idmap *idmap_create(__u32 client_id, __u32 fs_id);
-void idmap_insert(enum nodemap_id_type id_type, struct lu_idmap *idmap,
- struct lu_nodemap *nodemap);
+struct lu_idmap *idmap_insert(enum nodemap_id_type id_type,
+ struct lu_idmap *idmap,
+ struct lu_nodemap *nodemap);
void idmap_delete(enum nodemap_id_type id_type, struct lu_idmap *idmap,
struct lu_nodemap *nodemap);
void idmap_delete_tree(struct lu_nodemap *nodemap);
}
seq_printf(m, "[\n");
- read_lock(&nodemap->nm_idmap_lock);
+ down_read(&nodemap->nm_idmap_lock);
for (node = rb_first(&nodemap->nm_client_to_fs_uidmap); node;
node = rb_next(node)) {
if (cont)
"fs_id: %u }", idmap->id_client,
idmap->id_fs);
}
- read_unlock(&nodemap->nm_idmap_lock);
+ up_read(&nodemap->nm_idmap_lock);
seq_printf(m, "\n");
seq_printf(m, "]\n");
CONFDIR=/etc/lustre
PERM_CONF=$CONFDIR/perm.conf
FAIL_ON_ERROR=false
-
HOSTNAME_CHECKSUM=$(hostname | sum | awk '{ print $1 }')
SUBNET_CHECKSUM=$(expr $HOSTNAME_CHECKSUM % 250 + 1)
-NODEMAP_COUNT=16
-NODEMAP_RANGE_COUNT=3
-NODEMAP_IPADDR_LIST="1 10 64 128 200 250"
-NODEMAP_MAX_ID=128
require_dsh_mds || exit 0
require_dsh_ost || exit 0
USER0=$(getent passwd | grep :$ID0:$ID0: | cut -d: -f1)
USER1=$(getent passwd | grep :$ID1:$ID1: | cut -d: -f1)
+NODEMAP_COUNT=16
+NODEMAP_RANGE_COUNT=3
+NODEMAP_IPADDR_LIST="1 10 64 128 200 250"
+NODEMAP_ID_COUNT=10
+NODEMAP_MAX_ID=$((ID0 + NODEMAP_ID_COUNT))
+
[ -z "$USER0" ] &&
skip "need to add user0 ($ID0:$ID0)" && exit 0
return 3
fi
- out=$(do_facet mgs $LCTL get_param nodemap.$csum.id)
+ out=$(do_facet mgs $LCTL get_param nodemap.$csum.id 2>/dev/null)
[[ $(echo $out | grep -c $csum) != 0 ]] && return 1
done
return 0
local cmd="$LCTL nodemap_add_idmap"
local rc=0
+ echo "Start to add idmaps ..."
for ((i = 0; i < NODEMAP_COUNT; i++)); do
local j
- for ((j = 500; j < NODEMAP_MAX_ID; j++)); do
+ for ((j = $ID0; j < NODEMAP_MAX_ID; j++)); do
local csum=${HOSTNAME_CHECKSUM}_${i}
local client_id=$j
local fs_id=$((j + 1))
return $rc
}
+update_idmaps() { #LU-10040
+ [ $(lustre_version_code mgs) -lt $(version_code 2.10.55) ] &&
+ skip "Need MGS >= 2.10.55" &&
+ return
+ local csum=${HOSTNAME_CHECKSUM}_0
+ local old_id_client=$ID0
+ local old_id_fs=$((ID0 + 1))
+ local new_id=$((ID0 + 100))
+ local tmp_id
+ local cmd
+ local run
+ local idtype
+ local rc=0
+
+ echo "Start to update idmaps ..."
+
+ #Inserting an existed idmap should return error
+ cmd="$LCTL nodemap_add_idmap --name $csum --idtype uid"
+ if do_facet mgs \
+ $cmd --idmap $old_id_client:$old_id_fs 2>/dev/null; then
+ error "insert idmap {$old_id_client:$old_id_fs} " \
+ "should return error"
+ rc=$((rc + 1))
+ return rc
+ fi
+
+ #Update id_fs and check it
+ if ! do_facet mgs $cmd --idmap $old_id_client:$new_id; then
+ error "$cmd --idmap $old_id_client:$new_id failed"
+ rc=$((rc + 1))
+ return $rc
+ fi
+ tmp_id=$(do_facet mgs $LCTL get_param -n nodemap.$csum.idmap |
+ awk '{ print $7 }' | sed -n '2p')
+ [ $tmp_id != $new_id ] && { error "new id_fs $tmp_id != $new_id"; \
+ rc=$((rc + 1)); return $rc; }
+
+ #Update id_client and check it
+ if ! do_facet mgs $cmd --idmap $new_id:$new_id; then
+ error "$cmd --idmap $new_id:$new_id failed"
+ rc=$((rc + 1))
+ return $rc
+ fi
+ tmp_id=$(do_facet mgs $LCTL get_param -n nodemap.$csum.idmap |
+ awk '{ print $5 }' | sed -n "$((NODEMAP_ID_COUNT + 1)) p")
+ tmp_id=$(echo ${tmp_id%,*}) #e.g. "501,"->"501"
+ [ $tmp_id != $new_id ] && { error "new id_client $tmp_id != $new_id"; \
+ rc=$((rc + 1)); return $rc; }
+
+ #Delete above updated idmap
+ cmd="$LCTL nodemap_del_idmap --name $csum --idtype uid"
+ if ! do_facet mgs $cmd --idmap $new_id:$new_id; then
+ error "$cmd --idmap $new_id:$new_id failed"
+ rc=$((rc + 1))
+ return $rc
+ fi
+
+ #restore the idmaps to make delete_idmaps work well
+ cmd="$LCTL nodemap_add_idmap --name $csum --idtype uid"
+ if ! do_facet mgs $cmd --idmap $old_id_client:$old_id_fs; then
+ error "$cmd --idmap $old_id_client:$old_id_fs failed"
+ rc=$((rc + 1))
+ return $rc
+ fi
+
+ return $rc
+}
+
delete_idmaps() {
local i
local cmd="$LCTL nodemap_del_idmap"
local rc=0
+ echo "Start to delete idmaps ..."
for ((i = 0; i < NODEMAP_COUNT; i++)); do
local j
- for ((j = 500; j < NODEMAP_MAX_ID; j++)); do
+ for ((j = $ID0; j < NODEMAP_MAX_ID; j++)); do
local csum=${HOSTNAME_CHECKSUM}_${i}
local client_id=$j
local fs_id=$((j + 1))
local cmd="$LCTL nodemap_test_id"
local rc=0
+ echo "Start to test idmaps ..."
## nodemap deactivated
if ! do_facet mgs $LCTL nodemap_activate 0; then
return 1
fi
- for ((id = 500; id < NODEMAP_MAX_ID; id++)); do
+ for ((id = $ID0; id < NODEMAP_MAX_ID; id++)); do
local j
for ((j = 0; j < NODEMAP_RANGE_COUNT; j++)); do
return 2
fi
- for ((id = 500; id < NODEMAP_MAX_ID; id++)); do
+ for ((id = $ID0; id < NODEMAP_MAX_ID; id++)); do
for ((j = 0; j < NODEMAP_RANGE_COUNT; j++)); do
nid="$SUBNET_CHECKSUM.0.${j}.100@tcp"
fs_id=$(do_facet mgs $cmd --nid $nid \
fi
done
- for ((id = 500; id < NODEMAP_MAX_ID; id++)); do
+ for ((id = $ID0; id < NODEMAP_MAX_ID; id++)); do
for ((j = 0; j < NODEMAP_RANGE_COUNT; j++)); do
nid="$SUBNET_CHECKSUM.0.${j}.100@tcp"
fs_id=$(do_facet mgs $cmd --nid $nid \
[[ $rc != 0 ]] && error "nodemap_test_id failed with $rc" && return 4
rc=0
+ update_idmaps
+ rc=$?
+ [[ $rc != 0 ]] && error "update_idmaps failed with $rc" && return 5
+
+ rc=0
delete_idmaps
rc=$?
- [[ $rc != 0 ]] && error "nodemap_del_idmap failed with $rc" && return 5
+ [[ $rc != 0 ]] && error "nodemap_del_idmap failed with $rc" && return 6
rc=0
delete_nodemaps
rc=$?
- [[ $rc != 0 ]] && error "nodemap_delete failed with $rc" && return 6
+ [[ $rc != 0 ]] && error "nodemap_delete failed with $rc" && return 7
return 0
}