/*
* Copyright (C) 2015, Trustees of Indiana University
*
- * Copyright (c) 2014, Intel Corporation.
+ * Copyright (c) 2017, Intel Corporation.
*
* Author: Joshua Walgenbach <jjw@iu.edu>
* Author: Kit Westneat <cwestnea@iu.edu>
#include <linux/mutex.h>
#include <linux/string.h>
#include <linux/types.h>
-#include <lnet/types.h>
-#include <lustre/lustre_idl.h>
+#include <uapi/linux/lnet/lnet-types.h>
+#include <uapi/linux/lustre/lustre_idl.h>
#include <dt_object.h>
#include <lu_object.h>
#include <lustre_net.h>
/* MGS index is different than others, others are listeners to MGS idx */
static struct nm_config_file *nodemap_mgs_ncf;
-/* lu_nodemap flags */
-enum nm_flag_shifts {
- NM_FL_ALLOW_ROOT_ACCESS = 0x1,
- NM_FL_TRUST_CLIENT_IDS = 0x2,
- NM_FL_DENY_UNKNOWN = 0x4,
-};
-
static void nodemap_cluster_key_init(struct nodemap_key *nk, unsigned int nm_id)
{
nk->nk_nodemap_id = cpu_to_le32(nm_idx_set_type(nm_id,
/*
 * Serialize an in-memory lu_nodemap into the on-disk NODEMAP_CLUSTER record.
 * This hunk makes four changes:
 *  - CLASSERT() -> BUILD_BUG_ON(): note the condition is correctly inverted
 *    (CLASSERT asserts the condition is true; BUILD_BUG_ON fires when it is
 *    true, so it must assert the negation);
 *  - strncpy() is now bounded by the *destination* buffer size, which is the
 *    safe form should the two buffer sizes ever diverge (the BUILD_BUG_ON
 *    above keeps them equal today);
 *  - the new nm_squash_projid field is persisted alongside uid/gid;
 *  - the flag set is widened (UID/GID/PROJID mapping mode, audit,
 *    forbid-encryption) and a second flags field carries readonly-mount.
 */
static void nodemap_cluster_rec_init(union nodemap_rec *nr,
const struct lu_nodemap *nodemap)
{
- CLASSERT(sizeof(nr->ncr.ncr_name) == sizeof(nodemap->nm_name));
+ BUILD_BUG_ON(sizeof(nr->ncr.ncr_name) != sizeof(nodemap->nm_name));
- strncpy(nr->ncr.ncr_name, nodemap->nm_name, sizeof(nodemap->nm_name));
+ strncpy(nr->ncr.ncr_name, nodemap->nm_name, sizeof(nr->ncr.ncr_name));
nr->ncr.ncr_squash_uid = cpu_to_le32(nodemap->nm_squash_uid);
nr->ncr.ncr_squash_gid = cpu_to_le32(nodemap->nm_squash_gid);
- nr->ncr.ncr_flags = cpu_to_le32(
+ nr->ncr.ncr_squash_projid = cpu_to_le32(nodemap->nm_squash_projid);
/*
 * NOTE(review): cpu_to_le32() is dropped here — presumably ncr_flags shrank
 * to a single byte (the load path at the NODEMAP_CLUSTER_IDX case reads it
 * without le32_to_cpu and types it enum nm_flag_bits). Confirm the on-disk
 * record layout agrees, since a multi-byte field stored without byte
 * swapping would break big-endian interop.
 */
+ nr->ncr.ncr_flags =
(nodemap->nmf_trust_client_ids ?
NM_FL_TRUST_CLIENT_IDS : 0) |
(nodemap->nmf_allow_root_access ?
NM_FL_ALLOW_ROOT_ACCESS : 0) |
(nodemap->nmf_deny_unknown ?
- NM_FL_DENY_UNKNOWN : 0));
+ NM_FL_DENY_UNKNOWN : 0) |
/* nmf_map_mode is a bitmask; each of UID/GID/PROJID maps to its own flag */
+ (nodemap->nmf_map_mode & NODEMAP_MAP_UID ?
+ NM_FL_MAP_UID : 0) |
+ (nodemap->nmf_map_mode & NODEMAP_MAP_GID ?
+ NM_FL_MAP_GID : 0) |
+ (nodemap->nmf_map_mode & NODEMAP_MAP_PROJID ?
+ NM_FL_MAP_PROJID : 0) |
+ (nodemap->nmf_enable_audit ?
+ NM_FL_ENABLE_AUDIT : 0) |
+ (nodemap->nmf_forbid_encryption ?
+ NM_FL_FORBID_ENCRYPT : 0);
/* second flag field: first byte ran out of bits; readonly-mount lives here */
+ nr->ncr.ncr_flags2 =
+ (nodemap->nmf_readonly_mount ?
+ NM_FL2_READONLY_MOUNT : 0);
}
static void nodemap_idmap_key_init(struct nodemap_key *nk, unsigned int nm_id,
if (id_type == NODEMAP_UID)
idx_type = NODEMAP_UIDMAP_IDX;
- else
+ else if (id_type == NODEMAP_GID)
idx_type = NODEMAP_GIDMAP_IDX;
+ else if (id_type == NODEMAP_PROJID)
+ idx_type = NODEMAP_PROJIDMAP_IDX;
+ else
+ idx_type = NODEMAP_EMPTY_IDX;
nk->nk_nodemap_id = cpu_to_le32(nm_idx_set_type(nm_id, idx_type));
nk->nk_id_client = cpu_to_le32(id_client);
* rewrite the config
*/
if (rc < 0) {
- lu_object_put(env, &nm_obj->do_lu);
+ dt_object_put(env, nm_obj);
if (create_new == NCFC_CREATE_NEW)
GOTO(out_root, nm_obj = ERR_PTR(rc));
}
out_root:
- lu_object_put(env, &root_obj->do_lu);
+ dt_object_put(env, root_obj);
out:
return nm_obj;
}
const struct nodemap_key *nk,
const union nodemap_rec *nr)
{
- struct thandle *th;
- struct dt_device *dev = lu2dt_dev(idx->do_lu.lo_dev);
- int rc;
+ struct thandle *th;
+ struct dt_device *dev = lu2dt_dev(idx->do_lu.lo_dev);
+ int rc;
- CLASSERT(sizeof(union nodemap_rec) == 32);
+ BUILD_BUG_ON(sizeof(union nodemap_rec) != 32);
th = dt_trans_create(env, dev);
dt_write_lock(env, idx, 0);
rc = dt_insert(env, idx, (const struct dt_rec *)nr,
- (const struct dt_key *)nk, th, 1);
+ (const struct dt_key *)nk, th);
nodemap_inc_version(env, idx, th);
dt_write_unlock(env, idx);
GOTO(out_lock, rc);
rc = dt_insert(env, idx, (const struct dt_rec *)nr,
- (const struct dt_key *)nk, th, 1);
+ (const struct dt_key *)nk, th);
if (rc != 0)
GOTO(out_lock, rc);
rc = rc2;
}
+ root = nodemap->nm_client_to_fs_projidmap;
+ nm_rbtree_postorder_for_each_entry_safe(idmap, temp, &root,
+ id_client_to_fs) {
+ nodemap_idmap_key_init(&nk, nodemap->nm_id, NODEMAP_PROJID,
+ idmap->id_client);
+ rc2 = nodemap_idx_delete(&env, nodemap_mgs_ncf->ncf_obj,
+ &nk, NULL);
+ if (rc2 < 0)
+ rc = rc2;
+ }
+
list_for_each_entry_safe(range, range_temp, &nodemap->nm_ranges,
rn_list) {
nodemap_range_key_init(&nk, nodemap->nm_id, range->rn_id);
const union nodemap_rec *rec,
struct lu_nodemap **recent_nodemap)
{
- struct lu_nodemap *nodemap = NULL;
- enum nodemap_idx_type type;
- enum nodemap_id_type id_type;
- u8 flags;
- u32 nodemap_id;
- lnet_nid_t nid[2];
- u32 map[2];
- int rc;
+ struct lu_nodemap *nodemap = NULL;
+ enum nodemap_idx_type type;
+ enum nodemap_id_type id_type;
+ enum nm_flag_bits flags;
+ enum nm_flag2_bits flags2;
+ u32 nodemap_id;
+ lnet_nid_t nid[2];
+ u32 map[2];
+ int rc;
ENTRY;
- CLASSERT(sizeof(union nodemap_rec) == 32);
+ BUILD_BUG_ON(sizeof(union nodemap_rec) != 32);
nodemap_id = le32_to_cpu(key->nk_nodemap_id);
type = nodemap_get_key_type(key);
/* find the correct nodemap in the load list */
if (type == NODEMAP_RANGE_IDX || type == NODEMAP_UIDMAP_IDX ||
- type == NODEMAP_GIDMAP_IDX) {
+ type == NODEMAP_GIDMAP_IDX || type == NODEMAP_PROJIDMAP_IDX) {
struct lu_nodemap *tmp = NULL;
nodemap = *recent_nodemap;
" nodemap_id=%d. nodemap config file corrupt?\n",
nodemap_id);
break;
- case NODEMAP_CLUSTER_IDX:
+ case NODEMAP_CLUSTER_IDX: {
+ struct lu_nodemap *old_nm = NULL;
+
nodemap = cfs_hash_lookup(config->nmc_nodemap_hash,
rec->ncr.ncr_name);
if (nodemap == NULL) {
if (nodemap_id == LUSTRE_NODEMAP_DEFAULT_ID) {
nodemap = nodemap_create(rec->ncr.ncr_name,
config, 1);
- config->nmc_default_nodemap = nodemap;
} else {
nodemap = nodemap_create(rec->ncr.ncr_name,
config, 0);
le32_to_cpu(rec->ncr.ncr_squash_uid);
nodemap->nm_squash_gid =
le32_to_cpu(rec->ncr.ncr_squash_gid);
+ nodemap->nm_squash_projid =
+ le32_to_cpu(rec->ncr.ncr_squash_projid);
- flags = le32_to_cpu(rec->ncr.ncr_flags);
+ flags = rec->ncr.ncr_flags;
nodemap->nmf_allow_root_access =
flags & NM_FL_ALLOW_ROOT_ACCESS;
nodemap->nmf_trust_client_ids =
flags & NM_FL_TRUST_CLIENT_IDS;
nodemap->nmf_deny_unknown =
flags & NM_FL_DENY_UNKNOWN;
+ nodemap->nmf_map_mode = (flags & NM_FL_MAP_UID ?
+ NODEMAP_MAP_UID : 0) |
+ (flags & NM_FL_MAP_GID ?
+ NODEMAP_MAP_GID : 0) |
+ (flags & NM_FL_MAP_PROJID ?
+ NODEMAP_MAP_PROJID : 0);
+ if (nodemap->nmf_map_mode == NODEMAP_MAP_BOTH_LEGACY)
+ nodemap->nmf_map_mode = NODEMAP_MAP_BOTH;
+ nodemap->nmf_enable_audit =
+ flags & NM_FL_ENABLE_AUDIT;
+ nodemap->nmf_forbid_encryption =
+ flags & NM_FL_FORBID_ENCRYPT;
+ flags2 = rec->ncr.ncr_flags2;
+ nodemap->nmf_readonly_mount =
+ flags2 & NM_FL2_READONLY_MOUNT;
+
+ /* The fileset should be saved otherwise it will be empty
+ * every time in case of "NODEMAP_CLUSTER_IDX". */
+ mutex_lock(&active_config_lock);
+ old_nm = nodemap_lookup(rec->ncr.ncr_name);
+ if (!IS_ERR(old_nm) && old_nm->nm_fileset[0] != '\0')
+ strlcpy(nodemap->nm_fileset, old_nm->nm_fileset,
+ sizeof(nodemap->nm_fileset));
+ mutex_unlock(&active_config_lock);
+ if (!IS_ERR(old_nm))
+ nodemap_putref(old_nm);
if (*recent_nodemap == NULL) {
*recent_nodemap = nodemap;
}
nodemap_putref(nodemap);
break;
+ }
case NODEMAP_RANGE_IDX:
nid[0] = le64_to_cpu(rec->nrr.nrr_start_nid);
nid[1] = le64_to_cpu(rec->nrr.nrr_end_nid);
break;
case NODEMAP_UIDMAP_IDX:
case NODEMAP_GIDMAP_IDX:
+ case NODEMAP_PROJIDMAP_IDX:
map[0] = le32_to_cpu(key->nk_id_client);
map[1] = le32_to_cpu(rec->nir.nir_id_fs);
if (type == NODEMAP_UIDMAP_IDX)
id_type = NODEMAP_UID;
- else
+ else if (type == NODEMAP_GIDMAP_IDX)
id_type = NODEMAP_GID;
+ else if (type == NODEMAP_PROJIDMAP_IDX)
+ id_type = NODEMAP_PROJID;
+ else
+ GOTO(out, rc = -EINVAL);
rc = nodemap_add_idmap_helper(nodemap, id_type, map);
if (rc != 0)
if (new_config->nmc_default_nodemap == NULL) {
/* new MGS won't have a default nm on disk, so create it here */
- new_config->nmc_default_nodemap =
+ struct lu_nodemap *nodemap =
nodemap_create(DEFAULT_NODEMAP, new_config, 1);
- if (IS_ERR(new_config->nmc_default_nodemap)) {
- rc = PTR_ERR(new_config->nmc_default_nodemap);
+ if (IS_ERR(nodemap)) {
+ rc = PTR_ERR(nodemap);
} else {
rc = nodemap_idx_nodemap_add_update(
new_config->nmc_default_nodemap,
list_for_each_entry_safe(range, range_temp, &nodemap->nm_ranges,
rn_list) {
lnet_nid_t nid[2] = {
- range->rn_node.in_extent.start,
- range->rn_node.in_extent.end
+ range->rn_start,
+ range->rn_end
};
nodemap_range_key_init(&nk, nodemap->nm_id,
range->rn_id);
if (rc2 < 0)
rc = rc2;
}
+
+ root = nodemap->nm_client_to_fs_projidmap;
+ nm_rbtree_postorder_for_each_entry_safe(idmap, id_tmp, &root,
+ id_client_to_fs) {
+ nodemap_idmap_key_init(&nk, nodemap->nm_id,
+ NODEMAP_PROJID,
+ idmap->id_client);
+ nodemap_idmap_rec_init(&nr, idmap->id_fs);
+ rc2 = nodemap_idx_insert(env, o, &nk, &nr);
+ if (rc2 < 0)
+ rc = rc2;
+ }
}
nodemap_global_key_init(&nk);
nodemap_global_rec_init(&nr, active_config->nmc_nodemap_is_active);
mutex_unlock(&active_config_lock);
if (rc < 0) {
- lu_object_put(env, &o->do_lu);
+ dt_object_put(env, o);
o = ERR_PTR(rc);
}
struct dt_object *o;
/* put current config file so save conf can rewrite it */
- lu_object_put_nocache(&env, &ncf->ncf_obj->do_lu);
+ dt_object_put_nocache(&env, ncf->ncf_obj);
ncf->ncf_obj = NULL;
o = nodemap_save_config_cache(&env, dev, ncf->ncf_los);
nodemap_mgs_ncf = NULL;
if (ncf->ncf_obj)
- lu_object_put(env, &ncf->ncf_obj->do_lu);
+ dt_object_put(env, ncf->ncf_obj);
OBD_FREE_PTR(ncf);
mutex_unlock(&ncf_list_lock);
if (ncf->ncf_obj)
- lu_object_put(env, &ncf->ncf_obj->do_lu);
+ dt_object_put(env, ncf->ncf_obj);
OBD_FREE_PTR(ncf);
}
EXPORT_SYMBOL(nodemap_process_idx_pages);
-static int nodemap_page_build(const struct lu_env *env, union lu_page *lp,
- size_t nob, const struct dt_it_ops *iops,
+static int nodemap_page_build(const struct lu_env *env, struct dt_object *obj,
+ union lu_page *lp, size_t bytes,
+ const struct dt_it_ops *iops,
struct dt_it *it, __u32 attr, void *arg)
{
struct idx_info *ii = (struct idx_info *)arg;
int rc;
ENTRY;
- if (nob < LIP_HDR_SIZE)
+ if (bytes < LIP_HDR_SIZE)
return -EINVAL;
/* initialize the header of the new container */
memset(lip, 0, LIP_HDR_SIZE);
lip->lip_magic = LIP_MAGIC;
- nob -= LIP_HDR_SIZE;
+ bytes -= LIP_HDR_SIZE;
entry = lip->lip_entries;
do {
- char *tmp_entry = entry;
- struct dt_key *key;
- __u64 hash;
+ char *tmp_entry = entry;
+ struct dt_key *key;
+ __u64 hash;
enum nodemap_idx_type key_type;
/* fetch 64-bit hash value */
GOTO(out, rc = 0);
}
- if (nob < size) {
+ if (bytes < size) {
if (lip->lip_nr == 0)
GOTO(out, rc = -EINVAL);
GOTO(out, rc = 0);
ii->ii_hash_start = hash;
entry = tmp_entry + ii->ii_recsize;
- nob -= size;
+ bytes -= size;
}
}
int nodemap_get_config_req(struct obd_device *mgs_obd,
struct ptlrpc_request *req)
{
+ const struct ptlrpc_bulk_frag_ops *frag_ops = &ptlrpc_bulk_kiov_pin_ops;
struct mgs_config_body *body;
struct mgs_config_res *res;
struct lu_rdpg rdpg;
struct idx_info nodemap_ii;
struct ptlrpc_bulk_desc *desc;
- struct l_wait_info lwi;
struct tg_export_data *rqexp_ted = &req->rq_export->exp_target_data;
int i;
int page_count;
if (!body)
RETURN(-EINVAL);
- if (body->mcb_type != CONFIG_T_NODEMAP)
+ if (body->mcb_type != MGS_CFG_T_NODEMAP)
RETURN(-EINVAL);
rdpg.rp_count = (body->mcb_units << body->mcb_bits);
body->mcb_name, rdpg.rp_count);
/* allocate pages to store the containers */
- OBD_ALLOC(rdpg.rp_pages, sizeof(*rdpg.rp_pages) * rdpg.rp_npages);
+ OBD_ALLOC_PTR_ARRAY(rdpg.rp_pages, rdpg.rp_npages);
if (rdpg.rp_pages == NULL)
RETURN(-ENOMEM);
for (i = 0; i < rdpg.rp_npages; i++) {
page_count = (bytes + PAGE_SIZE - 1) >> PAGE_SHIFT;
LASSERT(page_count <= rdpg.rp_count);
desc = ptlrpc_prep_bulk_exp(req, page_count, 1,
- PTLRPC_BULK_PUT_SOURCE |
- PTLRPC_BULK_BUF_KIOV,
- MGS_BULK_PORTAL,
- &ptlrpc_bulk_kiov_pin_ops);
+ PTLRPC_BULK_PUT_SOURCE,
+ MGS_BULK_PORTAL, frag_ops);
if (desc == NULL)
GOTO(out, rc = -ENOMEM);
for (i = 0; i < page_count && bytes > 0; i++) {
- ptlrpc_prep_bulk_page_pin(desc, rdpg.rp_pages[i], 0,
- min_t(int, bytes, PAGE_SIZE));
+ frag_ops->add_kiov_frag(desc, rdpg.rp_pages[i], 0,
+ min_t(int, bytes, PAGE_SIZE));
bytes -= PAGE_SIZE;
}
- rc = target_bulk_io(req->rq_export, desc, &lwi);
+ rc = target_bulk_io(req->rq_export, desc);
ptlrpc_free_bulk(desc);
out:
for (i = 0; i < rdpg.rp_npages; i++)
if (rdpg.rp_pages[i] != NULL)
__free_page(rdpg.rp_pages[i]);
- OBD_FREE(rdpg.rp_pages,
- rdpg.rp_npages * sizeof(rdpg.rp_pages[0]));
+ OBD_FREE_PTR_ARRAY(rdpg.rp_pages, rdpg.rp_npages);
}
return rc;
}