X-Git-Url: https://git.whamcloud.com/?a=blobdiff_plain;f=lustre%2Fptlrpc%2Fnodemap_storage.c;h=4a3de3d773c4ca7f1016069b6c0ee8b23c54545a;hb=7648c1c905b0976fc789cfd9c6bac382389385ee;hp=33e6570869de214481faf30e83fa5040e44468ee;hpb=6e9b8f592bfc4f51141e2c6bff663c079fcd2c1b;p=fs%2Flustre-release.git

diff --git a/lustre/ptlrpc/nodemap_storage.c b/lustre/ptlrpc/nodemap_storage.c
index 33e6570..4a3de3d 100644
--- a/lustre/ptlrpc/nodemap_storage.c
+++ b/lustre/ptlrpc/nodemap_storage.c
@@ -75,6 +75,7 @@ enum nm_flag_shifts {
 	NM_FL_MAP_UID_ONLY = 0x8,
 	NM_FL_MAP_GID_ONLY = 0x10,
 	NM_FL_ENABLE_AUDIT = 0x20,
+	NM_FL_FORBID_ENCRYPT = 0x40,
 };
 
 static void nodemap_cluster_key_init(struct nodemap_key *nk, unsigned int nm_id)
@@ -87,9 +88,9 @@ static void nodemap_cluster_key_init(struct nodemap_key *nk, unsigned int nm_id)
 static void nodemap_cluster_rec_init(union nodemap_rec *nr,
 				     const struct lu_nodemap *nodemap)
 {
-	CLASSERT(sizeof(nr->ncr.ncr_name) == sizeof(nodemap->nm_name));
+	BUILD_BUG_ON(sizeof(nr->ncr.ncr_name) != sizeof(nodemap->nm_name));
 
-	strncpy(nr->ncr.ncr_name, nodemap->nm_name, sizeof(nodemap->nm_name));
+	strncpy(nr->ncr.ncr_name, nodemap->nm_name, sizeof(nr->ncr.ncr_name));
 	nr->ncr.ncr_squash_uid = cpu_to_le32(nodemap->nm_squash_uid);
 	nr->ncr.ncr_squash_gid = cpu_to_le32(nodemap->nm_squash_gid);
 	nr->ncr.ncr_flags = cpu_to_le32(
@@ -104,7 +105,9 @@ static void nodemap_cluster_rec_init(union nodemap_rec *nr,
 		(nodemap->nmf_map_gid_only ?
 			NM_FL_MAP_GID_ONLY : 0) |
 		(nodemap->nmf_enable_audit ?
-			NM_FL_ENABLE_AUDIT : 0));
+			NM_FL_ENABLE_AUDIT : 0) |
+		(nodemap->nmf_forbid_encryption ?
+			NM_FL_FORBID_ENCRYPT : 0));
 }
 
 static void nodemap_idmap_key_init(struct nodemap_key *nk, unsigned int nm_id,
@@ -249,7 +252,7 @@ static int nodemap_idx_insert(const struct lu_env *env,
 	struct dt_device *dev = lu2dt_dev(idx->do_lu.lo_dev);
 	int rc;
 
-	CLASSERT(sizeof(union nodemap_rec) == 32);
+	BUILD_BUG_ON(sizeof(union nodemap_rec) != 32);
 
 	th = dt_trans_create(env, dev);
@@ -671,7 +674,7 @@ static int nodemap_process_keyrec(struct nodemap_config *config,
 
 	ENTRY;
 
-	CLASSERT(sizeof(union nodemap_rec) == 32);
+	BUILD_BUG_ON(sizeof(union nodemap_rec) != 32);
 
 	nodemap_id = le32_to_cpu(key->nk_nodemap_id);
 	type = nodemap_get_key_type(key);
@@ -722,7 +725,6 @@ static int nodemap_process_keyrec(struct nodemap_config *config,
 		if (nodemap_id == LUSTRE_NODEMAP_DEFAULT_ID) {
 			nodemap = nodemap_create(rec->ncr.ncr_name, config, 1);
-			config->nmc_default_nodemap = nodemap;
 		} else {
 			nodemap = nodemap_create(rec->ncr.ncr_name, config, 0);
 		}
@@ -758,6 +760,8 @@ static int nodemap_process_keyrec(struct nodemap_config *config,
 					flags & NM_FL_MAP_GID_ONLY;
 		nodemap->nmf_enable_audit =
 					flags & NM_FL_ENABLE_AUDIT;
+		nodemap->nmf_forbid_encryption =
+					flags & NM_FL_FORBID_ENCRYPT;
 
 		/* The fileset should be saved otherwise it will be empty
 		 * every time in case of "NODEMAP_CLUSTER_IDX". */
@@ -937,10 +941,10 @@ out:
 
 	if (new_config->nmc_default_nodemap == NULL) {
 		/* new MGS won't have a default nm on disk, so create it here */
-		new_config->nmc_default_nodemap =
+		struct lu_nodemap *nodemap =
 			nodemap_create(DEFAULT_NODEMAP, new_config, 1);
-		if (IS_ERR(new_config->nmc_default_nodemap)) {
-			rc = PTR_ERR(new_config->nmc_default_nodemap);
+		if (IS_ERR(nodemap)) {
+			rc = PTR_ERR(nodemap);
 		} else {
 			rc = nodemap_idx_nodemap_add_update(
 					new_config->nmc_default_nodemap,
@@ -1015,8 +1019,8 @@ struct dt_object *nodemap_save_config_cache(const struct lu_env *env,
 		list_for_each_entry_safe(range, range_temp, &nodemap->nm_ranges,
 					 rn_list) {
 			lnet_nid_t nid[2] = {
-				range->rn_node.in_extent.start,
-				range->rn_node.in_extent.end
+				range->rn_start,
+				range->rn_end
 			};
 			nodemap_range_key_init(&nk, nodemap->nm_id,
 					       range->rn_id);
@@ -1460,12 +1464,12 @@ EXPORT_SYMBOL(nodemap_index_read);
 int nodemap_get_config_req(struct obd_device *mgs_obd,
 			   struct ptlrpc_request *req)
 {
+	const struct ptlrpc_bulk_frag_ops *frag_ops = &ptlrpc_bulk_kiov_pin_ops;
 	struct mgs_config_body *body;
 	struct mgs_config_res *res;
 	struct lu_rdpg rdpg;
 	struct idx_info nodemap_ii;
 	struct ptlrpc_bulk_desc *desc;
-	struct l_wait_info lwi;
 	struct tg_export_data *rqexp_ted = &req->rq_export->exp_target_data;
 	int i;
 	int page_count;
@@ -1476,7 +1480,7 @@ int nodemap_get_config_req(struct obd_device *mgs_obd,
 	if (!body)
 		RETURN(-EINVAL);
 
-	if (body->mcb_type != CONFIG_T_NODEMAP)
+	if (body->mcb_type != MGS_CFG_T_NODEMAP)
 		RETURN(-EINVAL);
 
 	rdpg.rp_count = (body->mcb_units << body->mcb_bits);
@@ -1489,7 +1493,7 @@ int nodemap_get_config_req(struct obd_device *mgs_obd,
 	       body->mcb_name, rdpg.rp_count);
 
 	/* allocate pages to store the containers */
-	OBD_ALLOC(rdpg.rp_pages, sizeof(*rdpg.rp_pages) * rdpg.rp_npages);
+	OBD_ALLOC_PTR_ARRAY(rdpg.rp_pages, rdpg.rp_npages);
 	if (rdpg.rp_pages == NULL)
 		RETURN(-ENOMEM);
 	for (i = 0; i < rdpg.rp_npages; i++) {
@@ -1521,20 +1525,18 @@ int nodemap_get_config_req(struct obd_device *mgs_obd,
 	page_count = (bytes + PAGE_SIZE - 1) >> PAGE_SHIFT;
 	LASSERT(page_count <= rdpg.rp_count);
 	desc = ptlrpc_prep_bulk_exp(req, page_count, 1,
-				    PTLRPC_BULK_PUT_SOURCE |
-				    PTLRPC_BULK_BUF_KIOV,
-				    MGS_BULK_PORTAL,
-				    &ptlrpc_bulk_kiov_pin_ops);
+				    PTLRPC_BULK_PUT_SOURCE,
+				    MGS_BULK_PORTAL, frag_ops);
 	if (desc == NULL)
 		GOTO(out, rc = -ENOMEM);
 
 	for (i = 0; i < page_count && bytes > 0; i++) {
-		ptlrpc_prep_bulk_page_pin(desc, rdpg.rp_pages[i], 0,
-					  min_t(int, bytes, PAGE_SIZE));
+		frag_ops->add_kiov_frag(desc, rdpg.rp_pages[i], 0,
+					min_t(int, bytes, PAGE_SIZE));
 		bytes -= PAGE_SIZE;
 	}
 
-	rc = target_bulk_io(req->rq_export, desc, &lwi);
+	rc = target_bulk_io(req->rq_export, desc);
 	ptlrpc_free_bulk(desc);
 
 out:
@@ -1542,8 +1544,7 @@ out:
 	if (rdpg.rp_pages != NULL) {
 		for (i = 0; i < rdpg.rp_npages; i++)
 			if (rdpg.rp_pages[i] != NULL)
 				__free_page(rdpg.rp_pages[i]);
-		OBD_FREE(rdpg.rp_pages,
-			 rdpg.rp_npages * sizeof(rdpg.rp_pages[0]));
+		OBD_FREE_PTR_ARRAY(rdpg.rp_pages, rdpg.rp_npages);
 	}
 	return rc;
 }