From: James Simmons
Date: Wed, 21 Sep 2016 13:58:24 +0000 (-0400)
Subject: LU-6401 header: remove assert from interval_set()
X-Git-Tag: 2.8.59~14
X-Git-Url: https://git.whamcloud.com/?a=commitdiff_plain;h=5207c488239e30e9aee51531ec3a363c649211de;p=fs%2Flustre-release.git

LU-6401 header: remove assert from interval_set()

Currently interval_tree.h is used by both user land and kernel space.
To unwind libcfs.h from user space we need to remove LASSERT from the
uapi headers. In the case of interval_tree.h only interval_set() uses
LASSERT; this patch removes the assert and makes interval_set() report
a real error instead.

Change-Id: Iac716f61542eb6e1a6e060e2810e6b7a175b25d9
Signed-off-by: James Simmons
Reviewed-on: http://review.whamcloud.com/22522
Tested-by: Jenkins
Reviewed-by: Frank Zago
Tested-by: Maloo
Reviewed-by: Dmitry Eremin
Reviewed-by: John L. Hammond
Reviewed-by: Oleg Drokin
---

diff --git a/lustre/include/interval_tree.h b/lustre/include/interval_tree.h
index 82ccea1..530f4f1 100644
--- a/lustre/include/interval_tree.h
+++ b/lustre/include/interval_tree.h
@@ -77,13 +77,15 @@ static inline __u64 interval_high(struct interval_node *node)
 	return node->in_extent.end;
 }
 
-static inline void interval_set(struct interval_node *node,
-				__u64 start, __u64 end)
+static inline int interval_set(struct interval_node *node,
+			       __u64 start, __u64 end)
 {
-	LASSERT(start <= end);
-	node->in_extent.start = start;
-	node->in_extent.end = end;
-	node->in_max_high = end;
+	if (start > end)
+		return -ERANGE;
+	node->in_extent.start = start;
+	node->in_extent.end = end;
+	node->in_max_high = end;
+	return 0;
 }
 
 static inline void interval_init(struct interval_node *node)

diff --git a/lustre/ldlm/ldlm_extent.c b/lustre/ldlm/ldlm_extent.c
index b38b14d..4c22137 100644
--- a/lustre/ldlm/ldlm_extent.c
+++ b/lustre/ldlm/ldlm_extent.c
@@ -1006,7 +1006,7 @@ void ldlm_extent_add_lock(struct ldlm_resource *res,
 	struct interval_node *found, **root;
 	struct ldlm_interval *node;
 	struct ldlm_extent *extent;
-	int idx;
+	int idx, rc;
 
 	LASSERT(lock->l_granted_mode == lock->l_req_mode);
 
@@ -1020,7 +1020,9 @@ void ldlm_extent_add_lock(struct ldlm_resource *res,
 
 	/* node extent initialize */
 	extent = &lock->l_policy_data.l_extent;
-	interval_set(&node->li_node, extent->start, extent->end);
+
+	rc = interval_set(&node->li_node, extent->start, extent->end);
+	LASSERT(!rc);
 
 	root = &res->lr_itree[idx].lit_root;
 	found = interval_insert(&node->li_node, root);

diff --git a/lustre/llite/range_lock.c b/lustre/llite/range_lock.c
index 27fbbef..ead9321 100644
--- a/lustre/llite/range_lock.c
+++ b/lustre/llite/range_lock.c
@@ -61,17 +61,23 @@ void range_lock_tree_init(struct range_lock_tree *tree)
  * Pre:  Caller should have allocated the range lock node.
  * Post: The range lock node is meant to cover [start, end] region
  */
-void range_lock_init(struct range_lock *lock, __u64 start, __u64 end)
+int range_lock_init(struct range_lock *lock, __u64 start, __u64 end)
 {
+	int rc;
+
 	interval_init(&lock->rl_node);
 	if (end != LUSTRE_EOF)
 		end >>= PAGE_SHIFT;
-	interval_set(&lock->rl_node, start >> PAGE_SHIFT, end);
+	rc = interval_set(&lock->rl_node, start >> PAGE_SHIFT, end);
+	if (rc)
+		return rc;
+
 	INIT_LIST_HEAD(&lock->rl_next_lock);
 	lock->rl_task = NULL;
 	lock->rl_lock_count = 0;
 	lock->rl_blocking_ranges = 0;
 	lock->rl_sequence = 0;
+	return rc;
 }
 
 static inline struct range_lock *next_lock(struct range_lock *lock)

diff --git a/lustre/llite/range_lock.h b/lustre/llite/range_lock.h
index dfd350e..5266db7 100644
--- a/lustre/llite/range_lock.h
+++ b/lustre/llite/range_lock.h
@@ -81,7 +81,7 @@ struct range_lock_tree {
 };
 
 void range_lock_tree_init(struct range_lock_tree *tree);
-void range_lock_init(struct range_lock *lock, __u64 start, __u64 end);
+int range_lock_init(struct range_lock *lock, __u64 start, __u64 end);
 int range_lock(struct range_lock_tree *tree, struct range_lock *lock);
 void range_unlock(struct range_lock_tree *tree, struct range_lock *lock);
 #endif

diff --git a/lustre/mdt/mdt_hsm_cdt_requests.c b/lustre/mdt/mdt_hsm_cdt_requests.c
index 2dcdcd9..0ac8135 100644
--- a/lustre/mdt/mdt_hsm_cdt_requests.c
+++ b/lustre/mdt/mdt_hsm_cdt_requests.c
@@ -202,7 +202,9 @@ static int hsm_update_work(struct cdt_req_progress *crp,
 
 	v = crp->crp_node[crp->crp_cnt / NODE_VECTOR_SZ];
 	node = &v[crp->crp_cnt % NODE_VECTOR_SZ];
-	interval_set(node, extent->offset, end);
+	rc = interval_set(node, extent->offset, end);
+	if (rc)
+		GOTO(out, rc);
 	/* try to insert, if entry already exist ignore the new one
 	 * it can happen if ct sends 2 times the same progress */
 	if (interval_insert(node, &crp->crp_root) == NULL)

diff --git a/lustre/ptlrpc/nodemap_range.c b/lustre/ptlrpc/nodemap_range.c
index 56da8ea..da7143f 100644
--- a/lustre/ptlrpc/nodemap_range.c
+++ b/lustre/ptlrpc/nodemap_range.c
@@ -74,6 +74,7 @@ struct lu_nid_range *range_create(struct nodemap_range_tree *nm_range_tree,
 				  struct lu_nodemap *nodemap, unsigned range_id)
 {
 	struct lu_nid_range *range;
+	int rc;
 
 	if (LNET_NIDNET(start_nid) != LNET_NIDNET(end_nid) ||
 	    LNET_NIDADDR(start_nid) > LNET_NIDADDR(end_nid))
@@ -96,7 +97,13 @@ struct lu_nid_range *range_create(struct nodemap_range_tree *nm_range_tree,
 		range->rn_id = nm_range_tree->nmrt_range_highest_id;
 	}
 	range->rn_nodemap = nodemap;
-	interval_set(&range->rn_node, start_nid, end_nid);
+
+	rc = interval_set(&range->rn_node, start_nid, end_nid);
+	if (rc < 0) {
+		OBD_FREE_PTR(range);
+		return NULL;
+	}
+
 	INIT_LIST_HEAD(&range->rn_list);
 
 	return range;

diff --git a/lustre/tests/it_test.c b/lustre/tests/it_test.c
index d0a4168..1ba87b2 100644
--- a/lustre/tests/it_test.c
+++ b/lustre/tests/it_test.c
@@ -37,6 +37,7 @@
  *
  * Author: jay
  */
+#include
 #include
 #include
 #include
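
With this change interval_set() can fail, so callers are expected to check
its return value instead of relying on an assertion. The following is a
minimal, self-contained caller-side sketch (not part of the patch above);
the struct definition and values are hypothetical stand-ins for the real
ones in lustre/include/interval_tree.h, kept only to make the snippet
compile on its own:

#include <errno.h>
#include <stdio.h>

/* Hypothetical stand-ins for illustration only; the real __u64 and
 * struct interval_node live in the Lustre headers and carry more fields. */
typedef unsigned long long __u64;

struct interval_node {
	struct {
		__u64 start;
		__u64 end;
	} in_extent;
	__u64 in_max_high;
};

/* Mirrors the patched interval_set(): report -ERANGE instead of asserting. */
static inline int interval_set(struct interval_node *node,
			       __u64 start, __u64 end)
{
	if (start > end)
		return -ERANGE;
	node->in_extent.start = start;
	node->in_extent.end = end;
	node->in_max_high = end;
	return 0;
}

int main(void)
{
	struct interval_node node;
	int rc;

	/* Valid range: interval_set() now returns 0 on success. */
	rc = interval_set(&node, 0, 4096);
	printf("interval_set(0, 4096) = %d\n", rc);

	/* Inverted range: the caller now sees -ERANGE and must handle it,
	 * e.g. by undoing any allocation, as range_create() does above. */
	rc = interval_set(&node, 4096, 0);
	printf("interval_set(4096, 0) = %d\n", rc);

	return 0;
}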