return node->in_extent.end;
}
-static inline void interval_set(struct interval_node *node,
- __u64 start, __u64 end)
+static inline int interval_set(struct interval_node *node,
+ __u64 start, __u64 end)
{
- LASSERT(start <= end);
- node->in_extent.start = start;
- node->in_extent.end = end;
- node->in_max_high = end;
+ if (start > end)
+ return -ERANGE;
+ node->in_extent.start = start;
+ node->in_extent.end = end;
+ node->in_max_high = end;
+ return 0;
}
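With interval_set() returning -ERANGE instead of asserting, callers are expected to check the result. A minimal sketch of the new calling convention (the start/end values and surrounding context are illustrative, not taken from the patch):

	struct interval_node node;
	int rc;

	/* an inverted range is now reported as -ERANGE instead of tripping LASSERT() */
	rc = interval_set(&node, start, end);
	if (rc)
		return rc;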
static inline void interval_init(struct interval_node *node)
struct interval_node *found, **root;
struct ldlm_interval *node;
struct ldlm_extent *extent;
- int idx;
+ int idx, rc;
LASSERT(lock->l_granted_mode == lock->l_req_mode);
/* node extent initialize */
extent = &lock->l_policy_data.l_extent;
- interval_set(&node->li_node, extent->start, extent->end);
+
+ rc = interval_set(&node->li_node, extent->start, extent->end);
+ LASSERT(!rc);
root = &res->lr_itree[idx].lit_root;
found = interval_insert(&node->li_node, root);
* Pre: Caller should have allocated the range lock node.
 * Post: The range lock node is meant to cover the [start, end] region
*/
-void range_lock_init(struct range_lock *lock, __u64 start, __u64 end)
+int range_lock_init(struct range_lock *lock, __u64 start, __u64 end)
{
+ int rc;
+
interval_init(&lock->rl_node);
if (end != LUSTRE_EOF)
end >>= PAGE_SHIFT;
- interval_set(&lock->rl_node, start >> PAGE_SHIFT, end);
+ rc = interval_set(&lock->rl_node, start >> PAGE_SHIFT, end);
+ if (rc)
+ return rc;
+
INIT_LIST_HEAD(&lock->rl_next_lock);
lock->rl_task = NULL;
lock->rl_lock_count = 0;
lock->rl_blocking_ranges = 0;
lock->rl_sequence = 0;
+ return rc;
}
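Because range_lock_init() can now fail, its callers have to handle the error path as well. A rough sketch of what a call site might look like after this change (the surrounding function and the pos/count variables are hypothetical):

	struct range_lock range;
	int rc;

	rc = range_lock_init(&range, pos, pos + count - 1);
	if (rc)
		return rc;	/* e.g. -ERANGE for an inverted range */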
static inline struct range_lock *next_lock(struct range_lock *lock)
};
void range_lock_tree_init(struct range_lock_tree *tree);
-void range_lock_init(struct range_lock *lock, __u64 start, __u64 end);
+int range_lock_init(struct range_lock *lock, __u64 start, __u64 end);
int range_lock(struct range_lock_tree *tree, struct range_lock *lock);
void range_unlock(struct range_lock_tree *tree, struct range_lock *lock);
#endif
v = crp->crp_node[crp->crp_cnt / NODE_VECTOR_SZ];
node = &v[crp->crp_cnt % NODE_VECTOR_SZ];
- interval_set(node, extent->offset, end);
+ rc = interval_set(node, extent->offset, end);
+ if (rc)
+ GOTO(out, rc);
/* Try to insert; if the entry already exists, ignore the new one.
 * This can happen if the copytool sends the same progress twice. */
if (interval_insert(node, &crp->crp_root) == NULL)
struct lu_nodemap *nodemap, unsigned range_id)
{
struct lu_nid_range *range;
+ int rc;
if (LNET_NIDNET(start_nid) != LNET_NIDNET(end_nid) ||
LNET_NIDADDR(start_nid) > LNET_NIDADDR(end_nid))
range->rn_id = nm_range_tree->nmrt_range_highest_id;
}
range->rn_nodemap = nodemap;
- interval_set(&range->rn_node, start_nid, end_nid);
+
+ rc = interval_set(&range->rn_node, start_nid, end_nid);
+ if (rc < 0) {
+ OBD_FREE_PTR(range);
+ return NULL;
+ }
+
INIT_LIST_HEAD(&range->rn_list);
return range;
*
* Author: jay <jxiong@clusterfs.com>
*/
+#include <errno.h>
#include <inttypes.h>
#include <limits.h>
#include <stdio.h>
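With <errno.h> included, the userspace interval tree test can also exercise the new failure path. A possible check, assuming a local struct interval_node node and <assert.h> (illustrative only, not part of the patch):

	/* an inverted range must be rejected rather than aborting the test */
	assert(interval_set(&node, 10, 5) == -ERANGE);
	assert(interval_set(&node, 5, 10) == 0);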