// SPDX-License-Identifier: GPL-2.0

/*
 * Copyright (C) 2013, Trustees of Indiana University
 *
 * Author: Joshua Walgenbach <jjw@iu.edu>
 */

#include <lustre_net.h>
#include "nodemap_internal.h"
#include <linux/interval_tree_generic.h>

/*
 * To classify clients when they connect, build a global range tree
 * containing all admin-defined ranges. Incoming clients can then be
 * classified into their nodemaps, and the lu_nodemap structure will be
 * set in the export structure for the connecting client. Pointers to
 * the lu_nid_range nodes are added to linked lists within the
 * lu_nodemap structure for reporting purposes. Access to the range tree
 * should be controlled to prevent read access during update operations.
 */

#define START(node)	(lnet_nid_to_nid4(&((node)->rn_start)))
#define LAST(node)	(lnet_nid_to_nid4(&((node)->rn_end)))

INTERVAL_TREE_DEFINE(struct lu_nid_range, rn_rb, lnet_nid_t, rn_subtree_last,
		     START, LAST, static, nm_range)
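
/*
 * INTERVAL_TREE_DEFINE() above generates the static helpers used below:
 * nm_range_insert(), nm_range_remove(), nm_range_iter_first() and
 * nm_range_iter_next(), all keyed on the nid4 values produced by
 * START()/LAST().
 */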

static int __range_is_included(lnet_nid_t needle_start, lnet_nid_t needle_end,
			       struct lu_nid_range *haystack)
{
	return LNET_NIDADDR(START(haystack)) <= LNET_NIDADDR(needle_start) &&
	       LNET_NIDADDR(LAST(haystack)) >= LNET_NIDADDR(needle_end);
}

static int range_is_included(struct lu_nid_range *needle,
			     struct lu_nid_range *haystack)
{
	return __range_is_included(START(needle), LAST(needle), haystack);
}
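
/*
 * Example (illustrative addresses only): with a haystack range covering
 * 10.0.0.[1-100]@tcp and a needle range covering 10.0.0.[5-10]@tcp,
 * range_is_included() returns true because both the needle's start and
 * end addresses fall inside the haystack.
 */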

/*
 * range_create() - allocate and initialize a nid range
 *
 * \param	config		nodemap config - used to set range id
 * \param	start_nid	starting nid of the range
 * \param	end_nid		ending nid of the range
 * \param	netmask		network mask prefix length
 * \param	nodemap		nodemap that contains this range
 * \param	range_id	should be 0 unless loading from disk
 * \retval	lu_nid_range on success, NULL on failure
 */
struct lu_nid_range *range_create(struct nodemap_config *config,
				  const struct lnet_nid *start_nid,
				  const struct lnet_nid *end_nid,
				  u8 netmask, struct lu_nodemap *nodemap,
				  unsigned int range_id)
{
	struct nodemap_range_tree *nm_range_tree;
	struct lu_nid_range *range;
	LIST_HEAD(tmp_nidlist);

	if (LNET_NID_NET(start_nid) != LNET_NID_NET(end_nid))
		return NULL;

	if (!netmask) {
		lnet_nid_t nid4[2] = {
			lnet_nid_to_nid4(start_nid),
			lnet_nid_to_nid4(end_nid)
		};

		if (LNET_NIDADDR(nid4[0]) > LNET_NIDADDR(nid4[1]))
			return NULL;
	} else if (!nid_same(start_nid, end_nid)) {
		/* A netmask is used with start_nid to form a nidmask. If
		 * start_nid and end_nid differ, the specified range was
		 * malformed.
		 */
		return NULL;
	}

	if (netmask) {
		/* +4 for '/<prefix_length>' */
		char nidstr[LNET_NIDSTR_SIZE + 4];
		char net[LNET_NIDSTR_SIZE];
		char *c;
		int rc;

		if (netmask > 999) {
			/* If the netmask is somehow more than three characters
			 * then the logic below could truncate it, which could
			 * result in creating a valid netmask value from bad
			 * input.
			 * cfs_parse_nidlist() will check whether the netmask
			 * is valid for the address type.
			 */
			CERROR("Invalid netmask %u\n", netmask);
			return NULL;
		}

		/* nidstr = <addr>@<net> */
		snprintf(nidstr, sizeof(nidstr), "%s",
			 libcfs_nidstr(start_nid));

		c = strchr(nidstr, '@');
		if (!c) {
			CERROR("Invalid nid %s for netmask\n",
			       libcfs_nidstr(start_nid));
			return NULL;
		}

		/* net = @<net> */
		strscpy(net, c, sizeof(net));

		/* nidstr = <addr>/<prefix_length> */
		snprintf(c, sizeof(nidstr) - strlen(nidstr), "/%u", netmask);

		/* nidstr = <addr>/<prefix_length>@<net>
		 * (-1 to ensure room for the null byte)
		 */
		strncat(nidstr, net, sizeof(nidstr) - strlen(nidstr) - 1);

		rc = cfs_parse_nidlist(nidstr, strlen(nidstr), &tmp_nidlist);
		if (rc < 0) {
			CERROR("Invalid nidmask %s rc = %d\n", nidstr, rc);
			return NULL;
		}
	}

	OBD_ALLOC_PTR(range);
	if (range == NULL) {
		CERROR("cannot allocate lu_nid_range of size %zu bytes\n",
		       sizeof(*range));
		return NULL;
	}

	/* if we are loading from save, use on disk id num */
	nm_range_tree = &config->nmc_range_tree;
	if (range_id != 0) {
		if (nm_range_tree->nmrt_range_highest_id < range_id)
			nm_range_tree->nmrt_range_highest_id = range_id;
		range->rn_id = range_id;
	} else {
		nm_range_tree->nmrt_range_highest_id++;
		range->rn_id = nm_range_tree->nmrt_range_highest_id;
	}

	range->rn_nodemap = nodemap;
	range->rn_netmask = netmask;
	range->rn_start = *start_nid;
	range->rn_end = *end_nid;

	INIT_LIST_HEAD(&range->rn_list);
	INIT_LIST_HEAD(&range->rn_nidlist);
	if (!list_empty(&tmp_nidlist))
		list_splice(&tmp_nidlist, &range->rn_nidlist);
	range->rn_subtree.nmrt_range_interval_root = INTERVAL_TREE_ROOT;

	return range;
}
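
/*
 * Illustrative pairing (sketch only, not taken from this file's callers):
 * a new range is normally created and then inserted into the active
 * config, e.g.
 *
 *	range = range_create(config, &start_nid, &end_nid, 0, nodemap, 0);
 *	if (range)
 *		rc = range_insert(config, range, NULL, false);
 *
 * Administrative callers elsewhere in the nodemap code are responsible
 * for linking the range onto the owning nodemap's list and for cleanup
 * on error.
 */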

/*
 * find the exact range
 *
 * \param	start_nid	starting nid
 * \param	end_nid		ending nid
 * \retval	matching range or NULL
 */
struct lu_nid_range *__range_find(struct nodemap_range_tree *nm_range_tree,
				  const struct lnet_nid *start_nid,
				  const struct lnet_nid *end_nid)
{
	struct lu_nid_range *range;
	lnet_nid_t nid4[2];

	if (!nid_is_nid4(start_nid) || !nid_is_nid4(end_nid))
		return NULL;

	nid4[0] = lnet_nid_to_nid4(start_nid);
	nid4[1] = lnet_nid_to_nid4(end_nid);

	range = nm_range_iter_first(&nm_range_tree->nmrt_range_interval_root,
				    nid4[0], nid4[1]);
	while (range) {
		if (nid_same(&range->rn_start, start_nid) &&
		    nid_same(&range->rn_end, end_nid))
			break;
		if (__range_is_included(nid4[0], nid4[1], range))
			return __range_find(&range->rn_subtree,
					    start_nid, end_nid);
		range = nm_range_iter_next(range, nid4[0], nid4[1]);
	}

	return range;
}
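
/*
 * range_find() below is the public wrapper: plain nid ranges are looked up
 * in the interval tree via __range_find(), while netmask-based ranges are
 * matched against the nmc_netmask_setup list by nidmask and prefix length.
 */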

struct lu_nid_range *range_find(struct nodemap_config *config,
				const struct lnet_nid *start_nid,
				const struct lnet_nid *end_nid,
				u8 netmask)
{
	struct lu_nid_range *range = NULL;

	if (!netmask)
		return __range_find(&config->nmc_range_tree,
				    start_nid, end_nid);

	if (!list_empty(&config->nmc_netmask_setup)) {
		struct lu_nid_range *range_temp;
		u8 len;

		list_for_each_entry_safe(range, range_temp,
					 &config->nmc_netmask_setup,
					 rn_collect) {
			len = cfs_nidmask_get_length(&range->rn_nidlist);

			if (cfs_match_nid(start_nid, &range->rn_nidlist) &&
			    netmask == len)
				return range;
		}
	}

	return NULL;
}

/*
 * range destructor
 */
void range_destroy(struct lu_nid_range *range)
{
	LASSERT(list_empty(&range->rn_list) == 0);

	if (!list_empty(&range->rn_nidlist))
		cfs_free_nidlist(&range->rn_nidlist);

	OBD_FREE_PTR(range);
}

/*
 * insert a nid range into the interval tree
 *
 * \param	range	range to insert
 * \retval	0 on success
 *
 * This function checks that the given nid range does not overlap an
 * existing one, so that each nid can belong to exactly one range.
 */
static int __range_insert(struct nodemap_range_tree *nm_range_tree,
			  struct lu_nid_range *range,
			  struct lu_nid_range **parent_range, bool dynamic)
{
	struct lu_nid_range *found = NULL;
	int rc = 0;

	found = nm_range_iter_first(&nm_range_tree->nmrt_range_interval_root,
				    lnet_nid_to_nid4(&range->rn_start),
				    lnet_nid_to_nid4(&range->rn_end));
	if (found) {
		if (dynamic && range_is_included(range, found)) {
			rc = __range_insert(&found->rn_subtree,
					    range, parent_range, dynamic);
			if (!rc) {
				if (parent_range && !*parent_range)
					*parent_range = found;
			}
		} else {
			rc = -EEXIST;
		}
		GOTO(out_insert, rc);
	}

	nm_range_insert(range,
			&nm_range_tree->nmrt_range_interval_root);

out_insert:
	return rc;
}
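
/*
 * Example (illustrative addresses only): inserting 10.0.0.[1-50]@tcp while
 * 10.0.0.[20-80]@tcp is already in the tree fails with -EEXIST. Inserting
 * 10.0.0.[20-30]@tcp with dynamic set instead recurses into the existing
 * range's rn_subtree and reports the enclosing range through *parent_range.
 */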

int range_insert(struct nodemap_config *config, struct lu_nid_range *range,
		 struct lu_nid_range **parent_range, bool dynamic)
{
	int rc = 0;

	if (!range->rn_netmask) {
		rc = __range_insert(&config->nmc_range_tree,
				    range, parent_range, dynamic);
	} else {
		if (range_find(config, &range->rn_start, &range->rn_end,
			       range->rn_netmask))
			rc = -EEXIST;
		else
			list_add(&range->rn_collect,
				 &config->nmc_netmask_setup);
	}

	return rc;
}

/*
 * delete a range from the interval tree and any
 * associated nodemap references
 *
 * \param	range	range to remove
 */
static void __range_delete(struct nodemap_range_tree *nm_range_tree,
			   struct lu_nid_range *range)
{
	struct lu_nid_range *found;
	lnet_nid_t nid4[2];

	nid4[0] = lnet_nid_to_nid4(&range->rn_start);
	nid4[1] = lnet_nid_to_nid4(&range->rn_end);

	found = nm_range_iter_first(&nm_range_tree->nmrt_range_interval_root,
				    nid4[0], nid4[1]);
	while (found) {
		if (nid_same(&found->rn_start, &range->rn_start) &&
		    nid_same(&found->rn_end, &range->rn_end))
			break;
		if (__range_is_included(nid4[0], nid4[1], found)) {
			__range_delete(&found->rn_subtree, range);
			return;
		}
		found = nm_range_iter_next(found, nid4[0], nid4[1]);
	}

	if (found)
		nm_range_remove(found,
				&nm_range_tree->nmrt_range_interval_root);
}
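
/*
 * Note: __range_delete() only detaches the matching node from the interval
 * tree (possibly from a nested rn_subtree); freeing is left to
 * range_destroy(), which range_delete() below calls once the range is
 * fully unlinked.
 */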

void range_delete(struct nodemap_config *config, struct lu_nid_range *range)
{
	list_del(&range->rn_list);

	if (!range->rn_netmask)
		__range_delete(&config->nmc_range_tree, range);
	else
		list_del(&range->rn_collect);

	range_destroy(range);
}

/*
 * search the interval tree for a nid within a range
 *
 * \param	nid	nid to search for
 */
struct lu_nid_range *__range_search(struct nodemap_range_tree *nm_range_tree,
				    struct lnet_nid *nid)
{
	struct lu_nid_range *range, *subrange;

	range = nm_range_iter_first(&nm_range_tree->nmrt_range_interval_root,
				    lnet_nid_to_nid4(nid),
				    lnet_nid_to_nid4(nid));
	if (range) {
		subrange = __range_search(&range->rn_subtree, nid);
		if (subrange)
			range = subrange;
	}

	return range;
}
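
/*
 * When ranges are nested (dynamic nodemaps), __range_search() keeps
 * descending into rn_subtree, so the most specific enclosing range is
 * returned.
 */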

struct lu_nid_range *range_search(struct nodemap_config *config,
				  struct lnet_nid *nid)
{
	if (nid_is_nid4(nid))
		return __range_search(&config->nmc_range_tree, nid);

	if (!list_empty(&config->nmc_netmask_setup)) {
		struct lu_nid_range *range, *range_temp;

		list_for_each_entry_safe(range, range_temp,
					 &config->nmc_netmask_setup,
					 rn_collect) {
			if (cfs_match_nid(nid, &range->rn_nidlist))
				return range;
		}
	}

	return NULL;
}
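
/*
 * Illustrative classification sketch (not taken from this file): when a
 * client connects, the server-side nodemap code can resolve the peer's nid
 * to a nodemap roughly as
 *
 *	struct lu_nid_range *r = range_search(active_config, &peer_nid);
 *	struct lu_nodemap *nodemap = r ? r->rn_nodemap : default_nodemap;
 *
 * where 'active_config', 'peer_nid' and 'default_nodemap' stand in for
 * whatever configuration, peer identity and fallback the real caller holds.
 */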