/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2012, 2013, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * Memory Descriptor management routines
 */
#define DEBUG_SUBSYSTEM S_LNET

#include <lnet/lib-lnet.h>
/* must be called with lnet_res_lock held */
void
lnet_md_unlink(lnet_libmd_t *md)
{
	if ((md->md_flags & LNET_MD_FLAG_ZOMBIE) == 0) {
		/* first unlink attempt... */
		lnet_me_t *me = md->md_me;

		md->md_flags |= LNET_MD_FLAG_ZOMBIE;

		/* Disassociate from ME (if any), and unlink it if it was
		 * created with LNET_UNLINK */
		if (me != NULL) {
			/* detach MD from portal */
			lnet_ptl_detach_md(me, md);
			if (me->me_unlink == LNET_UNLINK)
				lnet_me_unlink(me);
		}

		/* ensure all future handle lookups fail */
		lnet_res_lh_invalidate(&md->md_lh);
	}

	if (md->md_refcount != 0) {
		CDEBUG(D_NET, "Queueing unlink of md %p\n", md);
		return;
	}

	CDEBUG(D_NET, "Unlinking md %p\n", md);

	if (md->md_eq != NULL) {
		int cpt = lnet_cpt_of_cookie(md->md_lh.lh_cookie);

		LASSERT(*md->md_eq->eq_refs[cpt] > 0);
		(*md->md_eq->eq_refs[cpt])--;
	}

	LASSERT(!list_empty(&md->md_list));
	list_del_init(&md->md_list);
	lnet_md_free(md);
}
int
lnet_cpt_of_md(lnet_libmd_t *md)
{
	int cpt = CFS_CPT_ANY;

	/* if this MD describes a bulk transfer, use the bulk MD instead */
	if ((md->md_options & LNET_MD_BULK_HANDLE) != 0 &&
	    !LNetHandleIsInvalid(md->md_bulk_handle)) {
		md = lnet_handle2md(&md->md_bulk_handle);
		if (md == NULL)
			return CFS_CPT_ANY;
	}

	/* return the CPT of the node holding the MD's first fragment */
	if ((md->md_options & LNET_MD_KIOV) != 0) {
		if (md->md_iov.kiov[0].kiov_page != NULL)
			cpt = cfs_cpt_of_node(lnet_cpt_table(),
				page_to_nid(md->md_iov.kiov[0].kiov_page));
	} else if (md->md_iov.iov[0].iov_base != NULL) {
		cpt = cfs_cpt_of_node(lnet_cpt_table(),
			page_to_nid(virt_to_page(md->md_iov.iov[0].iov_base)));
	}

	return cpt;
}
static int
lnet_md_build(lnet_libmd_t *lmd, lnet_md_t *umd, int unlink)
{
	int i;
	unsigned int niov;
	int total_length = 0;

	lmd->md_me = NULL;
	lmd->md_start = umd->start;
	lmd->md_offset = 0;
	lmd->md_max_size = umd->max_size;
	lmd->md_options = umd->options;
	lmd->md_user_ptr = umd->user_ptr;
	lmd->md_eq = NULL;
	lmd->md_threshold = umd->threshold;
	lmd->md_refcount = 0;
	lmd->md_flags = (unlink == LNET_UNLINK) ? LNET_MD_FLAG_AUTO_UNLINK : 0;
	lmd->md_bulk_handle = umd->bulk_handle;

	if ((umd->options & LNET_MD_IOVEC) != 0) {
		if ((umd->options & LNET_MD_KIOV) != 0) /* Can't specify both */
			return -EINVAL;

		lmd->md_niov = niov = umd->length;
		memcpy(lmd->md_iov.iov, umd->start,
		       niov * sizeof(lmd->md_iov.iov[0]));

		for (i = 0; i < (int)niov; i++) {
			/* We take the base address on trust */
			if (lmd->md_iov.iov[i].iov_len <= 0) /* invalid length */
				return -EINVAL;

			total_length += lmd->md_iov.iov[i].iov_len;
		}

		lmd->md_length = total_length;

		if ((umd->options & LNET_MD_MAX_SIZE) != 0 && /* max size used */
		    (umd->max_size < 0 ||
		     umd->max_size > total_length)) /* illegal max_size */
			return -EINVAL;

	} else if ((umd->options & LNET_MD_KIOV) != 0) {
		lmd->md_niov = niov = umd->length;
		memcpy(lmd->md_iov.kiov, umd->start,
		       niov * sizeof(lmd->md_iov.kiov[0]));

		for (i = 0; i < (int)niov; i++) {
			/* We take the page pointer on trust */
			if (lmd->md_iov.kiov[i].kiov_offset +
			    lmd->md_iov.kiov[i].kiov_len > PAGE_SIZE)
				return -EINVAL; /* invalid length */

			total_length += lmd->md_iov.kiov[i].kiov_len;
		}

		lmd->md_length = total_length;

		if ((umd->options & LNET_MD_MAX_SIZE) != 0 && /* max size used */
		    (umd->max_size < 0 ||
		     umd->max_size > total_length)) /* illegal max_size */
			return -EINVAL;
	} else { /* contiguous */
		lmd->md_length = umd->length;
		lmd->md_niov = niov = 1;
		lmd->md_iov.iov[0].iov_base = umd->start;
		lmd->md_iov.iov[0].iov_len = umd->length;

		if ((umd->options & LNET_MD_MAX_SIZE) != 0 && /* max size used */
		    (umd->max_size < 0 ||
		     umd->max_size > (int)umd->length)) /* illegal max_size */
			return -EINVAL;
	}

	return 0;
}
/* must be called with resource lock held */
static int
lnet_md_link(lnet_libmd_t *md, lnet_handle_eq_t eq_handle, int cpt)
{
	struct lnet_res_container *container = the_lnet.ln_md_containers[cpt];

	/* NB we are passed an allocated, but inactive md.
	 * if we return success, caller may lnet_md_unlink() it.
	 * otherwise caller may only lnet_md_free() it.
	 */
	/* This implementation doesn't know how to create START events or
	 * disable END events. Best to LASSERT our caller is compliant so
	 * we find out quickly... */
	/* TODO - reevaluate what should be here in light of
	 * the removal of the start and end events
	 * maybe we shouldn't even allow LNET_EQ_NONE!
	 * LASSERT(eq == NULL);
	 */
	if (!LNetHandleIsInvalid(eq_handle)) {
		md->md_eq = lnet_handle2eq(&eq_handle);
		if (md->md_eq == NULL)
			return -ENOENT;

		(*md->md_eq->eq_refs[cpt])++;
	}

	lnet_res_lh_initialize(container, &md->md_lh);

	LASSERT(list_empty(&md->md_list));
	list_add(&md->md_list, &container->rec_active);

	return 0;
}
/* must be called with lnet_res_lock held */
void
lnet_md_deconstruct(lnet_libmd_t *lmd, lnet_md_t *umd)
{
	/* NB this doesn't copy out all the iov entries so when a
	 * discontiguous MD is copied out, the target gets to know the
	 * original iov pointer (in start) and the number of entries it had
	 * in it */
	umd->start = lmd->md_start;
	umd->length = ((lmd->md_options &
			(LNET_MD_IOVEC | LNET_MD_KIOV)) == 0) ?
		      lmd->md_length : lmd->md_niov;
	umd->threshold = lmd->md_threshold;
	umd->max_size = lmd->md_max_size;
	umd->options = lmd->md_options;
	umd->user_ptr = lmd->md_user_ptr;
	lnet_eq2handle(&umd->eq_handle, lmd->md_eq);
}
static int
lnet_md_validate(lnet_md_t *umd)
{
	if (umd->start == NULL && umd->length != 0) {
		CERROR("MD start pointer cannot be NULL with length %u\n",
		       umd->length);
		return -EINVAL;
	}

	if ((umd->options & (LNET_MD_KIOV | LNET_MD_IOVEC)) != 0 &&
	    umd->length > LNET_MAX_IOV) {
		CERROR("Invalid option: too many fragments %u, %d max\n",
		       umd->length, LNET_MAX_IOV);
		return -EINVAL;
	}

	return 0;
}
/**
 * Create a memory descriptor and attach it to a ME.
 *
 * \param meh A handle for a ME to associate the new MD with.
 * \param umd Provides initial values for the user-visible parts of a MD.
 * Other than its use for initialization, there is no linkage between this
 * structure and the MD maintained by the LNet.
 * \param unlink A flag to indicate whether the MD is automatically unlinked
 * when it becomes inactive, either because the operation threshold drops to
 * zero or because the available memory becomes less than \a umd.max_size.
 * (Note that the check for unlinking a MD only occurs after the completion
 * of a successful operation on the MD.) The value LNET_UNLINK enables auto
 * unlinking; the value LNET_RETAIN disables it.
 * \param handle On successful returns, a handle to the newly created MD is
 * saved here. This handle can be used later in LNetMDUnlink().
 *
 * \retval 0 On success.
 * \retval -EINVAL If \a umd is not valid.
 * \retval -ENOMEM If new MD cannot be allocated.
 * \retval -ENOENT Either \a meh or \a umd.eq_handle does not point to a
 * valid object. Note that it's OK to supply a NULL \a umd.eq_handle by
 * calling LNetInvalidateHandle() on it.
 * \retval -EBUSY If the ME pointed to by \a meh is already associated with
 * a MD.
 */
int
LNetMDAttach(lnet_handle_me_t meh, lnet_md_t umd,
	     lnet_unlink_t unlink, lnet_handle_md_t *handle)
{
	struct list_head matches = LIST_HEAD_INIT(matches);
	struct list_head drops = LIST_HEAD_INIT(drops);
	struct lnet_me *me;
	struct lnet_libmd *md;
	int cpt;
	int rc;

	LASSERT(the_lnet.ln_refcount > 0);

	if (lnet_md_validate(&umd) != 0)
		return -EINVAL;

	if ((umd.options & (LNET_MD_OP_GET | LNET_MD_OP_PUT)) == 0) {
		CERROR("Invalid option: no MD_OP set\n");
		return -EINVAL;
	}

	md = lnet_md_alloc(&umd);
	if (md == NULL)
		return -ENOMEM;

	rc = lnet_md_build(md, &umd, unlink);
	cpt = lnet_cpt_of_cookie(meh.cookie);

	lnet_res_lock(cpt);
	if (rc != 0)
		goto failed;

	me = lnet_handle2me(&meh);
	if (me == NULL)
		rc = -ENOENT;
	else if (me->me_md != NULL)
		rc = -EBUSY;
	else
		rc = lnet_md_link(md, umd.eq_handle, cpt);
	if (rc != 0)
		goto failed;

	/* attach this MD to portal of ME and check if it matches any
	 * blocked msgs on this portal */
	lnet_ptl_attach_md(me, md, &matches, &drops);

	lnet_md2handle(handle, md);

	lnet_res_unlock(cpt);

	lnet_drop_delayed_msg_list(&drops, "Bad match");
	lnet_recv_delayed_msg_list(&matches);

	return 0;

failed:
	lnet_md_free(md);
	lnet_res_unlock(cpt);
	return rc;
}
EXPORT_SYMBOL(LNetMDAttach);
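
/*
 * Usage sketch (illustrative only, not part of the original source): post
 * a passive PUT buffer on a portal. The portal number, peer id, match
 * bits and buffer are assumptions of the sketch, and error handling is
 * elided; a real caller would check every return code.
 *
 * \code
 *	lnet_handle_me_t meh;
 *	lnet_handle_md_t mdh;
 *	lnet_md_t umd;
 *
 *	LNetMEAttach(portal, peer_id, match_bits, 0, LNET_UNLINK,
 *		     LNET_INS_AFTER, &meh);
 *
 *	memset(&umd, 0, sizeof(umd));
 *	umd.start     = buffer;
 *	umd.length    = buffer_size;
 *	umd.threshold = 1;
 *	umd.options   = LNET_MD_OP_PUT;
 *	LNetInvalidateHandle(&umd.eq_handle);
 *
 *	LNetMDAttach(meh, umd, LNET_UNLINK, &mdh);
 * \endcode
 */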
/**
 * Create a "free floating" memory descriptor - a MD that is not associated
 * with a ME. Such MDs are usually used in LNetPut() and LNetGet() operations.
 *
 * \param umd,unlink See the discussion for LNetMDAttach().
 * \param handle On successful returns, a handle to the newly created MD is
 * saved here. This handle can be used later in LNetMDUnlink(), LNetPut(),
 * and LNetGet() operations.
 *
 * \retval 0 On success.
 * \retval -EINVAL If \a umd is not valid.
 * \retval -ENOMEM If new MD cannot be allocated.
 * \retval -ENOENT \a umd.eq_handle does not point to a valid EQ. Note that
 * it's OK to supply a NULL \a umd.eq_handle by calling
 * LNetInvalidateHandle() on it.
 */
int
LNetMDBind(lnet_md_t umd, lnet_unlink_t unlink, lnet_handle_md_t *handle)
{
	struct lnet_libmd *md;
	int cpt;
	int rc;

	LASSERT(the_lnet.ln_refcount > 0);

	if (lnet_md_validate(&umd) != 0)
		return -EINVAL;

	if ((umd.options & (LNET_MD_OP_GET | LNET_MD_OP_PUT)) != 0) {
		CERROR("Invalid option: GET|PUT illegal on active MDs\n");
		return -EINVAL;
	}

	md = lnet_md_alloc(&umd);
	if (md == NULL)
		return -ENOMEM;

	rc = lnet_md_build(md, &umd, unlink);

	cpt = lnet_res_lock_current();
	if (rc != 0)
		goto failed;

	rc = lnet_md_link(md, umd.eq_handle, cpt);
	if (rc != 0)
		goto failed;

	lnet_md2handle(handle, md);

	lnet_res_unlock(cpt);
	return 0;

failed:
	lnet_md_free(md);
	lnet_res_unlock(cpt);
	return rc;
}
EXPORT_SYMBOL(LNetMDBind);
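
/*
 * Usage sketch (illustrative only, not part of the original source): bind
 * a source buffer and use it for an active LNetPut(). The EQ handle "eqh",
 * the target id and the remaining LNetPut() arguments are assumptions of
 * the sketch. Note that LNET_MD_OP_GET/LNET_MD_OP_PUT must NOT be set on
 * an MD passed to LNetMDBind().
 *
 * \code
 *	lnet_handle_md_t mdh;
 *	lnet_md_t umd;
 *
 *	memset(&umd, 0, sizeof(umd));
 *	umd.start     = buffer;
 *	umd.length    = buffer_size;
 *	umd.threshold = 1;
 *	umd.eq_handle = eqh;
 *	LNetMDBind(umd, LNET_UNLINK, &mdh);
 *
 *	LNetPut(LNET_NID_ANY, mdh, LNET_NOACK_REQ, target_id,
 *		portal, match_bits, 0, 0);
 * \endcode
 */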
/**
 * Unlink the memory descriptor from any ME it may be linked to and release
 * the internal resources associated with it. As a result, active messages
 * associated with the MD may get aborted.
 *
 * This function does not free the memory region associated with the MD;
 * i.e., the memory the user allocated for this MD. If the ME associated with
 * this MD is not NULL and was created with auto unlink enabled, the ME is
 * unlinked as well (see LNetMEAttach()).
 *
 * Explicitly unlinking a MD via this function call has the same behavior as
 * a MD that has been automatically unlinked, except that no LNET_EVENT_UNLINK
 * is generated in the latter case.
 *
 * An unlinked event can be reported in two ways:
 * - If there are no pending operations on the MD, it is unlinked immediately
 *   and an LNET_EVENT_UNLINK event is logged before this function returns.
 * - Otherwise, the MD is only marked for deletion when this function
 *   returns, and the unlinked event will be piggybacked on the event of
 *   the completion of the last operation by setting the unlinked field of
 *   the event. No dedicated LNET_EVENT_UNLINK event is generated.
 *
 * Note that in both cases the unlinked field of the event is always set; no
 * more events will happen on the MD after such an event is logged.
 *
 * \param mdh A handle for the MD to be unlinked.
 *
 * \retval 0 On success.
 * \retval -ENOENT If \a mdh does not point to a valid MD object.
 */
int
LNetMDUnlink(lnet_handle_md_t mdh)
{
	lnet_event_t ev;
	lnet_libmd_t *md;
	int cpt;

	LASSERT(the_lnet.ln_refcount > 0);

	cpt = lnet_cpt_of_cookie(mdh.cookie);
	lnet_res_lock(cpt);

	md = lnet_handle2md(&mdh);
	if (md == NULL) {
		lnet_res_unlock(cpt);
		return -ENOENT;
	}

	md->md_flags |= LNET_MD_FLAG_ABORTED;

	/* If the MD is busy, lnet_md_unlink just marks it for deletion, and
	 * when the LND is done, the completion event flags that the MD was
	 * unlinked. Otherwise, we enqueue an event now... */
	if (md->md_eq != NULL && md->md_refcount == 0) {
		lnet_build_unlink_event(md, &ev);
		lnet_eq_enqueue_event(md->md_eq, &ev);
	}

	lnet_md_unlink(md);

	lnet_res_unlock(cpt);
	return 0;
}
EXPORT_SYMBOL(LNetMDUnlink);
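
/*
 * Usage sketch (illustrative only, not part of the original source):
 * unlink an MD and wait until the event stream confirms that no more
 * events will arrive for it. The EQ handle "eqh" and the use of
 * LNetEQPoll() with an infinite timeout are assumptions of the sketch;
 * ev.unlinked is set on the last event for the MD, whether that is a
 * dedicated LNET_EVENT_UNLINK or a piggybacked flag on the final
 * completion event.
 *
 * \code
 *	lnet_event_t ev;
 *	int which;
 *
 *	LNetMDUnlink(mdh);
 *	do {
 *		LNetEQPoll(&eqh, 1, LNET_TIME_FOREVER, &ev, &which);
 *	} while (!ev.unlinked);
 * \endcode
 */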