/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2012, 2017, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * Memory Descriptor management routines
 */

#define DEBUG_SUBSYSTEM S_LNET

#include <lnet/lib-lnet.h>
/* must be called with lnet_res_lock held */
void
lnet_md_unlink(struct lnet_libmd *md)
{
	if ((md->md_flags & LNET_MD_FLAG_ZOMBIE) == 0) {
		/* first unlink attempt... */
		struct lnet_me *me = md->md_me;

		md->md_flags |= LNET_MD_FLAG_ZOMBIE;

		/* Disassociate from the ME (if any), and unlink the ME as
		 * well if it was created with LNET_UNLINK */
		if (me != NULL) {
			/* detach MD from portal */
			lnet_ptl_detach_md(me, md);
			if (me->me_unlink == LNET_UNLINK)
				lnet_me_unlink(me);
		}

		/* ensure all future handle lookups fail */
		lnet_res_lh_invalidate(&md->md_lh);
	}

	if (md->md_refcount != 0) {
		CDEBUG(D_NET, "Queueing unlink of md %p\n", md);
		return;
	}

	CDEBUG(D_NET, "Unlinking md %p\n", md);

	if (md->md_eq != NULL) {
		int cpt = lnet_cpt_of_cookie(md->md_lh.lh_cookie);

		LASSERT(*md->md_eq->eq_refs[cpt] > 0);
		(*md->md_eq->eq_refs[cpt])--;
	}

	LASSERT(!list_empty(&md->md_list));
	list_del_init(&md->md_list);
	lnet_md_free(md);
}
struct page *
lnet_kvaddr_to_page(unsigned long vaddr)
{
	if (is_vmalloc_addr((void *)vaddr))
		return vmalloc_to_page((void *)vaddr);

#ifdef CONFIG_HIGHMEM
#ifdef HAVE_KMAP_TO_PAGE
	/*
	 * This ifdef handles kernel versions which export the
	 * kmap_to_page() function. If it is available, use it.
	 * Otherwise, fall back to the legacy check below.
	 */
	return kmap_to_page((void *)vaddr);
#else
	if (vaddr >= PKMAP_ADDR(0) && vaddr < PKMAP_ADDR(LAST_PKMAP)) {
		/* Highmem pages are only used for bulk (kiov) I/O, so we
		 * should never be asked to map a highmem address here. */
		CERROR("find page for address in highmem\n");
		LBUG();
	}
	return virt_to_page(vaddr);
#endif /* HAVE_KMAP_TO_PAGE */
#else
	return virt_to_page(vaddr);
#endif /* CONFIG_HIGHMEM */
}
EXPORT_SYMBOL(lnet_kvaddr_to_page);
int
lnet_cpt_of_md(struct lnet_libmd *md, unsigned int offset)
{
	int cpt = CFS_CPT_ANY;
	unsigned int niov;

	/*
	 * If the MD options include a bulk handle then look at the bulk
	 * MD instead, because that is the data which will be DMAed.
	 */
	if (md && (md->md_options & LNET_MD_BULK_HANDLE) != 0 &&
	    !LNetMDHandleIsInvalid(md->md_bulk_handle))
		md = lnet_handle2md(&md->md_bulk_handle);

	if (!md || md->md_niov == 0)
		return cpt;

	niov = md->md_niov;

	/*
	 * There are three cases to handle:
	 *  1. The MD is using lnet_kiov_t
	 *  2. The MD is using struct kvec
	 *  3. Contiguous buffer allocated via vmalloc
	 *
	 * In case 2 we can use the virt_to_page() macro to get the page
	 * address of the memory the kvec describes.
	 * In case 3 use is_vmalloc_addr() and vmalloc_to_page().
	 *
	 * The offset provided can be within the first iov/kiov entry or
	 * it could go beyond it. In that case we need to make sure to
	 * look at the page which actually contains the data that will be
	 * DMAed.
	 */
	if ((md->md_options & LNET_MD_KIOV) != 0) {
		lnet_kiov_t *kiov = md->md_iov.kiov;

		while (offset >= kiov->kiov_len) {
			offset -= kiov->kiov_len;
			niov--;
			kiov++;
			if (niov == 0) {
				CERROR("offset %d goes beyond kiov\n", offset);
				goto out;
			}
		}

		cpt = cfs_cpt_of_node(lnet_cpt_table(),
				      page_to_nid(kiov->kiov_page));
	} else {
		struct kvec *iov = md->md_iov.iov;
		unsigned long vaddr;
		struct page *page;

		while (offset >= iov->iov_len) {
			offset -= iov->iov_len;
			niov--;
			iov++;
			if (niov == 0) {
				CERROR("offset %d goes beyond iov\n", offset);
				goto out;
			}
		}

		vaddr = ((unsigned long)iov->iov_base) + offset;
		page = lnet_kvaddr_to_page(vaddr);
		if (!page) {
			CERROR("Couldn't resolve vaddr 0x%lx to page\n",
			       vaddr);
			goto out;
		}
		cpt = cfs_cpt_of_node(lnet_cpt_table(), page_to_nid(page));
	}

out:
	return cpt;
}
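
/*
 * Illustrative note (not from the original source): for the offset walk
 * above, an MD described by two kiov entries of 4096 bytes each and an
 * offset of 5000 skips the first entry (offset becomes 5000 - 4096 = 904)
 * and selects the page of the second entry; the CPT returned is the one
 * covering that page's NUMA node.
 */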
static int
lnet_md_build(struct lnet_libmd *lmd, struct lnet_md *umd, int unlink)
{
	int i;
	unsigned int niov;
	int total_length = 0;

	lmd->md_me = NULL;
	lmd->md_start = umd->start;
	lmd->md_offset = 0;
	lmd->md_max_size = umd->max_size;
	lmd->md_options = umd->options;
	lmd->md_user_ptr = umd->user_ptr;
	lmd->md_eq = NULL;
	lmd->md_threshold = umd->threshold;
	lmd->md_refcount = 0;
	lmd->md_flags = (unlink == LNET_UNLINK) ? LNET_MD_FLAG_AUTO_UNLINK : 0;
	lmd->md_bulk_handle = umd->bulk_handle;

	if ((umd->options & LNET_MD_IOVEC) != 0) {
		if ((umd->options & LNET_MD_KIOV) != 0) /* Can't specify both */
			return -EINVAL;

		lmd->md_niov = niov = umd->length;
		memcpy(lmd->md_iov.iov, umd->start,
		       niov * sizeof(lmd->md_iov.iov[0]));

		for (i = 0; i < (int)niov; i++) {
			/* We take the base address on trust */
			if (lmd->md_iov.iov[i].iov_len <= 0) /* invalid length */
				return -EINVAL;

			total_length += lmd->md_iov.iov[i].iov_len;
		}

		lmd->md_length = total_length;

		if ((umd->options & LNET_MD_MAX_SIZE) != 0 && /* max size used */
		    (umd->max_size < 0 ||
		     umd->max_size > total_length)) /* illegal max_size */
			return -EINVAL;

	} else if ((umd->options & LNET_MD_KIOV) != 0) {
		lmd->md_niov = niov = umd->length;
		memcpy(lmd->md_iov.kiov, umd->start,
		       niov * sizeof(lmd->md_iov.kiov[0]));

		for (i = 0; i < (int)niov; i++) {
			/* We take the page pointer on trust */
			if (lmd->md_iov.kiov[i].kiov_offset +
			    lmd->md_iov.kiov[i].kiov_len > PAGE_SIZE)
				return -EINVAL; /* invalid length */

			total_length += lmd->md_iov.kiov[i].kiov_len;
		}

		lmd->md_length = total_length;

		if ((umd->options & LNET_MD_MAX_SIZE) != 0 && /* max size used */
		    (umd->max_size < 0 ||
		     umd->max_size > total_length)) /* illegal max_size */
			return -EINVAL;
	} else {   /* contiguous */
		lmd->md_length = umd->length;
		lmd->md_niov = niov = 1;
		lmd->md_iov.iov[0].iov_base = umd->start;
		lmd->md_iov.iov[0].iov_len = umd->length;

		if ((umd->options & LNET_MD_MAX_SIZE) != 0 && /* max size used */
		    (umd->max_size < 0 ||
		     umd->max_size > (int)umd->length)) /* illegal max_size */
			return -EINVAL;
	}

	return 0;
}
/* must be called with resource lock held */
static int
lnet_md_link(struct lnet_libmd *md, struct lnet_handle_eq eq_handle, int cpt)
{
	struct lnet_res_container *container = the_lnet.ln_md_containers[cpt];

	/* NB we are passed an allocated, but inactive md.
	 * if we return success, caller may lnet_md_unlink() it.
	 * otherwise caller may only lnet_md_free() it.
	 */
	/* This implementation doesn't know how to create START events or
	 * disable END events. Best to LASSERT our caller is compliant so
	 * we find out quickly... */
	/* TODO - reevaluate what should be here in light of
	 * the removal of the start and end events;
	 * maybe we shouldn't even allow LNET_EQ_NONE!
	 * LASSERT (eq == NULL);
	 */
	if (!LNetEQHandleIsInvalid(eq_handle)) {
		md->md_eq = lnet_handle2eq(&eq_handle);

		if (md->md_eq == NULL)
			return -ENOENT;

		(*md->md_eq->eq_refs[cpt])++;
	}

	lnet_res_lh_initialize(container, &md->md_lh);

	LASSERT(list_empty(&md->md_list));
	list_add(&md->md_list, &container->rec_active);

	return 0;
}
/* must be called with lnet_res_lock held */
void
lnet_md_deconstruct(struct lnet_libmd *lmd, struct lnet_md *umd)
{
	/* NB this doesn't copy out all the iov entries, so when a
	 * discontiguous MD is copied out, the target gets to know the
	 * original iov pointer (in start) and the number of entries it had,
	 * and that's all.
	 */
	umd->start = lmd->md_start;
	umd->length = ((lmd->md_options & (LNET_MD_IOVEC | LNET_MD_KIOV)) == 0) ?
		      lmd->md_length : lmd->md_niov;
	umd->threshold = lmd->md_threshold;
	umd->max_size = lmd->md_max_size;
	umd->options = lmd->md_options;
	umd->user_ptr = lmd->md_user_ptr;
	lnet_eq2handle(&umd->eq_handle, lmd->md_eq);
}

static int
lnet_md_validate(struct lnet_md *umd)
{
	if (umd->start == NULL && umd->length != 0) {
		CERROR("MD start pointer can not be NULL with length %u\n",
		       umd->length);
		return -EINVAL;
	}

	if ((umd->options & (LNET_MD_KIOV | LNET_MD_IOVEC)) != 0 &&
	    umd->length > LNET_MAX_IOV) {
		CERROR("Invalid option: too many fragments %u, %d max\n",
		       umd->length, LNET_MAX_IOV);
		return -EINVAL;
	}

	return 0;
}
/**
 * Create a memory descriptor and attach it to a ME.
 *
 * \param meh A handle for a ME to associate the new MD with.
 * \param umd Provides initial values for the user-visible parts of a MD.
 * Other than its use for initialization, there is no linkage between this
 * structure and the MD maintained by the LNet.
 * \param unlink A flag to indicate whether the MD is automatically unlinked
 * when it becomes inactive, either because the operation threshold drops to
 * zero or because the available memory becomes less than \a umd.max_size.
 * (Note that the check for unlinking a MD only occurs after the completion
 * of a successful operation on the MD.) The value LNET_UNLINK enables auto
 * unlinking; the value LNET_RETAIN disables it.
 * \param handle On successful returns, a handle to the newly created MD is
 * saved here. This handle can be used later in LNetMDUnlink().
 *
 * \retval 0       On success.
 * \retval -EINVAL If \a umd is not valid.
 * \retval -ENOMEM If the new MD cannot be allocated.
 * \retval -ENOENT Either \a meh or \a umd.eq_handle does not point to a
 * valid object. Note that it's OK to supply a NULL \a umd.eq_handle by
 * calling LNetInvalidateHandle() on it.
 * \retval -EBUSY  If the ME pointed to by \a meh is already associated with
 * a MD.
 *
 * A usage sketch follows this function.
 */
int
LNetMDAttach(struct lnet_handle_me meh, struct lnet_md umd,
	     enum lnet_unlink unlink, struct lnet_handle_md *handle)
{
	struct list_head matches = LIST_HEAD_INIT(matches);
	struct list_head drops = LIST_HEAD_INIT(drops);
	struct lnet_me *me;
	struct lnet_libmd *md;
	int cpt;
	int rc;

	LASSERT(the_lnet.ln_refcount > 0);

	if (lnet_md_validate(&umd) != 0)
		return -EINVAL;

	if ((umd.options & (LNET_MD_OP_GET | LNET_MD_OP_PUT)) == 0) {
		CERROR("Invalid option: no MD_OP set\n");
		return -EINVAL;
	}

	md = lnet_md_alloc(&umd);
	if (md == NULL)
		return -ENOMEM;

	rc = lnet_md_build(md, &umd, unlink);
	if (rc != 0)
		goto out_free;

	cpt = lnet_cpt_of_cookie(meh.cookie);

	lnet_res_lock(cpt);

	me = lnet_handle2me(&meh);
	if (me == NULL)
		rc = -ENOENT;
	else if (me->me_md != NULL)
		rc = -EBUSY;
	else
		rc = lnet_md_link(md, umd.eq_handle, cpt);
	if (rc != 0)
		goto out_unlock;

	/* attach this MD to the portal of the ME and check whether it matches
	 * any blocked msgs on this portal */
	lnet_ptl_attach_md(me, md, &matches, &drops);

	lnet_md2handle(handle, md);

	lnet_res_unlock(cpt);

	lnet_drop_delayed_msg_list(&drops, "Bad match");
	lnet_recv_delayed_msg_list(&matches);

	return 0;

out_unlock:
	lnet_res_unlock(cpt);
out_free:
	lnet_md_free(md);
	return rc;
}
EXPORT_SYMBOL(LNetMDAttach);
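
/*
 * Usage sketch (illustrative only, not part of the original file): attaching
 * a passive buffer to a previously created ME so that incoming PUTs matching
 * that ME land in it. The variables buffer, buflen, eqh and meh below are
 * hypothetical; eqh would come from LNetEQAlloc() and meh from LNetMEAttach().
 *
 *	struct lnet_md umd;
 *	struct lnet_handle_md mdh;
 *	int rc;
 *
 *	memset(&umd, 0, sizeof(umd));
 *	umd.start = buffer;
 *	umd.length = buflen;
 *	umd.threshold = LNET_MD_THRESH_INF;
 *	umd.options = LNET_MD_OP_PUT;
 *	umd.user_ptr = NULL;
 *	umd.eq_handle = eqh;
 *
 *	rc = LNetMDAttach(meh, umd, LNET_UNLINK, &mdh);
 *	if (rc != 0)
 *		CERROR("LNetMDAttach failed: %d\n", rc);
 */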
/**
 * Create a "free floating" memory descriptor - a MD that is not associated
 * with a ME. Such MDs are usually used in LNetPut() and LNetGet() operations.
 *
 * \param umd,unlink See the discussion for LNetMDAttach().
 * \param handle On successful returns, a handle to the newly created MD is
 * saved here. This handle can be used later in LNetMDUnlink(), LNetPut(),
 * and LNetGet() operations.
 *
 * \retval 0       On success.
 * \retval -EINVAL If \a umd is not valid.
 * \retval -ENOMEM If the new MD cannot be allocated.
 * \retval -ENOENT \a umd.eq_handle does not point to a valid EQ. Note that
 * it's OK to supply a NULL \a umd.eq_handle by calling
 * LNetInvalidateHandle() on it.
 */
int
LNetMDBind(struct lnet_md umd, enum lnet_unlink unlink,
	   struct lnet_handle_md *handle)
{
	struct lnet_libmd *md;
	int cpt;
	int rc;

	LASSERT(the_lnet.ln_refcount > 0);

	if (lnet_md_validate(&umd) != 0)
		return -EINVAL;

	if ((umd.options & (LNET_MD_OP_GET | LNET_MD_OP_PUT)) != 0) {
		CERROR("Invalid option: GET|PUT illegal on active MDs\n");
		return -EINVAL;
	}

	md = lnet_md_alloc(&umd);
	if (md == NULL)
		return -ENOMEM;

	rc = lnet_md_build(md, &umd, unlink);
	if (rc != 0)
		goto out_free;

	cpt = lnet_res_lock_current();

	rc = lnet_md_link(md, umd.eq_handle, cpt);
	if (rc != 0)
		goto out_unlock;

	lnet_md2handle(handle, md);

	lnet_res_unlock(cpt);
	return 0;

out_unlock:
	lnet_res_unlock(cpt);
out_free:
	lnet_md_free(md);
	return rc;
}
EXPORT_SYMBOL(LNetMDBind);
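
/*
 * Usage sketch (illustrative only, not part of the original file): binding a
 * free-floating MD that describes the local source buffer of an LNetPut().
 * Note that LNET_MD_OP_GET/LNET_MD_OP_PUT must not be set here; those options
 * are only meaningful for MDs attached to an ME. The variables buffer, buflen
 * and eqh below are hypothetical.
 *
 *	struct lnet_md umd;
 *	struct lnet_handle_md mdh;
 *	int rc;
 *
 *	memset(&umd, 0, sizeof(umd));
 *	umd.start = buffer;
 *	umd.length = buflen;
 *	umd.threshold = 2;	// e.g. SEND + ACK for one acknowledged PUT
 *	umd.options = 0;
 *	umd.eq_handle = eqh;
 *
 *	rc = LNetMDBind(umd, LNET_UNLINK, &mdh);
 *
 * On success, mdh can then be passed as the MD handle to LNetPut() or
 * LNetGet().
 */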
/**
 * Unlink the memory descriptor from any ME it may be linked to and release
 * the internal resources associated with it. As a result, active messages
 * associated with the MD may get aborted.
 *
 * This function does not free the memory region associated with the MD;
 * i.e., the memory the user allocated for this MD. If the ME associated with
 * this MD is not NULL and was created with auto unlink enabled, the ME is
 * unlinked as well (see LNetMEAttach()).
 *
 * Explicitly unlinking a MD via this function behaves the same as automatic
 * unlinking, except that automatic unlinking never generates a dedicated
 * LNET_EVENT_UNLINK event.
 *
 * An unlinked event can be reported in two ways:
 * - If there are no pending operations on the MD, it is unlinked immediately
 *   and an LNET_EVENT_UNLINK event is logged before this function returns.
 * - Otherwise, the MD is only marked for deletion when this function
 *   returns, and the unlinked event will be piggybacked on the event of
 *   the completion of the last operation by setting the unlinked field of
 *   the event. No dedicated LNET_EVENT_UNLINK event is generated.
 *
 * Note that in both cases the unlinked field of the event is always set; no
 * more events will happen on the MD after such an event is logged.
 *
 * \param mdh A handle for the MD to be unlinked.
 *
 * \retval 0       On success.
 * \retval -ENOENT If \a mdh does not point to a valid MD object.
 */
int
LNetMDUnlink(struct lnet_handle_md mdh)
{
	struct lnet_event ev;
	struct lnet_libmd *md;
	int cpt;

	LASSERT(the_lnet.ln_refcount > 0);

	cpt = lnet_cpt_of_cookie(mdh.cookie);
	lnet_res_lock(cpt);

	md = lnet_handle2md(&mdh);
	if (md == NULL) {
		lnet_res_unlock(cpt);
		return -ENOENT;
	}

	md->md_flags |= LNET_MD_FLAG_ABORTED;

	/* If the MD is busy, lnet_md_unlink just marks it for deletion, and
	 * when the LND is done, the completion event flags that the MD was
	 * unlinked. Otherwise, we enqueue an unlink event now... */
	if (md->md_eq != NULL && md->md_refcount == 0) {
		lnet_build_unlink_event(md, &ev);
		lnet_eq_enqueue_event(md->md_eq, &ev);
	}

	lnet_md_unlink(md);

	lnet_res_unlock(cpt);
	return 0;
}
EXPORT_SYMBOL(LNetMDUnlink);
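
/*
 * Usage sketch (illustrative only, not part of the original file): tearing
 * down an MD created by one of the calls above. The handle mdh is
 * hypothetical, and the MD's EQ is assumed to be drained elsewhere via
 * LNetEQPoll() or an EQ callback.
 *
 *	rc = LNetMDUnlink(mdh);
 *	if (rc == -ENOENT)
 *		CERROR("stale MD handle\n");
 *
 * The caller must not free the MD's memory region until an event with
 * ev.unlinked set has been delivered on the MD's EQ; only then has LNet
 * dropped all references to the buffer.
 */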