1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
6 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 only,
10 * as published by the Free Software Foundation.
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License version 2 for more details (a copy is included
16 * in the LICENSE file that accompanied this code).
18 * You should have received a copy of the GNU General Public License
19 * version 2 along with this program; If not, see [sun.com URL with a
22 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
23 * CA 95054 USA or visit www.sun.com if you need additional information or
29 * Copyright 2008 Sun Microsystems, Inc. All rights reserved
30 * Use is subject to license terms.
33 * This file is part of Lustre, http://www.lustre.org/
34 * Lustre is a trademark of Sun Microsystems, Inc.
38 * Memory Descriptor management routines
41 #define DEBUG_SUBSYSTEM S_LNET
43 #include <lnet/lib-lnet.h>
45 /* must be called with LNET_LOCK held */
/*
 * Unlink a memory descriptor.
 *
 * First call: marks @md as a zombie, detaches it from its matching ME
 * (unlinking the ME too when it was created with LNET_UNLINK), and
 * invalidates the MD's handle so later handle lookups fail.  If the MD
 * is still in use (md_refcount != 0) the final teardown is deferred;
 * otherwise the EQ reference is dropped and the MD leaves the active list.
 *
 * NOTE(review): this listing is missing source lines (the embedded line
 * numbers jump, e.g. 51 -> 53, 77 -> 80), so return type, some braces,
 * the NULL-check around `me`, the early return on the busy path, and the
 * final free are not visible here — annotated as-is, nothing reconstructed.
 */
47 lnet_md_unlink(lnet_libmd_t *md)
49 if ((md->md_flags & LNET_MD_FLAG_ZOMBIE) == 0) {
50 /* first unlink attempt... */
51 lnet_me_t *me = md->md_me;
53 md->md_flags |= LNET_MD_FLAG_ZOMBIE;
55 /* Disassociate from ME (if any), and unlink it if it was created
60 if (me->me_unlink == LNET_UNLINK)
64 /* ensure all future handle lookups fail */
65 lnet_invalidate_handle(&md->md_lh);
/* Busy MD: actual teardown is deferred until the refcount drains.
 * NOTE(review): the early return for this branch is in one of the
 * elided lines — confirm against the full source. */
68 if (md->md_refcount != 0) {
69 CDEBUG(D_NET, "Queueing unlink of md %p\n", md);
/* Idle MD: release the EQ reference (if any) and remove from the
 * active-MD list. */
73 CDEBUG(D_NET, "Unlinking md %p\n", md);
75 if (md->md_eq != NULL) {
76 md->md_eq->eq_refcount--;
77 LASSERT (md->md_eq->eq_refcount >= 0);
80 list_del (&md->md_list);
84 /* must be called with LNET_LOCK held */
/*
 * Initialise library MD @lmd from the user-visible descriptor @umd.
 *
 * Validates the EQ handle, copies the scalar fields, then builds the
 * fragment list for one of three layouts: LNET_MD_IOVEC (struct iovec
 * array), LNET_MD_KIOV (kernel page vector), or a single contiguous
 * buffer.  Each layout path validates fragment lengths and, when
 * LNET_MD_MAX_SIZE is set, checks max_size against the total length.
 * On success the MD gets a handle cookie and joins ln_active_mds.
 *
 * NOTE(review): this listing omits source lines (embedded numbering is
 * non-contiguous) — the local declarations (eq, niov, total_length, i),
 * several error returns, and closing braces are not visible here.
 */
86 lib_md_build(lnet_libmd_t *lmd, lnet_md_t *umd, int unlink)
93 /* NB we are passed an allocated, but uninitialised/active md.
94 * if we return success, caller may lnet_md_unlink() it.
95 * otherwise caller may only lnet_md_free() it.
/* Resolve the caller's EQ handle unless it is LNET_EQ_NONE. */
98 if (!LNetHandleIsEqual (umd->eq_handle, LNET_EQ_NONE)) {
99 eq = lnet_handle2eq(&umd->eq_handle);
104 /* This implementation doesn't know how to create START events or
105 * disable END events. Best to LASSERT our caller is compliant so
106 * we find out quickly... */
107 /* TODO - reevaluate what should be here in light of
108 * the removal of the start and end events
109 * maybe there we shouldn't even allow LNET_EQ_NONE!)
110 LASSERT (eq == NULL);
/* Copy the user-supplied scalar fields into the library MD. */
114 lmd->md_start = umd->start;
116 lmd->md_max_size = umd->max_size;
117 lmd->md_options = umd->options;
118 lmd->md_user_ptr = umd->user_ptr;
120 lmd->md_threshold = umd->threshold;
121 lmd->md_refcount = 0;
122 lmd->md_flags = (unlink == LNET_UNLINK) ? LNET_MD_FLAG_AUTO_UNLINK : 0;
/* Case 1: iovec-described MD — umd->length is the iovec count and
 * umd->start points at the iovec array. */
124 if ((umd->options & LNET_MD_IOVEC) != 0) {
126 if ((umd->options & LNET_MD_KIOV) != 0) /* Can't specify both */
129 lmd->md_niov = niov = umd->length;
130 memcpy(lmd->md_iov.iov, umd->start,
131 niov * sizeof (lmd->md_iov.iov[0]));
133 for (i = 0; i < niov; i++) {
134 /* We take the base address on trust */
135 if (lmd->md_iov.iov[i].iov_len <= 0) /* invalid length */
138 total_length += lmd->md_iov.iov[i].iov_len;
141 lmd->md_length = total_length;
143 if ((umd->options & LNET_MD_MAX_SIZE) != 0 && /* max size used */
144 (umd->max_size < 0 ||
145 umd->max_size > total_length)) // illegal max_size
/* Case 2: kiov (page-based) MD — each fragment must fit within a
 * single page (offset + len <= CFS_PAGE_SIZE). */
148 } else if ((umd->options & LNET_MD_KIOV) != 0) {
152 lmd->md_niov = niov = umd->length;
153 memcpy(lmd->md_iov.kiov, umd->start,
154 niov * sizeof (lmd->md_iov.kiov[0]));
156 for (i = 0; i < niov; i++) {
157 /* We take the page pointer on trust */
158 if (lmd->md_iov.kiov[i].kiov_offset +
159 lmd->md_iov.kiov[i].kiov_len > CFS_PAGE_SIZE )
160 return -EINVAL; /* invalid length */
162 total_length += lmd->md_iov.kiov[i].kiov_len;
165 lmd->md_length = total_length;
167 if ((umd->options & LNET_MD_MAX_SIZE) != 0 && /* max size used */
168 (umd->max_size < 0 ||
169 umd->max_size > total_length)) // illegal max_size
/* Case 3: single contiguous buffer, stored as a one-entry iovec. */
172 } else { /* contiguous */
173 lmd->md_length = umd->length;
174 lmd->md_niov = niov = 1;
175 lmd->md_iov.iov[0].iov_base = umd->start;
176 lmd->md_iov.iov[0].iov_len = umd->length;
178 if ((umd->options & LNET_MD_MAX_SIZE) != 0 && /* max size used */
179 (umd->max_size < 0 ||
180 umd->max_size > umd->length)) // illegal max_size
187 /* It's good; let handle2md succeed and add to active mds */
188 lnet_initialise_handle (&lmd->md_lh, LNET_COOKIE_TYPE_MD);
189 list_add (&lmd->md_list, &the_lnet.ln_active_mds);
194 /* must be called with LNET_LOCK held */
/*
 * Copy library MD @lmd back out into the user-visible descriptor @umd
 * (the reverse of lib_md_build), including translating the EQ pointer
 * back into a handle.  For discontiguous MDs (IOVEC/KIOV), umd->length
 * reports the fragment count rather than the byte length — see the NB
 * comment below.
 */
196 lnet_md_deconstruct(lnet_libmd_t *lmd, lnet_md_t *umd)
198 /* NB this doesn't copy out all the iov entries so when a
199 * discontiguous MD is copied out, the target gets to know the
200 * original iov pointer (in start) and the number of entries it had
203 umd->start = lmd->md_start;
204 umd->length = ((lmd->md_options & (LNET_MD_IOVEC | LNET_MD_KIOV)) == 0) ?
205 lmd->md_length : lmd->md_niov;
206 umd->threshold = lmd->md_threshold;
207 umd->max_size = lmd->md_max_size;
208 umd->options = lmd->md_options;
209 umd->user_ptr = lmd->md_user_ptr;
210 lnet_eq2handle(&umd->eq_handle, lmd->md_eq);
/*
 * Public API: create an MD from @umd and attach it to the match entry
 * identified by @meh; on success *handle is set to the new MD's handle
 * and any messages blocked waiting for a matching MD are retried.
 *
 * NOTE(review): this listing omits source lines — the local declarations,
 * the locking calls, the error paths for a failed lnet_md_alloc(), an
 * invalid ME handle, an already-occupied ME (me_md != NULL), a failed
 * lib_md_build(), and the return statements are not visible here.
 */
214 LNetMDAttach(lnet_handle_me_t meh, lnet_md_t umd,
215 lnet_unlink_t unlink, lnet_handle_md_t *handle)
221 LASSERT (the_lnet.ln_init);
222 LASSERT (the_lnet.ln_refcount > 0);
/* A fragmented MD may carry at most LNET_MAX_IOV fragments. */
224 if ((umd.options & (LNET_MD_KIOV | LNET_MD_IOVEC)) != 0 &&
225 umd.length > LNET_MAX_IOV) /* too many fragments */
228 md = lnet_md_alloc(&umd);
234 me = lnet_handle2me(&meh);
/* ME already has an MD attached — error path (body elided here). */
237 } else if (me->me_md != NULL) {
240 rc = lib_md_build(md, &umd, unlink);
245 lnet_md2handle(handle, md);
247 /* check if this MD matches any blocked msgs */
248 lnet_match_blocked_msg(md); /* expects LNET_LOCK held */
/*
 * Public API: create a free-floating MD from @umd (not attached to any
 * match entry — used as the source/sink of LNetPut/LNetGet); on success
 * *handle is set to the new MD's handle.
 *
 * NOTE(review): this listing omits source lines — local declarations,
 * locking, the error paths for allocation/build failure, and the return
 * statements are not visible here.
 */
262 LNetMDBind(lnet_md_t umd, lnet_unlink_t unlink, lnet_handle_md_t *handle)
267 LASSERT (the_lnet.ln_init);
268 LASSERT (the_lnet.ln_refcount > 0);
/* Same fragment-count limit as LNetMDAttach(). */
270 if ((umd.options & (LNET_MD_KIOV | LNET_MD_IOVEC)) != 0 &&
271 umd.length > LNET_MAX_IOV) /* too many fragments */
274 md = lnet_md_alloc(&umd);
280 rc = lib_md_build(md, &umd, unlink);
283 lnet_md2handle(handle, md);
/*
 * Public API: unlink the MD identified by @mdh.  If the MD is idle and
 * has an EQ, an UNLINK event is enqueued immediately; a busy MD is only
 * flagged, and the unlink is reported via its final completion event.
 *
 * NOTE(review): the definition continues past the end of this listing
 * (and interior lines are elided — locking, the invalid-handle error
 * path, and the lnet_md_unlink() call itself are not visible here).
 */
296 LNetMDUnlink (lnet_handle_md_t mdh)
301 LASSERT (the_lnet.ln_init);
302 LASSERT (the_lnet.ln_refcount > 0);
306 md = lnet_handle2md(&mdh);
312 /* If the MD is busy, lnet_md_unlink just marks it for deletion, and
313 * when the NAL is done, the completion event flags that the MD was
314 * unlinked. Otherwise, we enqueue an event now... */
316 if (md->md_eq != NULL &&
317 md->md_refcount == 0) {
318 lnet_build_unlink_event(md, &ev);
319 lnet_enq_event_locked(md->md_eq, &ev);