1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
5 * Memory Descriptor management routines
7 * Copyright (c) 2001-2003 Cluster File Systems, Inc.
9 * This file is part of Lustre, http://www.lustre.org
11 * Lustre is free software; you can redistribute it and/or
12 * modify it under the terms of version 2 of the GNU General Public
13 * License as published by the Free Software Foundation.
15 * Lustre is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
20 * You should have received a copy of the GNU General Public License
21 * along with Lustre; if not, write to the Free Software
22 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
25 #define DEBUG_SUBSYSTEM S_LNET
27 #include <lnet/lib-lnet.h>
/* Unlink a memory descriptor: flag it as a zombie so a repeat unlink is a
 * no-op, detach it from its matching entry (if any), invalidate its handle
 * so all future lookups fail, and — once no operation still references it —
 * drop its EQ reference and remove it from the active-MD list.
 *
 * Must be called with LNET_LOCK held. */
lnet_md_unlink(lnet_libmd_t *md)
        if ((md->md_flags & LNET_MD_FLAG_ZOMBIE) == 0) {
                /* first unlink attempt... */
                lnet_me_t *me = md->md_me;

                /* mark zombie so a second unlink attempt does nothing */
                md->md_flags |= LNET_MD_FLAG_ZOMBIE;

                /* Disassociate from ME (if any), and unlink the ME too if it
                 * was created with LNET_UNLINK (auto-unlink) semantics. */
                /* NOTE(review): me is dereferenced below without a NULL
                 * check visible in this view — confirm the guard exists. */
                if (me->me_unlink == LNET_UNLINK)

        /* ensure all future handle lookups fail */
        lnet_invalidate_handle(&md->md_lh);

        if (md->md_refcount != 0) {
                /* still busy: defer the actual teardown until the last
                 * outstanding reference is dropped */
                CDEBUG(D_NET, "Queueing unlink of md %p\n", md);

        CDEBUG(D_NET, "Unlinking md %p\n", md);

        /* drop the MD's reference on its event queue, if it has one */
        if (md->md_eq != NULL) {
                md->md_eq->eq_refcount--;
                LASSERT (md->md_eq->eq_refcount >= 0);

        /* remove from the active-MD list */
        list_del (&md->md_list);
/* Initialise an already-allocated library MD (lmd) from the user-visible
 * descriptor umd: resolve the EQ handle, copy the user attributes,
 * validate the iovec/kiov geometry and max_size, record the auto-unlink
 * policy, and on success make the MD visible to handle lookups and add it
 * to the active-MD list.
 *
 * Must be called with LNET_LOCK held. */
lib_md_build(lnet_libmd_t *lmd, lnet_md_t *umd, int unlink)
        /* NB we are passed an allocated, but uninitialised/active md.
         * if we return success, caller may lnet_md_unlink() it.
         * otherwise caller may only lnet_md_free() it.
         */
        if (!LNetHandleIsEqual (umd->eq_handle, LNET_EQ_NONE)) {
                /* resolve the caller's EQ handle to the library EQ object */
                eq = lnet_handle2eq(&umd->eq_handle);

        /* This implementation doesn't know how to create START events or
         * disable END events. Best to LASSERT our caller is compliant so
         * we find out quickly... */
        /* TODO - reevaluate what should be here in light of
         * the removal of the start and end events
         * maybe there we shouldn't even allow LNET_EQ_NONE!)
         */
        /* copy the user-supplied attributes into the library MD */
        lmd->md_start = umd->start;
        lmd->md_max_size = umd->max_size;
        lmd->md_options = umd->options;
        lmd->md_user_ptr = umd->user_ptr;
        lmd->md_threshold = umd->threshold;
        lmd->md_refcount = 0;
        /* remember whether the MD should auto-unlink when exhausted */
        lmd->md_flags = (unlink == LNET_UNLINK) ? LNET_MD_FLAG_AUTO_UNLINK : 0;

        if ((umd->options & LNET_MD_IOVEC) != 0) {
                /* scatter/gather described by a struct iovec array */
                if ((umd->options & LNET_MD_KIOV) != 0) /* Can't specify both */

                /* for iovec MDs, umd->length is the number of entries */
                lmd->md_niov = niov = umd->length;
                memcpy(lmd->md_iov.iov, umd->start,
                       niov * sizeof (lmd->md_iov.iov[0]));

                for (i = 0; i < niov; i++) {
                        /* We take the base address on trust */
                        if (lmd->md_iov.iov[i].iov_len <= 0) /* invalid length */

                        total_length += lmd->md_iov.iov[i].iov_len;

                /* MD length is the sum of all fragment lengths */
                lmd->md_length = total_length;

                if ((umd->options & LNET_MD_MAX_SIZE) != 0 && /* max size used */
                    (umd->max_size < 0 ||
                     umd->max_size > total_length)) // illegal max_size

        } else if ((umd->options & LNET_MD_KIOV) != 0) {
                /* scatter/gather described by a kernel page (kiov) array */
                lmd->md_niov = niov = umd->length;
                memcpy(lmd->md_iov.kiov, umd->start,
                       niov * sizeof (lmd->md_iov.kiov[0]));

                for (i = 0; i < niov; i++) {
                        /* We take the page pointer on trust */
                        /* each fragment must fit within a single page */
                        if (lmd->md_iov.kiov[i].kiov_offset +
                            lmd->md_iov.kiov[i].kiov_len > CFS_PAGE_SIZE )
                                return -EINVAL; /* invalid length */

                        total_length += lmd->md_iov.kiov[i].kiov_len;

                lmd->md_length = total_length;

                if ((umd->options & LNET_MD_MAX_SIZE) != 0 && /* max size used */
                    (umd->max_size < 0 ||
                     umd->max_size > total_length)) // illegal max_size

        } else { /* contiguous */
                /* single flat buffer: model it as a one-entry iovec */
                lmd->md_length = umd->length;
                lmd->md_niov = niov = 1;
                lmd->md_iov.iov[0].iov_base = umd->start;
                lmd->md_iov.iov[0].iov_len = umd->length;

                if ((umd->options & LNET_MD_MAX_SIZE) != 0 && /* max size used */
                    (umd->max_size < 0 ||
                     umd->max_size > umd->length)) // illegal max_size

        /* It's good; let handle2md succeed and add to active mds */
        lnet_initialise_handle (&lmd->md_lh, LNET_COOKIE_TYPE_MD);
        list_add (&lmd->md_list, &the_lnet.ln_active_mds);
/* Copy a library MD's current state back into a user-visible lnet_md_t
 * (e.g. for reporting the MD in an event).
 *
 * Must be called with LNET_LOCK held. */
lnet_md_deconstruct(lnet_libmd_t *lmd, lnet_md_t *umd)
        /* NB this doesn't copy out all the iov entries so when a
         * discontiguous MD is copied out, the target gets to know the
         * original iov pointer (in start) and the number of entries it had
         */
        umd->start = lmd->md_start;
        /* for iovec/kiov MDs, 'length' reports the fragment count rather
         * than the byte count */
        umd->length = ((lmd->md_options & (LNET_MD_IOVEC | LNET_MD_KIOV)) == 0) ?
                      lmd->md_length : lmd->md_niov;
        umd->threshold = lmd->md_threshold;
        umd->max_size = lmd->md_max_size;
        umd->options = lmd->md_options;
        umd->user_ptr = lmd->md_user_ptr;
        /* convert the EQ pointer back into a user-visible handle */
        lnet_eq2handle(&umd->eq_handle, lmd->md_eq);
/* Public LNet API: create a memory descriptor from umd and attach it to
 * the match entry named by meh.  On success *handle names the new MD, and
 * messages that were blocked waiting for an MD on this ME are re-matched.
 * Fails if the ME already has an MD attached. */
LNetMDAttach(lnet_handle_me_t meh, lnet_md_t umd,
             lnet_unlink_t unlink, lnet_handle_md_t *handle)
        LASSERT (the_lnet.ln_init);
        LASSERT (the_lnet.ln_refcount > 0);

        /* reject fragment lists larger than the supported maximum */
        if ((umd.options & (LNET_MD_KIOV | LNET_MD_IOVEC)) != 0 &&
            umd.length > LNET_MAX_IOV) /* too many fragments */

        md = lnet_md_alloc(&umd);

        /* resolve the caller's ME handle */
        me = lnet_handle2me(&meh);
        } else if (me->me_md != NULL) {
                /* ME already has an MD attached */

        rc = lib_md_build(md, &umd, unlink);

        lnet_md2handle(handle, md);

        /* check if this MD matches any blocked msgs */
        lnet_match_blocked_msg(md); /* expects LNET_LOCK held */
/* Public LNet API: create a free-floating memory descriptor from umd,
 * not attached to any match entry.  On success *handle names the new MD. */
LNetMDBind(lnet_md_t umd, lnet_unlink_t unlink, lnet_handle_md_t *handle)
        LASSERT (the_lnet.ln_init);
        LASSERT (the_lnet.ln_refcount > 0);

        /* reject fragment lists larger than the supported maximum */
        if ((umd.options & (LNET_MD_KIOV | LNET_MD_IOVEC)) != 0 &&
            umd.length > LNET_MAX_IOV) /* too many fragments */

        md = lnet_md_alloc(&umd);

        /* initialise and publish the MD; see lib_md_build for validation */
        rc = lib_md_build(md, &umd, unlink);

        lnet_md2handle(handle, md);
280 LNetMDUnlink (lnet_handle_md_t mdh)
285 LASSERT (the_lnet.ln_init);
286 LASSERT (the_lnet.ln_refcount > 0);
290 md = lnet_handle2md(&mdh);
296 /* If the MD is busy, lnet_md_unlink just marks it for deletion, and
297 * when the NAL is done, the completion event flags that the MD was
298 * unlinked. Otherwise, we enqueue an event now... */
300 if (md->md_eq != NULL &&
301 md->md_refcount == 0) {
302 memset(&ev, 0, sizeof(ev));
304 ev.type = LNET_EVENT_UNLINK;
307 lnet_md_deconstruct(md, &ev.md);
308 lnet_md2handle(&ev.md_handle, md);
310 lnet_enq_event_locked(md->md_eq, &ev);