1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
5 * Memory Descriptor management routines
7 * Copyright (c) 2001-2003 Cluster File Systems, Inc.
9 * This file is part of Lustre, http://www.lustre.org
11 * Lustre is free software; you can redistribute it and/or
12 * modify it under the terms of version 2 of the GNU General Public
13 * License as published by the Free Software Foundation.
15 * Lustre is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
20 * You should have received a copy of the GNU General Public License
21 * along with Lustre; if not, write to the Free Software
22 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
28 # define DEBUG_SUBSYSTEM S_PORTALS
29 # include <linux/kp30.h>
32 #include <portals/lib-p30.h>
34 /* must be called with state lock held */
/*
 * lib_md_unlink - begin (or complete) tear-down of a memory descriptor.
 *
 * NOTE(review): this is an elided listing -- the return type, braces and
 * several statements are missing; the comments below annotate only the
 * visible lines.
 *
 * First unlink attempt: mark the MD a zombie, detach it from its ME
 * (auto-unlinking the ME when it uses PTL_UNLINK) and invalidate the
 * MD's handle so all future lookups fail.  While operations are still
 * pending on the MD, final destruction is deferred; otherwise the NAL
 * buffer mappings are released, the EQ reference is dropped and the MD
 * is removed from the active-MD list.
 */
36 lib_md_unlink(lib_nal_t *nal, lib_md_t *md)
38 if ((md->md_flags & PTL_MD_FLAG_ZOMBIE) == 0) {
39 /* first unlink attempt... */
40 lib_me_t *me = md->me;
42 md->md_flags |= PTL_MD_FLAG_ZOMBIE;
44 /* Disassociate from ME (if any), and unlink it if it was created with auto unlink */
48 if (me->unlink == PTL_UNLINK)
49 lib_me_unlink(nal, me);
52 /* ensure all future handle lookups fail */
53 lib_invalidate_handle(nal, &md->md_lh);
/* MD still busy: just queue the unlink; presumably this function is
 * re-invoked when the last pending operation completes (elided here). */
56 if (md->pending != 0) {
57 CDEBUG(D_NET, "Queueing unlink of md %p\n", md);
61 CDEBUG(D_NET, "Unlinking md %p\n", md);
/* Release NAL-side mappings: page-based (kiov) MDs via unmap_pages,
 * virtual-address (iov) MDs via unmap; both hooks are optional. */
63 if ((md->options & PTL_MD_KIOV) != 0) {
64 if (nal->libnal_unmap_pages != NULL)
65 nal->libnal_unmap_pages (nal,
69 } else if (nal->libnal_unmap != NULL) {
70 nal->libnal_unmap (nal,
71 md->md_niov, md->md_iov.iov,
/* Drop this MD's reference on its event queue (NULL guard elided). */
76 md->eq->eq_refcount--;
77 LASSERT (md->eq->eq_refcount >= 0);
80 list_del (&md->md_list);
84 /* must be called with state lock held */
/*
 * lib_md_build - initialise an allocated lib-md from the user-supplied
 * ptl_md_t and map its buffers with the NAL.  Handles three buffer
 * shapes: an iovec list, a kiov (kernel page) list, or one contiguous
 * region.  Returns a PTL_* status (return type/branches partly elided
 * in this listing).
 *
 * NOTE(review): elided listing -- local declarations, braces and some
 * statements are not visible; comments annotate visible lines only.
 */
86 lib_md_build(lib_nal_t *nal, lib_md_t *lmd, ptl_md_t *umd, int unlink)
94 /* NB we are passed an allocated, but uninitialised/active md.
95 * if we return success, caller may lib_md_unlink() it.
96 * otherwise caller may only lib_md_free() it.
/* Resolve the optional event queue handle; PTL_EQ_NONE means no EQ. */
99 if (!PtlHandleIsEqual (umd->eq_handle, PTL_EQ_NONE)) {
100 eq = ptl_handle2eq(&umd->eq_handle, nal);
102 return PTL_EQ_INVALID;
105 /* This implementation doesn't know how to create START events or
106 * disable END events. Best to LASSERT our caller is compliant so
107 * we find out quickly... */
108 LASSERT (eq == NULL ||
109 ((umd->options & PTL_MD_EVENT_START_DISABLE) != 0 &&
110 (umd->options & PTL_MD_EVENT_END_DISABLE) == 0));
/* Copy the user MD's scalar fields into the lib MD. */
113 lmd->start = umd->start;
115 lmd->max_size = umd->max_size;
116 lmd->options = umd->options;
117 lmd->user_ptr = umd->user_ptr;
119 lmd->threshold = umd->threshold;
121 lmd->md_flags = (unlink == PTL_UNLINK) ? PTL_MD_FLAG_AUTO_UNLINK : 0;
/* Case 1: scatter/gather list of virtual-address fragments (iovec). */
123 if ((umd->options & PTL_MD_IOVEC) != 0) {
125 if ((umd->options & PTL_MD_KIOV) != 0) /* Can't specify both */
126 return PTL_MD_ILLEGAL;
/* Here umd->length is the fragment count; umd->start is the iovec. */
128 lmd->md_niov = niov = umd->length;
129 memcpy(lmd->md_iov.iov, umd->start,
130 niov * sizeof (lmd->md_iov.iov[0]));
132 for (i = 0; i < niov; i++) {
133 /* We take the base address on trust */
134 if (lmd->md_iov.iov[i].iov_len <= 0) /* invalid length */
135 return PTL_MD_ILLEGAL;
137 total_length += lmd->md_iov.iov[i].iov_len;
140 lmd->length = total_length;
/* max_size, when enabled, must lie in [0, total MD length]. */
142 if ((umd->options & PTL_MD_MAX_SIZE) != 0 && /* max size used */
143 (umd->max_size < 0 ||
144 umd->max_size > total_length)) // illegal max_size
145 return PTL_MD_ILLEGAL;
/* Let the NAL map the fragments (rc handling elided in this listing). */
147 if (nal->libnal_map != NULL) {
148 rc = nal->libnal_map (nal, niov, lmd->md_iov.iov,
/* Case 2: scatter/gather list of kernel pages (kiov). */
153 } else if ((umd->options & PTL_MD_KIOV) != 0) {
155 return PTL_MD_ILLEGAL;
157 /* Trap attempt to use paged I/O if unsupported early. */
158 if (nal->libnal_send_pages == NULL ||
159 nal->libnal_recv_pages == NULL)
160 return PTL_MD_INVALID;
162 lmd->md_niov = niov = umd->length;
163 memcpy(lmd->md_iov.kiov, umd->start,
164 niov * sizeof (lmd->md_iov.kiov[0]));
166 for (i = 0; i < niov; i++) {
167 /* We take the page pointer on trust */
/* A fragment must not run past the end of its page. */
168 if (lmd->md_iov.kiov[i].kiov_offset +
169 lmd->md_iov.kiov[i].kiov_len > PAGE_SIZE )
170 return PTL_VAL_FAILED; /* invalid length */
172 total_length += lmd->md_iov.kiov[i].kiov_len;
175 lmd->length = total_length;
177 if ((umd->options & PTL_MD_MAX_SIZE) != 0 && /* max size used */
178 (umd->max_size < 0 ||
179 umd->max_size > total_length)) // illegal max_size
180 return PTL_MD_ILLEGAL;
182 if (nal->libnal_map_pages != NULL) {
183 rc = nal->libnal_map_pages (nal, niov, lmd->md_iov.kiov,
/* Case 3: a single contiguous buffer, described as a one-entry iovec. */
189 } else { /* contiguous */
190 lmd->length = umd->length;
191 lmd->md_niov = niov = 1;
192 lmd->md_iov.iov[0].iov_base = umd->start;
193 lmd->md_iov.iov[0].iov_len = umd->length;
195 if ((umd->options & PTL_MD_MAX_SIZE) != 0 && /* max size used */
196 (umd->max_size < 0 ||
197 umd->max_size > umd->length)) // illegal max_size
198 return PTL_MD_ILLEGAL;
200 if (nal->libnal_map != NULL) {
201 rc = nal->libnal_map (nal, niov, lmd->md_iov.iov,
211 /* It's good; let handle2md succeed and add to active mds */
212 lib_initialise_handle (nal, &lmd->md_lh, PTL_COOKIE_TYPE_MD);
213 list_add (&lmd->md_list, &nal->libnal_ni.ni_active_mds);
218 /* must be called with state lock held */
/*
 * lib_md_deconstruct - copy a lib-md back out into the user-visible
 * ptl_md_t form (used e.g. when filling in event descriptors).
 * NOTE(review): elided listing; return type and braces are not visible.
 */
220 lib_md_deconstruct(lib_nal_t *nal, lib_md_t *lmd, ptl_md_t *umd)
222 /* NB this doesn't copy out all the iov entries so when a
223 * discontiguous MD is copied out, the target gets to know the
224 * original iov pointer (in start) and the number of entries it had
227 umd->start = lmd->start;
/* Fragmented MDs report their fragment count; contiguous MDs report bytes. */
228 umd->length = ((lmd->options & (PTL_MD_IOVEC | PTL_MD_KIOV)) == 0) ?
229 lmd->length : lmd->md_niov;
230 umd->threshold = lmd->threshold;
231 umd->max_size = lmd->max_size;
232 umd->options = lmd->options;
233 umd->user_ptr = lmd->user_ptr;
234 ptl_eq2handle(&umd->eq_handle, nal, lmd->eq);
/*
 * lib_api_md_attach - API entry for PtlMDAttach: create an MD and attach
 * it to the match entry identified by *meh, returning the new MD handle
 * in *handle.
 *
 * NOTE(review): elided listing -- return type, error branches and braces
 * are partly missing; on the visible failure path the MD is freed and
 * the lock dropped.
 */
238 lib_api_md_attach(nal_t *apinal, ptl_handle_me_t *meh,
239 ptl_md_t *umd, ptl_unlink_t unlink,
240 ptl_handle_md_t *handle)
242 lib_nal_t *nal = apinal->nal_data;
/* Reject fragment lists larger than this implementation supports. */
248 if ((umd->options & (PTL_MD_KIOV | PTL_MD_IOVEC)) != 0 &&
249 umd->length > PTL_MD_MAX_IOV) /* too many fragments */
250 return PTL_IOV_INVALID;
252 md = lib_md_alloc(nal, umd);
256 LIB_LOCK(nal, flags);
/* Resolve the ME; the ME must exist and must not already have an MD
 * (the intervening error branches are elided in this listing). */
258 me = ptl_handle2me(meh, nal);
261 } else if (me->md != NULL) {
264 rc = lib_md_build(nal, md, umd, unlink);
/* Success: hand the caller a handle to the new MD. */
269 ptl_md2handle(handle, nal, md);
271 LIB_UNLOCK(nal, flags);
/* Failure path: release the unused MD. */
276 lib_md_free (nal, md);
278 LIB_UNLOCK(nal, flags);
/*
 * lib_api_md_bind - API entry for PtlMDBind: create a free-standing MD
 * (not attached to any match entry) and return its handle in *handle.
 *
 * NOTE(review): elided listing -- return type, error branches and braces
 * are partly missing.
 */
283 lib_api_md_bind(nal_t *apinal,
284 ptl_md_t *umd, ptl_unlink_t unlink,
285 ptl_handle_md_t *handle)
287 lib_nal_t *nal = apinal->nal_data;
/* Reject fragment lists larger than this implementation supports. */
292 if ((umd->options & (PTL_MD_KIOV | PTL_MD_IOVEC)) != 0 &&
293 umd->length > PTL_MD_MAX_IOV) /* too many fragments */
294 return PTL_IOV_INVALID;
296 md = lib_md_alloc(nal, umd);
300 LIB_LOCK(nal, flags);
302 rc = lib_md_build(nal, md, umd, unlink);
/* Success: hand the caller a handle to the new MD. */
305 ptl_md2handle(handle, nal, md);
307 LIB_UNLOCK(nal, flags);
/* Failure path: release the unused MD. */
311 lib_md_free (nal, md);
313 LIB_UNLOCK(nal, flags);
/*
 * lib_api_md_unlink - API entry for PtlMDUnlink: unlink the MD named by
 * *mdh, enqueueing a PTL_EVENT_UNLINK event when the MD is idle and has
 * an event queue.
 *
 * NOTE(review): elided listing -- return type, part of the L168 condition
 * and some braces are missing.
 */
318 lib_api_md_unlink (nal_t *apinal, ptl_handle_md_t *mdh)
320 lib_nal_t *nal = apinal->nal_data;
325 LIB_LOCK(nal, flags);
327 md = ptl_handle2md(mdh, nal);
/* Stale or bogus handle: nothing to unlink. */
329 LIB_UNLOCK(nal, flags);
330 return PTL_MD_INVALID;
333 /* If the MD is busy, lib_md_unlink just marks it for deletion, and
334 * when the NAL is done, the completion event flags that the MD was
335 * unlinked. Otherwise, we enqueue an event now... */
/* NOTE(review): the rest of this condition (presumably "md->pending == 0")
 * is elided from the listing. */
337 if (md->eq != NULL &&
339 memset(&ev, 0, sizeof(ev));
341 ev.type = PTL_EVENT_UNLINK;
342 ev.ni_fail_type = PTL_OK;
/* Snapshot the MD state into the event before it is torn down. */
344 lib_md_deconstruct(nal, md, &ev.md);
345 ptl_md2handle(&ev.md_handle, nal, md);
347 lib_enq_event_locked(nal, NULL, md->eq, &ev);
350 lib_md_unlink(nal, md);
352 LIB_UNLOCK(nal, flags);
/*
 * lib_api_md_update - API entry for PtlMDUpdate: atomically copy the
 * current MD state out to *oldumd and, when *newumd is supplied and the
 * MD is idle (and the optional test EQ *testqh is empty), rebuild the MD
 * from *newumd in place.
 *
 * NOTE(review): elided listing -- return type, goto/error branches and
 * braces are partly missing; comments annotate visible lines only.
 */
357 lib_api_md_update (nal_t *apinal,
358 ptl_handle_md_t *mdh,
359 ptl_md_t *oldumd, ptl_md_t *newumd,
360 ptl_handle_eq_t *testqh)
362 lib_nal_t *nal = apinal->nal_data;
364 lib_eq_t *test_eq = NULL;
368 LIB_LOCK(nal, flags);
370 md = ptl_handle2md(mdh, nal);
/* Report the MD's current state to the caller. */
377 lib_md_deconstruct(nal, md, oldumd);
/* NULL newumd means "query only": no update is attempted. */
379 if (newumd == NULL) {
384 /* XXX fttb, the new MD must be the same "shape" wrt fragmentation,
385 * since we simply overwrite the old lib-md */
/* Same iovec/kiov flavour, and -- when fragmented -- the same fragment
 * count as the existing MD; otherwise refuse the update. */
386 if ((((newumd->options ^ md->options) &
387 (PTL_MD_IOVEC | PTL_MD_KIOV)) != 0) ||
388 ((newumd->options & (PTL_MD_IOVEC | PTL_MD_KIOV)) != 0 &&
389 newumd->length != md->md_niov)) {
390 rc = PTL_IOV_INVALID;
/* Resolve the optional "test" EQ; PTL_EQ_NONE means unconditional update. */
394 if (!PtlHandleIsEqual (*testqh, PTL_EQ_NONE)) {
395 test_eq = ptl_handle2eq(testqh, nal);
396 if (test_eq == NULL) {
/* A busy MD can never be updated. */
402 if (md->pending != 0) {
403 rc = PTL_MD_NO_UPDATE;
/* Update only when no test EQ was given, or the test EQ is empty
 * (dequeue sequence has caught up with enqueue sequence). */
407 if (test_eq == NULL ||
408 test_eq->eq_deq_seq == test_eq->eq_enq_seq) {
409 lib_me_t *me = md->me;
/* Preserve the MD's existing auto-unlink behaviour across the rebuild. */
410 int unlink = (md->md_flags & PTL_MD_FLAG_AUTO_UNLINK) ?
411 PTL_UNLINK : PTL_RETAIN;
413 // #warning this does not track eq refcounts properly
414 rc = lib_md_build(nal, md, newumd, unlink);
/* Test EQ not empty: caller must retry later. */
418 rc = PTL_MD_NO_UPDATE;
422 LIB_UNLOCK(nal, flags);