/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * Memory Descriptor management routines
 *
 * Copyright (c) 2001-2003 Cluster File Systems, Inc.
 * Copyright (c) 2001-2002 Sandia National Laboratories
 *
 * This file is part of Lustre, http://www.sf.net/projects/lustre/
 *
 * Lustre is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * Lustre is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with Lustre; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#define DEBUG_SUBSYSTEM S_PORTALS
#include <linux/kp30.h>

#include <portals/lib-p30.h>
#include <portals/arg-blocks.h>
/* must be called with state lock held */
void lib_md_unlink(nal_cb_t * nal, lib_md_t * md)
{
        if ((md->md_flags & PTL_MD_FLAG_ZOMBIE) == 0) {
                /* first unlink attempt... */
                lib_me_t *me = md->me;

                md->md_flags |= PTL_MD_FLAG_ZOMBIE;

                /* Disassociate from ME (if any), and unlink it if it was
                 * created with PTL_UNLINK */
                if (me != NULL) {
                        me->md = NULL;
                        if (me->unlink == PTL_UNLINK)
                                lib_me_unlink(nal, me);
                }

                /* ensure all future handle lookups fail */
                lib_invalidate_handle(nal, &md->md_lh);
        }

        if (md->pending != 0) {
                CDEBUG(D_NET, "Queueing unlink of md %p\n", md);
                return;
        }

        CDEBUG(D_NET, "Unlinking md %p\n", md);

        if ((md->options & PTL_MD_KIOV) != 0) {
                if (nal->cb_unmap_pages != NULL)
                        nal->cb_unmap_pages (nal, md->md_niov, md->md_iov.kiov,
                                             &md->md_addrkey);
        } else if (nal->cb_unmap != NULL) {
                nal->cb_unmap (nal, md->md_niov, md->md_iov.iov,
                               &md->md_addrkey);
        }

        if (md->eq != NULL) {
                md->eq->eq_refcount--;
                LASSERT (md->eq->eq_refcount >= 0);
        }

        list_del (&md->md_list);
        lib_md_free(nal, md);
}
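/*
 * Illustrative only (not part of the original source): callers take the
 * state lock around lib_md_unlink(), as do_PtlMDUnlink() below does, e.g.
 *
 *      unsigned long flags;
 *
 *      state_lock(nal, &flags);
 *      lib_md_unlink(nal, md);
 *      state_unlock(nal, &flags);
 */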
/* must be called with state lock held */
static int lib_md_build(nal_cb_t *nal, lib_md_t *new, void *private,
                        ptl_md_t *md, ptl_handle_eq_t *eqh, int unlink)
{
        lib_eq_t *eq = NULL;
        int       rc;
        int       i;

        /* NB we are passed an allocated, but uninitialised/inactive md.
         * if we return success, caller may lib_md_unlink() it.
         * otherwise caller may only lib_md_free() it. */

        if (!PtlHandleIsEqual (*eqh, PTL_EQ_NONE)) {
                eq = ptl_handle2eq(eqh, nal);
                if (eq == NULL)
                        return PTL_EQ_INVALID;
        }

        /* Must check this _before_ allocation.  Also, note that non-iov
         * MDs must set md_niov to 0. */
        LASSERT((md->options & (PTL_MD_IOVEC | PTL_MD_KIOV)) == 0 ||
                md->niov <= PTL_MD_MAX_IOV);

        /* This implementation doesn't know how to create START events or
         * disable END events.  Best to LASSERT our caller is compliant so
         * we find out quickly... */
        LASSERT (PtlHandleIsEqual (*eqh, PTL_EQ_NONE) ||
                 ((md->options & PTL_MD_EVENT_START_DISABLE) != 0 &&
                  (md->options & PTL_MD_EVENT_END_DISABLE) == 0));
        if ((md->options & PTL_MD_MAX_SIZE) != 0 &&          /* max size used */
            (md->max_size < 0 || md->max_size > md->length)) /* illegal max_size */
                return PTL_MD_INVALID;

        new->me = NULL;
        new->start = md->start;
        new->length = md->length;
        new->offset = 0;
        new->max_size = md->max_size;
        new->options = md->options;
        new->user_ptr = md->user_ptr;
        new->eq = eq;
        new->threshold = md->threshold;
        new->pending = 0;
        new->md_flags = (unlink == PTL_UNLINK) ? PTL_MD_FLAG_AUTO_UNLINK : 0;
        if ((md->options & PTL_MD_IOVEC) != 0) {
                int total_length = 0;

                if ((md->options & PTL_MD_KIOV) != 0) /* Can't specify both */
                        return PTL_MD_INVALID;

                new->md_niov = md->niov;

                if (nal->cb_read (nal, private, new->md_iov.iov, md->start,
                                  md->niov * sizeof (new->md_iov.iov[0])))
                        return PTL_SEGV;

                for (i = 0; i < new->md_niov; i++) {
                        /* We take the base address on trust */
                        if (new->md_iov.iov[i].iov_len <= 0) /* invalid length */
                                return PTL_VAL_FAILED;

                        total_length += new->md_iov.iov[i].iov_len;
                }

                if (md->length > total_length)
                        return PTL_IOV_TOO_SMALL;

                if (nal->cb_map != NULL) {
                        rc = nal->cb_map (nal, new->md_niov, new->md_iov.iov,
                                          &new->md_addrkey);
                        if (rc != PTL_OK)
                                return rc;
                }
        } else if ((md->options & PTL_MD_KIOV) != 0) {
#ifndef __KERNEL__
                return PTL_MD_INVALID;
#else
                int total_length = 0;

                /* Trap attempt to use paged I/O if unsupported early. */
                if (nal->cb_send_pages == NULL ||
                    nal->cb_recv_pages == NULL)
                        return PTL_MD_INVALID;

                new->md_niov = md->niov;

                if (nal->cb_read (nal, private, new->md_iov.kiov, md->start,
                                  md->niov * sizeof (new->md_iov.kiov[0])))
                        return PTL_SEGV;

                for (i = 0; i < new->md_niov; i++) {
                        /* We take the page pointer on trust */
                        if (new->md_iov.kiov[i].kiov_offset +
                            new->md_iov.kiov[i].kiov_len > PAGE_SIZE)
                                return PTL_VAL_FAILED; /* invalid length */

                        total_length += new->md_iov.kiov[i].kiov_len;
                }

                if (md->length > total_length)
                        return PTL_IOV_TOO_SMALL;

                if (nal->cb_map_pages != NULL) {
                        rc = nal->cb_map_pages (nal, new->md_niov, new->md_iov.kiov,
                                                &new->md_addrkey);
                        if (rc != PTL_OK)
                                return rc;
                }
#endif
        } else { /* contiguous */
                new->md_niov = 1;
                new->md_iov.iov[0].iov_base = md->start;
                new->md_iov.iov[0].iov_len = md->length;

                if (nal->cb_map != NULL) {
                        rc = nal->cb_map (nal, new->md_niov, new->md_iov.iov,
                                          &new->md_addrkey);
                        if (rc != PTL_OK)
                                return rc;
                }
        }

        if (eq != NULL)
                eq->eq_refcount++;

        /* It's good; let handle2md succeed and add to active mds */
        lib_initialise_handle (nal, &new->md_lh, PTL_COOKIE_TYPE_MD);
        list_add (&new->md_list, &nal->ni.ni_active_mds);

        return PTL_OK;
}
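/*
 * Illustrative sketch (not part of the original source): a user descriptor
 * that satisfies the assertions above when an event queue is attached, i.e.
 * START events disabled and END events left enabled.  "buffer",
 * "buffer_size" and "eqh" are hypothetical caller-side names.
 *
 *      ptl_md_t umd;
 *
 *      umd.start     = buffer;
 *      umd.length    = buffer_size;
 *      umd.threshold = 1;
 *      umd.max_size  = 0;
 *      umd.options   = PTL_MD_EVENT_START_DISABLE;
 *      umd.user_ptr  = NULL;
 *      umd.eventq    = eqh;
 *      umd.niov      = 0;      (non-iov MDs must leave niov at 0)
 */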
/* must be called with state lock held */
void lib_md_deconstruct(nal_cb_t * nal, lib_md_t * md, ptl_md_t * new)
{
        /* NB this doesn't copy out all the iov entries so when a
         * discontiguous MD is copied out, the target gets to know the
         * original iov pointer (in start) and the number of entries
         * it had (in niov) */

        new->start = md->start;
        new->length = md->length;
        new->threshold = md->threshold;
        new->max_size = md->max_size;
        new->options = md->options;
        new->user_ptr = md->user_ptr;
        ptl_eq2handle(&new->eventq, md->eq);
        new->niov = ((md->options & (PTL_MD_IOVEC | PTL_MD_KIOV)) == 0) ?
                    0 : md->md_niov;
}
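/*
 * Illustrative only (not part of the original source): for an MD built with
 * PTL_MD_IOVEC or PTL_MD_KIOV, the descriptor copied out above reports the
 * caller's original fragment list rather than the fragment contents, e.g.
 *
 *      ptl_md_t out;
 *
 *      lib_md_deconstruct(nal, md, &out);
 *
 * leaves out.start pointing at the iovec/kiov array supplied when the MD
 * was created, and out.niov holding the number of fragments in it.
 */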
int do_PtlMDAttach(nal_cb_t * nal, void *private, void *v_args, void *v_ret)
{
        /* Incoming:
         *      ptl_handle_me_t current_in
         *      ptl_md_t md_in
         *      ptl_unlink_t unlink_in
         * Outgoing:
         *      ptl_handle_md_t * handle_out
         */
        PtlMDAttach_in *args = v_args;
        PtlMDAttach_out *ret = v_ret;
        lib_me_t *me;
        lib_md_t *md;
        unsigned long flags;

        if ((args->md_in.options & (PTL_MD_KIOV | PTL_MD_IOVEC)) != 0 &&
            args->md_in.niov > PTL_MD_MAX_IOV) /* too many fragments */
                return (ret->rc = PTL_IOV_TOO_MANY);

        md = lib_md_alloc(nal, &args->md_in);
        if (md == NULL)
                return (ret->rc = PTL_NO_SPACE);

        state_lock(nal, &flags);

        me = ptl_handle2me(&args->me_in, nal);
        if (me == NULL) {
                ret->rc = PTL_ME_INVALID;
        } else if (me->md != NULL) {
                ret->rc = PTL_ME_IN_USE;
        } else {
                ret->rc = lib_md_build(nal, md, private, &args->md_in,
                                       &args->eq_in, args->unlink_in);

                if (ret->rc == PTL_OK) {
                        me->md = md;
                        md->me = me;
                        ptl_md2handle(&ret->handle_out, md);

                        state_unlock (nal, &flags);
                        return PTL_OK;
                }
        }

        lib_md_free (nal, md);
        state_unlock (nal, &flags);
        return ret->rc;
}
int do_PtlMDBind(nal_cb_t * nal, void *private, void *v_args, void *v_ret)
{
        /* Incoming:
         *      ptl_handle_ni_t ni_in
         *      ptl_md_t md_in
         * Outgoing:
         *      ptl_handle_md_t * handle_out
         */
        PtlMDBind_in *args = v_args;
        PtlMDBind_out *ret = v_ret;
        lib_md_t *md;
        unsigned long flags;

        if ((args->md_in.options & (PTL_MD_KIOV | PTL_MD_IOVEC)) != 0 &&
            args->md_in.niov > PTL_MD_MAX_IOV) /* too many fragments */
                return (ret->rc = PTL_IOV_TOO_MANY);

        md = lib_md_alloc(nal, &args->md_in);
        if (md == NULL)
                return (ret->rc = PTL_NO_SPACE);

        state_lock(nal, &flags);

        ret->rc = lib_md_build(nal, md, private, &args->md_in,
                               &args->eq_in, args->unlink_in);
        if (ret->rc == PTL_OK) {
                ptl_md2handle(&ret->handle_out, md);

                state_unlock(nal, &flags);
                return PTL_OK;
        }

        lib_md_free (nal, md);
        state_unlock(nal, &flags);
        return ret->rc;
}
int do_PtlMDUnlink(nal_cb_t * nal, void *private, void *v_args, void *v_ret)
{
        PtlMDUnlink_in *args = v_args;
        PtlMDUnlink_out *ret = v_ret;
        ptl_event_t ev;
        lib_md_t *md;
        unsigned long flags;

        state_lock(nal, &flags);

        md = ptl_handle2md(&args->md_in, nal);
        if (md == NULL) {
                state_unlock(nal, &flags);
                return (ret->rc = PTL_MD_INVALID);
        }

        /* If the MD is busy, lib_md_unlink just marks it for deletion, and
         * when the NAL is done, the completion event flags that the MD was
         * unlinked.  Otherwise, we enqueue an event now... */
        if (md->eq != NULL &&
            md->pending == 0) {
                memset(&ev, 0, sizeof(ev));

                ev.type = PTL_EVENT_UNLINK;
                ev.ni_fail_type = PTL_OK;
                lib_md_deconstruct(nal, md, &ev.mem_desc);

                lib_enq_event_locked(nal, private, md->eq, &ev);
        }

        lib_md_deconstruct(nal, md, &ret->status_out);
        lib_md_unlink(nal, md);
        ret->rc = PTL_OK;

        state_unlock(nal, &flags);
        return PTL_OK;
}
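/*
 * Illustrative sketch (not part of the original source): a consumer that
 * notices the unlink by polling the event queue; "eq_handle" and
 * "handle_md_gone" are hypothetical caller-side names.
 *
 *      ptl_event_t event;
 *
 *      if (PtlEQGet(eq_handle, &event) == PTL_OK &&
 *          event.type == PTL_EVENT_UNLINK)
 *              handle_md_gone(event.mem_desc.user_ptr);
 */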
int do_PtlMDUpdate_internal(nal_cb_t * nal, void *private, void *v_args,
                            void *v_ret)
{
        /* Incoming:
         *      ptl_handle_md_t md_in
         *      ptl_md_t * old_inout
         *      ptl_md_t * new_inout
         *      ptl_handle_eq_t testq_in
         *      ptl_seq_t sequence_in
         * Outgoing:
         *      ptl_md_t * old_inout
         *      ptl_md_t * new_inout
         */
        PtlMDUpdate_internal_in *args = v_args;
        PtlMDUpdate_internal_out *ret = v_ret;
        lib_md_t *md;
        lib_eq_t *test_eq = NULL;
        ptl_md_t *new = &args->new_inout;
        unsigned long flags;

        state_lock(nal, &flags);

        md = ptl_handle2md(&args->md_in, nal);
        if (md == NULL) {
                ret->rc = PTL_MD_INVALID;
                goto out;
        }

        if (args->old_inout_valid)
                lib_md_deconstruct(nal, md, &ret->old_inout);

        if (!args->new_inout_valid) {
                ret->rc = PTL_OK;
                goto out;
        }

        /* XXX fttb, the new MD must be the same type wrt fragmentation */
        if (((new->options ^ md->options) &
             (PTL_MD_IOVEC | PTL_MD_KIOV)) != 0) {
                ret->rc = PTL_MD_INVALID;
                goto out;
        }

        if (new->niov > md->md_niov) {
                ret->rc = PTL_IOV_TOO_MANY;
                goto out;
        }

        if (new->niov < md->md_niov) {
                ret->rc = PTL_IOV_TOO_SMALL;
                goto out;
        }

        if (!PtlHandleIsEqual (args->testq_in, PTL_EQ_NONE)) {
                test_eq = ptl_handle2eq(&args->testq_in, nal);
                if (test_eq == NULL) {
                        ret->rc = PTL_EQ_INVALID;
                        goto out;
                }
        }

        if (md->pending != 0) {
                ret->rc = PTL_MD_NO_UPDATE;
                goto out;
        }

        if (test_eq == NULL ||
            test_eq->sequence == args->sequence_in) {
                lib_me_t *me = md->me;
                int unlink = (md->md_flags & PTL_MD_FLAG_AUTO_UNLINK) ?
                             PTL_UNLINK : PTL_RETAIN;

                // #warning this does not track eq refcounts properly
                ret->rc = lib_md_build(nal, md, private,
                                       new, &new->eventq, unlink);
                md->me = me;
        } else {
                ret->rc = PTL_MD_NO_UPDATE;
        }

 out:
        state_unlock(nal, &flags);
        return ret->rc;
}