/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * Memory Descriptor management routines
 *
 * Copyright (c) 2001-2003 Cluster File Systems, Inc.
 * Copyright (c) 2001-2002 Sandia National Laboratories
 *
 * This file is part of Lustre, http://www.sf.net/projects/lustre/
 *
 * Lustre is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * Lustre is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with Lustre; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#ifndef __KERNEL__
# include <portals/api-support.h>
#else
# define DEBUG_SUBSYSTEM S_PORTALS
# include <linux/kp30.h>
#endif

#include <portals/lib-p30.h>
#include <portals/arg-blocks.h>
/* must be called with state lock held */
void lib_md_unlink(nal_cb_t * nal, lib_md_t * md)
{
        lib_me_t *me = md->me;

        if (md->pending != 0) {
                /* network ops still in flight: defer until they complete */
                CDEBUG(D_NET, "Queueing unlink of md %p\n", md);
                md->md_flags |= PTL_MD_FLAG_UNLINK;
                return;
        }

        CDEBUG(D_NET, "Unlinking md %p\n", md);

        /* let the NAL unmap the MD's buffers; md_addrkey is its mapping
         * cookie (field name assumed) */
        if ((md->options & PTL_MD_KIOV) != 0) {
                if (nal->cb_unmap_pages != NULL)
                        nal->cb_unmap_pages (nal, md->md_niov, md->md_iov.kiov,
                                             &md->md_addrkey);
        } else if (nal->cb_unmap != NULL)
                nal->cb_unmap (nal, md->md_niov, md->md_iov.iov,
                               &md->md_addrkey);

        if (me != NULL) {
                me->md = NULL;
                if (me->unlink == PTL_UNLINK)
                        lib_me_unlink(nal, me);
        }

        if (md->eq != NULL) {
                md->eq->eq_refcount--;
                LASSERT (md->eq->eq_refcount >= 0);
        }

        lib_invalidate_handle (nal, &md->md_lh);
        list_del (&md->md_list);
        lib_md_free(nal, md);
}
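
/* lib_md_build() fills in an MD from the user-visible ptl_md_t: it validates
 * the iov/kiov description, gives the NAL a chance to map the memory, and
 * puts the MD on the list of active MDs so handle2md() can find it. */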
/* must be called with state lock held */
static int lib_md_build(nal_cb_t *nal, lib_md_t *new, void *private,
                        ptl_md_t *md, ptl_handle_eq_t *eqh, int unlink)
{
        const int     max_size_opts = PTL_MD_AUTO_UNLINK |
                                      PTL_MD_MAX_SIZE;
        lib_eq_t     *eq = NULL;
        int           rc;
        int           i;

        /* NB we are passed an allocated, but uninitialised/inactive md.
         * if we return success, caller may lib_md_unlink() it.
         * otherwise caller may only lib_md_free() it.
         */
        if (!PtlHandleEqual (*eqh, PTL_EQ_NONE)) {
                eq = ptl_handle2eq(eqh, nal);
                if (eq == NULL)                       /* bad EQ handle */
                        return PTL_INV_EQ;
        }

        if ((md->options & PTL_MD_IOV) != 0 &&  /* discontiguous MD */
            md->niov > PTL_MD_MAX_IOV)          /* too many fragments */
                return PTL_IOV_TOO_MANY;

        if ((md->options & max_size_opts) != 0 && /* max size used */
            (md->max_size < 0 || md->max_size > md->length)) // illegal max_size
                return PTL_INV_MD;
        /* initialise the new MD from the user's description */
        new->me = NULL;
        new->start = md->start;
        new->length = md->length;
        new->max_size = md->max_size;
        new->unlink = unlink;
        new->options = md->options;
        new->user_ptr = md->user_ptr;
        new->eq = eq;
        new->threshold = md->threshold;
        new->pending = 0;
        new->md_flags = 0;
        if ((md->options & PTL_MD_IOV) != 0) {
                int total_length = 0;

                if ((md->options & PTL_MD_KIOV) != 0) /* Can't specify both */
                        return PTL_INV_MD;

                new->md_niov = md->niov;

                /* copy in the iovec describing the fragments */
                if (nal->cb_read (nal, private, new->md_iov.iov, md->start,
                                  md->niov * sizeof (new->md_iov.iov[0])))
                        return PTL_SEGV;

                for (i = 0; i < new->md_niov; i++) {
                        /* We take the base address on trust */
                        if (new->md_iov.iov[i].iov_len <= 0) /* invalid length */
                                return PTL_VAL_FAILED;

                        total_length += new->md_iov.iov[i].iov_len;
                }

                if (md->length > total_length)
                        return PTL_IOV_TOO_SMALL;

                if (nal->cb_map != NULL) {
                        rc = nal->cb_map (nal, new->md_niov, new->md_iov.iov,
                                          &new->md_addrkey);
                        if (rc != PTL_OK)
                                return (rc);
                }
        } else if ((md->options & PTL_MD_KIOV) != 0) {
#ifndef __KERNEL__
                return PTL_INV_MD;
#else
                int total_length = 0;

                /* Trap attempt to use paged I/O if unsupported early. */
                if (nal->cb_send_pages == NULL ||
                    nal->cb_recv_pages == NULL)
                        return PTL_INV_MD;

                new->md_niov = md->niov;

                if (nal->cb_read (nal, private, new->md_iov.kiov, md->start,
                                  md->niov * sizeof (new->md_iov.kiov[0])))
                        return PTL_SEGV;

                for (i = 0; i < new->md_niov; i++) {
                        /* We take the page pointer on trust */
                        if (new->md_iov.kiov[i].kiov_offset +
                            new->md_iov.kiov[i].kiov_len > PAGE_SIZE)
                                return PTL_VAL_FAILED; /* invalid length */

                        total_length += new->md_iov.kiov[i].kiov_len;
                }

                if (md->length > total_length)
                        return PTL_IOV_TOO_SMALL;

                if (nal->cb_map_pages != NULL) {
                        rc = nal->cb_map_pages (nal, new->md_niov, new->md_iov.kiov,
                                                &new->md_addrkey);
                        if (rc != PTL_OK)
                                return (rc);
                }
#endif
        } else {   /* contiguous */
                new->md_niov = 1;
                new->md_iov.iov[0].iov_base = md->start;
                new->md_iov.iov[0].iov_len = md->length;

                if (nal->cb_map != NULL) {
                        rc = nal->cb_map (nal, new->md_niov, new->md_iov.iov,
                                          &new->md_addrkey);
                        if (rc != PTL_OK)
                                return (rc);
                }
        }

        if (eq != NULL)
                eq->eq_refcount++;

        /* It's good; let handle2md succeed and add to active mds */
        lib_initialise_handle (nal, &new->md_lh, PTL_COOKIE_TYPE_MD);
        list_add (&new->md_list, &nal->ni.ni_active_mds);

        return PTL_OK;
}
/* must be called with state lock held */
void lib_md_deconstruct(nal_cb_t * nal, lib_md_t * md, ptl_md_t * new)
{
        /* NB this doesn't copy out all the iov entries so when a
         * discontiguous MD is copied out, the target gets to know the
         * original iov pointer (in start) and the number of entries it had
         * in it (in niov) */

        new->start = md->start;
        new->length = md->length;
        new->threshold = md->threshold;
        new->max_size = md->max_size;
        new->options = md->options;
        new->user_ptr = md->user_ptr;
        ptl_eq2handle(&new->eventq, md->eq);
        new->niov = ((md->options & (PTL_MD_IOV | PTL_MD_KIOV)) == 0) ? 0 : md->md_niov;
}
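
/* The do_PtlMD*() handlers below are the library-side implementations of the
 * PtlMDAttach/PtlMDBind/PtlMDUnlink/PtlMDUpdate calls; their arguments and
 * results travel in the arg-block structures from <portals/arg-blocks.h>. */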
int do_PtlMDAttach(nal_cb_t * nal, void *private, void *v_args, void *v_ret)
{
        /*
         * Incoming:
         *      ptl_handle_me_t current_in
         *      ptl_md_t md_in
         *      ptl_unlink_t unlink_in
         *
         * Outgoing:
         *      ptl_handle_md_t * handle_out
         */
        PtlMDAttach_in *args = v_args;
        PtlMDAttach_out *ret = v_ret;

        lib_me_t *me;
        lib_md_t *md;
        unsigned long flags;

        md = lib_md_alloc (nal);
        if (md == NULL)
                return (ret->rc = PTL_NOSPACE);

        state_lock(nal, &flags);

        me = ptl_handle2me(&args->me_in, nal);
        if (me == NULL) {
                ret->rc = PTL_INV_ME;
        } else if (me->md != NULL) {
                ret->rc = PTL_INUSE;
        } else {
                ret->rc = lib_md_build(nal, md, private, &args->md_in,
                                       &args->eq_in, args->unlink_in);

                if (ret->rc == PTL_OK) {
                        me->md = md;
                        md->me = me;

                        ptl_md2handle(&ret->handle_out, md);

                        state_unlock (nal, &flags);
                        return (PTL_OK);
                }
        }

        lib_md_free (nal, md);

        state_unlock (nal, &flags);
        return (ret->rc);
}
int do_PtlMDBind(nal_cb_t * nal, void *private, void *v_args, void *v_ret)
{
        /*
         * Incoming:
         *      ptl_handle_ni_t ni_in
         *      ptl_md_t md_in
         *
         * Outgoing:
         *      ptl_handle_md_t * handle_out
         */
        PtlMDBind_in *args = v_args;
        PtlMDBind_out *ret = v_ret;

        lib_md_t *md;
        unsigned long flags;

        md = lib_md_alloc (nal);
        if (md == NULL)
                return (ret->rc = PTL_NOSPACE);

        state_lock(nal, &flags);

        ret->rc = lib_md_build(nal, md, private,
                               &args->md_in, &args->eq_in, PTL_UNLINK);

        if (ret->rc == PTL_OK) {
                ptl_md2handle(&ret->handle_out, md);

                state_unlock(nal, &flags);
                return (PTL_OK);
        }

        lib_md_free (nal, md);

        state_unlock(nal, &flags);
        return (ret->rc);
}
int do_PtlMDUnlink(nal_cb_t * nal, void *private, void *v_args, void *v_ret)
{
        PtlMDUnlink_in *args = v_args;
        PtlMDUnlink_out *ret = v_ret;

        lib_md_t *md;
        unsigned long flags;

        state_lock(nal, &flags);

        md = ptl_handle2md(&args->md_in, nal);
        if (md == NULL) {
                ret->rc = PTL_INV_MD;
        } else if (md->pending != 0) {          /* being filled/spilled */
                ret->rc = PTL_MD_INUSE;
        } else {
                /* Callers attempting to unlink a busy MD which will get
                 * unlinked once the net op completes should see INUSE
                 * before completion, and INV_MD thereafter.  LASSERT we've
                 * got that right... */
                LASSERT ((md->md_flags & PTL_MD_FLAG_UNLINK) == 0);

                lib_md_deconstruct(nal, md, &ret->status_out);
                lib_md_unlink(nal, md);
                ret->rc = PTL_OK;
        }

        state_unlock(nal, &flags);

        return (ret->rc);
}
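
/* PtlMDUpdate: optionally copy the current MD out through old_inout, then
 * replace it with new_inout, but only while the MD is idle and, if a test EQ
 * is given, only while that EQ's sequence number still equals sequence_in. */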
int do_PtlMDUpdate_internal(nal_cb_t * nal, void *private, void *v_args,
                            void *v_ret)
{
        /*
         * Incoming:
         *      ptl_handle_md_t md_in
         *      ptl_md_t * old_inout
         *      ptl_md_t * new_inout
         *      ptl_handle_eq_t testq_in
         *      ptl_seq_t sequence_in
         *
         * Outgoing:
         *      ptl_md_t * old_inout
         *      ptl_md_t * new_inout
         */
        PtlMDUpdate_internal_in *args = v_args;
        PtlMDUpdate_internal_out *ret = v_ret;

        lib_md_t *md;
        lib_eq_t *test_eq = NULL;
        ptl_md_t *new = &args->new_inout;
        unsigned long flags;

        state_lock(nal, &flags);

        md = ptl_handle2md(&args->md_in, nal);
        if (md == NULL) {
                ret->rc = PTL_INV_MD;
                goto out;
        }

        if (args->old_inout_valid)
                lib_md_deconstruct(nal, md, &ret->old_inout);

        if (!args->new_inout_valid) {
                ret->rc = PTL_OK;
                goto out;
        }

        if (!PtlHandleEqual (args->testq_in, PTL_EQ_NONE)) {
                test_eq = ptl_handle2eq(&args->testq_in, nal);
                if (test_eq == NULL) {
                        ret->rc = PTL_INV_EQ;
                        goto out;
                }
        }

        if (md->pending != 0) {
                ret->rc = PTL_NOUPDATE;
                goto out;
        }

        if (test_eq == NULL ||
            test_eq->sequence == args->sequence_in) {
                lib_me_t *me = md->me;

                // #warning this does not track eq refcounts properly
                ret->rc = lib_md_build(nal, md, private,
                                       new, &new->eventq, md->unlink);

                /* restore the ME linkage that lib_md_build() reset */
                md->me = me;
        } else {
                ret->rc = PTL_NOUPDATE;
        }

 out:
        state_unlock(nal, &flags);
        return (ret->rc);
}