/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * Copyright (c) 2003 Los Alamos National Laboratory (LANL)
 *
 * This file is part of Lustre, http://www.lustre.org/
 *
 * Lustre is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * Lustre is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with Lustre; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

/*
 * This file implements the nal cb functions
 */

#include "gmnal.h"
30 int gmnal_cb_recv(nal_cb_t *nal_cb, void *private, lib_msg_t *cookie,
31 unsigned int niov, struct iovec *iov, size_t mlen,
34 gmnal_srxd_t *srxd = (gmnal_srxd_t*)private;
38 CDEBUG(D_TRACE, "gmnal_cb_recv nal_cb [%p], private[%p], cookie[%p], "
39 "niov[%d], iov [%p], mlen["LPSZ"], rlen["LPSZ"]\n",
40 nal_cb, private, cookie, niov, iov, mlen, rlen);
43 case(GMNAL_SMALL_MESSAGE):
44 CDEBUG(D_INFO, "gmnal_cb_recv got small message\n");
45 status = gmnal_small_rx(nal_cb, private, cookie, niov,
48 case(GMNAL_LARGE_MESSAGE_INIT):
49 CDEBUG(D_INFO, "gmnal_cb_recv got large message init\n");
50 status = gmnal_large_rx(nal_cb, private, cookie, niov,
55 CDEBUG(D_INFO, "gmnal_cb_recv gmnal_return status [%d]\n", status);
59 int gmnal_cb_recv_pages(nal_cb_t *nal_cb, void *private, lib_msg_t *cookie,
60 unsigned int kniov, ptl_kiov_t *kiov, size_t mlen,
63 gmnal_srxd_t *srxd = (gmnal_srxd_t*)private;
65 struct iovec *iovec = NULL, *iovec_dup = NULL;
67 ptl_kiov_t *kiov_dup = kiov;;
70 CDEBUG(D_TRACE, "gmnal_cb_recv_pages nal_cb [%p],private[%p], "
71 "cookie[%p], kniov[%d], kiov [%p], mlen["LPSZ"], rlen["LPSZ"]\n",
72 nal_cb, private, cookie, kniov, kiov, mlen, rlen);
74 if (srxd->type == GMNAL_SMALL_MESSAGE) {
75 PORTAL_ALLOC(iovec, sizeof(struct iovec)*kniov);
77 CDEBUG(D_ERROR, "Can't malloc\n");
78 return(GMNAL_STATUS_FAIL);
83 * map each page and create an iovec for it
85 for (i=0; i<kniov; i++) {
86 CDEBUG(D_INFO, "processing kniov [%d] [%p]\n", i, kiov);
87 CDEBUG(D_INFO, "kniov page [%p] len [%d] offset[%d]\n",
88 kiov->kiov_page, kiov->kiov_len,
90 iovec->iov_len = kiov->kiov_len;
91 CDEBUG(D_INFO, "Calling kmap[%p]", kiov->kiov_page);
93 iovec->iov_base = kmap(kiov->kiov_page) +
96 CDEBUG(D_INFO, "iov_base is [%p]\n", iovec->iov_base);
100 CDEBUG(D_INFO, "calling gmnal_small_rx\n");
101 status = gmnal_small_rx(nal_cb, private, cookie, kniov,
102 iovec_dup, mlen, rlen);
103 for (i=0; i<kniov; i++) {
104 kunmap(kiov_dup->kiov_page);
107 PORTAL_FREE(iovec_dup, sizeof(struct iovec)*kniov);
111 CDEBUG(D_INFO, "gmnal_return status [%d]\n", status);
116 int gmnal_cb_send(nal_cb_t *nal_cb, void *private, lib_msg_t *cookie,
117 ptl_hdr_t *hdr, int type, ptl_nid_t nid, ptl_pid_t pid,
118 unsigned int niov, struct iovec *iov, size_t len)
121 gmnal_data_t *nal_data;
124 CDEBUG(D_TRACE, "gmnal_cb_send niov[%d] len["LPSZ"] nid["LPU64"]\n",
126 nal_data = nal_cb->nal_data;
128 if (GMNAL_IS_SMALL_MESSAGE(nal_data, niov, iov, len)) {
129 CDEBUG(D_INFO, "This is a small message send\n");
130 gmnal_small_tx(nal_cb, private, cookie, hdr, type, nid, pid,
133 CDEBUG(D_ERROR, "Large message send it is not supported\n");
134 lib_finalize(nal_cb, private, cookie, PTL_FAIL);
136 gmnal_large_tx(nal_cb, private, cookie, hdr, type, nid, pid,
142 int gmnal_cb_send_pages(nal_cb_t *nal_cb, void *private, lib_msg_t *cookie,
143 ptl_hdr_t *hdr, int type, ptl_nid_t nid, ptl_pid_t pid, unsigned int kniov, ptl_kiov_t *kiov, size_t len)
147 gmnal_data_t *nal_data;
148 struct iovec *iovec = NULL, *iovec_dup = NULL;
149 ptl_kiov_t *kiov_dup = kiov;
151 CDEBUG(D_TRACE, "gmnal_cb_send_pages nid ["LPU64"] niov[%d] len["LPSZ"]\n", nid, kniov, len);
152 nal_data = nal_cb->nal_data;
153 PORTAL_ALLOC(iovec, kniov*sizeof(struct iovec));
155 if (GMNAL_IS_SMALL_MESSAGE(nal_data, 0, NULL, len)) {
156 CDEBUG(D_INFO, "This is a small message send\n");
158 for (i=0; i<kniov; i++) {
159 CDEBUG(D_INFO, "processing kniov [%d] [%p]\n", i, kiov);
160 CDEBUG(D_INFO, "kniov page [%p] len [%d] offset[%d]\n",
161 kiov->kiov_page, kiov->kiov_len,
164 iovec->iov_base = kmap(kiov->kiov_page)
167 iovec->iov_len = kiov->kiov_len;
171 gmnal_small_tx(nal_cb, private, cookie, hdr, type, nid,
172 pid, kniov, iovec_dup, len);
174 CDEBUG(D_ERROR, "Large message send it is not supported yet\n");
176 for (i=0; i<kniov; i++) {
177 CDEBUG(D_INFO, "processing kniov [%d] [%p]\n", i, kiov);
178 CDEBUG(D_INFO, "kniov page [%p] len [%d] offset[%d]\n",
179 kiov->kiov_page, kiov->kiov_len,
182 iovec->iov_base = kmap(kiov->kiov_page)
184 iovec->iov_len = kiov->kiov_len;
188 gmnal_large_tx(nal_cb, private, cookie, hdr, type, nid,
189 pid, kniov, iovec, len);
191 for (i=0; i<kniov; i++) {
192 kunmap(kiov_dup->kiov_page);
195 PORTAL_FREE(iovec_dup, kniov*sizeof(struct iovec));
199 int gmnal_cb_read(nal_cb_t *nal_cb, void *private, void *dst,
200 user_ptr src, size_t len)
202 gm_bcopy(src, dst, len);
206 int gmnal_cb_write(nal_cb_t *nal_cb, void *private, user_ptr dst,
207 void *src, size_t len)
209 gm_bcopy(src, dst, len);
213 int gmnal_cb_callback(nal_cb_t *nal_cb, void *private, lib_eq_t *eq,
217 if (eq->event_callback != NULL) {
218 CDEBUG(D_INFO, "found callback\n");
219 eq->event_callback(ev);
225 void *gmnal_cb_malloc(nal_cb_t *nal_cb, size_t len)
228 CDEBUG(D_TRACE, "gmnal_cb_malloc len["LPSZ"]\n", len);
229 PORTAL_ALLOC(ptr, len);
233 void gmnal_cb_free(nal_cb_t *nal_cb, void *buf, size_t len)
235 CDEBUG(D_TRACE, "gmnal_cb_free :: buf[%p] len["LPSZ"]\n", buf, len);
236 PORTAL_FREE(buf, len);
240 void gmnal_cb_unmap(nal_cb_t *nal_cb, unsigned int niov, struct iovec *iov,
246 int gmnal_cb_map(nal_cb_t *nal_cb, unsigned int niov, struct iovec *iov,
252 void gmnal_cb_printf(nal_cb_t *nal_cb, const char *fmt, ...)
254 CDEBUG(D_TRACE, "gmnal_cb_printf\n");
259 void gmnal_cb_cli(nal_cb_t *nal_cb, unsigned long *flags)
261 gmnal_data_t *nal_data = (gmnal_data_t*)nal_cb->nal_data;
263 spin_lock_irqsave(&nal_data->cb_lock, *flags);
267 void gmnal_cb_sti(nal_cb_t *nal_cb, unsigned long *flags)
269 gmnal_data_t *nal_data = (gmnal_data_t*)nal_cb->nal_data;
271 spin_unlock_irqrestore(&nal_data->cb_lock, *flags);
275 int gmnal_cb_dist(nal_cb_t *nal_cb, ptl_nid_t nid, unsigned long *dist)
277 CDEBUG(D_TRACE, "gmnal_cb_dist\n");