/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * Copyright (C) 2002 Cluster File Systems, Inc.
 *
 * This file is part of Lustre, http://www.lustre.org.
 *
 * Lustre is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * Lustre is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with Lustre; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

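/*
 * Low-level Portals I/O for PtlRPC: binding request, reply and bulk
 * buffers to memory descriptors (MDs), posting them to the peer's
 * portals, and attaching match entries (MEs) for incoming replies,
 * bulk data and service requests.
 */
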
#define DEBUG_SUBSYSTEM S_RPC

#include <linux/obd_support.h>
#include <linux/lustre_net.h>
#include <linux/lustre_lib.h>
#include <linux/obd.h>

extern ptl_handle_eq_t request_out_eq, reply_in_eq, reply_out_eq,
        bulk_source_eq, bulk_sink_eq;

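/* Bind the message buffer of 'request' (the request or the reply,
 * chosen by rq_type) to a memory descriptor and PtlPut() it to
 * 'portal' on the peer identified by 'conn'.  No ACK is requested;
 * completion is reported through request_out_eq / reply_out_eq. */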
static int ptl_send_buf(struct ptlrpc_request *request,
                        struct ptlrpc_connection *conn, int portal)
{
        int rc;
        ptl_handle_md_t md_h;
        ptl_process_id_t remote_id;

        request->rq_req_md.user_ptr = request;

        switch (request->rq_type) {
        case PTL_RPC_MSG_REQUEST:
                request->rq_reqmsg->type = HTON__u32(request->rq_type);
                request->rq_req_md.start = request->rq_reqmsg;
                request->rq_req_md.length = request->rq_reqlen;
                request->rq_req_md.eventq = request_out_eq;
                break;
        case PTL_RPC_MSG_ERR:   /* error replies go out like normal replies */
        case PTL_RPC_MSG_REPLY:
                request->rq_repmsg->type = HTON__u32(request->rq_type);
                request->rq_req_md.start = request->rq_repmsg;
                request->rq_req_md.length = request->rq_replen;
                request->rq_req_md.eventq = reply_out_eq;
                break;
        default:
                LBUG();
                return -1; /* notreached */
        }
        request->rq_req_md.threshold = 1;
        request->rq_req_md.options = PTL_MD_OP_PUT;
        request->rq_req_md.user_ptr = request;

        rc = PtlMDBind(conn->c_peer.peer_ni, request->rq_req_md, &md_h);
        if (rc != PTL_OK) {
                CERROR("PtlMDBind failed: %d\n", rc);
                return rc;
        }

        remote_id.nid = conn->c_peer.peer_nid;
        remote_id.pid = 0;

        CDEBUG(D_NET, "Sending %d bytes to portal %d, xid "LPD64"\n",
               request->rq_req_md.length, portal, request->rq_xid);

        rc = PtlPut(md_h, PTL_NOACK_REQ, remote_id, portal, 0, request->rq_xid,
                    0, 0);
        if (rc != PTL_OK)
                CERROR("PtlPut("LPU64", %d, "LPD64") failed: %d\n",
                       remote_id.nid, portal, request->rq_xid, rc);

        return rc;
}

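/* Helpers for the bulk iovec: use the iovec array embedded in the
 * descriptor when it is large enough for bd_page_count entries,
 * otherwise allocate (and later free) a temporary array. */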
static inline struct iovec *
ptlrpc_get_bulk_iov (struct ptlrpc_bulk_desc *desc)
{
        struct iovec *iov;

        if (desc->bd_page_count <= sizeof (desc->bd_iov)/sizeof (struct iovec))
                return (desc->bd_iov);

        OBD_ALLOC (iov, desc->bd_page_count * sizeof (struct iovec));
        return (iov);
}

static inline void
ptlrpc_put_bulk_iov (struct ptlrpc_bulk_desc *desc, struct iovec *iov)
{
        if (desc->bd_page_count <= sizeof (desc->bd_iov)/sizeof (struct iovec))
                return;
        OBD_FREE (iov, desc->bd_page_count * sizeof (struct iovec));
}

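/* Source side of a bulk transfer: gather every queued bulk page into a
 * single IOV memory descriptor and PtlPut() it to the sink's bulk
 * portal, requesting an ACK.  The MD threshold of 2 accounts for the
 * SENT and ACK events delivered to bulk_source_eq. */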
int ptlrpc_send_bulk(struct ptlrpc_bulk_desc *desc)
{
        int rc;
        __u32 xid = 0;
        struct iovec *iov;
        struct list_head *tmp, *next;
        ptl_process_id_t remote_id;

        iov = ptlrpc_get_bulk_iov (desc);
        if (iov == NULL)
                return -ENOMEM;

        desc->bd_md.start = iov;
        desc->bd_md.niov = 0;
        desc->bd_md.length = 0;
        desc->bd_md.eventq = bulk_source_eq;
        desc->bd_md.threshold = 2; /* SENT and ACK */
        desc->bd_md.options = PTL_MD_OP_PUT | PTL_MD_IOV;
        desc->bd_md.user_ptr = desc;

        atomic_set (&desc->bd_source_callback_count, 2);

        list_for_each_safe(tmp, next, &desc->bd_page_list) {
                struct ptlrpc_bulk_page *bulk;
                bulk = list_entry(tmp, struct ptlrpc_bulk_page, bp_link);

                LASSERT (desc->bd_md.niov < desc->bd_page_count);

                if (desc->bd_md.niov == 0)
                        xid = bulk->bp_xid;
                LASSERT (xid == bulk->bp_xid); /* should all be the same */

                iov[desc->bd_md.niov].iov_base = bulk->bp_buf;
                iov[desc->bd_md.niov].iov_len = bulk->bp_buflen;
                desc->bd_md.niov++;
                desc->bd_md.length += bulk->bp_buflen;
        }

        LASSERT (desc->bd_md.niov == desc->bd_page_count);
        LASSERT (desc->bd_md.niov != 0);

        rc = PtlMDBind(desc->bd_connection->c_peer.peer_ni, desc->bd_md,
                       &desc->bd_md_h);

        ptlrpc_put_bulk_iov (desc, iov); /* move down to reduce latency to send */

        if (rc != PTL_OK) {
                CERROR("PtlMDBind failed: %d\n", rc);
                return rc;
        }

        remote_id.nid = desc->bd_connection->c_peer.peer_nid;
        remote_id.pid = 0;

        CDEBUG(D_NET, "Sending %u pages %u bytes to portal %d nid "LPX64
               " pid %d xid %d\n", desc->bd_md.niov, desc->bd_md.length,
               desc->bd_portal, remote_id.nid, remote_id.pid, xid);

        rc = PtlPut(desc->bd_md_h, PTL_ACK_REQ, remote_id,
                    desc->bd_portal, 0, xid, 0, 0);
        if (rc != PTL_OK) {
                CERROR("PtlPut("LPU64", %d, %d) failed: %d\n",
                       remote_id.nid, desc->bd_portal, xid, rc);
                PtlMDUnlink(desc->bd_md_h);
                return rc;
        }

        return 0;
}

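/* Sink side of a bulk transfer: attach a match entry on the bulk portal
 * for the expected xid and an IOV memory descriptor covering the sink
 * pages, so the peer's PtlPut() lands directly in the destination
 * buffers and completion is signalled on bulk_sink_eq. */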
int ptlrpc_register_bulk(struct ptlrpc_bulk_desc *desc)
{
        int rc;
        __u32 xid = 0;
        struct iovec *iov;
        struct list_head *tmp, *next;
        ptl_process_id_t source_id;

        if (desc->bd_page_count > PTL_MD_MAX_IOV) {
                CERROR("iov longer than %d not supported\n", PTL_MD_MAX_IOV);
                return -EINVAL;
        }

        iov = ptlrpc_get_bulk_iov (desc);
        if (iov == NULL)
                return -ENOMEM;

        desc->bd_md.start = iov;
        desc->bd_md.niov = 0;
        desc->bd_md.length = 0;
        desc->bd_md.threshold = 1;
        desc->bd_md.options = PTL_MD_OP_PUT | PTL_MD_IOV;
        desc->bd_md.user_ptr = desc;
        desc->bd_md.eventq = bulk_sink_eq;

        list_for_each_safe(tmp, next, &desc->bd_page_list) {
                struct ptlrpc_bulk_page *bulk;
                bulk = list_entry(tmp, struct ptlrpc_bulk_page, bp_link);

                LASSERT (desc->bd_md.niov < desc->bd_page_count);

                if (desc->bd_md.niov == 0)
                        xid = bulk->bp_xid;
                LASSERT (xid == bulk->bp_xid); /* should all be the same */

                iov[desc->bd_md.niov].iov_base = bulk->bp_buf;
                iov[desc->bd_md.niov].iov_len = bulk->bp_buflen;
                desc->bd_md.niov++;
                desc->bd_md.length += bulk->bp_buflen;
        }

        LASSERT (desc->bd_md.niov == desc->bd_page_count);
        LASSERT (desc->bd_md.niov != 0);

        source_id.nid = desc->bd_connection->c_peer.peer_nid;
        source_id.pid = PTL_PID_ANY;

        rc = PtlMEAttach(desc->bd_connection->c_peer.peer_ni,
                         desc->bd_portal, source_id, xid, 0,
                         PTL_UNLINK, PTL_INS_AFTER, &desc->bd_me_h);

        ptlrpc_put_bulk_iov (desc, iov);

        if (rc != PTL_OK) {
                CERROR("PtlMEAttach failed: %d\n", rc);
                return rc;
        }

        rc = PtlMDAttach(desc->bd_me_h, desc->bd_md, PTL_UNLINK,
                         &desc->bd_md_h);
        if (rc != PTL_OK) {
                CERROR("PtlMDAttach failed: %d\n", rc);
                goto cleanup;
        }

        CDEBUG(D_NET, "Setup bulk sink buffers: %u pages %u bytes, xid %u, "
               "portal %u\n", desc->bd_md.niov, desc->bd_md.length,
               xid, desc->bd_portal);

        return 0;

 cleanup:
        ptlrpc_abort_bulk(desc);
        return rc;
}

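/* Unlink the bulk ME and MD of a transfer that is being torn down;
 * harmless if they were never attached (see the comment below). */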
int ptlrpc_abort_bulk(struct ptlrpc_bulk_desc *desc)
{
        /* This should be safe: these handles are initialized to be
         * invalid in ptlrpc_prep_bulk() */
        PtlMDUnlink(desc->bd_md_h);
        PtlMEUnlink(desc->bd_me_h);

        return 0;
}

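/* Send a normal (or error) reply for a request the service has finished
 * handling; the reply message must already have been packed into
 * rq_repmsg.  The host-order status is byte-swapped on the way out. */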
int ptlrpc_reply(struct ptlrpc_service *svc, struct ptlrpc_request *req)
{
        if (req->rq_repmsg == NULL) {
                CERROR("bad: someone called ptlrpc_reply when they meant "
                       "ptlrpc_error\n");
                return -EINVAL;
        }

        /* FIXME: we need to increment the count of handled events */
        if (req->rq_type != PTL_RPC_MSG_ERR)
                req->rq_type = PTL_RPC_MSG_REPLY;
        //req->rq_repmsg->conn = req->rq_connection->c_remote_conn;
        //req->rq_repmsg->token = req->rq_connection->c_remote_token;
        req->rq_repmsg->status = HTON__u32(req->rq_status);
        return ptl_send_buf(req, req->rq_connection, svc->srv_rep_portal);
}

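/* Send an error reply for a request that failed before a reply message
 * was packed: pack an empty message, mark the request PTL_RPC_MSG_ERR
 * and push it out through ptlrpc_reply(). */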
int ptlrpc_error(struct ptlrpc_service *svc, struct ptlrpc_request *req)
{
        int rc;

        if (req->rq_repmsg) {
                CERROR("req already has repmsg\n");
                LBUG();
        }
        rc = lustre_pack_msg(0, NULL, NULL, &req->rq_replen, &req->rq_repmsg);
        if (rc)
                return rc;

        req->rq_type = PTL_RPC_MSG_ERR;
        rc = ptlrpc_reply(svc, req);
        return rc;
}

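/* Client-side send of a prepared request: if a reply is expected
 * (rq_replen != 0), first post the reply buffer (an ME matching our
 * xid on the import's reply portal plus an MD over a freshly allocated
 * buffer), then put the request itself on the request portal. */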
int ptl_send_rpc(struct ptlrpc_request *request)
{
        int rc;
        char *repbuf;
        ptl_process_id_t source_id;

        if (request->rq_type != PTL_RPC_MSG_REQUEST) {
                CERROR("wrong packet type sent %d\n",
                       NTOH__u32(request->rq_reqmsg->type));
                return -EINVAL;
        }

        source_id.nid = request->rq_connection->c_peer.peer_nid;
        source_id.pid = PTL_PID_ANY;

        if (request->rq_replen != 0) {
                /* request->rq_repmsg is set only when the reply comes in, in
                 * client_packet_callback() */
                if (request->rq_reply_md.start)
                        OBD_FREE(request->rq_reply_md.start,
                                 request->rq_replen);

                OBD_ALLOC(repbuf, request->rq_replen);
                if (repbuf == NULL)
                        return -ENOMEM;

                rc = PtlMEAttach(request->rq_connection->c_peer.peer_ni,
                             request->rq_import->imp_client->cli_reply_portal,
                             source_id, request->rq_xid, 0, PTL_UNLINK,
                             PTL_INS_AFTER, &request->rq_reply_me_h);
                if (rc != PTL_OK) {
                        CERROR("PtlMEAttach failed: %d\n", rc);
                        goto cleanup;
                }

                request->rq_reply_md.start = repbuf;
                request->rq_reply_md.length = request->rq_replen;
                request->rq_reply_md.threshold = 1;
                request->rq_reply_md.options = PTL_MD_OP_PUT;
                request->rq_reply_md.user_ptr = request;
                request->rq_reply_md.eventq = reply_in_eq;

                rc = PtlMDAttach(request->rq_reply_me_h, request->rq_reply_md,
                                 PTL_UNLINK, &request->rq_reply_md_h);
                if (rc != PTL_OK) {
                        CERROR("PtlMDAttach failed: %d\n", rc);
                        goto cleanup2;
                }

                CDEBUG(D_NET, "Setup reply buffer: %u bytes, xid "LPU64
                       ", portal %u\n",
                       request->rq_replen, request->rq_xid,
                       request->rq_import->imp_client->cli_reply_portal);
        }

        rc = ptl_send_buf(request, request->rq_connection,
                          request->rq_import->imp_client->cli_request_portal);
        return rc;

 cleanup2:
        PtlMEUnlink(request->rq_reply_me_h);
 cleanup:
        OBD_FREE(repbuf, request->rq_replen);
        // up(&request->rq_client->cli_rpc_sem);

        return rc;
}

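/* Post a service request buffer: attach a match entry that accepts any
 * sender on the service's request portal and an auto-unlinking MD over
 * the buffer, so incoming requests of up to srv_max_req_size bytes are
 * deposited there until the buffer fills up. */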
void ptlrpc_link_svc_me(struct ptlrpc_request_buffer_desc *rqbd)
{
        struct ptlrpc_service *service = rqbd->rqbd_service;
        static ptl_process_id_t match_id = {PTL_NID_ANY, PTL_PID_ANY};
        int rc;
        ptl_md_t dummy;
        ptl_handle_md_t md_h;

        LASSERT (atomic_read (&rqbd->rqbd_refcount) == 0);

        /* Attach the leading ME on which we build the ring */
        rc = PtlMEAttach(service->srv_self.peer_ni, service->srv_req_portal,
                         match_id, 0, ~0, /* ignore all match bits */
                         PTL_UNLINK, PTL_INS_AFTER, &rqbd->rqbd_me_h);
        if (rc != PTL_OK) {
                CERROR("PtlMEAttach failed: %d\n", rc);
                LBUG();
        }

        dummy.start = rqbd->rqbd_buffer;
        dummy.length = service->srv_buf_size;
        dummy.max_size = service->srv_max_req_size;
        dummy.threshold = PTL_MD_THRESH_INF;
        dummy.options = PTL_MD_OP_PUT | PTL_MD_MAX_SIZE | PTL_MD_AUTO_UNLINK;
        dummy.user_ptr = rqbd;
        dummy.eventq = service->srv_eq_h;

        atomic_inc (&service->srv_nrqbds_receiving);
        atomic_set (&rqbd->rqbd_refcount, 1); /* 1 ref for portals */

        rc = PtlMDAttach(rqbd->rqbd_me_h, dummy, PTL_UNLINK, &md_h);
        if (rc != PTL_OK) {
                CERROR("PtlMDAttach failed: %d\n", rc);
#warning proper cleanup required
                PtlMEUnlink (rqbd->rqbd_me_h);
                atomic_set (&rqbd->rqbd_refcount, 0);
                atomic_dec (&service->srv_nrqbds_receiving);
        }
}