/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * Copyright (c) 2002, 2003 Cluster File Systems, Inc.
 *
 * This file is part of Lustre, http://www.lustre.org.
 *
 * Lustre is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * Lustre is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with Lustre; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#define DEBUG_SUBSYSTEM S_RPC

#include <linux/obd_support.h>
#include <linux/lustre_net.h>
#include <linux/lustre_lib.h>
#include <linux/obd.h>
extern ptl_handle_eq_t request_out_eq, reply_in_eq, reply_out_eq,
        bulk_put_source_eq, bulk_put_sink_eq,
        bulk_get_source_eq, bulk_get_sink_eq;
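
/* Bind the outgoing message (request or reply) as a memory descriptor
 * and PUT it to the given portal on the peer.  The MD threshold is 1,
 * so it is consumed by the single send event. */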
static int ptl_send_buf(struct ptlrpc_request *request,
                        struct ptlrpc_connection *conn, int portal)
{
        int rc;
        ptl_process_id_t remote_id;
        ptl_handle_md_t md_h;

        request->rq_req_md.user_ptr = request;

        switch (request->rq_type) {
        case PTL_RPC_MSG_REQUEST:
                request->rq_reqmsg->type = HTON__u32(request->rq_type);
                request->rq_req_md.start = request->rq_reqmsg;
                request->rq_req_md.length = request->rq_reqlen;
                request->rq_req_md.eventq = request_out_eq;
                break;
        case PTL_RPC_MSG_REPLY:
                request->rq_repmsg->type = HTON__u32(request->rq_type);
                request->rq_req_md.start = request->rq_repmsg;
                request->rq_req_md.length = request->rq_replen;
                request->rq_req_md.eventq = reply_out_eq;
                break;
        default:
                LBUG();
                return -1; /* notreached */
        }
        request->rq_req_md.threshold = 1;
        request->rq_req_md.options = PTL_MD_OP_PUT;
        rc = PtlMDBind(conn->c_peer.peer_ni, request->rq_req_md, &md_h);
        if (rc != 0) {
                CERROR("PtlMDBind failed: %d\n", rc);
                return rc;
        }

        remote_id.nid = conn->c_peer.peer_nid;
        remote_id.pid = 0;
        CDEBUG(D_NET, "Sending %d bytes to portal %d, xid "LPD64"\n",
               request->rq_req_md.length, portal, request->rq_xid);

        rc = PtlPut(md_h, PTL_NOACK_REQ, remote_id, portal, 0, request->rq_xid,
                    0, 0);
        if (rc != PTL_OK) {
                CERROR("PtlPut("LPU64", %d, "LPD64") failed: %d\n",
                       remote_id.nid, portal, request->rq_xid, rc);
                PtlMDUnlink(md_h);
        }

        return rc;
}
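
/* Helpers to get an iovec big enough for all the bulk pages: use the
 * descriptor's embedded bd_iov array when it fits, otherwise allocate
 * (and later free) a larger one. */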
static inline struct iovec *
ptlrpc_get_bulk_iov (struct ptlrpc_bulk_desc *desc)
{
        struct iovec *iov;

        if (desc->bd_page_count <= sizeof (desc->bd_iov)/sizeof (struct iovec))
                return (desc->bd_iov);

        OBD_ALLOC (iov, desc->bd_page_count * sizeof (struct iovec));
        if (iov == NULL)
                return (NULL);

        return (iov);
}
static inline void
ptlrpc_put_bulk_iov (struct ptlrpc_bulk_desc *desc, struct iovec *iov)
{
        if (desc->bd_page_count <= sizeof (desc->bd_iov)/sizeof (struct iovec))
                return;

        OBD_FREE (iov, desc->bd_page_count * sizeof (struct iovec));
}
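
/* Active-side bulk PUT: gather every page on the descriptor into an
 * iovec MD and PUT it to the peer's bulk portal, matching on the
 * transfer's xid.  The MD threshold is 2 because both a SENT and an
 * ACK event are expected. */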
int ptlrpc_bulk_put(struct ptlrpc_bulk_desc *desc)
{
        int rc;
        struct list_head *tmp, *next;
        ptl_process_id_t remote_id;
        __u32 xid = 0;
        struct iovec *iov;

        iov = ptlrpc_get_bulk_iov (desc);
        if (iov == NULL)
                return -ENOMEM;

        desc->bd_md.start = iov;
        desc->bd_md.niov = 0;
        desc->bd_md.length = 0;
        desc->bd_md.eventq = bulk_put_source_eq;
        desc->bd_md.threshold = 2; /* SENT and ACK */
        desc->bd_md.options = PTL_MD_OP_PUT | PTL_MD_IOV;
        desc->bd_md.user_ptr = desc;

        atomic_set(&desc->bd_source_callback_count, 2);

        list_for_each_safe(tmp, next, &desc->bd_page_list) {
                struct ptlrpc_bulk_page *bulk;
                bulk = list_entry(tmp, struct ptlrpc_bulk_page, bp_link);

                LASSERT(desc->bd_md.niov < desc->bd_page_count);

                if (desc->bd_md.niov == 0)
                        xid = bulk->bp_xid;
                LASSERT(xid == bulk->bp_xid); /* should all be the same */

                iov[desc->bd_md.niov].iov_base = bulk->bp_buf;
                iov[desc->bd_md.niov].iov_len = bulk->bp_buflen;
                if (iov[desc->bd_md.niov].iov_len <= 0) {
                        CERROR("bad bp_buflen[%d] @ %p: %d\n", desc->bd_md.niov,
                               bulk->bp_buf, bulk->bp_buflen);
                        CERROR("desc: xid %u, pages %d, ptl %d, ref %d\n",
                               xid, desc->bd_page_count, desc->bd_portal,
                               atomic_read(&desc->bd_refcount));
                        LBUG();
                }

                desc->bd_md.niov++;
                desc->bd_md.length += bulk->bp_buflen;
        }

        LASSERT(desc->bd_md.niov == desc->bd_page_count);
        LASSERT(desc->bd_md.niov != 0);
        rc = PtlMDBind(desc->bd_connection->c_peer.peer_ni, desc->bd_md,
                       &desc->bd_md_h);

        ptlrpc_put_bulk_iov (desc, iov); /* move down to reduce latency to send */

        if (rc != PTL_OK) {
                CERROR("PtlMDBind failed: %d\n", rc);
                return rc;
        }

        remote_id.nid = desc->bd_connection->c_peer.peer_nid;
        remote_id.pid = 0;

        CDEBUG(D_NET, "Sending %u pages %u bytes to portal %d nid "LPX64" pid "
               "%d xid %d\n", desc->bd_md.niov, desc->bd_md.length,
               desc->bd_portal, remote_id.nid, remote_id.pid, xid);

        rc = PtlPut(desc->bd_md_h, PTL_ACK_REQ, remote_id,
                    desc->bd_portal, 0, xid, 0, 0);
        if (rc != PTL_OK) {
                CERROR("PtlPut("LPU64", %d, %d) failed: %d\n",
                       remote_id.nid, desc->bd_portal, xid, rc);
                PtlMDUnlink(desc->bd_md_h);
                return rc;
        }

        return 0;
}
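
/* Active-side bulk GET: same gather setup as ptlrpc_bulk_put(), but the
 * pages are the sink of a PtlGet from the peer's registered buffers, so
 * the two expected events are SENT and REPLY. */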
int ptlrpc_bulk_get(struct ptlrpc_bulk_desc *desc)
{
        int rc;
        struct list_head *tmp, *next;
        ptl_process_id_t remote_id;
        __u32 xid = 0;
        struct iovec *iov;

        iov = ptlrpc_get_bulk_iov (desc);
        if (iov == NULL)
                return -ENOMEM;

        desc->bd_md.start = iov;
        desc->bd_md.niov = 0;
        desc->bd_md.length = 0;
        desc->bd_md.eventq = bulk_get_sink_eq;
        desc->bd_md.threshold = 2; /* SENT and REPLY */
        desc->bd_md.options = PTL_MD_OP_GET | PTL_MD_IOV;
        desc->bd_md.user_ptr = desc;

        atomic_set(&desc->bd_source_callback_count, 2);

        list_for_each_safe(tmp, next, &desc->bd_page_list) {
                struct ptlrpc_bulk_page *bulk;
                bulk = list_entry(tmp, struct ptlrpc_bulk_page, bp_link);

                LASSERT(desc->bd_md.niov < desc->bd_page_count);

                if (desc->bd_md.niov == 0)
                        xid = bulk->bp_xid;
                LASSERT(xid == bulk->bp_xid); /* should all be the same */

                iov[desc->bd_md.niov].iov_base = bulk->bp_buf;
                iov[desc->bd_md.niov].iov_len = bulk->bp_buflen;
                if (iov[desc->bd_md.niov].iov_len <= 0) {
                        CERROR("bad bp_buflen[%d] @ %p: %d\n", desc->bd_md.niov,
                               bulk->bp_buf, bulk->bp_buflen);
                        CERROR("desc: xid %u, pages %d, ptl %d, ref %d\n",
                               xid, desc->bd_page_count, desc->bd_portal,
                               atomic_read(&desc->bd_refcount));
                        LBUG();
                }

                desc->bd_md.niov++;
                desc->bd_md.length += bulk->bp_buflen;
        }

        LASSERT(desc->bd_md.niov == desc->bd_page_count);
        LASSERT(desc->bd_md.niov != 0);
        rc = PtlMDBind(desc->bd_connection->c_peer.peer_ni, desc->bd_md,
                       &desc->bd_md_h);

        ptlrpc_put_bulk_iov (desc, iov); /* move down to reduce latency to send */

        if (rc != PTL_OK) {
                CERROR("PtlMDBind failed: %d\n", rc);
                return rc;
        }

        remote_id.nid = desc->bd_connection->c_peer.peer_nid;
        remote_id.pid = 0;

        CDEBUG(D_NET, "Fetching %u pages %u bytes from portal %d nid "LPX64
               " pid %d xid %d\n", desc->bd_md.niov, desc->bd_md.length,
               desc->bd_portal, remote_id.nid, remote_id.pid, xid);

        rc = PtlGet(desc->bd_md_h, remote_id, desc->bd_portal, 0, xid, 0);
        if (rc != PTL_OK) {
                CERROR("PtlGet("LPU64", %d, %d) failed: %d\n",
                       remote_id.nid, desc->bd_portal, xid, rc);
                PtlMDUnlink(desc->bd_md_h);
                return rc;
        }

        return 0;
}
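
/* Passive side of a bulk transfer: attach a match entry on the bulk
 * portal, keyed on the transfer's xid, and an iovec MD spanning all the
 * descriptor's pages; the peer then drives the data movement with a
 * single PUT or GET. */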
static int ptlrpc_register_bulk_shared(struct ptlrpc_bulk_desc *desc)
{
        int rc;
        struct list_head *tmp, *next;
        __u32 xid = 0;
        struct iovec *iov;
        ptl_process_id_t source_id;

        if (desc->bd_page_count > PTL_MD_MAX_IOV) {
                CERROR("iov longer than %d pages not supported (count=%d)\n",
                       PTL_MD_MAX_IOV, desc->bd_page_count);
                return -EINVAL;
        }

        iov = ptlrpc_get_bulk_iov (desc);
        if (iov == NULL)
                return -ENOMEM;

        desc->bd_md.start = iov;
        desc->bd_md.niov = 0;
        desc->bd_md.length = 0;
        desc->bd_md.threshold = 1;
        desc->bd_md.user_ptr = desc;

        list_for_each_safe(tmp, next, &desc->bd_page_list) {
                struct ptlrpc_bulk_page *bulk;
                bulk = list_entry(tmp, struct ptlrpc_bulk_page, bp_link);

                LASSERT(desc->bd_md.niov < desc->bd_page_count);
                if (desc->bd_md.niov == 0)
                        xid = bulk->bp_xid;
                LASSERT(xid == bulk->bp_xid); /* should all be the same */

                iov[desc->bd_md.niov].iov_base = bulk->bp_buf;
                iov[desc->bd_md.niov].iov_len = bulk->bp_buflen;
                desc->bd_md.niov++;
                desc->bd_md.length += bulk->bp_buflen;
        }

        LASSERT(desc->bd_md.niov == desc->bd_page_count);
        LASSERT(desc->bd_md.niov != 0);

        source_id.nid = desc->bd_connection->c_peer.peer_nid;
        source_id.pid = PTL_PID_ANY;

        rc = PtlMEAttach(desc->bd_connection->c_peer.peer_ni,
                         desc->bd_portal, source_id, xid, 0,
                         PTL_UNLINK, PTL_INS_AFTER, &desc->bd_me_h);
        if (rc != PTL_OK) {
                CERROR("PtlMEAttach failed: %d\n", rc);
                goto cleanup;
        }

        rc = PtlMDAttach(desc->bd_me_h, desc->bd_md, PTL_UNLINK,
                         &desc->bd_md_h);
        if (rc != PTL_OK) {
                CERROR("PtlMDAttach failed: %d\n", rc);
                goto cleanup;
        }

        ptlrpc_put_bulk_iov (desc, iov);

        CDEBUG(D_NET, "Setup bulk sink buffers: %u pages %u bytes, xid %u, "
               "portal %u\n", desc->bd_md.niov, desc->bd_md.length,
               xid, desc->bd_portal);

        return 0;

 cleanup:
        ptlrpc_put_bulk_iov (desc, iov);
        ptlrpc_abort_bulk(desc);
        return rc;
}
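
/* Registering for a peer-initiated GET makes this side the data source;
 * registering for a peer-initiated PUT makes it the sink.  Only the
 * permitted operation and the event queue differ. */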
int ptlrpc_register_bulk_get(struct ptlrpc_bulk_desc *desc)
{
        desc->bd_md.options = PTL_MD_OP_GET | PTL_MD_IOV;
        desc->bd_md.eventq = bulk_get_source_eq;

        return ptlrpc_register_bulk_shared(desc);
}

int ptlrpc_register_bulk_put(struct ptlrpc_bulk_desc *desc)
{
        desc->bd_md.options = PTL_MD_OP_PUT | PTL_MD_IOV;
        desc->bd_md.eventq = bulk_put_sink_eq;

        return ptlrpc_register_bulk_shared(desc);
}
int ptlrpc_abort_bulk(struct ptlrpc_bulk_desc *desc)
{
        /* This should be safe: these handles are initialized to be
         * invalid in ptlrpc_prep_bulk() */
        PtlMDUnlink(desc->bd_md_h);
        PtlMEUnlink(desc->bd_me_h);

        return 0;
}
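
/* An obd_brw_set groups the bulk descriptors of a single brw call so
 * their completion can be waited on collectively: adding a descriptor
 * takes a reference on both the descriptor and the set. */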
void obd_brw_set_add(struct obd_brw_set *set, struct ptlrpc_bulk_desc *desc)
{
        LASSERT(list_empty(&desc->bd_set_chain));

        ptlrpc_bulk_addref(desc);
        atomic_inc(&set->brw_refcount);
        desc->bd_brw_set = set;
        list_add(&desc->bd_set_chain, &set->brw_desc_head);
}
struct obd_brw_set *obd_brw_set_new(void)
{
        struct obd_brw_set *set;

        OBD_ALLOC(set, sizeof(*set));
        if (set == NULL)
                return NULL;

        init_waitqueue_head(&set->brw_waitq);
        INIT_LIST_HEAD(&set->brw_desc_head);
        atomic_set(&set->brw_refcount, 0);

        return set;
}
void obd_brw_set_free(struct obd_brw_set *set)
{
        struct list_head *tmp, *next;

        if (!list_empty(&set->brw_desc_head)) {
                CERROR("freeing set %p with bulk descriptors outstanding\n",
                       set);
        }

        list_for_each_safe(tmp, next, &set->brw_desc_head) {
                struct ptlrpc_bulk_desc *desc =
                        list_entry(tmp, struct ptlrpc_bulk_desc, bd_set_chain);

                CERROR("Unfinished bulk descriptor: %p\n", desc);

                ptlrpc_abort_bulk(desc);
        }

        OBD_FREE(set, sizeof(*set));
}
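
/* Pack the request's status into the pre-allocated reply message and
 * send it back on the service's reply portal. */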
int ptlrpc_reply(struct ptlrpc_service *svc, struct ptlrpc_request *req)
{
        if (req->rq_repmsg == NULL) {
                CERROR("bad: someone called ptlrpc_reply when they meant "
                       "ptlrpc_error\n");
                return -EINVAL;
        }

        /* FIXME: we need to increment the count of handled events */
        if (req->rq_type != PTL_RPC_MSG_ERR)
                req->rq_type = PTL_RPC_MSG_REPLY;
        //req->rq_repmsg->conn = req->rq_connection->c_remote_conn;
        //req->rq_repmsg->token = req->rq_connection->c_remote_token;
        req->rq_repmsg->status = HTON__u32(req->rq_status);
        return ptl_send_buf(req, req->rq_connection, svc->srv_rep_portal);
}
int ptlrpc_error(struct ptlrpc_service *svc, struct ptlrpc_request *req)
{
        int rc;

        /* Pack a minimal reply if none exists yet; the error status
         * travels in the reply message header. */
        if (!req->rq_repmsg) {
                rc = lustre_pack_msg(0, NULL, NULL, &req->rq_replen,
                                     &req->rq_repmsg);
                if (rc)
                        return rc;
        }

        req->rq_type = PTL_RPC_MSG_ERR;

        rc = ptlrpc_reply(svc, req);
        return rc;
}
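
/* Send a request over the wire: pre-post a buffer for the expected
 * reply (a match entry keyed on the request xid), then PUT the request
 * message to the peer's request portal. */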
int ptl_send_rpc(struct ptlrpc_request *request)
{
        int rc;
        char *repbuf;
        ptl_process_id_t source_id;

        if (request->rq_type != PTL_RPC_MSG_REQUEST) {
                CERROR("wrong packet type sent %d\n",
                       NTOH__u32(request->rq_reqmsg->type));
                LBUG();
                return -EINVAL;
        }

        source_id.nid = request->rq_connection->c_peer.peer_nid;
        source_id.pid = PTL_PID_ANY;

        /* add a ref, which will be balanced in request_out_callback */
        ptlrpc_request_addref(request);
        if (request->rq_replen != 0) {
                /* resend: unlink the stale reply ME and reuse its buffer */
                if (request->rq_reply_md.start != NULL) {
                        rc = PtlMEUnlink(request->rq_reply_me_h);
                        if (rc != PTL_OK && rc != PTL_INV_ME) {
                                CERROR("rc %d\n", rc);
                                LBUG();
                        }
                        repbuf = (char *)request->rq_reply_md.start;
                        request->rq_repmsg = NULL;
                } else {
                        OBD_ALLOC(repbuf, request->rq_replen);
                        if (!repbuf)
                                return -ENOMEM;
                }

                rc = PtlMEAttach(request->rq_connection->c_peer.peer_ni,
                             request->rq_reply_portal,/* XXX FIXME bug 625069 */
                                 source_id, request->rq_xid, 0, PTL_UNLINK,
                                 PTL_INS_AFTER, &request->rq_reply_me_h);
                if (rc != PTL_OK) {
                        CERROR("PtlMEAttach failed: %d\n", rc);
                        goto cleanup;
                }

                request->rq_reply_md.start = repbuf;
                request->rq_reply_md.length = request->rq_replen;
                request->rq_reply_md.threshold = 1;
                request->rq_reply_md.options = PTL_MD_OP_PUT;
                request->rq_reply_md.user_ptr = request;
                request->rq_reply_md.eventq = reply_in_eq;

                rc = PtlMDAttach(request->rq_reply_me_h, request->rq_reply_md,
                                 PTL_UNLINK, &request->rq_reply_md_h);
                if (rc != PTL_OK) {
                        CERROR("PtlMDAttach failed: %d\n", rc);
                        goto cleanup2;
                }

                CDEBUG(D_NET, "Setup reply buffer: %u bytes, xid "LPU64
                       ", portal %u\n",
                       request->rq_replen, request->rq_xid,
                       request->rq_reply_portal);
        }

        /* Clear any flags that may be present from previous sends,
         * except for REPLAY. */
        request->rq_flags &= PTL_RPC_FL_REPLAY;
        rc = ptl_send_buf(request, request->rq_connection,
                          request->rq_request_portal);
        return rc;

 cleanup2:
        PtlMEUnlink(request->rq_reply_me_h);
 cleanup:
        OBD_FREE(repbuf, request->rq_replen);
        // up(&request->rq_client->cli_rpc_sem);

        return rc;
}
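
/* Post a request buffer on the service's request portal: a wildcard
 * match entry (any nid, any pid) in front of an auto-unlinking MD that
 * accepts up to srv_max_req_size per incoming request. */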
void ptlrpc_link_svc_me(struct ptlrpc_request_buffer_desc *rqbd)
{
        struct ptlrpc_service *service = rqbd->rqbd_service;
        static ptl_process_id_t match_id = {PTL_NID_ANY, PTL_PID_ANY};
        int rc;
        ptl_md_t dummy;
        ptl_handle_md_t md_h;

        LASSERT(atomic_read(&rqbd->rqbd_refcount) == 0);

        /* Attach the leading ME on which we build the ring */
        rc = PtlMEAttach(service->srv_self.peer_ni, service->srv_req_portal,
                         match_id, 0, ~0,
                         PTL_UNLINK, PTL_INS_AFTER, &rqbd->rqbd_me_h);
        if (rc != PTL_OK) {
                CERROR("PtlMEAttach failed: %d\n", rc);
                LBUG();
        }

        dummy.start = rqbd->rqbd_buffer;
        dummy.length = service->srv_buf_size;
        dummy.max_size = service->srv_max_req_size;
        dummy.threshold = PTL_MD_THRESH_INF;
        dummy.options = PTL_MD_OP_PUT | PTL_MD_MAX_SIZE | PTL_MD_AUTO_UNLINK;
        dummy.user_ptr = rqbd;
        dummy.eventq = service->srv_eq_h;

        atomic_inc(&service->srv_nrqbds_receiving);
        atomic_set(&rqbd->rqbd_refcount, 1); /* 1 ref for portals */

        rc = PtlMDAttach(rqbd->rqbd_me_h, dummy, PTL_UNLINK, &md_h);
        if (rc != PTL_OK) {
                CERROR("PtlMDAttach failed: %d\n", rc);
#warning proper cleanup required
                PtlMEUnlink (rqbd->rqbd_me_h);
                atomic_set(&rqbd->rqbd_refcount, 0);
                atomic_dec(&service->srv_nrqbds_receiving);
        }
}