/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * Copyright (C) 2002 Cluster File Systems, Inc.
 *
 * This file is part of Lustre, http://www.lustre.org.
 *
 * Lustre is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * Lustre is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with Lustre; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#define DEBUG_SUBSYSTEM S_RPC

#include <linux/obd_support.h>
#include <linux/lustre_net.h>
#include <linux/lustre_lib.h>
extern ptl_handle_eq_t request_out_eq, reply_in_eq, reply_out_eq,
        bulk_source_eq, bulk_sink_eq;

static ptl_process_id_t local_id = {PTL_NID_ANY, PTL_PID_ANY};
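
/*
 * Completion checks for bulk transfers.  Each returns non-zero either
 * when the corresponding bulk event has been seen or when a fatal
 * signal is pending (in which case PTL_RPC_FL_INTR is set on the
 * descriptor), so they are suitable as wait-loop conditions.
 */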
int ptlrpc_check_bulk_sent(struct ptlrpc_bulk_desc *desc)
{
        if (desc->b_flags & PTL_BULK_FL_SENT)
                return 1;

        if (l_killable_pending(current)) {
                desc->b_flags |= PTL_RPC_FL_INTR;
                return 1;
        }

        CDEBUG(D_NET, "no event yet\n");
        return 0;
}
int ptlrpc_check_bulk_received(struct ptlrpc_bulk_desc *desc)
{
        if (desc->b_flags & PTL_BULK_FL_RCVD)
                return 1;

        if (l_killable_pending(current)) {
                desc->b_flags |= PTL_RPC_FL_INTR;
                return 1;
        }

        CDEBUG(D_NET, "no event yet\n");
        return 0;
}
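
/*
 * Send a request or reply buffer to the peer: bind a memory descriptor
 * over the outgoing message and PtlPut() it to the given portal, using
 * the request's xid as the match bits.
 */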
static int ptl_send_buf(struct ptlrpc_request *request,
                        struct ptlrpc_connection *conn, int portal)
{
        int rc;
        ptl_process_id_t remote_id;
        ptl_handle_md_t md_h;
        ptl_ack_req_t ack = PTL_NOACK_REQ;      /* plain sends need no ACK */

        request->rq_req_md.user_ptr = request;

        switch (request->rq_type) {
        case PTL_RPC_TYPE_REQUEST:
                request->rq_req_md.start = request->rq_reqmsg;
                request->rq_req_md.length = request->rq_reqlen;
                request->rq_req_md.eventq = request_out_eq;
                request->rq_req_md.threshold = 1;
                break;
        case PTL_RPC_TYPE_REPLY:
                request->rq_req_md.start = request->rq_repmsg;
                request->rq_req_md.length = request->rq_replen;
                request->rq_req_md.eventq = reply_out_eq;
                request->rq_req_md.threshold = 1;
                break;
        default:
                LBUG();
                return -1; /* notreached */
        }
        request->rq_req_md.options = PTL_MD_OP_PUT;
        request->rq_req_md.user_ptr = request;

        rc = PtlMDBind(conn->c_peer.peer_ni, request->rq_req_md, &md_h);
        //CERROR("MDBind (outgoing req/rep/bulk): %Lu\n", (__u64)md_h);
        if (rc != PTL_OK) {
                CERROR("PtlMDBind failed: %d\n", rc);
                LBUG();
                return rc;
        }

        remote_id.nid = conn->c_peer.peer_nid;
        remote_id.pid = 0;

        CDEBUG(D_NET, "Sending %d bytes to portal %d, xid %Ld\n",
               request->rq_req_md.length, portal, request->rq_xid);

        rc = PtlPut(md_h, ack, remote_id, portal, 0, request->rq_xid,
                    0, 0);
        if (rc != PTL_OK) {
                CERROR("PtlPut(%Lu, %d, %Ld) failed: %d\n", remote_id.nid,
                       portal, request->rq_xid, rc);
                PtlMDUnlink(md_h);
        }

        return rc;
}
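
/*
 * Helpers for building the scatter/gather iovec that describes a bulk
 * descriptor: use the small iovec embedded in the descriptor when the
 * page count fits, otherwise allocate (and later free) a temporary one.
 */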
static inline struct iovec *
ptlrpc_get_bulk_iov (struct ptlrpc_bulk_desc *desc)
{
        struct iovec *iov;

        if (desc->b_page_count <= sizeof (desc->b_iov)/sizeof (struct iovec))
                return (desc->b_iov);

        OBD_ALLOC (iov, desc->b_page_count * sizeof (struct iovec));
        return (iov);                   /* NULL on allocation failure */
}
static inline void
ptlrpc_put_bulk_iov (struct ptlrpc_bulk_desc *desc, struct iovec *iov)
{
        if (desc->b_page_count <= sizeof (desc->b_iov)/sizeof (struct iovec))
                return;

        OBD_FREE (iov, desc->b_page_count * sizeof (struct iovec));
}
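
/*
 * Source side of a bulk transfer: gather every page on b_page_list into
 * an iovec, bind it as a single memory descriptor (threshold 2, for the
 * SENT and ACK events), and PtlPut() it to the peer's bulk portal with
 * the bulk xid as the match bits.
 */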
int ptlrpc_send_bulk(struct ptlrpc_bulk_desc *desc)
{
        int rc;
        struct list_head *tmp, *next;
        ptl_process_id_t remote_id;
        struct iovec *iov;
        __u32 xid = 0;

        iov = ptlrpc_get_bulk_iov (desc);
        if (iov == NULL)
                return -ENOMEM;

        desc->b_md.start = iov;
        desc->b_md.niov = 0;
        desc->b_md.length = 0;
        desc->b_md.eventq = bulk_source_eq;
        desc->b_md.threshold = 2; /* SENT and ACK */
        desc->b_md.options = PTL_MD_OP_PUT | PTL_MD_IOV;
        desc->b_md.user_ptr = desc;

        list_for_each_safe(tmp, next, &desc->b_page_list) {
                struct ptlrpc_bulk_page *bulk;
                bulk = list_entry(tmp, struct ptlrpc_bulk_page, b_link);

                LASSERT (desc->b_md.niov < desc->b_page_count);

                if (desc->b_md.niov == 0)
                        xid = bulk->b_xid;
                LASSERT (xid == bulk->b_xid); /* should all be the same */

                iov[desc->b_md.niov].iov_base = bulk->b_buf;
                iov[desc->b_md.niov].iov_len = bulk->b_buflen;

                desc->b_md.niov++;
                desc->b_md.length += bulk->b_buflen;
        }

        LASSERT (desc->b_md.niov == desc->b_page_count);
        LASSERT (desc->b_md.niov != 0);

        rc = PtlMDBind(desc->b_connection->c_peer.peer_ni, desc->b_md,
                       &desc->b_md_h);

        ptlrpc_put_bulk_iov (desc, iov); /* move down to reduce latency to send */

        if (rc != PTL_OK) {
                CERROR("PtlMDBind failed: %d\n", rc);
                LBUG();
                return rc;
        }

        remote_id.nid = desc->b_connection->c_peer.peer_nid;
        remote_id.pid = 0;

        CDEBUG(D_NET, "Sending %u pages %u bytes to portal %d nid %Lx pid %d xid %d\n",
               desc->b_md.niov, desc->b_md.length,
               desc->b_portal, remote_id.nid, remote_id.pid, xid);

        rc = PtlPut(desc->b_md_h, PTL_ACK_REQ, remote_id,
                    desc->b_portal, 0, xid, 0, 0);
        if (rc != PTL_OK) {
                CERROR("PtlPut(%Lu, %d, %d) failed: %d\n",
                       remote_id.nid, desc->b_portal, xid, rc);
                PtlMDUnlink(desc->b_md_h);
                return rc;
        }

        return 0;
}
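
/*
 * Sink side of a bulk transfer: attach a match entry on our bulk portal
 * keyed by the bulk xid, then attach an auto-unlinking memory descriptor
 * covering the sink pages so the peer's PtlPut() lands directly in them.
 */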
int ptlrpc_register_bulk(struct ptlrpc_bulk_desc *desc)
{
        int rc;
        struct list_head *tmp, *next;
        struct iovec *iov;
        __u32 xid = 0;

        iov = ptlrpc_get_bulk_iov (desc);
        if (iov == NULL)
                return -ENOMEM;

        desc->b_md.start = iov;
        desc->b_md.niov = 0;
        desc->b_md.length = 0;
        desc->b_md.threshold = 1;
        desc->b_md.options = PTL_MD_OP_PUT | PTL_MD_IOV;
        desc->b_md.user_ptr = desc;
        desc->b_md.eventq = bulk_sink_eq;

        list_for_each_safe(tmp, next, &desc->b_page_list) {
                struct ptlrpc_bulk_page *bulk;
                bulk = list_entry(tmp, struct ptlrpc_bulk_page, b_link);

                LASSERT (desc->b_md.niov < desc->b_page_count);

                if (desc->b_md.niov == 0)
                        xid = bulk->b_xid;
                LASSERT (xid == bulk->b_xid); /* should all be the same */

                iov[desc->b_md.niov].iov_base = bulk->b_buf;
                iov[desc->b_md.niov].iov_len = bulk->b_buflen;

                desc->b_md.niov++;
                desc->b_md.length += bulk->b_buflen;
        }

        LASSERT (desc->b_md.niov == desc->b_page_count);
        LASSERT (desc->b_md.niov != 0);

        rc = PtlMEAttach(desc->b_connection->c_peer.peer_ni,
                         desc->b_portal, local_id, xid, 0,
                         PTL_UNLINK, PTL_INS_AFTER, &desc->b_me_h);

        ptlrpc_put_bulk_iov (desc, iov);

        if (rc != PTL_OK) {
                CERROR("PtlMEAttach failed: %d\n", rc);
                LBUG();
                goto cleanup;
        }

        rc = PtlMDAttach(desc->b_me_h, desc->b_md, PTL_UNLINK,
                         &desc->b_md_h);
        if (rc != PTL_OK) {
                CERROR("PtlMDAttach failed: %d\n", rc);
                LBUG();
                goto cleanup;
        }

        CDEBUG(D_NET, "Setup bulk sink buffers: %u pages %u bytes, xid %u, "
               "portal %u\n", desc->b_md.niov, desc->b_md.length,
               xid, desc->b_portal);

        return 0;

 cleanup:
        ptlrpc_abort_bulk(desc);
        return rc;
}
int ptlrpc_abort_bulk(struct ptlrpc_bulk_desc *desc)
{
        /* This should be safe: these handles are initialized to be
         * invalid in ptlrpc_prep_bulk() */
        PtlMDUnlink(desc->b_md_h);
        PtlMEUnlink(desc->b_me_h);

        return 0;
}
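
/*
 * Send the reply for a handled request: the reply message must already
 * have been packed into rq_repmsg; only the status is filled in here
 * before the buffer is handed to ptl_send_buf() on the service's reply
 * portal.
 */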
int ptlrpc_reply(struct ptlrpc_service *svc, struct ptlrpc_request *req)
{
        if (req->rq_repmsg == NULL) {
                CERROR("bad: someone called ptlrpc_reply when they meant "
                       "ptlrpc_error\n");
                return -EINVAL;
        }

        /* FIXME: we need to increment the count of handled events */
        req->rq_type = PTL_RPC_TYPE_REPLY;
        //req->rq_repmsg->conn = req->rq_connection->c_remote_conn;
        //req->rq_repmsg->token = req->rq_connection->c_remote_token;
        req->rq_repmsg->status = HTON__u32(req->rq_status);
        req->rq_reqmsg->type = HTON__u32(req->rq_type);
        return ptl_send_buf(req, req->rq_connection, svc->srv_rep_portal);
}
int ptlrpc_error(struct ptlrpc_service *svc, struct ptlrpc_request *req)
{
        int rc;

        if (req->rq_repmsg) {
                CERROR("req already has repmsg\n");
                LBUG();
        }

        rc = lustre_pack_msg(0, NULL, NULL, &req->rq_replen, &req->rq_repmsg);
        if (rc)
                return rc;

        req->rq_repmsg->type = HTON__u32(PTL_RPC_MSG_ERR);

        rc = ptlrpc_reply(svc, req);
        return rc;
}
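
/*
 * Client-side send: pre-post the reply buffer (a match entry plus memory
 * descriptor keyed by the request xid on the client's reply portal), then
 * send the request itself.  cli_rpc_sem bounds the number of RPCs in
 * flight: it is taken here, released again on the failure paths below,
 * and presumably released elsewhere once the reply has been handled.
 */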
int ptl_send_rpc(struct ptlrpc_request *request)
{
        int rc;
        char *repbuf;

        if (NTOH__u32(request->rq_reqmsg->type) != PTL_RPC_MSG_REQUEST) {
                CERROR("wrong packet type sent %d\n",
                       NTOH__u32(request->rq_reqmsg->type));
                LBUG();
                return -EINVAL;
        }
        if (request->rq_replen == 0) {
                CERROR("request->rq_replen is 0!\n");
                return -EINVAL;
        }

        /* request->rq_repmsg is set only when the reply comes in, in
         * client_packet_callback() */
        if (request->rq_reply_md.start)
                OBD_FREE(request->rq_reply_md.start, request->rq_replen);

        OBD_ALLOC(repbuf, request->rq_replen);
        if (repbuf == NULL)
                return -ENOMEM;

        down(&request->rq_client->cli_rpc_sem);

        rc = PtlMEAttach(request->rq_connection->c_peer.peer_ni,
                         request->rq_client->cli_reply_portal,
                         local_id, request->rq_xid, 0, PTL_UNLINK,
                         PTL_INS_AFTER, &request->rq_reply_me_h);
        if (rc != PTL_OK) {
                CERROR("PtlMEAttach failed: %d\n", rc);
                LBUG();
                goto cleanup;
        }

        request->rq_type = PTL_RPC_TYPE_REQUEST;
        request->rq_reply_md.start = repbuf;
        request->rq_reply_md.length = request->rq_replen;
        request->rq_reply_md.threshold = 1;
        request->rq_reply_md.options = PTL_MD_OP_PUT;
        request->rq_reply_md.user_ptr = request;
        request->rq_reply_md.eventq = reply_in_eq;

        rc = PtlMDAttach(request->rq_reply_me_h, request->rq_reply_md,
                         PTL_UNLINK, &request->rq_reply_md_h);
        if (rc != PTL_OK) {
                CERROR("PtlMDAttach failed: %d\n", rc);
                LBUG();
                goto cleanup2;
        }

        CDEBUG(D_NET, "Setup reply buffer: %u bytes, xid %Lu, portal %u\n",
               request->rq_replen, request->rq_xid,
               request->rq_client->cli_reply_portal);

        return ptl_send_buf(request, request->rq_connection,
                            request->rq_client->cli_request_portal);

 cleanup2:
        PtlMEUnlink(request->rq_reply_me_h);
 cleanup:
        OBD_FREE(repbuf, request->rq_replen);
        up(&request->rq_client->cli_rpc_sem);

        return rc;
}
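
/*
 * (Re)link slot i of the service's incoming-request ring: attach a match
 * entry at the head of the request portal with all match bits ignored
 * (so it accepts any xid), and an auto-unlinking memory descriptor over
 * the slot's ring buffer.
 */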
void ptlrpc_link_svc_me(struct ptlrpc_service *service, int i)
{
        int rc;
        ptl_md_t dummy;
        ptl_handle_md_t md_h;

        /* Attach the leading ME on which we build the ring */
        rc = PtlMEAttach(service->srv_self.peer_ni, service->srv_req_portal,
                         local_id, 0, ~0, PTL_RETAIN, PTL_INS_BEFORE,
                         &(service->srv_me_h[i]));
        if (rc != PTL_OK) {
                CERROR("PtlMEAttach failed: %d\n", rc);
                LBUG();
        }

        if (service->srv_ref_count[i])
                LBUG();

        dummy.start = service->srv_buf[i];
        dummy.length = service->srv_buf_size;
        dummy.max_offset = service->srv_buf_size;
        dummy.threshold = PTL_MD_THRESH_INF;
        dummy.options = PTL_MD_OP_PUT | PTL_MD_AUTO_UNLINK;
        dummy.user_ptr = service;
        dummy.eventq = service->srv_eq_h;

        rc = PtlMDAttach(service->srv_me_h[i], dummy, PTL_UNLINK, &md_h);
        if (rc != PTL_OK) {
                CERROR("PtlMDAttach failed: %d\n", rc);
                LBUG();
        }
}
/* ptl_handled_rpc() should be called by the sleeping process once
 * it finishes processing an event.  This ensures the ref count is
 * decremented and that the rpc ring buffer cycles properly.
 */
int ptl_handled_rpc(struct ptlrpc_service *service, void *start)
{
        int index;

        spin_lock(&service->srv_lock);
        for (index = 0; index < service->srv_ring_length; index++)
                if (service->srv_buf[index] == start)
                        break;

        if (index == service->srv_ring_length)
                LBUG();

        CDEBUG(D_INFO, "MD index=%d Ref Count=%d\n", index,
               service->srv_ref_count[index]);
        service->srv_ref_count[index]--;

        if (service->srv_ref_count[index] < 0)
                LBUG();

        if (service->srv_ref_count[index] == 0 &&
            !ptl_is_valid_handle(&(service->srv_me_h[index]))) {
                CDEBUG(D_NET, "relinking %d\n", index);
                ptlrpc_link_svc_me(service, index);
        }

        spin_unlock(&service->srv_lock);
        return 0;
}