/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * Copyright (C) 2002 Cluster File Systems, Inc.
 *
 * This file is part of Lustre, http://www.lustre.org.
 *
 * Lustre is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * Lustre is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with Lustre; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/config.h>
#include <linux/module.h>
#include <linux/kernel.h>

#define DEBUG_SUBSYSTEM S_RPC

#include <linux/obd_support.h>
#include <linux/lustre_net.h>
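/* Event queues for the callbacks below: request send completion, reply
 * arrival, and the source and sink ends of bulk transfers. */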
static ptl_handle_eq_t sent_pkt_eq, rcvd_rep_eq, bulk_source_eq, bulk_sink_eq;
struct ptlrpc_request *ptlrpc_prep_req(struct ptlrpc_client *cl,
                                       int opcode, int namelen, char *name,
                                       int tgtlen, char *tgt)
{
        struct ptlrpc_request *request;
        int rc;

        OBD_ALLOC(request, sizeof(*request));
        if (!request) {
                CERROR("request allocation out of memory\n");
                return NULL;
        }

        memset(request, 0, sizeof(*request));
        request->rq_xid = cl->cli_xid++;

        rc = cl->cli_req_pack(name, namelen, tgt, tgtlen,
                              &request->rq_reqhdr, &request->rq_req,
                              &request->rq_reqlen, &request->rq_reqbuf);
        if (rc) {
                CERROR("cannot pack request %d\n", rc);
                OBD_FREE(request, sizeof(*request));
                return NULL;
        }

        request->rq_reqhdr->opc = opcode;
        request->rq_reqhdr->seqno = request->rq_xid;

        return request;
}
void ptlrpc_free_req(struct ptlrpc_request *request)
{
        OBD_FREE(request, sizeof(*request));
}
/* Abort this request and clean up any resources associated with it. */
int ptl_abort_rpc(struct ptlrpc_request *request)
{
        /* First remove the MD for the reply; in theory, this means
         * that we can tear down the buffer safely. */
        PtlMEUnlink(request->rq_reply_me_h);
        PtlMDUnlink(request->rq_reply_md_h);

        if (request->rq_bulklen != 0) {
                PtlMEUnlink(request->rq_bulk_me_h);
                PtlMDUnlink(request->rq_bulk_md_h);
        }

        return 0;
}
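/* Send a request, either by handing it to a local handler (cli_enqueue) or
 * over portals, then sleep until the reply buffer has been filled in. */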
int ptlrpc_queue_wait(struct ptlrpc_request *req, struct ptlrpc_client *cl)
{
        int rc;
        DECLARE_WAITQUEUE(wait, current);

        init_waitqueue_head(&req->rq_wait_for_rep);

        if (cl->cli_enqueue) {
                /* Local delivery: hand the request straight to the server. */
                rc = cl->cli_enqueue(req);
        } else {
                /* Remote delivery via portals. */
                req->rq_req_portal = cl->cli_request_portal;
                req->rq_reply_portal = cl->cli_reply_portal;
                rc = ptl_send_rpc(req, &cl->cli_server);
        }
        if (rc) {
                CERROR("error %d, opcode %d\n", rc, req->rq_reqhdr->opc);
                return rc;
        }

        CDEBUG(0, "-- sleeping\n");
        add_wait_queue(&req->rq_wait_for_rep, &wait);
        while (req->rq_repbuf == NULL) {
                set_current_state(TASK_INTERRUPTIBLE);

                /* if this process really wants to die, let it go */
                if (sigismember(&(current->pending.signal), SIGKILL) ||
                    sigismember(&(current->pending.signal), SIGINT))
                        break;

                schedule();
        }
        remove_wait_queue(&req->rq_wait_for_rep, &wait);
        set_current_state(TASK_RUNNING);
        CDEBUG(0, "-- done\n");

        if (req->rq_repbuf == NULL) {
                /* We broke out because of a signal.  Clean up the dangling
                 * reply buffers and bail out. */
                ptl_abort_rpc(req);
                return -EINTR;
        }

        rc = cl->cli_rep_unpack(req->rq_repbuf, req->rq_replen,
                                &req->rq_rephdr, &req->rq_rep);
        if (rc) {
                CERROR("unpack_rep failed: %d\n", rc);
                return rc;
        }
        CERROR("got rep %lld\n", req->rq_rephdr->seqno);

        if (req->rq_rephdr->status == 0)
                CDEBUG(0, "--> buf %p len %d status %d\n", req->rq_repbuf,
                       req->rq_replen, req->rq_rephdr->status);

        return 0;
}
/*
 * Free the packet when it has gone out.
 */
static int sent_packet_callback(ptl_event_t *ev, void *data)
{
        if (ev->type == PTL_EVENT_SENT) {
                OBD_FREE(ev->mem_desc.start, ev->mem_desc.length);
        } else {
                // XXX make sure we understand all events, including ACKs
                CERROR("Unknown event %d\n", ev->type);
        }

        return 0;
}
/*
 * Wake up the thread waiting for the reply once it comes in.
 */
static int rcvd_reply_callback(ptl_event_t *ev, void *data)
{
        struct ptlrpc_request *rpc = ev->mem_desc.user_ptr;

        if (ev->type == PTL_EVENT_PUT) {
                rpc->rq_repbuf = ev->mem_desc.start + ev->offset;
                wake_up_interruptible(&rpc->rq_wait_for_rep);
        } else {
                // XXX make sure we understand all events, including ACKs
                CERROR("Unknown event %d\n", ev->type);
        }

        return 0;
}
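/* Fires for each request that lands in the service's ring of receive
 * buffers: count a reference on the active buffer, retire its ME once the
 * buffer is nearly full, and wake the service thread. */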
static int server_request_callback(ptl_event_t *ev, void *data)
{
        struct ptlrpc_service *service = data;
        int rc;

        if (ev->rlength != ev->mlength)
                CERROR("Warning: Possibly truncated rpc (%d/%d)\n",
                       ev->mlength, ev->rlength);

        /* The ME is unlinked when there is less than 1024 bytes free
         * on its MD.  This ensures we are always able to handle the rpc,
         * although the 1024 value is a guess as to the size of a
         * large rpc (the known safe margin should be determined).
         *
         * NOTE: The portals API by default unlinks all MDs associated
         *       with an ME when it's unlinked.  For now, this behavior
         *       has been commented out of the portals library so the
         *       MD can be unlinked when its ref count drops to zero.
         *       A new MD and ME will then be created that use the same
         *       kmalloc()'ed memory and are inserted at the ring tail.
         */
        service->srv_ref_count[service->srv_md_active]++;

        if (ev->offset >= (service->srv_buf_size - 1024)) {
                CDEBUG(D_INODE, "Unlinking ME %d\n", service->srv_me_active);

                rc = PtlMEUnlink(service->srv_me_h[service->srv_me_active]);
                service->srv_me_h[service->srv_me_active] = 0;
                if (rc != PTL_OK)
                        CERROR("PtlMEUnlink failed - DROPPING soon: %d\n", rc);

                service->srv_me_active = NEXT_INDEX(service->srv_me_active,
                                                    service->srv_ring_length);

                if (service->srv_me_h[service->srv_me_active] == 0)
                        CERROR("All %d ring ME's are unlinked!\n",
                               service->srv_ring_length);
        }

        if (ev->type == PTL_EVENT_PUT) {
                wake_up(service->srv_wait_queue);
        } else {
                CERROR("Unexpected event type: %d\n", ev->type);
        }

        return 0;
}
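/* Runs on the node sending bulk data: SENT marks the data leaving this node,
 * the ACK from the sink marks completion of the transfer. */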
static int bulk_source_callback(ptl_event_t *ev, void *data)
{
        struct ptlrpc_request *rpc = ev->mem_desc.user_ptr;

        if (ev->type == PTL_EVENT_SENT) {
                CDEBUG(D_NET, "got SENT event\n");
        } else if (ev->type == PTL_EVENT_ACK) {
                CDEBUG(D_NET, "got ACK event\n");
                rpc->rq_bulkbuf = NULL;
                wake_up_interruptible(&rpc->rq_wait_for_bulk);
        } else {
                CERROR("Unexpected event type!\n");
        }

        return 0;
}
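/* Runs on the node that posted the bulk sink buffer, when the bulk PUT
 * arrives. */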
static int bulk_sink_callback(ptl_event_t *ev, void *data)
{
        struct ptlrpc_request *rpc = ev->mem_desc.user_ptr;

        if (ev->type == PTL_EVENT_PUT) {
                if (rpc->rq_bulkbuf != ev->mem_desc.start + ev->offset)
                        CERROR("bulkbuf != mem_desc -- why?\n");
                //wake_up_interruptible(&rpc->rq_wait_for_bulk);
        } else {
                CERROR("Unexpected event type!\n");
        }

        return 0;
}
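/* Bind an MD over the outgoing buffer selected by rq_type (request, reply,
 * or bulk) and PUT it to the given portal on the peer. */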
int ptl_send_buf(struct ptlrpc_request *request, struct lustre_peer *peer,
                 int portal)
{
        int rc;
        ptl_process_id_t remote_id;
        ptl_handle_md_t md_h;
        ptl_ack_req_t ack;      /* ACK requested only for bulk transfers */

        switch (request->rq_type) {
        case PTLRPC_BULK:       /* assumed rq_type constant */
                request->rq_req_md.start = request->rq_bulkbuf;
                request->rq_req_md.length = request->rq_bulklen;
                request->rq_req_md.eventq = bulk_source_eq;
                request->rq_req_md.threshold = 2; /* SENT and ACK events */
                ack = PTL_ACK_REQ;
                break;
        case PTLRPC_REQUEST:
                request->rq_req_md.start = request->rq_reqbuf;
                request->rq_req_md.length = request->rq_reqlen;
                request->rq_req_md.eventq = sent_pkt_eq;
                request->rq_req_md.threshold = 1;
                ack = PTL_NOACK_REQ;
                break;
        case PTLRPC_REPLY:      /* assumed rq_type constant */
                request->rq_req_md.start = request->rq_repbuf;
                request->rq_req_md.length = request->rq_replen;
                request->rq_req_md.eventq = sent_pkt_eq;
                request->rq_req_md.threshold = 1;
                ack = PTL_NOACK_REQ;
                break;
        default:
                CERROR("unknown request type %d\n", request->rq_type);
                return -EINVAL;
        }
        request->rq_req_md.options = PTL_MD_OP_PUT;
        request->rq_req_md.user_ptr = request;

        rc = PtlMDBind(peer->peer_ni, request->rq_req_md, &md_h);
        if (rc != PTL_OK) {
                CERROR("PtlMDBind failed: %d\n", rc);
                return rc;
        }

        remote_id.addr_kind = PTL_ADDR_NID;
        remote_id.nid = peer->peer_nid;

        CERROR("Sending %d bytes to portal %d, xid %d\n",
               request->rq_req_md.length, portal, request->rq_xid);

        rc = PtlPut(md_h, ack, remote_id, portal, 0, request->rq_xid, 0, 0);
        if (rc != PTL_OK) {
                CERROR("PtlPut(%d, %d, %d) failed: %d\n", remote_id.nid,
                       portal, request->rq_xid, rc);
                /* FIXME: tear down md */
        }

        return rc;
}
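/* Post the reply buffer (and the bulk sink buffer, if any) for a request,
 * then hand the request itself to ptl_send_buf(). */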
int ptl_send_rpc(struct ptlrpc_request *request, struct lustre_peer *peer)
{
        ptl_process_id_t local_id;
        char *repbuf;
        int rc;

        if (request->rq_replen == 0) {
                CERROR("request->rq_replen is 0!\n");
                return -EINVAL;
        }

        /* request->rq_repbuf is set only when the reply comes in, in
         * rcvd_reply_callback() */
        OBD_ALLOC(repbuf, request->rq_replen);
        if (!repbuf)
                return -ENOMEM;

        local_id.addr_kind = PTL_ADDR_GID;
        local_id.gid = PTL_ID_ANY;
        local_id.rid = PTL_ID_ANY;

        CERROR("sending req %d\n", request->rq_xid);
        rc = PtlMEAttach(peer->peer_ni, request->rq_reply_portal, local_id,
                         request->rq_xid, 0, PTL_UNLINK,
                         &request->rq_reply_me_h);
        if (rc != PTL_OK) {
                CERROR("PtlMEAttach failed: %d\n", rc);
                goto cleanup_repbuf;
        }

        request->rq_type = PTLRPC_REQUEST;
        request->rq_reply_md.start = repbuf;
        request->rq_reply_md.length = request->rq_replen;
        request->rq_reply_md.threshold = 1;
        request->rq_reply_md.options = PTL_MD_OP_PUT;
        request->rq_reply_md.user_ptr = request;
        request->rq_reply_md.eventq = rcvd_rep_eq;

        rc = PtlMDAttach(request->rq_reply_me_h, request->rq_reply_md,
                         PTL_UNLINK, &request->rq_reply_md_h);
        if (rc != PTL_OK) {
                CERROR("PtlMDAttach failed: %d\n", rc);
                goto cleanup_reply_me;
        }

        if (request->rq_bulklen != 0) {
                rc = PtlMEAttach(peer->peer_ni, request->rq_bulk_portal,
                                 local_id, request->rq_xid, 0, PTL_UNLINK,
                                 &request->rq_bulk_me_h);
                if (rc != PTL_OK) {
                        CERROR("PtlMEAttach failed: %d\n", rc);
                        goto cleanup_reply_md;
                }

                request->rq_bulk_md.start = request->rq_bulkbuf;
                request->rq_bulk_md.length = request->rq_bulklen;
                request->rq_bulk_md.threshold = 1;
                request->rq_bulk_md.options = PTL_MD_OP_PUT;
                request->rq_bulk_md.user_ptr = request;
                request->rq_bulk_md.eventq = bulk_sink_eq;

                rc = PtlMDAttach(request->rq_bulk_me_h,
                                 request->rq_bulk_md, PTL_UNLINK,
                                 &request->rq_bulk_md_h);
                if (rc != PTL_OK) {
                        CERROR("PtlMDAttach failed: %d\n", rc);
                        goto cleanup_bulk_me;
                }
        }

        return ptl_send_buf(request, peer, request->rq_req_portal);

        /* Unwind in reverse order of setup on failure. */
 cleanup_bulk_me:
        PtlMEUnlink(request->rq_bulk_me_h);
 cleanup_reply_md:
        PtlMDUnlink(request->rq_reply_md_h);
 cleanup_reply_me:
        PtlMEUnlink(request->rq_reply_me_h);
 cleanup_repbuf:
        OBD_FREE(repbuf, request->rq_replen);
        return rc;
}
/* ptl_received_rpc() should be called by the sleeping process once
 * it finishes processing an event.  This ensures the ref count is
 * decremented and that the rpc ring buffer cycles properly.
 */
int ptl_received_rpc(struct ptlrpc_service *service)
{
        int rc, index;

        index = service->srv_md_active;
        CDEBUG(D_INFO, "MD index=%d Ref Count=%d\n", index,
               service->srv_ref_count[index]);
        service->srv_ref_count[index]--;

        if ((service->srv_ref_count[index] <= 0) &&
            (service->srv_me_h[index] == 0)) {

                /* Replace the unlinked ME and MD */
                rc = PtlMEInsert(service->srv_me_h[service->srv_me_tail],
                                 service->srv_id, 0, ~0, PTL_RETAIN,
                                 PTL_INS_AFTER, &(service->srv_me_h[index]));
                CDEBUG(D_INFO, "Inserting new ME and MD in ring, rc %d\n", rc);
                service->srv_me_tail = index;
                service->srv_ref_count[index] = 0;
                if (rc != PTL_OK) {
                        CERROR("PtlMEInsert failed: %d\n", rc);
                        return rc;
                }

                service->srv_md[index].start = service->srv_buf[index];
                service->srv_md[index].length = service->srv_buf_size;
                service->srv_md[index].threshold = PTL_MD_THRESH_INF;
                service->srv_md[index].options = PTL_MD_OP_PUT;
                service->srv_md[index].user_ptr = service;
                service->srv_md[index].eventq = service->srv_eq_h;

                rc = PtlMDAttach(service->srv_me_h[index],
                                 service->srv_md[index],
                                 PTL_RETAIN, &(service->srv_md_h[index]));
                CDEBUG(D_INFO, "Attach MD in ring, rc %d\n", rc);
                if (rc != PTL_OK) {
                        CERROR("PtlMDAttach failed: %d\n", rc);
                        return rc;
                }

                service->srv_md_active =
                        NEXT_INDEX(index, service->srv_ring_length);
        }

        return 0;
}
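/* Build the ring of match entries and memory descriptors on which this
 * service receives incoming requests. */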
int rpc_register_service(struct ptlrpc_service *service, char *uuid)
{
        struct lustre_peer peer;
        int rc, i;

        rc = kportal_uuid_to_peer(uuid, &peer);
        if (rc != 0) {
                CERROR("Invalid uuid \"%s\"\n", uuid);
                return -EINVAL;
        }

        service->srv_ring_length = RPC_RING_LENGTH;
        service->srv_me_active = 0;
        service->srv_md_active = 0;

        service->srv_id.addr_kind = PTL_ADDR_GID;
        service->srv_id.gid = PTL_ID_ANY;
        service->srv_id.rid = PTL_ID_ANY;

        rc = PtlEQAlloc(peer.peer_ni, 128, server_request_callback,
                        service, &(service->srv_eq_h));
        if (rc != PTL_OK) {
                CERROR("PtlEQAlloc failed: %d\n", rc);
                return rc;
        }

        /* Attach the leading ME on which we build the ring */
        rc = PtlMEAttach(peer.peer_ni, service->srv_portal,
                         service->srv_id, 0, ~0, PTL_RETAIN,
                         &(service->srv_me_h[0]));
        if (rc != PTL_OK) {
                CERROR("PtlMEAttach failed: %d\n", rc);
                return rc;
        }

        for (i = 0; i < service->srv_ring_length; i++) {
                OBD_ALLOC(service->srv_buf[i], service->srv_buf_size);
                if (service->srv_buf[i] == NULL) {
                        CERROR("no memory\n");
                        return -ENOMEM;
                }

                /* Insert additional ME's to the ring */
                if (i > 0) {
                        rc = PtlMEInsert(service->srv_me_h[i - 1],
                                         service->srv_id, 0, ~0, PTL_RETAIN,
                                         PTL_INS_AFTER,
                                         &(service->srv_me_h[i]));
                        service->srv_me_tail = i;
                        if (rc != PTL_OK) {
                                CERROR("PtlMEInsert failed: %d\n", rc);
                                return rc;
                        }
                }

                service->srv_ref_count[i] = 0;
                service->srv_md[i].start = service->srv_buf[i];
                service->srv_md[i].length = service->srv_buf_size;
                service->srv_md[i].threshold = PTL_MD_THRESH_INF;
                service->srv_md[i].options = PTL_MD_OP_PUT;
                service->srv_md[i].user_ptr = service;
                service->srv_md[i].eventq = service->srv_eq_h;

                rc = PtlMDAttach(service->srv_me_h[i], service->srv_md[i],
                                 PTL_RETAIN, &(service->srv_md_h[i]));
                if (rc != PTL_OK) {
                        CERROR("PtlMDAttach failed: %d\n", rc);
                        return rc;
                }
        }

        return 0;
}
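/* Tear down the service's receive ring and free its event queue. */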
int rpc_unregister_service(struct ptlrpc_service *service)
{
        int rc, i;

        for (i = 0; i < service->srv_ring_length; i++) {
                rc = PtlMDUnlink(service->srv_md_h[i]);
                if (rc != PTL_OK)
                        CERROR("PtlMDUnlink failed: %d\n", rc);
                rc = PtlMEUnlink(service->srv_me_h[i]);
                if (rc != PTL_OK)
                        CERROR("PtlMEUnlink failed: %d\n", rc);
                OBD_FREE(service->srv_buf[i], service->srv_buf_size);
        }

        rc = PtlEQFree(service->srv_eq_h);
        if (rc != PTL_OK)
                CERROR("PtlEQFree failed: %d\n", rc);

        return 0;
}
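/* Look up the network interface exported by the NAL module and allocate the
 * event queues used by the client-side callbacks above. */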
static int req_init_portals(void)
{
        int rc;
        const ptl_handle_ni_t *nip;
        ptl_handle_ni_t ni;

        nip = inter_module_get_request(LUSTRE_NAL "_ni", LUSTRE_NAL);
        if (nip == NULL) {
                CERROR("get_ni failed: is the NAL module loaded?\n");
                return -EIO;
        }
        ni = *nip;

        rc = PtlEQAlloc(ni, 128, sent_packet_callback, NULL, &sent_pkt_eq);
        if (rc != PTL_OK)
                CERROR("PtlEQAlloc failed: %d\n", rc);

        rc = PtlEQAlloc(ni, 128, rcvd_reply_callback, NULL, &rcvd_rep_eq);
        if (rc != PTL_OK)
                CERROR("PtlEQAlloc failed: %d\n", rc);

        rc = PtlEQAlloc(ni, 128, bulk_source_callback, NULL, &bulk_source_eq);
        if (rc != PTL_OK)
                CERROR("PtlEQAlloc failed: %d\n", rc);

        rc = PtlEQAlloc(ni, 128, bulk_sink_callback, NULL, &bulk_sink_eq);
        if (rc != PTL_OK)
                CERROR("PtlEQAlloc failed: %d\n", rc);

        return rc;
}
static int __init ptlrpc_init(void)
{
        return req_init_portals();
}

static void __exit ptlrpc_exit(void)
{
        PtlEQFree(sent_pkt_eq);
        PtlEQFree(rcvd_rep_eq);
        PtlEQFree(bulk_source_eq);
        PtlEQFree(bulk_sink_eq);

        inter_module_put(LUSTRE_NAL "_ni");
}
MODULE_AUTHOR("Peter J. Braam <braam@clusterfs.com>");
MODULE_DESCRIPTION("Lustre Request Processor v1.0");
MODULE_LICENSE("GPL");

module_init(ptlrpc_init);
module_exit(ptlrpc_exit);