/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * Copyright (C) 2002 Cluster File Systems, Inc.
 *
 * This file is part of Lustre, http://www.lustre.org.
 *
 * Lustre is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * Lustre is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with Lustre; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/config.h>
#include <linux/module.h>
#include <linux/kernel.h>

#define DEBUG_SUBSYSTEM S_RPC

#include <linux/obd_support.h>
#include <linux/lustre_net.h>

static ptl_handle_eq_t sent_pkt_eq, rcvd_rep_eq,
        bulk_source_eq, bulk_sink_eq;
struct ptlrpc_request *ptlrpc_prep_req(struct ptlrpc_client *cl,
                                       int opcode, int namelen, char *name,
                                       int tgtlen, char *tgt)
{
        struct ptlrpc_request *request;
        int rc;

        OBD_ALLOC(request, sizeof(*request));
        if (!request) {
                CERROR("request allocation out of memory\n");
                return NULL;
        }

        memset(request, 0, sizeof(*request));
        request->rq_xid = cl->cli_xid++;

        rc = cl->cli_req_pack(name, namelen, tgt, tgtlen,
                              &request->rq_reqhdr, &request->rq_req,
                              &request->rq_reqlen, &request->rq_reqbuf);
        if (rc) {
                CERROR("cannot pack request %d\n", rc);
                /* don't leak the request struct on pack failure */
                OBD_FREE(request, sizeof(*request));
                return NULL;
        }

        request->rq_reqhdr->opc = opcode;
        request->rq_reqhdr->seqno = request->rq_xid;

        return request;
}
void ptlrpc_free_req(struct ptlrpc_request *request)
{
        OBD_FREE(request, sizeof(*request));
}
int ptlrpc_queue_wait(struct ptlrpc_request *req, struct ptlrpc_client *cl)
{
        int rc;
        DECLARE_WAITQUEUE(wait, current);

        init_waitqueue_head(&req->rq_wait_for_rep);

        if (cl->cli_enqueue) {
                /* Local delivery through the client's enqueue method. */
                rc = cl->cli_enqueue(req);
        } else {
                /* Remote delivery via portals. */
                req->rq_req_portal = cl->cli_request_portal;
                req->rq_reply_portal = cl->cli_reply_portal;
                rc = ptl_send_rpc(req, &cl->cli_server);
        }
        if (rc) {
                CERROR("error %d, opcode %d\n", rc, req->rq_reqhdr->opc);
                return rc;
        }

        CDEBUG(0, "-- sleeping\n");
        add_wait_queue(&req->rq_wait_for_rep, &wait);
        while (req->rq_repbuf == NULL) {
                set_current_state(TASK_INTERRUPTIBLE);

                /* if this process really wants to die, let it go */
                if (sigismember(&(current->pending.signal), SIGKILL) ||
                    sigismember(&(current->pending.signal), SIGINT))
                        break;

                schedule();
        }
        remove_wait_queue(&req->rq_wait_for_rep, &wait);
        set_current_state(TASK_RUNNING);
        CDEBUG(0, "-- done\n");

        if (req->rq_repbuf == NULL) {
                /* We broke out because of a signal */
                return -EINTR;
        }

        rc = cl->cli_rep_unpack(req->rq_repbuf, req->rq_replen,
                                &req->rq_rephdr, &req->rq_rep);
        if (rc) {
                CERROR("unpack_rep failed: %d\n", rc);
                return rc;
        }

        CDEBUG(D_NET, "got rep %lld\n", req->rq_rephdr->seqno);
        if (req->rq_rephdr->status == 0)
                CDEBUG(0, "--> buf %p len %d status %d\n",
                       req->rq_repbuf, req->rq_replen,
                       req->rq_rephdr->status);

        return 0;
}
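/*
 * Typical client usage (an illustrative sketch, not code from this file;
 * the opcode and name/tgt arguments are placeholders, and 'cl' is any
 * ptlrpc_client whose pack/unpack methods and server peer have been set
 * up elsewhere):
 *
 *      struct ptlrpc_request *req;
 *      int rc;
 *
 *      req = ptlrpc_prep_req(cl, opcode, namelen, name, tgtlen, tgt);
 *      if (!req)
 *              return -ENOMEM;
 *      rc = ptlrpc_queue_wait(req, cl);    // send and sleep for the reply
 *      if (rc == 0 && req->rq_rephdr->status == 0)
 *              ... use req->rq_rep ...
 *      ptlrpc_free_req(req);
 */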
/*
 * Free the packet when it has gone out
 */
static int sent_packet_callback(ptl_event_t *ev, void *data)
{
        if (ev->type == PTL_EVENT_SENT) {
                OBD_FREE(ev->mem_desc.start, ev->mem_desc.length);
        } else {
                // XXX make sure we understand all events, including ACKs
                CERROR("Unknown event %d\n", ev->type);
        }

        return 0;
}
/*
 * Wake up the thread waiting for the reply once it comes in.
 */
static int rcvd_reply_callback(ptl_event_t *ev, void *data)
{
        struct ptlrpc_request *rpc = ev->mem_desc.user_ptr;

        if (ev->type == PTL_EVENT_PUT) {
                rpc->rq_repbuf = ev->mem_desc.start + ev->offset;
                wake_up_interruptible(&rpc->rq_wait_for_rep);
        } else {
                // XXX make sure we understand all events, including ACKs
                CERROR("Unknown event %d\n", ev->type);
        }

        return 0;
}
static int server_request_callback(ptl_event_t *ev, void *data)
{
        struct ptlrpc_service *service = data;
        int rc;

        if (ev->rlength != ev->mlength)
                CERROR("Warning: possibly truncated rpc (%d/%d)\n",
                       ev->mlength, ev->rlength);

        /* The ME is unlinked when there are less than 1024 bytes free
         * on its MD.  This ensures we are always able to handle the rpc,
         * although the 1024 value is a guess as to the size of a
         * large rpc (the known safe margin should be determined).
         *
         * NOTE: The portals API by default unlinks all MDs associated
         *       with an ME when the ME is unlinked.  For now, this
         *       behavior has been commented out of the portals library
         *       so the MD can be unlinked when its ref count drops to
         *       zero.  A new MD and ME will then be created that use the
         *       same kmalloc()'ed memory and get inserted at the ring
         *       tail. */
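        /* Worked example (editorial; assumes NEXT_INDEX(i, len) advances
         * cyclically, e.g. (i + 1) % len): with a ring of length 4 and
         * srv_me_active == 1, a request landing past the watermark
         * unlinks ME 1 and makes ME 2 active.  Once ptl_received_rpc()
         * has dropped slot 1's ref count to zero, a fresh ME/MD pair
         * over the same buffer is inserted after the current tail, and
         * slot 1 becomes the new tail. */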
        service->srv_ref_count[service->srv_md_active]++;

        if (ev->offset >= (service->srv_buf_size - 1024)) {
                CDEBUG(D_INODE, "Unlinking ME %d\n", service->srv_me_active);

                rc = PtlMEUnlink(service->srv_me_h[service->srv_me_active]);
                service->srv_me_h[service->srv_me_active] = 0;
                if (rc != PTL_OK) {
                        CERROR("PtlMEUnlink failed - DROPPING soon: %d\n", rc);
                        return rc;
                }

                service->srv_me_active = NEXT_INDEX(service->srv_me_active,
                                                    service->srv_ring_length);

                if (service->srv_me_h[service->srv_me_active] == 0)
                        CERROR("All %d ring MEs are unlinked!\n",
                               service->srv_ring_length);
        }

        if (ev->type == PTL_EVENT_PUT)
                wake_up(service->srv_wait_queue);
        else
                CERROR("Unexpected event type: %d\n", ev->type);

        return 0;
}
static int bulk_source_callback(ptl_event_t *ev, void *data)
{
        struct ptlrpc_request *rpc = ev->mem_desc.user_ptr;

        if (ev->type == PTL_EVENT_SENT) {
                CDEBUG(D_NET, "got SENT event\n");
        } else if (ev->type == PTL_EVENT_ACK) {
                /* The sink has received the bulk data; wake the waiter. */
                CDEBUG(D_NET, "got ACK event\n");
                wake_up_interruptible(&rpc->rq_wait_for_bulk);
        } else {
                CERROR("Unexpected event type!\n");
        }

        return 0;
}
static int bulk_sink_callback(ptl_event_t *ev, void *data)
{
        struct ptlrpc_request *rpc = ev->mem_desc.user_ptr;

        if (ev->type == PTL_EVENT_PUT) {
                if (rpc->rq_bulkbuf != ev->mem_desc.start + ev->offset)
                        CERROR("bulkbuf != mem_desc -- why?\n");
                wake_up_interruptible(&rpc->rq_wait_for_bulk);
        } else {
                CERROR("Unexpected event type!\n");
        }

        return 0;
}
int ptl_send_buf(struct ptlrpc_request *request, struct lustre_peer *peer,
                 int portal, int is_request)
{
        int rc;
        ptl_process_id_t remote_id;
        ptl_handle_md_t md_h;

        /* FIXME: This is bad. */
        if (request->rq_bulklen) {
                request->rq_req_md.start = request->rq_bulkbuf;
                request->rq_req_md.length = request->rq_bulklen;
                request->rq_req_md.eventq = bulk_source_eq;
        } else if (is_request) {
                request->rq_req_md.start = request->rq_reqbuf;
                request->rq_req_md.length = request->rq_reqlen;
                request->rq_req_md.eventq = sent_pkt_eq;
        } else {
                request->rq_req_md.start = request->rq_repbuf;
                request->rq_req_md.length = request->rq_replen;
                request->rq_req_md.eventq = sent_pkt_eq;
        }
        request->rq_req_md.threshold = 1;
        request->rq_req_md.options = PTL_MD_OP_PUT;
        request->rq_req_md.user_ptr = request;

        rc = PtlMDBind(peer->peer_ni, request->rq_req_md, &md_h);
        if (rc != PTL_OK) {
                CERROR("PtlMDBind failed: %d\n", rc);
                return rc;
        }

        remote_id.addr_kind = PTL_ADDR_NID;
        remote_id.nid = peer->peer_nid;

        /* Request an ACK only for bulk transfers, so bulk_source_callback
         * can wake the waiter once the sink has the data. */
        if (request->rq_bulklen) {
                rc = PtlPut(md_h, PTL_ACK_REQ, remote_id, portal, 0,
                            request->rq_xid, 0, 0);
        } else {
                rc = PtlPut(md_h, PTL_NOACK_REQ, remote_id, portal, 0,
                            request->rq_xid, 0, 0);
        }
        if (rc != PTL_OK) {
                CERROR("PtlPut(%d, %d, %d) failed: %d\n", remote_id.nid,
                       portal, request->rq_xid, rc);
                /* FIXME: tear down md */
        }

        return rc;
}
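/*
 * A note on matching (editorial, inferred from the calls in this file):
 * the request PtlPut() above carries rq_xid as its match bits, but the
 * service ring MEs are attached with match bits 0 and ignore mask ~0
 * (see rpc_register_service()), so any request matches the ring.  The
 * xid only becomes significant on the return path: ptl_send_rpc()
 * attaches a reply ME with match bits == rq_xid and ignore mask 0, so
 * the server's reply PtlPut() lands only in this request's reply buffer.
 */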
int ptl_send_rpc(struct ptlrpc_request *request, struct lustre_peer *peer)
{
        int rc;
        char *repbuf;
        ptl_handle_me_t me_h, bulk_me_h;
        ptl_process_id_t local_id;

        if (request->rq_replen == 0) {
                CERROR("request->rq_replen is 0!\n");
                return -EINVAL;
        }

        /* request->rq_repbuf is set only when the reply comes in, in
         * rcvd_reply_callback() */
        OBD_ALLOC(repbuf, request->rq_replen);
        if (!repbuf)
                return -ENOMEM;

        local_id.addr_kind = PTL_ADDR_GID;
        local_id.gid = PTL_ID_ANY;
        local_id.rid = PTL_ID_ANY;

        CDEBUG(D_NET, "sending req %d\n", request->rq_xid);

        /* Post the reply buffer under this request's xid before the
         * request itself goes out. */
        rc = PtlMEAttach(peer->peer_ni, request->rq_reply_portal, local_id,
                         request->rq_xid, 0, PTL_UNLINK, &me_h);
        if (rc != PTL_OK) {
                CERROR("PtlMEAttach failed: %d\n", rc);
                goto cleanup_repbuf;
        }

        request->rq_reply_md.start = repbuf;
        request->rq_reply_md.length = request->rq_replen;
        request->rq_reply_md.threshold = 1;
        request->rq_reply_md.options = PTL_MD_OP_PUT;
        request->rq_reply_md.user_ptr = request;
        request->rq_reply_md.eventq = rcvd_rep_eq;

        rc = PtlMDAttach(me_h, request->rq_reply_md, PTL_UNLINK,
                         &request->rq_reply_md_h);
        if (rc != PTL_OK) {
                CERROR("PtlMDAttach failed: %d\n", rc);
                goto cleanup_repbuf;
        }

        if (request->rq_bulklen != 0) {
                /* Post the bulk sink buffer under the same xid. */
                rc = PtlMEAttach(peer->peer_ni, request->rq_bulk_portal,
                                 local_id, request->rq_xid, 0, PTL_UNLINK,
                                 &bulk_me_h);
                if (rc != PTL_OK) {
                        CERROR("PtlMEAttach failed: %d\n", rc);
                        goto cleanup_reply_md;
                }

                request->rq_bulk_md.start = request->rq_bulkbuf;
                request->rq_bulk_md.length = request->rq_bulklen;
                request->rq_bulk_md.threshold = 1;
                request->rq_bulk_md.options = PTL_MD_OP_PUT;
                request->rq_bulk_md.user_ptr = request;
                request->rq_bulk_md.eventq = bulk_sink_eq;

                rc = PtlMDAttach(bulk_me_h, request->rq_bulk_md, PTL_UNLINK,
                                 &request->rq_bulk_md_h);
                if (rc != PTL_OK) {
                        CERROR("PtlMDAttach failed: %d\n", rc);
                        goto cleanup_bulk_me;
                }
        }

        return ptl_send_buf(request, peer, request->rq_req_portal, 1);

 cleanup_bulk_me:
        PtlMEUnlink(bulk_me_h);
 cleanup_reply_md:
        PtlMDUnlink(request->rq_reply_md_h);
 cleanup_repbuf:
        OBD_FREE(repbuf, request->rq_replen);
        return rc;
}
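/*
 * Bulk path sketch (editorial, summarizing the code above): when
 * rq_bulklen is nonzero, the client also posts rq_bulkbuf on
 * rq_bulk_portal under the same xid before the request goes out.  The
 * server can then PtlPut() the bulk data straight into that buffer;
 * bulk_sink_callback() sees the PUT event and wakes any thread sleeping
 * on rq_wait_for_bulk.
 */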
/* ptl_received_rpc() should be called by the sleeping process once
 * it finishes processing an event.  This ensures the ref count is
 * decremented and that the rpc ring buffer cycles properly.
 */
int ptl_received_rpc(struct ptlrpc_service *service)
{
        int rc, index;

        index = service->srv_md_active;
        CDEBUG(D_INFO, "MD index=%d Ref Count=%d\n", index,
               service->srv_ref_count[index]);
        service->srv_ref_count[index]--;

        if ((service->srv_ref_count[index] <= 0) &&
            (service->srv_me_h[index] == 0)) {
                /* Replace the unlinked ME and MD */
                rc = PtlMEInsert(service->srv_me_h[service->srv_me_tail],
                                 service->srv_id, 0, ~0, PTL_RETAIN,
                                 PTL_INS_AFTER, &(service->srv_me_h[index]));
                CDEBUG(D_INFO, "Inserting new ME and MD in ring, rc %d\n", rc);
                service->srv_me_tail = index;
                service->srv_ref_count[index] = 0;

                if (rc != PTL_OK) {
                        CERROR("PtlMEInsert failed: %d\n", rc);
                        return rc;
                }

                service->srv_md[index].start = service->srv_buf[index];
                service->srv_md[index].length = service->srv_buf_size;
                service->srv_md[index].threshold = PTL_MD_THRESH_INF;
                service->srv_md[index].options = PTL_MD_OP_PUT;
                service->srv_md[index].user_ptr = service;
                service->srv_md[index].eventq = service->srv_eq_h;

                rc = PtlMDAttach(service->srv_me_h[index],
                                 service->srv_md[index],
                                 PTL_RETAIN, &(service->srv_md_h[index]));
                CDEBUG(D_INFO, "Attach MD in ring, rc %d\n", rc);
                if (rc != PTL_OK) {
                        CERROR("PtlMDAttach failed: %d\n", rc);
                        return rc;
                }

                service->srv_md_active =
                        NEXT_INDEX(index, service->srv_ring_length);
        }

        return 0;
}
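/*
 * Server-side usage sketch (illustrative only; the handler thread itself
 * lives elsewhere): a service thread sleeps on service->srv_wait_queue,
 * is woken by server_request_callback() when a request PUT arrives,
 * processes the request buffer, and then calls ptl_received_rpc(service)
 * so the slot's ref count drops and an unlinked ring slot is recycled:
 *
 *      wait_event(*service->srv_wait_queue, <request available>);
 *      ... handle the request ...
 *      ptl_received_rpc(service);
 */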
int rpc_register_service(struct ptlrpc_service *service, char *uuid)
{
        struct lustre_peer peer;
        int rc, i;

        rc = kportal_uuid_to_peer(uuid, &peer);
        if (rc != 0) {
                CERROR("Invalid uuid \"%s\"\n", uuid);
                return -EINVAL;
        }

        service->srv_ring_length = RPC_RING_LENGTH;
        service->srv_me_active = 0;
        service->srv_md_active = 0;

        service->srv_id.addr_kind = PTL_ADDR_GID;
        service->srv_id.gid = PTL_ID_ANY;
        service->srv_id.rid = PTL_ID_ANY;

        rc = PtlEQAlloc(peer.peer_ni, 128, server_request_callback,
                        service, &(service->srv_eq_h));
        if (rc != PTL_OK) {
                CERROR("PtlEQAlloc failed: %d\n", rc);
                return rc;
        }

        /* Attach the leading ME on which we build the ring */
        rc = PtlMEAttach(peer.peer_ni, service->srv_portal,
                         service->srv_id, 0, ~0, PTL_RETAIN,
                         &(service->srv_me_h[0]));
        if (rc != PTL_OK) {
                CERROR("PtlMEAttach failed: %d\n", rc);
                return rc;
        }

        for (i = 0; i < service->srv_ring_length; i++) {
                OBD_ALLOC(service->srv_buf[i], service->srv_buf_size);
                if (service->srv_buf[i] == NULL) {
                        CERROR("no memory\n");
                        return -ENOMEM;
                }

                /* Insert the additional MEs into the ring */
                if (i > 0) {
                        rc = PtlMEInsert(service->srv_me_h[i - 1],
                                         service->srv_id, 0, ~0, PTL_RETAIN,
                                         PTL_INS_AFTER,
                                         &(service->srv_me_h[i]));
                        service->srv_me_tail = i;

                        if (rc != PTL_OK) {
                                CERROR("PtlMEInsert failed: %d\n", rc);
                                return rc;
                        }
                }

                service->srv_ref_count[i] = 0;
                service->srv_md[i].start = service->srv_buf[i];
                service->srv_md[i].length = service->srv_buf_size;
                service->srv_md[i].threshold = PTL_MD_THRESH_INF;
                service->srv_md[i].options = PTL_MD_OP_PUT;
                service->srv_md[i].user_ptr = service;
                service->srv_md[i].eventq = service->srv_eq_h;

                rc = PtlMDAttach(service->srv_me_h[i], service->srv_md[i],
                                 PTL_RETAIN, &(service->srv_md_h[i]));
                if (rc != PTL_OK) {
                        CERROR("PtlMDAttach failed: %d\n", rc);
                        return rc;
                }
        }

        return 0;
}
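/*
 * Registration sketch (illustrative; the exact field setup is the
 * caller's, not taken from this file): the service geometry must be
 * filled in before registering, and torn down afterwards:
 *
 *      service->srv_buf_size = <size of each ring buffer>;
 *      service->srv_portal = <well-known request portal>;
 *      service->srv_wait_queue = <handler thread's wait queue>;
 *      rc = rpc_register_service(service, server_uuid);
 *      ...
 *      rpc_unregister_service(service);
 */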
int rpc_unregister_service(struct ptlrpc_service *service)
{
        int rc, i;

        for (i = 0; i < service->srv_ring_length; i++) {
                rc = PtlMDUnlink(service->srv_md_h[i]);
                if (rc)
                        CERROR("PtlMDUnlink failed: %d\n", rc);

                rc = PtlMEUnlink(service->srv_me_h[i]);
                if (rc)
                        CERROR("PtlMEUnlink failed: %d\n", rc);

                OBD_FREE(service->srv_buf[i], service->srv_buf_size);
        }

        rc = PtlEQFree(service->srv_eq_h);
        if (rc)
                CERROR("PtlEQFree failed: %d\n", rc);

        return 0;
}
static int req_init_portals(void)
{
        int rc;
        const ptl_handle_ni_t *nip;
        ptl_handle_ni_t ni;

        nip = inter_module_get_request(LUSTRE_NAL "_ni", LUSTRE_NAL);
        if (nip == NULL) {
                CERROR("get_ni failed: is the NAL module loaded?\n");
                return -EIO;
        }
        ni = *nip;

        /* XXX: EQ allocation failures below are only reported; earlier
         * allocations are not unwound. */
        rc = PtlEQAlloc(ni, 128, sent_packet_callback, NULL, &sent_pkt_eq);
        if (rc != PTL_OK)
                CERROR("PtlEQAlloc failed: %d\n", rc);

        rc = PtlEQAlloc(ni, 128, rcvd_reply_callback, NULL, &rcvd_rep_eq);
        if (rc != PTL_OK)
                CERROR("PtlEQAlloc failed: %d\n", rc);

        rc = PtlEQAlloc(ni, 128, bulk_source_callback, NULL, &bulk_source_eq);
        if (rc != PTL_OK)
                CERROR("PtlEQAlloc failed: %d\n", rc);

        rc = PtlEQAlloc(ni, 128, bulk_sink_callback, NULL, &bulk_sink_eq);
        if (rc != PTL_OK)
                CERROR("PtlEQAlloc failed: %d\n", rc);

        return rc;
}
static int __init ptlrpc_init(void)
{
        return req_init_portals();
}

static void __exit ptlrpc_exit(void)
{
        PtlEQFree(sent_pkt_eq);
        PtlEQFree(rcvd_rep_eq);
        PtlEQFree(bulk_source_eq);
        PtlEQFree(bulk_sink_eq);

        inter_module_put(LUSTRE_NAL "_ni");
}

MODULE_AUTHOR("Peter J. Braam <braam@clusterfs.com>");
MODULE_DESCRIPTION("Lustre Request Processor v1.0");
MODULE_LICENSE("GPL");

module_init(ptlrpc_init);
module_exit(ptlrpc_exit);