Whamcloud - gitweb
6a7b7c9308b97f4af36a91bb98552a3ae1f5fa36
[fs/lustre-release.git] / lustre / ptlrpc / rpc.c
1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2  * vim:expandtab:shiftwidth=8:tabstop=8:
3  *
4  *  Copyright (C) 2002 Cluster File Systems, Inc.
5  *
6  *   This file is part of Lustre, http://www.lustre.org.
7  *
8  *   Lustre is free software; you can redistribute it and/or
9  *   modify it under the terms of version 2 of the GNU General Public
10  *   License as published by the Free Software Foundation.
11  *
12  *   Lustre is distributed in the hope that it will be useful,
13  *   but WITHOUT ANY WARRANTY; without even the implied warranty of
14  *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
15  *   GNU General Public License for more details.
16  *
17  *   You should have received a copy of the GNU General Public License
18  *   along with Lustre; if not, write to the Free Software
19  *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
20  *
21  */
22
23 #define EXPORT_SYMTAB
24
25 #include <linux/config.h>
26 #include <linux/module.h>
27 #include <linux/kernel.h>
28
29 #define DEBUG_SUBSYSTEM S_RPC
30
31 #include <linux/obd_support.h>
32 #include <linux/lustre_net.h>
33
/* Client-side event queues shared by every RPC in this module:
 * request-sent completion, reply arrival, and the bulk source/sink
 * completion queues.  Allocated in req_init_portals(), freed in
 * ptlrpc_exit(). */
static ptl_handle_eq_t sent_pkt_eq, rcvd_rep_eq,
        bulk_source_eq, bulk_sink_eq;
36
37
38 struct ptlrpc_request *ptlrpc_prep_req(struct ptlrpc_client *cl, 
39                                        int opcode, int namelen, char *name,
40                                        int tgtlen, char *tgt)
41 {
42         struct ptlrpc_request *request;
43         int rc;
44         ENTRY; 
45
46         OBD_ALLOC(request, sizeof(*request));
47         if (!request) { 
48                 CERROR("request allocation out of memory\n");
49                 return NULL;
50         }
51
52         memset(request, 0, sizeof(*request));
53         request->rq_xid = cl->cli_xid++;
54
55         rc = cl->cli_req_pack(name, namelen, tgt, tgtlen,
56                           &request->rq_reqhdr, &request->rq_req,
57                           &request->rq_reqlen, &request->rq_reqbuf);
58         if (rc) { 
59                 CERROR("cannot pack request %d\n", rc); 
60                 return NULL;
61         }
62         request->rq_reqhdr->opc = opcode;
63         request->rq_reqhdr->seqno = request->rq_xid;
64
65         EXIT;
66         return request;
67 }
68
69 void ptlrpc_free_req(struct ptlrpc_request *request)
70 {
71         OBD_FREE(request, sizeof(*request));
72 }
73
74 int ptlrpc_queue_wait(struct ptlrpc_request *req, 
75                              struct ptlrpc_client *cl)
76 {
77         int rc;
78         DECLARE_WAITQUEUE(wait, current);
79
80         init_waitqueue_head(&req->rq_wait_for_rep);
81
82         if (cl->cli_enqueue) {
83                 /* Local delivery */
84                 ENTRY;
85                 rc = cl->cli_enqueue(req); 
86         } else {
87                 /* Remote delivery via portals. */
88                 req->rq_req_portal = cl->cli_request_portal;
89                 req->rq_reply_portal = cl->cli_reply_portal;
90                 rc = ptl_send_rpc(req, &cl->cli_server);
91         }
92         if (rc) { 
93                 CERROR("error %d, opcode %d\n", rc, 
94                        req->rq_reqhdr->opc); 
95                 return -rc;
96         }
97
98         CDEBUG(0, "-- sleeping\n");
99         add_wait_queue(&req->rq_wait_for_rep, &wait);
100         while (req->rq_repbuf == NULL) {
101                 set_current_state(TASK_INTERRUPTIBLE);
102
103                 /* if this process really wants to die, let it go */
104                 if (sigismember(&(current->pending.signal), SIGKILL) ||
105                     sigismember(&(current->pending.signal), SIGINT))
106                         break;
107
108                 schedule();
109         }
110         remove_wait_queue(&req->rq_wait_for_rep, &wait);
111         set_current_state(TASK_RUNNING);
112         CDEBUG(0, "-- done\n");
113
114         if (req->rq_repbuf == NULL) {
115                 /* We broke out because of a signal */
116                 EXIT;
117                 return -EINTR;
118         }
119
120         rc = cl->cli_rep_unpack(req->rq_repbuf, req->rq_replen, &req->rq_rephdr, &req->rq_rep);
121         if (rc) {
122                 CERROR("unpack_rep failed: %d\n", rc);
123                 return rc;
124         }
125         CERROR("got rep %lld\n", req->rq_rephdr->seqno);
126         if ( req->rq_rephdr->status == 0 )
127                 CDEBUG(0, "--> buf %p len %d status %d\n",
128                        req->rq_repbuf, req->rq_replen, 
129                        req->rq_rephdr->status); 
130
131         EXIT;
132         return 0;
133 }
134 /*
135  *  Free the packet when it has gone out
136  */
137 static int sent_packet_callback(ptl_event_t *ev, void *data)
138 {
139         ENTRY;
140
141         if (ev->type == PTL_EVENT_SENT) {
142                 OBD_FREE(ev->mem_desc.start, ev->mem_desc.length);
143         } else { 
144                 // XXX make sure we understand all events, including ACK's
145                 CERROR("Unknown event %d\n", ev->type); 
146                 BUG();
147         }
148
149         EXIT;
150         return 1;
151 }
152
153 /*
154  * Wake up the thread waiting for the reply once it comes in.
155  */
156 static int rcvd_reply_callback(ptl_event_t *ev, void *data)
157 {
158         struct ptlrpc_request *rpc = ev->mem_desc.user_ptr;
159         ENTRY;
160
161         if (ev->type == PTL_EVENT_PUT) {
162                 rpc->rq_repbuf = ev->mem_desc.start + ev->offset;
163                 barrier();
164                 wake_up_interruptible(&rpc->rq_wait_for_rep);
165         } else { 
166                 // XXX make sure we understand all events, including ACK's
167                 CERROR("Unknown event %d\n", ev->type); 
168                 BUG();
169         }
170
171         EXIT;
172         return 1;
173 }
174
/*
 * Event callback for the server's request portal.  Counts a reference on
 * the currently active ring MD, retires (unlinks) the active ME once its
 * buffer is nearly full, and wakes the service thread on an incoming PUT.
 * The matching recycle step happens later in ptl_received_rpc().
 *
 * NOTE(review): runs in Portals event-callback context -- presumably must
 * not sleep; confirm against the NAL's dispatch rules.
 */
static int server_request_callback(ptl_event_t *ev, void *data)
{
        struct ptlrpc_service *service = data;
        int rc;

        /* mlength < rlength means the incoming message did not fit. */
        if (ev->rlength != ev->mlength)
                CERROR("Warning: Possibly truncated rpc (%d/%d)\n",
                       ev->mlength, ev->rlength);

        /* The ME is unlinked when there is less than 1024 bytes free
         * on its MD.  This ensures we are always able to handle the rpc, 
         * although the 1024 value is a guess as to the size of a
         * large rpc (the known safe margin should be determined).
         *
         * NOTE: The portals API by default unlinks all MD's associated
         *       with an ME when it's unlinked.  For now, this behavior
         *       has been commented out of the portals library so the
         *       MD can be unlinked when its ref count drops to zero.
         *       A new MD and ME will then be created that use the same
         *       kmalloc()'ed memory and inserted at the ring tail.
         */

        /* One more in-flight request consuming the active slot; dropped
         * again by ptl_received_rpc(). */
        service->srv_ref_count[service->srv_md_active]++;

        if (ev->offset >= (service->srv_buf_size - 1024)) {
                CDEBUG(D_INODE, "Unlinking ME %d\n", service->srv_me_active);

                rc = PtlMEUnlink(service->srv_me_h[service->srv_me_active]);
                /* handle 0 marks the slot "unlinked, awaiting recycle" */
                service->srv_me_h[service->srv_me_active] = 0;

                if (rc != PTL_OK) {
                        CERROR("PtlMEUnlink failed - DROPPING soon: %d\n", rc);
                        return rc;
                }

                service->srv_me_active = NEXT_INDEX(service->srv_me_active,
                        service->srv_ring_length);

                /* If the next slot is also 0, every ring entry has been
                 * unlinked and nothing can receive -- requests will drop. */
                if (service->srv_me_h[service->srv_me_active] == 0)
                        CERROR("All %d ring ME's are unlinked!\n",
                               service->srv_ring_length);
        }

        if (ev->type == PTL_EVENT_PUT) {
                wake_up(service->srv_wait_queue);
        } else {
                CERROR("Unexpected event type: %d\n", ev->type);
        }

        return 0;
}
226
227 static int bulk_source_callback(ptl_event_t *ev, void *data)
228 {
229         struct ptlrpc_request *rpc = ev->mem_desc.user_ptr;
230
231         ENTRY;
232
233         if (ev->type == PTL_EVENT_SENT) {
234                 CDEBUG(D_NET, "got SENT event\n");
235         } else if (ev->type == PTL_EVENT_ACK) {
236                 CDEBUG(D_NET, "got ACK event\n");
237                 wake_up_interruptible(&rpc->rq_wait_for_bulk);
238         } else {
239                 CERROR("Unexpected event type!\n");
240                 BUG();
241         }
242
243         EXIT;
244         return 1;
245 }
246
247 static int bulk_sink_callback(ptl_event_t *ev, void *data)
248 {
249         struct ptlrpc_request *rpc = ev->mem_desc.user_ptr;
250
251         ENTRY;
252
253         if (ev->type == PTL_EVENT_PUT) {
254                 if (rpc->rq_bulkbuf != ev->mem_desc.start + ev->offset)
255                         CERROR("bulkbuf != mem_desc -- why?\n");
256                 wake_up_interruptible(&rpc->rq_wait_for_bulk);
257         } else {
258                 CERROR("Unexpected event type!\n");
259                 BUG();
260         }
261
262         EXIT;
263         return 1;
264 }
265
266 int ptl_send_buf(struct ptlrpc_request *request, struct lustre_peer *peer,
267                  int portal, int is_request)
268 {
269         int rc;
270         ptl_process_id_t remote_id;
271         ptl_handle_md_t md_h;
272
273         /* FIXME: This is bad. */
274         if (request->rq_bulklen) {
275                 request->rq_req_md.start = request->rq_bulkbuf;
276                 request->rq_req_md.length = request->rq_bulklen;
277                 request->rq_req_md.eventq = bulk_source_eq;
278         } else if (is_request) {
279                 request->rq_req_md.start = request->rq_reqbuf;
280                 request->rq_req_md.length = request->rq_reqlen;
281                 request->rq_req_md.eventq = sent_pkt_eq;
282         } else {
283                 request->rq_req_md.start = request->rq_repbuf;
284                 request->rq_req_md.length = request->rq_replen;
285                 request->rq_req_md.eventq = sent_pkt_eq;
286         }
287         request->rq_req_md.threshold = 1;
288         request->rq_req_md.options = PTL_MD_OP_PUT;
289         request->rq_req_md.user_ptr = request;
290
291         rc = PtlMDBind(peer->peer_ni, request->rq_req_md, &md_h);
292         if (rc != 0) {
293                 BUG();
294                 CERROR("PtlMDBind failed: %d\n", rc);
295                 return rc;
296         }
297
298         remote_id.addr_kind = PTL_ADDR_NID;
299         remote_id.nid = peer->peer_nid;
300         remote_id.pid = 0;
301
302         if (request->rq_bulklen) {
303                 rc = PtlPut(md_h, PTL_ACK_REQ, remote_id, portal, 0,
304                             request->rq_xid, 0, 0);
305         } else {
306                 rc = PtlPut(md_h, PTL_NOACK_REQ, remote_id, portal, 0,
307                             request->rq_xid, 0, 0);
308         }
309         if (rc != PTL_OK) {
310                 BUG();
311                 CERROR("PtlPut(%d, %d, %d) failed: %d\n", remote_id.nid,
312                        portal, request->rq_xid, rc);
313                 /* FIXME: tear down md */
314         }
315
316         return rc;
317 }
318
319 int ptl_send_rpc(struct ptlrpc_request *request, struct lustre_peer *peer)
320 {
321         ptl_handle_me_t me_h, bulk_me_h;
322         ptl_process_id_t local_id;
323         int rc;
324         char *repbuf;
325
326         ENTRY;
327
328         if (request->rq_replen == 0) {
329                 CERROR("request->rq_replen is 0!\n");
330                 EXIT;
331                 return -EINVAL;
332         }
333
334         /* request->rq_repbuf is set only when the reply comes in, in
335          * client_packet_callback() */
336         OBD_ALLOC(repbuf, request->rq_replen);
337         if (!repbuf) { 
338                 EXIT;
339                 return -ENOMEM;
340         }
341
342         local_id.addr_kind = PTL_ADDR_GID;
343         local_id.gid = PTL_ID_ANY;
344         local_id.rid = PTL_ID_ANY;
345
346         CERROR("sending req %d\n", request->rq_xid);
347         rc = PtlMEAttach(peer->peer_ni, request->rq_reply_portal, local_id,
348                          request->rq_xid, 0, PTL_UNLINK, &me_h);
349         if (rc != PTL_OK) {
350                 CERROR("PtlMEAttach failed: %d\n", rc);
351                 BUG();
352                 EXIT;
353                 goto cleanup;
354         }
355
356         request->rq_reply_md.start = repbuf;
357         request->rq_reply_md.length = request->rq_replen;
358         request->rq_reply_md.threshold = 1;
359         request->rq_reply_md.options = PTL_MD_OP_PUT;
360         request->rq_reply_md.user_ptr = request;
361         request->rq_reply_md.eventq = rcvd_rep_eq;
362
363         rc = PtlMDAttach(me_h, request->rq_reply_md, PTL_UNLINK,
364                          &request->rq_reply_md_h);
365         if (rc != PTL_OK) {
366                 CERROR("PtlMDAttach failed: %d\n", rc);
367                 BUG();
368                 EXIT;
369                 goto cleanup2;
370         }
371
372         if (request->rq_bulklen != 0) {
373                 rc = PtlMEAttach(peer->peer_ni, request->rq_bulk_portal,
374                                  local_id, request->rq_xid, 0, PTL_UNLINK,
375                                  &bulk_me_h);
376                 if (rc != PTL_OK) {
377                         CERROR("PtlMEAttach failed: %d\n", rc);
378                         BUG();
379                         EXIT;
380                         goto cleanup3;
381                 }
382
383                 request->rq_bulk_md.start = request->rq_bulkbuf;
384                 request->rq_bulk_md.length = request->rq_bulklen;
385                 request->rq_bulk_md.threshold = 1;
386                 request->rq_bulk_md.options = PTL_MD_OP_PUT;
387                 request->rq_bulk_md.user_ptr = request;
388                 request->rq_bulk_md.eventq = bulk_sink_eq;
389
390                 rc = PtlMDAttach(bulk_me_h, request->rq_bulk_md, PTL_UNLINK,
391                                  &request->rq_bulk_md_h);
392                 if (rc != PTL_OK) {
393                         CERROR("PtlMDAttach failed: %d\n", rc);
394                         BUG();
395                         EXIT;
396                         goto cleanup4;
397                 }
398         }
399
400         return ptl_send_buf(request, peer, request->rq_req_portal, 1);
401
402  cleanup4:
403         PtlMEUnlink(bulk_me_h);
404  cleanup3:
405         PtlMDUnlink(request->rq_reply_md_h);
406  cleanup2:
407         PtlMEUnlink(me_h);
408  cleanup:
409         OBD_FREE(repbuf, request->rq_replen);
410
411         return rc;
412 }
413
414 /* ptl_received_rpc() should be called by the sleeping process once
415  * it finishes processing an event.  This ensures the ref count is
416  * decremented and that the rpc ring buffer cycles properly.
417  */ 
418 int ptl_received_rpc(struct ptlrpc_service *service) {
419         int rc, index;
420
421         index = service->srv_md_active;
422         CDEBUG(D_INFO, "MD index=%d Ref Count=%d\n", index,
423                service->srv_ref_count[index]);
424         service->srv_ref_count[index]--;
425
426         if ((service->srv_ref_count[index] <= 0) &&
427             (service->srv_me_h[index] == 0)) {
428
429                 /* Replace the unlinked ME and MD */
430                 rc = PtlMEInsert(service->srv_me_h[service->srv_me_tail],
431                                  service->srv_id, 0, ~0, PTL_RETAIN,
432                                  PTL_INS_AFTER, &(service->srv_me_h[index]));
433                 CDEBUG(D_INFO, "Inserting new ME and MD in ring, rc %d\n", rc);
434                 service->srv_me_tail = index;
435                 service->srv_ref_count[index] = 0;
436                 
437                 if (rc != PTL_OK) {
438                         CERROR("PtlMEInsert failed: %d\n", rc);
439                         return rc;
440                 }
441
442                 service->srv_md[index].start        = service->srv_buf[index];
443                 service->srv_md[index].length       = service->srv_buf_size;
444                 service->srv_md[index].threshold    = PTL_MD_THRESH_INF;
445                 service->srv_md[index].options      = PTL_MD_OP_PUT;
446                 service->srv_md[index].user_ptr     = service;
447                 service->srv_md[index].eventq       = service->srv_eq_h;
448
449                 rc = PtlMDAttach(service->srv_me_h[index],
450                                  service->srv_md[index],
451                                  PTL_RETAIN, &(service->srv_md_h[index]));
452
453                 CDEBUG(D_INFO, "Attach MD in ring, rc %d\n", rc);
454                 if (rc != PTL_OK) {
455                         /* XXX cleanup */
456                         BUG();
457                         CERROR("PtlMDAttach failed: %d\n", rc);
458                         return rc;
459                 }
460
461                 service->srv_md_active =
462                         NEXT_INDEX(index, service->srv_ring_length);
463         } 
464         
465         return 0;
466 }
467
/*
 * Set up the receive side of a service: allocate its event queue, attach
 * the leading ME on the service portal, then build a ring of
 * srv_ring_length ME/MD pairs, each backed by a srv_buf_size buffer.
 *
 * NOTE(review): the error paths leak -- on any failure after PtlEQAlloc
 * the EQ, any attached MEs/MDs, and any OBD_ALLOC'ed buffers are not
 * released.  Needs a goto-based unwind; flagged rather than changed here.
 */
int rpc_register_service(struct ptlrpc_service *service, char *uuid)
{
        struct lustre_peer peer;
        int rc, i;

        rc = kportal_uuid_to_peer(uuid, &peer);
        if (rc != 0) {
                CERROR("Invalid uuid \"%s\"\n", uuid);
                return -EINVAL;
        }

        service->srv_ring_length = RPC_RING_LENGTH;
        service->srv_me_active = 0;
        service->srv_md_active = 0;

        /* wildcard match: accept requests from any gid/rid */
        service->srv_id.addr_kind = PTL_ADDR_GID;
        service->srv_id.gid = PTL_ID_ANY;
        service->srv_id.rid = PTL_ID_ANY;

        rc = PtlEQAlloc(peer.peer_ni, 128, server_request_callback,
                        service, &(service->srv_eq_h));

        if (rc != PTL_OK) {
                CERROR("PtlEQAlloc failed: %d\n", rc);
                return rc;
        }

        /* Attach the leading ME on which we build the ring */
        rc = PtlMEAttach(peer.peer_ni, service->srv_portal,
                         service->srv_id, 0, ~0, PTL_RETAIN,
                         &(service->srv_me_h[0]));

        if (rc != PTL_OK) {
                CERROR("PtlMEAttach failed: %d\n", rc);
                return rc;
        }

        for (i = 0; i < service->srv_ring_length; i++) {
                OBD_ALLOC(service->srv_buf[i], service->srv_buf_size);

                if (service->srv_buf[i] == NULL) {
                        CERROR("no memory\n");
                        return -ENOMEM;
                }

                /* Insert additional ME's to the ring */
                if (i > 0) {
                        rc = PtlMEInsert(service->srv_me_h[i-1],
                                         service->srv_id, 0, ~0, PTL_RETAIN,
                                         PTL_INS_AFTER,&(service->srv_me_h[i]));
                        service->srv_me_tail = i;

                        if (rc != PTL_OK) {
                                CERROR("PtlMEInsert failed: %d\n", rc);
                                return rc;
                        }
                }

                /* same MD layout rebuilt later by ptl_received_rpc() */
                service->srv_ref_count[i] = 0;
                service->srv_md[i].start        = service->srv_buf[i];
                service->srv_md[i].length        = service->srv_buf_size;
                service->srv_md[i].threshold        = PTL_MD_THRESH_INF;
                service->srv_md[i].options        = PTL_MD_OP_PUT;
                service->srv_md[i].user_ptr        = service;
                service->srv_md[i].eventq        = service->srv_eq_h;

                rc = PtlMDAttach(service->srv_me_h[i], service->srv_md[i],
                                 PTL_RETAIN, &(service->srv_md_h[i]));

                if (rc != PTL_OK) {
                        /* cleanup */
                        CERROR("PtlMDAttach failed: %d\n", rc);
                        return rc;
                }
        }

        return 0;
}
546
547 int rpc_unregister_service(struct ptlrpc_service *service)
548 {
549         int rc, i;
550
551         for (i = 0; i < service->srv_ring_length; i++) {
552                 rc = PtlMDUnlink(service->srv_md_h[i]);
553                 if (rc)
554                         CERROR("PtlMDUnlink failed: %d\n", rc);
555         
556                 rc = PtlMEUnlink(service->srv_me_h[i]);
557                 if (rc)
558                         CERROR("PtlMEUnlink failed: %d\n", rc);
559         
560                 OBD_FREE(service->srv_buf[i], service->srv_buf_size);                
561         }
562
563         rc = PtlEQFree(service->srv_eq_h);
564         if (rc)
565                 CERROR("PtlEQFree failed: %d\n", rc);
566
567         return 0;
568 }
569
570 static int req_init_portals(void)
571 {
572         int rc;
573         const ptl_handle_ni_t *nip;
574         ptl_handle_ni_t ni;
575
576         nip = inter_module_get_request(LUSTRE_NAL "_ni", LUSTRE_NAL);
577         if (nip == NULL) {
578                 CERROR("get_ni failed: is the NAL module loaded?\n");
579                 return -EIO;
580         }
581         ni = *nip;
582
583         rc = PtlEQAlloc(ni, 128, sent_packet_callback, NULL, &sent_pkt_eq);
584         if (rc != PTL_OK)
585                 CERROR("PtlEQAlloc failed: %d\n", rc);
586
587         rc = PtlEQAlloc(ni, 128, rcvd_reply_callback, NULL, &rcvd_rep_eq);
588         if (rc != PTL_OK)
589                 CERROR("PtlEQAlloc failed: %d\n", rc);
590
591         rc = PtlEQAlloc(ni, 128, bulk_source_callback, NULL, &bulk_source_eq);
592         if (rc != PTL_OK)
593                 CERROR("PtlEQAlloc failed: %d\n", rc);
594
595         rc = PtlEQAlloc(ni, 128, bulk_sink_callback, NULL, &bulk_sink_eq);
596         if (rc != PTL_OK)
597                 CERROR("PtlEQAlloc failed: %d\n", rc);
598
599         return rc;
600 }
601
/* Module entry point: all initialization is the portals setup. */
static int __init ptlrpc_init(void)
{
        return req_init_portals();
}
606
607 static void __exit ptlrpc_exit(void)
608 {
609         PtlEQFree(sent_pkt_eq);
610         PtlEQFree(rcvd_rep_eq);
611         PtlEQFree(bulk_source_eq);
612         PtlEQFree(bulk_sink_eq);
613
614         inter_module_put(LUSTRE_NAL "_ni");
615
616         return;
617 }
618
619 MODULE_AUTHOR("Peter J. Braam <braam@clusterfs.com>");
620 MODULE_DESCRIPTION("Lustre Request Processor v1.0");
621 MODULE_LICENSE("GPL"); 
622
623 module_init(ptlrpc_init);
624 module_exit(ptlrpc_exit);