/* Imported from fs/lustre-release.git: lustre/ptlrpc/rpc.c
 * (Whamcloud gitweb snapshot; commit: "bug fix to unload modules with
 * safe wait functions") */
1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2  * vim:expandtab:shiftwidth=8:tabstop=8:
3  *
4  *  Copyright (C) 2002 Cluster File Systems, Inc.
5  *
6  *   This file is part of Lustre, http://www.lustre.org.
7  *
8  *   Lustre is free software; you can redistribute it and/or
9  *   modify it under the terms of version 2 of the GNU General Public
10  *   License as published by the Free Software Foundation.
11  *
12  *   Lustre is distributed in the hope that it will be useful,
13  *   but WITHOUT ANY WARRANTY; without even the implied warranty of
14  *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
15  *   GNU General Public License for more details.
16  *
17  *   You should have received a copy of the GNU General Public License
18  *   along with Lustre; if not, write to the Free Software
19  *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
20  *
21  */
22
23 #define EXPORT_SYMTAB
24
25 #include <linux/config.h>
26 #include <linux/module.h>
27 #include <linux/kernel.h>
28
29 #define DEBUG_SUBSYSTEM S_RPC
30
31 #include <linux/obd_support.h>
32 #include <linux/lustre_net.h>
33
/* Client-side portals event queues: request-sent, reply-received, and the
 * two bulk-transfer directions.  Allocated in req_init_portals() and freed
 * in ptlrpc_exit(). */
static ptl_handle_eq_t sent_pkt_eq, rcvd_rep_eq, 
        bulk_source_eq, bulk_sink_eq;
36
37
38 struct ptlrpc_request *ptlrpc_prep_req(struct ptlrpc_client *cl, 
39                                        int opcode, int namelen, char *name,
40                                        int tgtlen, char *tgt)
41 {
42         struct ptlrpc_request *request;
43         int rc;
44         ENTRY; 
45
46         OBD_ALLOC(request, sizeof(*request));
47         if (!request) { 
48                 CERROR("request allocation out of memory\n");
49                 return NULL;
50         }
51
52         memset(request, 0, sizeof(*request));
53         request->rq_xid = cl->cli_xid++;
54
55         rc = cl->cli_req_pack(name, namelen, tgt, tgtlen,
56                           &request->rq_reqhdr, &request->rq_req,
57                           &request->rq_reqlen, &request->rq_reqbuf);
58         if (rc) { 
59                 CERROR("cannot pack request %d\n", rc); 
60                 return NULL;
61         }
62         request->rq_reqhdr->opc = opcode;
63
64         EXIT;
65         return request;
66 }
67
68 void ptlrpc_free_req(struct ptlrpc_request *request)
69 {
70         OBD_FREE(request, sizeof(*request));
71 }
72
/*
 * Send `req` via client `cl` and sleep until the reply arrives.
 *
 * Delivery is either local (cl->cli_enqueue set) or remote over portals
 * via ptl_send_rpc().  The sleeper is woken by rcvd_reply_callback(),
 * which sets req->rq_repbuf before calling wake_up_interruptible() --
 * the rq_repbuf test below is the wakeup condition.
 *
 * Returns 0 on success (reply unpacked into rq_rephdr/rq_rep), -EINTR
 * if a SIGKILL/SIGINT interrupted the wait, or an error from the send
 * or unpack step.
 */
int ptlrpc_queue_wait(struct ptlrpc_request *req, 
                             struct ptlrpc_client *cl)
{
        int rc;
        DECLARE_WAITQUEUE(wait, current);

        /* Must be initialized before the send: the reply callback may
         * fire (and wake this queue) as soon as the request is out. */
        init_waitqueue_head(&req->rq_wait_for_rep);

        if (cl->cli_enqueue) {
                /* Local delivery */
                ENTRY;
                rc = cl->cli_enqueue(req); 
        } else {
                /* Remote delivery via portals. */
                req->rq_req_portal = cl->cli_request_portal;
                req->rq_reply_portal = cl->cli_reply_portal;
                rc = ptl_send_rpc(req, &cl->cli_server);
        }
        if (rc) { 
                CERROR("error %d, opcode %d\n", rc, 
                       req->rq_reqhdr->opc); 
                /* NOTE(review): negating rc assumes a positive error code,
                 * but ptl_send_rpc() can return -EINVAL/-ENOMEM, which this
                 * would flip positive -- confirm intended sign convention. */
                return -rc;
        }

        CDEBUG(0, "-- sleeping\n");
        /* Classic open-coded interruptible sleep: register on the wait
         * queue, then re-check the condition before each schedule(). */
        add_wait_queue(&req->rq_wait_for_rep, &wait);
        while (req->rq_repbuf == NULL) {
                set_current_state(TASK_INTERRUPTIBLE);

                /* if this process really wants to die, let it go */
                if (sigismember(&(current->pending.signal), SIGKILL) ||
                    sigismember(&(current->pending.signal), SIGINT))
                        break;

                schedule();
        }
        remove_wait_queue(&req->rq_wait_for_rep, &wait);
        set_current_state(TASK_RUNNING);
        CDEBUG(0, "-- done\n");

        if (req->rq_repbuf == NULL) {
                /* We broke out because of a signal */
                EXIT;
                return -EINTR;
        }

        rc = cl->cli_rep_unpack(req->rq_repbuf, req->rq_replen, &req->rq_rephdr, &req->rq_rep);
        if (rc) {
                CERROR("unpack_rep failed: %d\n", rc);
                return rc;
        }

        /* NOTE(review): this only logs when status == 0, yet prints the
         * status -- possibly meant to log non-zero status instead. */
        if ( req->rq_rephdr->status == 0 )
                CDEBUG(0, "--> buf %p len %d status %d\n",
                       req->rq_repbuf, req->rq_replen, 
                       req->rq_rephdr->status); 

        EXIT;
        return 0;
}
133 /*
134  *  Free the packet when it has gone out
135  */
136 static int sent_packet_callback(ptl_event_t *ev, void *data)
137 {
138         ENTRY;
139
140         if (ev->type == PTL_EVENT_SENT) {
141                 OBD_FREE(ev->mem_desc.start, ev->mem_desc.length);
142         } else { 
143                 // XXX make sure we understand all events, including ACK's
144                 CERROR("Unknown event %d\n", ev->type); 
145                 BUG();
146         }
147
148         EXIT;
149         return 1;
150 }
151
152 /*
153  * Wake up the thread waiting for the reply once it comes in.
154  */
155 static int rcvd_reply_callback(ptl_event_t *ev, void *data)
156 {
157         struct ptlrpc_request *rpc = ev->mem_desc.user_ptr;
158         ENTRY;
159
160         if (ev->type == PTL_EVENT_PUT) {
161                 rpc->rq_repbuf = ev->mem_desc.start + ev->offset;
162                 wake_up_interruptible(&rpc->rq_wait_for_rep);
163         } else { 
164                 // XXX make sure we understand all events, including ACK's
165                 CERROR("Unknown event %d\n", ev->type); 
166                 BUG();
167         }
168
169         EXIT;
170         return 1;
171 }
172
/*
 * Portals event callback for the server-side request ring.
 *
 * Bumps the ref count on the active MD, unlinks the active ME when its
 * buffer has less than 1024 bytes of headroom left (ptl_received_rpc()
 * re-inserts it once the ref count drains), and wakes the service
 * thread on a PUT event.
 */
static int server_request_callback(ptl_event_t *ev, void *data)
{
        struct ptlrpc_service *service = data;
        int rc;

        if (ev->rlength != ev->mlength)
                CERROR("Warning: Possibly truncated rpc (%d/%d)\n",
                       ev->mlength, ev->rlength);

        /* The ME is unlinked when there is less than 1024 bytes free
         * on its MD.  This ensures we are always able to handle the rpc, 
         * although the 1024 value is a guess as to the size of a
         * large rpc (the known safe margin should be determined).
         *
         * NOTE: The portals API by default unlinks all MD's associated
         *       with an ME when it's unlinked.  For now, this behavior
         *       has been commented out of the portals library so the
         *       MD can be unlinked when its ref count drops to zero.
         *       A new MD and ME will then be created that use the same
         *       kmalloc()'ed memory and inserted at the ring tail.
         */

        /* One outstanding rpc against the active MD; dropped in
         * ptl_received_rpc() when the server finishes processing. */
        service->srv_ref_count[service->srv_md_active]++;

        if (ev->offset >= (service->srv_buf_size - 1024)) {
                CDEBUG(D_INODE, "Unlinking ME %d\n", service->srv_me_active);

                rc = PtlMEUnlink(service->srv_me_h[service->srv_me_active]);
                /* A zero handle marks the slot "unlinked"; ptl_received_rpc()
                 * keys off this to know it must rebuild the slot. */
                service->srv_me_h[service->srv_me_active] = 0;

                if (rc != PTL_OK) {
                        CERROR("PtlMEUnlink failed - DROPPING soon: %d\n", rc);
                        return rc;
                }

                service->srv_me_active = NEXT_INDEX(service->srv_me_active,
                        service->srv_ring_length);

                /* If the next slot is also zero, the whole ring has been
                 * consumed faster than it is being replenished. */
                if (service->srv_me_h[service->srv_me_active] == 0)
                        CERROR("All %d ring ME's are unlinked!\n",
                               service->srv_ring_length);
        }

        if (ev->type == PTL_EVENT_PUT) {
                wake_up(service->srv_wait_queue);
        } else {
                CERROR("Unexpected event type: %d\n", ev->type);
        }

        return 0;
}
224
225 static int bulk_source_callback(ptl_event_t *ev, void *data)
226 {
227         struct ptlrpc_request *rpc = ev->mem_desc.user_ptr;
228
229         ENTRY;
230
231         if (ev->type == PTL_EVENT_SENT) {
232                 CDEBUG(D_NET, "got SENT event\n");
233         } else if (ev->type == PTL_EVENT_ACK) {
234                 CDEBUG(D_NET, "got ACK event\n");
235                 wake_up_interruptible(&rpc->rq_wait_for_bulk);
236         } else {
237                 CERROR("Unexpected event type!\n");
238                 BUG();
239         }
240
241         EXIT;
242         return 1;
243 }
244
245 static int bulk_sink_callback(ptl_event_t *ev, void *data)
246 {
247         struct ptlrpc_request *rpc = ev->mem_desc.user_ptr;
248
249         ENTRY;
250
251         if (ev->type == PTL_EVENT_PUT) {
252                 if (rpc->rq_bulkbuf != ev->mem_desc.start + ev->offset)
253                         CERROR("bulkbuf != mem_desc -- why?\n");
254                 wake_up_interruptible(&rpc->rq_wait_for_bulk);
255         } else {
256                 CERROR("Unexpected event type!\n");
257                 BUG();
258         }
259
260         EXIT;
261         return 1;
262 }
263
264 int ptl_send_buf(struct ptlrpc_request *request, struct lustre_peer *peer,
265                  int portal, int is_request)
266 {
267         int rc;
268         ptl_process_id_t remote_id;
269         ptl_handle_md_t md_h;
270
271         /* FIXME: This is bad. */
272         if (request->rq_bulklen) {
273                 request->rq_req_md.start = request->rq_bulkbuf;
274                 request->rq_req_md.length = request->rq_bulklen;
275                 request->rq_req_md.eventq = bulk_source_eq;
276         } else if (is_request) {
277                 request->rq_req_md.start = request->rq_reqbuf;
278                 request->rq_req_md.length = request->rq_reqlen;
279                 request->rq_req_md.eventq = sent_pkt_eq;
280         } else {
281                 request->rq_req_md.start = request->rq_repbuf;
282                 request->rq_req_md.length = request->rq_replen;
283                 request->rq_req_md.eventq = sent_pkt_eq;
284         }
285         request->rq_req_md.threshold = 1;
286         request->rq_req_md.options = PTL_MD_OP_PUT;
287         request->rq_req_md.user_ptr = request;
288
289         rc = PtlMDBind(peer->peer_ni, request->rq_req_md, &md_h);
290         if (rc != 0) {
291                 BUG();
292                 CERROR("PtlMDBind failed: %d\n", rc);
293                 return rc;
294         }
295
296         remote_id.addr_kind = PTL_ADDR_NID;
297         remote_id.nid = peer->peer_nid;
298         remote_id.pid = 0;
299
300         if (request->rq_bulklen) {
301                 rc = PtlPut(md_h, PTL_ACK_REQ, remote_id, portal, 0,
302                             request->rq_xid, 0, 0);
303         } else {
304                 rc = PtlPut(md_h, PTL_NOACK_REQ, remote_id, portal, 0,
305                             request->rq_xid, 0, 0);
306         }
307         if (rc != PTL_OK) {
308                 BUG();
309                 CERROR("PtlPut(%d, %d, %d) failed: %d\n", remote_id.nid,
310                        portal, request->rq_xid, rc);
311                 /* FIXME: tear down md */
312         }
313
314         return rc;
315 }
316
/*
 * Client-side remote send: set up the reply sink (ME + MD matched on
 * rq_xid at the reply portal) and, when the request carries bulk data,
 * the bulk sink as well, then hand the request to ptl_send_buf().
 *
 * On success the reply buffer is owned by the reply MD; on failure the
 * resources built so far are torn down via the cleanup labels.
 *
 * Returns 0/PTL_OK on success or an error/portals code.
 */
int ptl_send_rpc(struct ptlrpc_request *request, struct lustre_peer *peer)
{
        ptl_handle_me_t me_h, bulk_me_h;
        ptl_process_id_t local_id;
        int rc;
        char *repbuf;

        ENTRY;

        if (request->rq_replen == 0) {
                CERROR("request->rq_replen is 0!\n");
                EXIT;
                return -EINVAL;
        }

        /* request->rq_repbuf is set only when the reply comes in, in
         * client_packet_callback() */
        OBD_ALLOC(repbuf, request->rq_replen);
        if (!repbuf) { 
                EXIT;
                return -ENOMEM;
        }

        /* Match replies from any sender; the xid in the ME does the
         * real demultiplexing. */
        local_id.addr_kind = PTL_ADDR_GID;
        local_id.gid = PTL_ID_ANY;
        local_id.rid = PTL_ID_ANY;

        /* NOTE(review): CERROR for an ordinary trace message -- looks
         * like leftover debugging; CDEBUG would be the usual choice. */
        CERROR("sending req %d\n", request->rq_xid);
        rc = PtlMEAttach(peer->peer_ni, request->rq_reply_portal, local_id,
                         request->rq_xid, 0, PTL_UNLINK, &me_h);
        if (rc != PTL_OK) {
                CERROR("PtlMEAttach failed: %d\n", rc);
                /* NOTE(review): BUG() halts here, making the goto (and all
                 * the cleanup labels below) unreachable -- confirm whether
                 * BUG() should be dropped so cleanup actually runs. */
                BUG();
                EXIT;
                goto cleanup;
        }

        /* Reply sink MD: one PUT, then auto-unlink. */
        request->rq_reply_md.start = repbuf;
        request->rq_reply_md.length = request->rq_replen;
        request->rq_reply_md.threshold = 1;
        request->rq_reply_md.options = PTL_MD_OP_PUT;
        request->rq_reply_md.user_ptr = request;
        request->rq_reply_md.eventq = rcvd_rep_eq;

        rc = PtlMDAttach(me_h, request->rq_reply_md, PTL_UNLINK,
                         &request->rq_reply_md_h);
        if (rc != PTL_OK) {
                CERROR("PtlMDAttach failed: %d\n", rc);
                BUG();
                EXIT;
                goto cleanup2;
        }

        /* Optional bulk sink, matched on the same xid at the bulk portal. */
        if (request->rq_bulklen != 0) {
                rc = PtlMEAttach(peer->peer_ni, request->rq_bulk_portal,
                                 local_id, request->rq_xid, 0, PTL_UNLINK,
                                 &bulk_me_h);
                if (rc != PTL_OK) {
                        CERROR("PtlMEAttach failed: %d\n", rc);
                        BUG();
                        EXIT;
                        goto cleanup3;
                }

                request->rq_bulk_md.start = request->rq_bulkbuf;
                request->rq_bulk_md.length = request->rq_bulklen;
                request->rq_bulk_md.threshold = 1;
                request->rq_bulk_md.options = PTL_MD_OP_PUT;
                request->rq_bulk_md.user_ptr = request;
                request->rq_bulk_md.eventq = bulk_sink_eq;

                rc = PtlMDAttach(bulk_me_h, request->rq_bulk_md, PTL_UNLINK,
                                 &request->rq_bulk_md_h);
                if (rc != PTL_OK) {
                        CERROR("PtlMDAttach failed: %d\n", rc);
                        BUG();
                        EXIT;
                        goto cleanup4;
                }
        }

        return ptl_send_buf(request, peer, request->rq_req_portal, 1);

        /* Unwind in reverse order of construction. */
 cleanup4:
        PtlMEUnlink(bulk_me_h);
 cleanup3:
        PtlMDUnlink(request->rq_reply_md_h);
 cleanup2:
        PtlMEUnlink(me_h);
 cleanup:
        OBD_FREE(repbuf, request->rq_replen);

        return rc;
}
411
/* ptl_received_rpc() should be called by the sleeping process once
 * it finishes processing an event.  This ensures the ref count is
 * decremented and that the rpc ring buffer cycles properly.
 *
 * When the active slot's ref count reaches zero AND its ME was
 * unlinked by server_request_callback() (handle == 0), the slot is
 * rebuilt: a fresh ME is inserted at the ring tail and a new MD is
 * attached over the same buffer, then srv_md_active advances.
 *
 * Returns 0 on success or a portals error code.
 */ 
int ptl_received_rpc(struct ptlrpc_service *service) {
        int rc, index;

        index = service->srv_md_active;
        CDEBUG(D_INFO, "MD index=%d Ref Count=%d\n", index,
               service->srv_ref_count[index]);
        /* Drop the reference taken in server_request_callback(). */
        service->srv_ref_count[index]--;

        if ((service->srv_ref_count[index] <= 0) &&
            (service->srv_me_h[index] == 0)) {

                /* Replace the unlinked ME and MD */
                rc = PtlMEInsert(service->srv_me_h[service->srv_me_tail],
                                 service->srv_id, 0, ~0, PTL_RETAIN,
                                 PTL_INS_AFTER, &(service->srv_me_h[index]));
                CDEBUG(D_INFO, "Inserting new ME and MD in ring, rc %d\n", rc);
                /* The rebuilt slot becomes the new ring tail. */
                service->srv_me_tail = index;
                service->srv_ref_count[index] = 0;
                
                if (rc != PTL_OK) {
                        CERROR("PtlMEInsert failed: %d\n", rc);
                        return rc;
                }

                /* New MD over the slot's original kmalloc()'ed buffer. */
                service->srv_md[index].start        = service->srv_buf[index];
                service->srv_md[index].length       = service->srv_buf_size;
                service->srv_md[index].threshold    = PTL_MD_THRESH_INF;
                service->srv_md[index].options      = PTL_MD_OP_PUT;
                service->srv_md[index].user_ptr     = service;
                service->srv_md[index].eventq       = service->srv_eq_h;

                rc = PtlMDAttach(service->srv_me_h[index],
                                 service->srv_md[index],
                                 PTL_RETAIN, &(service->srv_md_h[index]));

                CDEBUG(D_INFO, "Attach MD in ring, rc %d\n", rc);
                if (rc != PTL_OK) {
                        /* XXX cleanup */
                        BUG();
                        CERROR("PtlMDAttach failed: %d\n", rc);
                        return rc;
                }

                service->srv_md_active =
                        NEXT_INDEX(index, service->srv_ring_length);
        } 
        
        return 0;
}
465
466 int rpc_register_service(struct ptlrpc_service *service, char *uuid)
467 {
468         struct lustre_peer peer;
469         int rc, i;
470
471         rc = kportal_uuid_to_peer(uuid, &peer);
472         if (rc != 0) {
473                 CERROR("Invalid uuid \"%s\"\n", uuid);
474                 return -EINVAL;
475         }
476
477         service->srv_ring_length = RPC_RING_LENGTH;
478         service->srv_me_active = 0;
479         service->srv_md_active = 0;
480
481         service->srv_id.addr_kind = PTL_ADDR_GID;
482         service->srv_id.gid = PTL_ID_ANY;
483         service->srv_id.rid = PTL_ID_ANY;
484
485         rc = PtlEQAlloc(peer.peer_ni, 128, server_request_callback,
486                         service, &(service->srv_eq_h));
487
488         if (rc != PTL_OK) {
489                 CERROR("PtlEQAlloc failed: %d\n", rc);
490                 return rc;
491         }
492
493         /* Attach the leading ME on which we build the ring */
494         rc = PtlMEAttach(peer.peer_ni, service->srv_portal,
495                          service->srv_id, 0, ~0, PTL_RETAIN,
496                          &(service->srv_me_h[0]));
497
498         if (rc != PTL_OK) {
499                 CERROR("PtlMEAttach failed: %d\n", rc);
500                 return rc;
501         }
502
503         for (i = 0; i < service->srv_ring_length; i++) {
504                 OBD_ALLOC(service->srv_buf[i], service->srv_buf_size);
505
506                 if (service->srv_buf[i] == NULL) {
507                         CERROR("no memory\n");
508                         return -ENOMEM;
509                 }
510
511                 /* Insert additional ME's to the ring */
512                 if (i > 0) {
513                         rc = PtlMEInsert(service->srv_me_h[i-1],
514                                          service->srv_id, 0, ~0, PTL_RETAIN,
515                                          PTL_INS_AFTER,&(service->srv_me_h[i]));
516                         service->srv_me_tail = i;
517
518                         if (rc != PTL_OK) {
519                                 CERROR("PtlMEInsert failed: %d\n", rc);
520                                 return rc;
521                         }
522                 }
523
524                 service->srv_ref_count[i] = 0;
525                 service->srv_md[i].start        = service->srv_buf[i];
526                 service->srv_md[i].length        = service->srv_buf_size;
527                 service->srv_md[i].threshold        = PTL_MD_THRESH_INF;
528                 service->srv_md[i].options        = PTL_MD_OP_PUT;
529                 service->srv_md[i].user_ptr        = service;
530                 service->srv_md[i].eventq        = service->srv_eq_h;
531
532                 rc = PtlMDAttach(service->srv_me_h[i], service->srv_md[i],
533                                  PTL_RETAIN, &(service->srv_md_h[i]));
534
535                 if (rc != PTL_OK) {
536                         /* cleanup */
537                         CERROR("PtlMDAttach failed: %d\n", rc);
538                         return rc;
539                 }
540         }
541
542         return 0;
543 }
544
545 int rpc_unregister_service(struct ptlrpc_service *service)
546 {
547         int rc, i;
548
549         for (i = 0; i < service->srv_ring_length; i++) {
550                 rc = PtlMDUnlink(service->srv_md_h[i]);
551                 if (rc)
552                         CERROR("PtlMDUnlink failed: %d\n", rc);
553         
554                 rc = PtlMEUnlink(service->srv_me_h[i]);
555                 if (rc)
556                         CERROR("PtlMEUnlink failed: %d\n", rc);
557         
558                 OBD_FREE(service->srv_buf[i], service->srv_buf_size);                
559         }
560
561         rc = PtlEQFree(service->srv_eq_h);
562         if (rc)
563                 CERROR("PtlEQFree failed: %d\n", rc);
564
565         return 0;
566 }
567
568 static int req_init_portals(void)
569 {
570         int rc;
571         const ptl_handle_ni_t *nip;
572         ptl_handle_ni_t ni;
573
574         nip = inter_module_get_request(LUSTRE_NAL "_ni", LUSTRE_NAL);
575         if (nip == NULL) {
576                 CERROR("get_ni failed: is the NAL module loaded?\n");
577                 return -EIO;
578         }
579         ni = *nip;
580
581         rc = PtlEQAlloc(ni, 128, sent_packet_callback, NULL, &sent_pkt_eq);
582         if (rc != PTL_OK)
583                 CERROR("PtlEQAlloc failed: %d\n", rc);
584
585         rc = PtlEQAlloc(ni, 128, rcvd_reply_callback, NULL, &rcvd_rep_eq);
586         if (rc != PTL_OK)
587                 CERROR("PtlEQAlloc failed: %d\n", rc);
588
589         rc = PtlEQAlloc(ni, 128, bulk_source_callback, NULL, &bulk_source_eq);
590         if (rc != PTL_OK)
591                 CERROR("PtlEQAlloc failed: %d\n", rc);
592
593         rc = PtlEQAlloc(ni, 128, bulk_sink_callback, NULL, &bulk_sink_eq);
594         if (rc != PTL_OK)
595                 CERROR("PtlEQAlloc failed: %d\n", rc);
596
597         return rc;
598 }
599
600 static int __init ptlrpc_init(void)
601 {
602         return req_init_portals();
603 }
604
605 static void __exit ptlrpc_exit(void)
606 {
607         PtlEQFree(sent_pkt_eq);
608         PtlEQFree(rcvd_rep_eq);
609         PtlEQFree(bulk_source_eq);
610         PtlEQFree(bulk_sink_eq);
611
612         inter_module_put(LUSTRE_NAL "_ni");
613
614         return;
615 }
616
/* Standard kernel-module boilerplate; entry/exit points defined above. */
MODULE_AUTHOR("Peter J. Braam <braam@clusterfs.com>");
MODULE_DESCRIPTION("Lustre Request Processor v1.0");
MODULE_LICENSE("GPL"); 

module_init(ptlrpc_init);
module_exit(ptlrpc_exit);