Whamcloud - gitweb
- added rq_type field to ptlrpc_request
[fs/lustre-release.git] / lustre / ptlrpc / rpc.c
1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2  * vim:expandtab:shiftwidth=8:tabstop=8:
3  *
4  *  Copyright (C) 2002 Cluster File Systems, Inc.
5  *
6  *   This file is part of Lustre, http://www.lustre.org.
7  *
8  *   Lustre is free software; you can redistribute it and/or
9  *   modify it under the terms of version 2 of the GNU General Public
10  *   License as published by the Free Software Foundation.
11  *
12  *   Lustre is distributed in the hope that it will be useful,
13  *   but WITHOUT ANY WARRANTY; without even the implied warranty of
14  *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
15  *   GNU General Public License for more details.
16  *
17  *   You should have received a copy of the GNU General Public License
18  *   along with Lustre; if not, write to the Free Software
19  *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
20  *
21  */
22
23 #define EXPORT_SYMTAB
24
25 #include <linux/config.h>
26 #include <linux/module.h>
27 #include <linux/kernel.h>
28
29 #define DEBUG_SUBSYSTEM S_RPC
30
31 #include <linux/obd_support.h>
32 #include <linux/lustre_net.h>
33
/* Event queues shared by all clients, one per kind of portals traffic:
 * request send completion, reply arrival, and the source/sink sides of
 * bulk transfers.  Allocated in req_init_portals(), freed in ptlrpc_exit(). */
static ptl_handle_eq_t sent_pkt_eq, rcvd_rep_eq,
        bulk_source_eq, bulk_sink_eq;
36
37
38 struct ptlrpc_request *ptlrpc_prep_req(struct ptlrpc_client *cl, 
39                                        int opcode, int namelen, char *name,
40                                        int tgtlen, char *tgt)
41 {
42         struct ptlrpc_request *request;
43         int rc;
44         ENTRY; 
45
46         OBD_ALLOC(request, sizeof(*request));
47         if (!request) { 
48                 CERROR("request allocation out of memory\n");
49                 return NULL;
50         }
51
52         memset(request, 0, sizeof(*request));
53         request->rq_xid = cl->cli_xid++;
54
55         rc = cl->cli_req_pack(name, namelen, tgt, tgtlen,
56                           &request->rq_reqhdr, &request->rq_req,
57                           &request->rq_reqlen, &request->rq_reqbuf);
58         if (rc) { 
59                 CERROR("cannot pack request %d\n", rc); 
60                 return NULL;
61         }
62         request->rq_reqhdr->opc = opcode;
63         request->rq_reqhdr->seqno = request->rq_xid;
64
65         EXIT;
66         return request;
67 }
68
/* Release a request allocated by ptlrpc_prep_req().
 * NOTE(review): this frees only the request struct itself; the packed
 * request buffer (rq_reqbuf) is apparently freed by the send-completion
 * callback — confirm no path leaks it when the request is never sent. */
void ptlrpc_free_req(struct ptlrpc_request *request)
{
        OBD_FREE(request, sizeof(*request));
}
73
/* Abort this request and cleanup any resources associated with it. */
int ptl_abort_rpc(struct ptlrpc_request *request)
{
        /* First remove the MD for the reply; in theory, this means
         * that we can tear down the buffer safely. */
        PtlMEUnlink(request->rq_reply_me_h);
        PtlMDUnlink(request->rq_reply_md_h);

        /* a nonzero bulk length means a bulk ME/MD pair was posted too */
        if (request->rq_bulklen != 0) {
                PtlMEUnlink(request->rq_bulk_me_h);
                PtlMDUnlink(request->rq_bulk_md_h);
        }

        /* NOTE(review): unlink return codes are ignored, and the reply
         * buffer itself is not freed here — confirm ownership. */
        return 0;
}
89
/*
 * Send 'req' via client 'cl' and sleep until the reply arrives.
 *
 * Delivery is either local (cl->cli_enqueue set) or remote over portals
 * via ptl_send_rpc().  The calling thread then sleeps interruptibly until
 * rcvd_reply_callback() fills in req->rq_repbuf and wakes it, or a
 * pending SIGKILL/SIGINT breaks the wait, in which case the RPC is
 * aborted and -EINTR returned.
 *
 * Returns 0 on success with the unpacked reply in req->rq_rephdr/rq_rep.
 */
int ptlrpc_queue_wait(struct ptlrpc_request *req, struct ptlrpc_client *cl)
{
        int rc;
        DECLARE_WAITQUEUE(wait, current);

        /* must be initialized before the send: the reply callback may
         * fire (and wake us) before we even reach the sleep loop */
        init_waitqueue_head(&req->rq_wait_for_rep);

        if (cl->cli_enqueue) {
                /* Local delivery */
                ENTRY;
                rc = cl->cli_enqueue(req);
        } else {
                /* Remote delivery via portals. */
                req->rq_req_portal = cl->cli_request_portal;
                req->rq_reply_portal = cl->cli_reply_portal;
                rc = ptl_send_rpc(req, &cl->cli_server);
        }
        if (rc) {
                CERROR("error %d, opcode %d\n", rc,
                       req->rq_reqhdr->opc);
                /* NOTE(review): ptl_send_rpc() can return negative errno
                 * (-EINVAL, -ENOMEM), which negating here turns positive —
                 * confirm the intended sign convention. */
                return -rc;
        }

        CDEBUG(0, "-- sleeping\n");
        add_wait_queue(&req->rq_wait_for_rep, &wait);
        /* rq_repbuf is set by rcvd_reply_callback() when the reply lands */
        while (req->rq_repbuf == NULL) {
                set_current_state(TASK_INTERRUPTIBLE);

                /* if this process really wants to die, let it go */
                if (sigismember(&(current->pending.signal), SIGKILL) ||
                    sigismember(&(current->pending.signal), SIGINT))
                        break;

                schedule();
        }
        remove_wait_queue(&req->rq_wait_for_rep, &wait);
        set_current_state(TASK_RUNNING);
        CDEBUG(0, "-- done\n");

        if (req->rq_repbuf == NULL) {
                /* We broke out because of a signal.  Clean up the dangling
                 * reply buffers! */
                ptl_abort_rpc(req);
                EXIT;
                return -EINTR;
        }

        rc = cl->cli_rep_unpack(req->rq_repbuf, req->rq_replen, &req->rq_rephdr,
                                &req->rq_rep);
        if (rc) {
                CERROR("unpack_rep failed: %d\n", rc);
                return rc;
        }
        CERROR("got rep %lld\n", req->rq_rephdr->seqno);
        if ( req->rq_rephdr->status == 0 )
                CDEBUG(0, "--> buf %p len %d status %d\n",
                       req->rq_repbuf, req->rq_replen,
                       req->rq_rephdr->status);

        EXIT;
        return 0;
}
152 /*
153  *  Free the packet when it has gone out
154  */
155 static int sent_packet_callback(ptl_event_t *ev, void *data)
156 {
157         ENTRY;
158
159         if (ev->type == PTL_EVENT_SENT) {
160                 OBD_FREE(ev->mem_desc.start, ev->mem_desc.length);
161         } else { 
162                 // XXX make sure we understand all events, including ACK's
163                 CERROR("Unknown event %d\n", ev->type); 
164                 BUG();
165         }
166
167         EXIT;
168         return 1;
169 }
170
171 /*
172  * Wake up the thread waiting for the reply once it comes in.
173  */
174 static int rcvd_reply_callback(ptl_event_t *ev, void *data)
175 {
176         struct ptlrpc_request *rpc = ev->mem_desc.user_ptr;
177         ENTRY;
178
179         if (ev->type == PTL_EVENT_PUT) {
180                 rpc->rq_repbuf = ev->mem_desc.start + ev->offset;
181                 barrier();
182                 wake_up_interruptible(&rpc->rq_wait_for_rep);
183         } else { 
184                 // XXX make sure we understand all events, including ACK's
185                 CERROR("Unknown event %d\n", ev->type); 
186                 BUG();
187         }
188
189         EXIT;
190         return 1;
191 }
192
/*
 * Event queue callback for incoming server requests.  Bumps the ref
 * count on the active ring MD, unlinks the active ME when its buffer is
 * nearly full (so ptl_received_rpc() can later recycle it), and wakes
 * the service thread on a PUT event.
 */
static int server_request_callback(ptl_event_t *ev, void *data)
{
        struct ptlrpc_service *service = data;
        int rc;

        /* requested length vs. length actually deposited in the MD */
        if (ev->rlength != ev->mlength)
                CERROR("Warning: Possibly truncated rpc (%d/%d)\n",
                       ev->mlength, ev->rlength);

        /* The ME is unlinked when there is less than 1024 bytes free
         * on its MD.  This ensures we are always able to handle the rpc,
         * although the 1024 value is a guess as to the size of a
         * large rpc (the known safe margin should be determined).
         *
         * NOTE: The portals API by default unlinks all MD's associated
         *       with an ME when it's unlinked.  For now, this behavior
         *       has been commented out of the portals library so the
         *       MD can be unlinked when its ref count drops to zero.
         *       A new MD and ME will then be created that use the same
         *       kmalloc()'ed memory and inserted at the ring tail.
         */

        /* one more rpc outstanding in the active buffer; dropped again
         * in ptl_received_rpc() once the service thread is done */
        service->srv_ref_count[service->srv_md_active]++;

        if (ev->offset >= (service->srv_buf_size - 1024)) {
                CDEBUG(D_INODE, "Unlinking ME %d\n", service->srv_me_active);

                rc = PtlMEUnlink(service->srv_me_h[service->srv_me_active]);
                /* handle value 0 marks the slot as "unlinked" for
                 * ptl_received_rpc() */
                service->srv_me_h[service->srv_me_active] = 0;

                if (rc != PTL_OK) {
                        CERROR("PtlMEUnlink failed - DROPPING soon: %d\n", rc);
                        return rc;
                }

                service->srv_me_active = NEXT_INDEX(service->srv_me_active,
                        service->srv_ring_length);

                /* the whole ring being unlinked means we can no longer
                 * receive anything */
                if (service->srv_me_h[service->srv_me_active] == 0)
                        CERROR("All %d ring ME's are unlinked!\n",
                               service->srv_ring_length);
        }

        if (ev->type == PTL_EVENT_PUT) {
                wake_up(service->srv_wait_queue);
        } else {
                CERROR("Unexpected event type: %d\n", ev->type);
        }

        return 0;
}
244
245 static int bulk_source_callback(ptl_event_t *ev, void *data)
246 {
247         struct ptlrpc_request *rpc = ev->mem_desc.user_ptr;
248
249         ENTRY;
250
251         if (ev->type == PTL_EVENT_SENT) {
252                 CDEBUG(D_NET, "got SENT event\n");
253         } else if (ev->type == PTL_EVENT_ACK) {
254                 CDEBUG(D_NET, "got ACK event\n");
255                 rpc->rq_bulkbuf = NULL;
256                 wake_up_interruptible(&rpc->rq_wait_for_bulk);
257         } else {
258                 CERROR("Unexpected event type!\n");
259                 BUG();
260         }
261
262         EXIT;
263         return 1;
264 }
265
266 static int bulk_sink_callback(ptl_event_t *ev, void *data)
267 {
268         struct ptlrpc_request *rpc = ev->mem_desc.user_ptr;
269
270         ENTRY;
271
272         if (ev->type == PTL_EVENT_PUT) {
273                 if (rpc->rq_bulkbuf != ev->mem_desc.start + ev->offset)
274                         CERROR("bulkbuf != mem_desc -- why?\n");
275                 //wake_up_interruptible(&rpc->rq_wait_for_bulk);
276         } else {
277                 CERROR("Unexpected event type!\n");
278                 BUG();
279         }
280
281         EXIT;
282         return 1;
283 }
284
285 int ptl_send_buf(struct ptlrpc_request *request, struct lustre_peer *peer,
286                  int portal)
287 {
288         int rc;
289         ptl_process_id_t remote_id;
290         ptl_handle_md_t md_h;
291         ptl_ack_req_t ack;
292
293         switch (request->rq_type) {
294         case PTLRPC_BULK:
295                 request->rq_req_md.start = request->rq_bulkbuf;
296                 request->rq_req_md.length = request->rq_bulklen;
297                 request->rq_req_md.eventq = bulk_source_eq;
298                 request->rq_req_md.threshold = 2; /* SENT and ACK events */
299                 ack = PTL_ACK_REQ;
300                 break;
301         case PTLRPC_REQUEST:
302                 request->rq_req_md.start = request->rq_reqbuf;
303                 request->rq_req_md.length = request->rq_reqlen;
304                 request->rq_req_md.eventq = sent_pkt_eq;
305                 request->rq_req_md.threshold = 1;
306                 ack = PTL_NOACK_REQ;
307                 break;
308         case PTLRPC_REPLY:
309                 request->rq_req_md.start = request->rq_repbuf;
310                 request->rq_req_md.length = request->rq_replen;
311                 request->rq_req_md.eventq = sent_pkt_eq;
312                 request->rq_req_md.threshold = 1;
313                 ack = PTL_NOACK_REQ;
314                 break;
315         default:
316                 BUG();
317         }
318         request->rq_req_md.options = PTL_MD_OP_PUT;
319         request->rq_req_md.user_ptr = request;
320
321         rc = PtlMDBind(peer->peer_ni, request->rq_req_md, &md_h);
322         if (rc != 0) {
323                 BUG();
324                 CERROR("PtlMDBind failed: %d\n", rc);
325                 return rc;
326         }
327
328         remote_id.addr_kind = PTL_ADDR_NID;
329         remote_id.nid = peer->peer_nid;
330         remote_id.pid = 0;
331
332         CERROR("Sending %d bytes to portal %d, xid %d\n",
333                request->rq_req_md.length, portal, request->rq_xid);
334
335         rc = PtlPut(md_h, ack, remote_id, portal, 0, request->rq_xid, 0, 0);
336         if (rc != PTL_OK) {
337                 BUG();
338                 CERROR("PtlPut(%d, %d, %d) failed: %d\n", remote_id.nid,
339                        portal, request->rq_xid, rc);
340                 /* FIXME: tear down md */
341         }
342
343         return rc;
344 }
345
/*
 * Post the portals buffers for an RPC round trip, then send the request.
 *
 * Attaches an ME/MD pair matching this request's xid on the client's
 * reply portal (and, when rq_bulklen != 0, a second pair on the bulk
 * portal), then hands the packed request to ptl_send_buf().
 *
 * Returns the ptl_send_buf() result on success of the setup, a portals
 * error code or negative errno otherwise.  On setup failure the goto
 * ladder unwinds every resource acquired so far, in reverse order.
 *
 * NOTE(review): if ptl_send_buf() itself fails, the reply/bulk ME+MDs
 * and 'repbuf' are left posted — confirm the caller's abort path
 * (ptl_abort_rpc) is always reached in that case.
 */
int ptl_send_rpc(struct ptlrpc_request *request, struct lustre_peer *peer)
{
        ptl_process_id_t local_id;
        int rc;
        char *repbuf;

        ENTRY;

        /* the reply MD needs a size; a zero replen is a caller bug */
        if (request->rq_replen == 0) {
                CERROR("request->rq_replen is 0!\n");
                EXIT;
                return -EINVAL;
        }

        /* request->rq_repbuf is set only when the reply comes in, in
         * client_packet_callback() */
        OBD_ALLOC(repbuf, request->rq_replen);
        if (!repbuf) {
                EXIT;
                return -ENOMEM;
        }

        /* accept the reply from any sender */
        local_id.addr_kind = PTL_ADDR_GID;
        local_id.gid = PTL_ID_ANY;
        local_id.rid = PTL_ID_ANY;

        CERROR("sending req %d\n", request->rq_xid);
        /* match bits = xid: pairs with the PtlPut in ptl_send_buf() on
         * the peer's side */
        rc = PtlMEAttach(peer->peer_ni, request->rq_reply_portal, local_id,
                         request->rq_xid, 0, PTL_UNLINK,
                         &request->rq_reply_me_h);
        if (rc != PTL_OK) {
                CERROR("PtlMEAttach failed: %d\n", rc);
                BUG();
                EXIT;
                goto cleanup;
        }

        request->rq_type = PTLRPC_REQUEST;
        request->rq_reply_md.start = repbuf;
        request->rq_reply_md.length = request->rq_replen;
        request->rq_reply_md.threshold = 1;
        request->rq_reply_md.options = PTL_MD_OP_PUT;
        request->rq_reply_md.user_ptr = request;
        request->rq_reply_md.eventq = rcvd_rep_eq;

        rc = PtlMDAttach(request->rq_reply_me_h, request->rq_reply_md,
                         PTL_UNLINK, &request->rq_reply_md_h);
        if (rc != PTL_OK) {
                CERROR("PtlMDAttach failed: %d\n", rc);
                BUG();
                EXIT;
                goto cleanup2;
        }

        /* a nonzero bulk length means this rpc also expects bulk data */
        if (request->rq_bulklen != 0) {
                rc = PtlMEAttach(peer->peer_ni, request->rq_bulk_portal,
                                 local_id, request->rq_xid, 0, PTL_UNLINK,
                                 &request->rq_bulk_me_h);
                if (rc != PTL_OK) {
                        CERROR("PtlMEAttach failed: %d\n", rc);
                        BUG();
                        EXIT;
                        goto cleanup3;
                }

                request->rq_bulk_md.start = request->rq_bulkbuf;
                request->rq_bulk_md.length = request->rq_bulklen;
                request->rq_bulk_md.threshold = 1;
                request->rq_bulk_md.options = PTL_MD_OP_PUT;
                request->rq_bulk_md.user_ptr = request;
                request->rq_bulk_md.eventq = bulk_sink_eq;

                rc = PtlMDAttach(request->rq_bulk_me_h,
                                 request->rq_bulk_md, PTL_UNLINK,
                                 &request->rq_bulk_md_h);
                if (rc != PTL_OK) {
                        CERROR("PtlMDAttach failed: %d\n", rc);
                        BUG();
                        EXIT;
                        goto cleanup4;
                }
        }

        return ptl_send_buf(request, peer, request->rq_req_portal);

        /* error unwind: newest resource first */
 cleanup4:
        PtlMEUnlink(request->rq_bulk_me_h);
 cleanup3:
        PtlMDUnlink(request->rq_reply_md_h);
 cleanup2:
        PtlMEUnlink(request->rq_reply_me_h);
 cleanup:
        OBD_FREE(repbuf, request->rq_replen);

        return rc;
}
442
/* ptl_received_rpc() should be called by the sleeping process once
 * it finishes processing an event.  This ensures the ref count is
 * decremented and that the rpc ring buffer cycles properly.
 */
int ptl_received_rpc(struct ptlrpc_service *service) {
        int rc, index;

        index = service->srv_md_active;
        CDEBUG(D_INFO, "MD index=%d Ref Count=%d\n", index,
               service->srv_ref_count[index]);
        /* balances the ++ done in server_request_callback() */
        service->srv_ref_count[index]--;

        /* recycle the slot only once all rpcs in the buffer are handled
         * (ref count drained) AND the callback has unlinked its ME
         * (handle cleared to 0) */
        if ((service->srv_ref_count[index] <= 0) &&
            (service->srv_me_h[index] == 0)) {

                /* Replace the unlinked ME and MD */
                rc = PtlMEInsert(service->srv_me_h[service->srv_me_tail],
                                 service->srv_id, 0, ~0, PTL_RETAIN,
                                 PTL_INS_AFTER, &(service->srv_me_h[index]));
                CDEBUG(D_INFO, "Inserting new ME and MD in ring, rc %d\n", rc);
                service->srv_me_tail = index;
                service->srv_ref_count[index] = 0;

                if (rc != PTL_OK) {
                        CERROR("PtlMEInsert failed: %d\n", rc);
                        return rc;
                }

                /* rebuild the MD over the same kmalloc()'ed buffer */
                service->srv_md[index].start        = service->srv_buf[index];
                service->srv_md[index].length       = service->srv_buf_size;
                service->srv_md[index].threshold    = PTL_MD_THRESH_INF;
                service->srv_md[index].options      = PTL_MD_OP_PUT;
                service->srv_md[index].user_ptr     = service;
                service->srv_md[index].eventq       = service->srv_eq_h;

                rc = PtlMDAttach(service->srv_me_h[index],
                                 service->srv_md[index],
                                 PTL_RETAIN, &(service->srv_md_h[index]));

                CDEBUG(D_INFO, "Attach MD in ring, rc %d\n", rc);
                if (rc != PTL_OK) {
                        /* XXX cleanup */
                        BUG();
                        CERROR("PtlMDAttach failed: %d\n", rc);
                        return rc;
                }

                /* advance to the next buffer in the ring */
                service->srv_md_active =
                        NEXT_INDEX(index, service->srv_ring_length);
        }

        return 0;
}
496
/*
 * Set up the receive side of a service: one event queue plus a ring of
 * RPC_RING_LENGTH ME/MD pairs, each backed by a srv_buf_size buffer,
 * attached on the service's portal.  'uuid' names the peer whose network
 * interface the resources are created on.
 *
 * Returns 0 on success, -EINVAL for a bad uuid, or a portals/errno code.
 *
 * NOTE(review): the error paths return without unwinding — a failure
 * midway leaks the event queue, earlier MEs/MDs, and any srv_buf[]
 * already allocated.  Confirm whether callers invoke
 * rpc_unregister_service() on failure (it assumes a fully built ring).
 */
int rpc_register_service(struct ptlrpc_service *service, char *uuid)
{
        struct lustre_peer peer;
        int rc, i;

        rc = kportal_uuid_to_peer(uuid, &peer);
        if (rc != 0) {
                CERROR("Invalid uuid \"%s\"\n", uuid);
                return -EINVAL;
        }

        service->srv_ring_length = RPC_RING_LENGTH;
        service->srv_me_active = 0;
        service->srv_md_active = 0;

        /* accept requests from any sender */
        service->srv_id.addr_kind = PTL_ADDR_GID;
        service->srv_id.gid = PTL_ID_ANY;
        service->srv_id.rid = PTL_ID_ANY;

        rc = PtlEQAlloc(peer.peer_ni, 128, server_request_callback,
                        service, &(service->srv_eq_h));

        if (rc != PTL_OK) {
                CERROR("PtlEQAlloc failed: %d\n", rc);
                return rc;
        }

        /* Attach the leading ME on which we build the ring */
        rc = PtlMEAttach(peer.peer_ni, service->srv_portal,
                         service->srv_id, 0, ~0, PTL_RETAIN,
                         &(service->srv_me_h[0]));

        if (rc != PTL_OK) {
                CERROR("PtlMEAttach failed: %d\n", rc);
                return rc;
        }

        for (i = 0; i < service->srv_ring_length; i++) {
                OBD_ALLOC(service->srv_buf[i], service->srv_buf_size);

                if (service->srv_buf[i] == NULL) {
                        CERROR("no memory\n");
                        return -ENOMEM;
                }

                /* Insert additional ME's to the ring */
                if (i > 0) {
                        rc = PtlMEInsert(service->srv_me_h[i-1],
                                         service->srv_id, 0, ~0, PTL_RETAIN,
                                         PTL_INS_AFTER,&(service->srv_me_h[i]));
                        service->srv_me_tail = i;

                        if (rc != PTL_OK) {
                                CERROR("PtlMEInsert failed: %d\n", rc);
                                return rc;
                        }
                }

                service->srv_ref_count[i] = 0;
                service->srv_md[i].start        = service->srv_buf[i];
                service->srv_md[i].length        = service->srv_buf_size;
                service->srv_md[i].threshold        = PTL_MD_THRESH_INF;
                service->srv_md[i].options        = PTL_MD_OP_PUT;
                service->srv_md[i].user_ptr        = service;
                service->srv_md[i].eventq        = service->srv_eq_h;

                rc = PtlMDAttach(service->srv_me_h[i], service->srv_md[i],
                                 PTL_RETAIN, &(service->srv_md_h[i]));

                if (rc != PTL_OK) {
                        /* cleanup */
                        CERROR("PtlMDAttach failed: %d\n", rc);
                        return rc;
                }
        }

        return 0;
}
575
576 int rpc_unregister_service(struct ptlrpc_service *service)
577 {
578         int rc, i;
579
580         for (i = 0; i < service->srv_ring_length; i++) {
581                 rc = PtlMDUnlink(service->srv_md_h[i]);
582                 if (rc)
583                         CERROR("PtlMDUnlink failed: %d\n", rc);
584         
585                 rc = PtlMEUnlink(service->srv_me_h[i]);
586                 if (rc)
587                         CERROR("PtlMEUnlink failed: %d\n", rc);
588         
589                 OBD_FREE(service->srv_buf[i], service->srv_buf_size);                
590         }
591
592         rc = PtlEQFree(service->srv_eq_h);
593         if (rc)
594                 CERROR("PtlEQFree failed: %d\n", rc);
595
596         return 0;
597 }
598
599 static int req_init_portals(void)
600 {
601         int rc;
602         const ptl_handle_ni_t *nip;
603         ptl_handle_ni_t ni;
604
605         nip = inter_module_get_request(LUSTRE_NAL "_ni", LUSTRE_NAL);
606         if (nip == NULL) {
607                 CERROR("get_ni failed: is the NAL module loaded?\n");
608                 return -EIO;
609         }
610         ni = *nip;
611
612         rc = PtlEQAlloc(ni, 128, sent_packet_callback, NULL, &sent_pkt_eq);
613         if (rc != PTL_OK)
614                 CERROR("PtlEQAlloc failed: %d\n", rc);
615
616         rc = PtlEQAlloc(ni, 128, rcvd_reply_callback, NULL, &rcvd_rep_eq);
617         if (rc != PTL_OK)
618                 CERROR("PtlEQAlloc failed: %d\n", rc);
619
620         rc = PtlEQAlloc(ni, 128, bulk_source_callback, NULL, &bulk_source_eq);
621         if (rc != PTL_OK)
622                 CERROR("PtlEQAlloc failed: %d\n", rc);
623
624         rc = PtlEQAlloc(ni, 128, bulk_sink_callback, NULL, &bulk_sink_eq);
625         if (rc != PTL_OK)
626                 CERROR("PtlEQAlloc failed: %d\n", rc);
627
628         return rc;
629 }
630
/* Module entry point: set up the portals event queues. */
static int __init ptlrpc_init(void)
{
        return req_init_portals();
}
635
636 static void __exit ptlrpc_exit(void)
637 {
638         PtlEQFree(sent_pkt_eq);
639         PtlEQFree(rcvd_rep_eq);
640         PtlEQFree(bulk_source_eq);
641         PtlEQFree(bulk_sink_eq);
642
643         inter_module_put(LUSTRE_NAL "_ni");
644
645         return;
646 }
647
/* Module metadata and entry/exit point registration. */
MODULE_AUTHOR("Peter J. Braam <braam@clusterfs.com>");
MODULE_DESCRIPTION("Lustre Request Processor v1.0");
MODULE_LICENSE("GPL");

module_init(ptlrpc_init);
module_exit(ptlrpc_exit);