/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 *  Copyright (C) 2002 Cluster File Systems, Inc.
 *
 *   This file is part of Lustre, http://www.lustre.org.
 *
 *   Lustre is free software; you can redistribute it and/or
 *   modify it under the terms of version 2 of the GNU General Public
 *   License as published by the Free Software Foundation.
 *
 *   Lustre is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with Lustre; if not, write to the Free Software
 *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 */

#define EXPORT_SYMTAB

#include <linux/config.h>
#include <linux/module.h>
#include <linux/kernel.h>

#define DEBUG_SUBSYSTEM S_RPC

#include <linux/obd_support.h>
#include <linux/lustre_net.h>

static ptl_handle_eq_t req_eq, bulk_source_eq, bulk_sink_eq;

/*
 * 1. Free the request buffer after it has gone out on the wire.
 * 2. Wake up the thread waiting for the reply once it comes in.
 */
static int client_packet_callback(ptl_event_t *ev, void *data)
{
        struct ptlrpc_request *rpc = ev->mem_desc.user_ptr;

        ENTRY;
        /* XXX make sure we understand all events, including ACKs */

        if (ev->type == PTL_EVENT_SENT) {
                OBD_FREE(ev->mem_desc.start, ev->mem_desc.length);
        } else if (ev->type == PTL_EVENT_PUT) {
                rpc->rq_repbuf = ev->mem_desc.start + ev->offset;
                wake_up_interruptible(&rpc->rq_wait_for_rep);
        }

        EXIT;
        return 1;
}

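/*
 * Server-side event callback: count the incoming request against the
 * active MD, unlink the active ME once its buffer is nearly full so it
 * can be recycled, and wake the service thread when a request arrives.
 */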
static int server_request_callback(ptl_event_t *ev, void *data)
{
        struct ptlrpc_service *service = data;
        int rc;

        if (ev->rlength != ev->mlength)
                CERROR("Warning: possibly truncated rpc (%d/%d)\n",
                       ev->mlength, ev->rlength);

        /* The ME is unlinked when there are fewer than 1024 bytes free
         * on its MD.  This ensures we are always able to handle the rpc,
         * although the 1024 value is a guess as to the size of a
         * large rpc (the known safe margin should be determined).
         *
         * NOTE: The portals API by default unlinks all MDs associated
         *       with an ME when it's unlinked.  For now, this behavior
         *       has been commented out of the portals library so the
         *       MD can be unlinked when its ref count drops to zero.
         *       A new MD and ME will then be created that use the same
         *       kmalloc()'ed memory and inserted at the ring tail.
         */

        service->srv_ref_count[service->srv_md_active]++;

        if (ev->offset >= (service->srv_buf_size - 1024)) {
                CDEBUG(D_INODE, "Unlinking ME %d\n", service->srv_me_active);

                rc = PtlMEUnlink(service->srv_me_h[service->srv_me_active]);
                service->srv_me_h[service->srv_me_active] = 0;

                if (rc != PTL_OK) {
                        CERROR("PtlMEUnlink failed - DROPPING soon: %d\n", rc);
                        return rc;
                }

                service->srv_me_active = NEXT_INDEX(service->srv_me_active,
                        service->srv_ring_length);

                if (service->srv_me_h[service->srv_me_active] == 0)
                        CERROR("All %d ring MEs are unlinked!\n",
                               service->srv_ring_length);
        }

        if (ev->type == PTL_EVENT_PUT) {
                wake_up(service->srv_wait_queue);
        } else {
                CERROR("Unexpected event type: %d\n", ev->type);
        }

        return 0;
}

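/*
 * Bulk source (sender) event callback: wake the waiter once the remote
 * end has acknowledged receipt of the bulk data.
 */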
static int bulk_source_callback(ptl_event_t *ev, void *data)
{
        struct ptlrpc_request *rpc = ev->mem_desc.user_ptr;

        ENTRY;

        if (ev->type == PTL_EVENT_SENT) {
                /* nothing to do on SENT; we wait for the ACK */
                ;
        } else if (ev->type == PTL_EVENT_ACK) {
                wake_up_interruptible(&rpc->rq_wait_for_bulk);
        } else {
                CERROR("Unexpected event type!\n");
        }

        EXIT;
        return 1;
}

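/*
 * Bulk sink (receiver) event callback: wake the waiter once the bulk
 * data has been deposited in rq_bulkbuf.
 */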
static int bulk_sink_callback(ptl_event_t *ev, void *data)
{
        struct ptlrpc_request *rpc = ev->mem_desc.user_ptr;

        ENTRY;

        if (ev->type == PTL_EVENT_PUT) {
                if (rpc->rq_bulkbuf != ev->mem_desc.start + ev->offset)
                        CERROR("bulkbuf != mem_desc -- why?\n");
                wake_up_interruptible(&rpc->rq_wait_for_bulk);
        } else {
                CERROR("Unexpected event type!\n");
        }

        EXIT;
        return 1;
}

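/*
 * Bind an MD describing the outgoing buffer (bulk data, request, or
 * reply) and PtlPut it to the given portal on the remote peer.  An ACK
 * is requested only for bulk transfers.
 */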
int ptl_send_buf(struct ptlrpc_request *request, struct lustre_peer *peer,
                 int portal, int is_request)
{
        int rc;
        ptl_process_id_t remote_id;
        ptl_handle_md_t md_h;

        /* FIXME: This is bad. */
        if (request->rq_bulklen) {
                request->rq_req_md.start = request->rq_bulkbuf;
                request->rq_req_md.length = request->rq_bulklen;
                request->rq_req_md.eventq = bulk_source_eq;
        } else if (is_request) {
                request->rq_req_md.start = request->rq_reqbuf;
                request->rq_req_md.length = request->rq_reqlen;
                request->rq_req_md.eventq = req_eq;
        } else {
                request->rq_req_md.start = request->rq_repbuf;
                request->rq_req_md.length = request->rq_replen;
                request->rq_req_md.eventq = req_eq;
        }
        request->rq_req_md.threshold = 1;
        request->rq_req_md.options = PTL_MD_OP_PUT;
        request->rq_req_md.user_ptr = request;

        rc = PtlMDBind(peer->peer_ni, request->rq_req_md, &md_h);
        if (rc != PTL_OK) {
                CERROR("PtlMDBind failed: %d\n", rc);
                return rc;
        }

        remote_id.addr_kind = PTL_ADDR_NID;
        remote_id.nid = peer->peer_nid;
        remote_id.pid = 0;

        if (request->rq_bulklen) {
                rc = PtlPut(md_h, PTL_ACK_REQ, remote_id, portal, 0,
                            request->rq_xid, 0, 0);
        } else {
                rc = PtlPut(md_h, PTL_NOACK_REQ, remote_id, portal, 0,
                            request->rq_xid, 0, 0);
        }
        if (rc != PTL_OK) {
                CERROR("PtlPut failed: %d\n", rc);
                /* FIXME: tear down md */
        }

        return rc;
}

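/*
 * Send a request and arrange to receive its reply: allocate the reply
 * buffer, attach an ME/MD matched on rq_xid at the reply portal (and,
 * if a bulk transfer is pending, at the bulk portal), then put the
 * request itself on the wire via ptl_send_buf().
 */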
int ptl_send_rpc(struct ptlrpc_request *request, struct lustre_peer *peer)
{
        ptl_handle_me_t me_h, bulk_me_h;
        ptl_process_id_t local_id;
        int rc;

        ENTRY;

        if (request->rq_replen == 0) {
                CERROR("request->rq_replen is 0!\n");
                EXIT;
                return -EINVAL;
        }

        OBD_ALLOC(request->rq_repbuf, request->rq_replen);
        if (!request->rq_repbuf) {
                EXIT;
                return -ENOMEM;
        }

        local_id.addr_kind = PTL_ADDR_GID;
        local_id.gid = PTL_ID_ANY;
        local_id.rid = PTL_ID_ANY;

        rc = PtlMEAttach(peer->peer_ni, request->rq_reply_portal, local_id,
                         request->rq_xid, 0, PTL_UNLINK, &me_h);
        if (rc != PTL_OK) {
                EXIT;
                /* FIXME: tear down EQ, free reqbuf */
                return rc;
        }

        request->rq_reply_md.start = request->rq_repbuf;
        request->rq_reply_md.length = request->rq_replen;
        request->rq_reply_md.threshold = 1;
        request->rq_reply_md.options = PTL_MD_OP_PUT;
        request->rq_reply_md.user_ptr = request;
        request->rq_reply_md.eventq = req_eq;

        rc = PtlMDAttach(me_h, request->rq_reply_md, PTL_UNLINK,
                         &request->rq_reply_md_h);
        if (rc != PTL_OK) {
                EXIT;
                /* FIXME: unlink me_h, free repbuf */
                return rc;
        }

        if (request->rq_bulklen != 0) {
                rc = PtlMEAttach(peer->peer_ni, request->rq_bulk_portal,
                                 local_id, request->rq_xid, 0, PTL_UNLINK,
                                 &bulk_me_h);
                if (rc != PTL_OK) {
                        EXIT;
                        /* FIXME: tear down reply ME/MD, free repbuf */
                        return rc;
                }

                request->rq_bulk_md.start = request->rq_bulkbuf;
                request->rq_bulk_md.length = request->rq_bulklen;
                request->rq_bulk_md.threshold = 1;
                request->rq_bulk_md.options = PTL_MD_OP_PUT;
                request->rq_bulk_md.user_ptr = request;
                request->rq_bulk_md.eventq = bulk_sink_eq;

                rc = PtlMDAttach(bulk_me_h, request->rq_bulk_md, PTL_UNLINK,
                                 &request->rq_bulk_md_h);
                if (rc != PTL_OK) {
                        EXIT;
                        /* FIXME: tear down bulk and reply ME/MDs, free
                         * repbuf */
                        return rc;
                }
        }

        return ptl_send_buf(request, peer, request->rq_req_portal, 1);
}

/* ptl_received_rpc() should be called by the sleeping process once
 * it finishes processing an event.  This ensures the ref count is
 * decremented and that the rpc ring buffer cycles properly.
 */
int ptl_received_rpc(struct ptlrpc_service *service)
{
        int rc, index;

        index = service->srv_md_active;
        CDEBUG(D_INFO, "MD index=%d Ref Count=%d\n", index,
               service->srv_ref_count[index]);
        service->srv_ref_count[index]--;

        if ((service->srv_ref_count[index] <= 0) &&
            (service->srv_me_h[index] == 0)) {

                /* Replace the unlinked ME and MD */
                rc = PtlMEInsert(service->srv_me_h[service->srv_me_tail],
                        service->srv_id, 0, ~0, PTL_RETAIN,
                        PTL_INS_AFTER, &(service->srv_me_h[index]));
                CDEBUG(D_INFO, "Inserting new ME and MD in ring, rc %d\n", rc);
                service->srv_me_tail = index;
                service->srv_ref_count[index] = 0;

                if (rc != PTL_OK) {
                        CERROR("PtlMEInsert failed: %d\n", rc);
                        return rc;
                }

                service->srv_md[index].start        = service->srv_buf[index];
                service->srv_md[index].length       = service->srv_buf_size;
                service->srv_md[index].threshold    = PTL_MD_THRESH_INF;
                service->srv_md[index].options      = PTL_MD_OP_PUT;
                service->srv_md[index].user_ptr     = service;
                service->srv_md[index].eventq       = service->srv_eq_h;

                rc = PtlMDAttach(service->srv_me_h[index],
                        service->srv_md[index],
                        PTL_RETAIN, &(service->srv_md_h[index]));

                CDEBUG(D_INFO, "Attach MD in ring, rc %d\n", rc);
                if (rc != PTL_OK) {
                        /* cleanup */
                        CERROR("PtlMDAttach failed: %d\n", rc);
                        return rc;
                }

                service->srv_md_active = NEXT_INDEX(index,
                        service->srv_ring_length);
        }

        return 0;
}

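/*
 * Set up a service's request ring: allocate its event queue, then
 * build a ring of srv_ring_length MEs, each with an MD pointing at a
 * freshly allocated request buffer.
 */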
int rpc_register_service(struct ptlrpc_service *service, char *uuid)
{
        struct lustre_peer peer;
        int rc, i;

        rc = kportal_uuid_to_peer(uuid, &peer);
        if (rc != 0) {
                CERROR("Invalid uuid \"%s\"\n", uuid);
                return -EINVAL;
        }

        service->srv_ring_length = RPC_RING_LENGTH;
        service->srv_me_active = 0;
        service->srv_md_active = 0;

        service->srv_id.addr_kind = PTL_ADDR_GID;
        service->srv_id.gid = PTL_ID_ANY;
        service->srv_id.rid = PTL_ID_ANY;

        rc = PtlEQAlloc(peer.peer_ni, 128, server_request_callback,
                service, &(service->srv_eq_h));
        if (rc != PTL_OK) {
                CERROR("PtlEQAlloc failed: %d\n", rc);
                return rc;
        }

        /* Attach the leading ME on which we build the ring */
        rc = PtlMEAttach(peer.peer_ni, service->srv_portal,
                service->srv_id, 0, ~0, PTL_RETAIN,
                &(service->srv_me_h[0]));
        if (rc != PTL_OK) {
                CERROR("PtlMEAttach failed: %d\n", rc);
                return rc;
        }

        for (i = 0; i < service->srv_ring_length; i++) {
                OBD_ALLOC(service->srv_buf[i], service->srv_buf_size);
                if (service->srv_buf[i] == NULL) {
                        CERROR("no memory\n");
                        return -ENOMEM;
                }

                /* Insert additional MEs into the ring */
                if (i > 0) {
                        rc = PtlMEInsert(service->srv_me_h[i-1],
                                service->srv_id, 0, ~0, PTL_RETAIN,
                                PTL_INS_AFTER, &(service->srv_me_h[i]));
                        service->srv_me_tail = i;

                        if (rc != PTL_OK) {
                                CERROR("PtlMEInsert failed: %d\n", rc);
                                return rc;
                        }
                }

                service->srv_ref_count[i] = 0;
                service->srv_md[i].start        = service->srv_buf[i];
                service->srv_md[i].length       = service->srv_buf_size;
                service->srv_md[i].threshold    = PTL_MD_THRESH_INF;
                service->srv_md[i].options      = PTL_MD_OP_PUT;
                service->srv_md[i].user_ptr     = service;
                service->srv_md[i].eventq       = service->srv_eq_h;

                rc = PtlMDAttach(service->srv_me_h[i], service->srv_md[i],
                        PTL_RETAIN, &(service->srv_md_h[i]));
                if (rc != PTL_OK) {
                        /* cleanup */
                        CERROR("PtlMDAttach failed: %d\n", rc);
                        return rc;
                }
        }

        return 0;
}

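/*
 * Tear down a service's request ring: unlink every MD and ME, free the
 * request buffers, and release the event queue.
 */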
int rpc_unregister_service(struct ptlrpc_service *service)
{
        int rc, i;

        for (i = 0; i < service->srv_ring_length; i++) {
                rc = PtlMDUnlink(service->srv_md_h[i]);
                if (rc)
                        CERROR("PtlMDUnlink failed: %d\n", rc);

                rc = PtlMEUnlink(service->srv_me_h[i]);
                if (rc)
                        CERROR("PtlMEUnlink failed: %d\n", rc);

                OBD_FREE(service->srv_buf[i], service->srv_buf_size);
        }

        rc = PtlEQFree(service->srv_eq_h);
        if (rc)
                CERROR("PtlEQFree failed: %d\n", rc);

        return 0;
}

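/*
 * Grab the network interface handle from the NAL module and allocate
 * the three client-side event queues (request, bulk source, bulk sink).
 */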
static int req_init_portals(void)
{
        int rc;
        const ptl_handle_ni_t *nip;
        ptl_handle_ni_t ni;

        nip = inter_module_get_request(LUSTRE_NAL "_ni", LUSTRE_NAL);
        if (nip == NULL) {
                CERROR("get_ni failed: is the NAL module loaded?\n");
                return -EIO;
        }
        ni = *nip;

        rc = PtlEQAlloc(ni, 128, client_packet_callback, NULL, &req_eq);
        if (rc != PTL_OK) {
                CERROR("PtlEQAlloc failed: %d\n", rc);
                return rc;
        }

        rc = PtlEQAlloc(ni, 128, bulk_source_callback, NULL, &bulk_source_eq);
        if (rc != PTL_OK) {
                CERROR("PtlEQAlloc failed: %d\n", rc);
                return rc;
        }

        rc = PtlEQAlloc(ni, 128, bulk_sink_callback, NULL, &bulk_sink_eq);
        if (rc != PTL_OK)
                CERROR("PtlEQAlloc failed: %d\n", rc);

        return rc;
}

static int __init ptlrpc_init(void)
{
        return req_init_portals();
}

static void __exit ptlrpc_exit(void)
{
        PtlEQFree(req_eq);
        PtlEQFree(bulk_source_eq);
        PtlEQFree(bulk_sink_eq);

        inter_module_put(LUSTRE_NAL "_ni");
}

MODULE_AUTHOR("Peter J. Braam <braam@clusterfs.com>");
MODULE_DESCRIPTION("Lustre Request Processor v1.0");
MODULE_LICENSE("GPL");

module_init(ptlrpc_init);
module_exit(ptlrpc_exit);