Whamcloud - gitweb
- Added DEBUG_SUBSYSTEMs
[fs/lustre-release.git] / lustre / ptlrpc / rpc.c
1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2  * vim:expandtab:shiftwidth=8:tabstop=8:
3  *
4  *  Copyright (C) 2002 Cluster File Systems, Inc.
5  *
6  *   This file is part of Lustre, http://www.lustre.org.
7  *
8  *   Lustre is free software; you can redistribute it and/or
9  *   modify it under the terms of version 2 of the GNU General Public
10  *   License as published by the Free Software Foundation.
11  *
12  *   Lustre is distributed in the hope that it will be useful,
13  *   but WITHOUT ANY WARRANTY; without even the implied warranty of
14  *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
15  *   GNU General Public License for more details.
16  *
17  *   You should have received a copy of the GNU General Public License
18  *   along with Lustre; if not, write to the Free Software
19  *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
20  *
21  */
22
23 #define EXPORT_SYMTAB
24
25 #include <linux/config.h>
26 #include <linux/module.h>
27 #include <linux/kernel.h>
28
29 #define DEBUG_SUBSYSTEM S_RPC
30
31 #include <linux/obd_support.h>
32 #include <linux/lustre_net.h>
33
34 static ptl_handle_eq_t req_eq, bulk_source_eq, bulk_sink_eq;
35
36 /*
37  * 1. Free the request buffer after it has gone out on the wire
38  * 2. Wake up the thread waiting for the reply once it comes in.
39  */
40 static int request_callback(ptl_event_t *ev, void *data)
41 {
42         struct ptlrpc_request *rpc = ev->mem_desc.user_ptr;
43
44         ENTRY;
45
46         if (ev->type == PTL_EVENT_SENT) {
47                 OBD_FREE(ev->mem_desc.start, ev->mem_desc.length);
48         } else if (ev->type == PTL_EVENT_PUT) {
49                 rpc->rq_repbuf = ev->mem_desc.start + ev->offset;
50                 wake_up_interruptible(&rpc->rq_wait_for_rep);
51         }
52
53         EXIT;
54         return 1;
55 }
56
/*
 * Event callback for a service's incoming-request event queue.
 *
 * Bumps the ref count on the active receive MD, recycles the active
 * ME/MD ring slot when its buffer is nearly full, and wakes the
 * service thread when a request has been PUT into the buffer.
 *
 * Returns 0 on success, or the PtlMEUnlink error code.
 * NOTE(review): a non-zero return here skips the wake_up -- presumably
 * the service thread then never sees this request; confirm intended.
 */
static int incoming_callback(ptl_event_t *ev, void *data)
{
        struct ptlrpc_service *service = data;
        int rc;

        /* mlength is what actually fit in the MD; rlength is what the
         * sender tried to deliver */
        if (ev->rlength != ev->mlength)
                printk("Warning: Possibly truncated rpc (%d/%d)\n",
                        ev->mlength, ev->rlength);

        /* The ME is unlinked when there is less than 1024 bytes free
         * on its MD.  This ensures we are always able to handle the rpc,
         * although the 1024 value is a guess as to the size of a
         * large rpc (the known safe margin should be determined).
         *
         * NOTE: The portals API by default unlinks all MD's associated
         *       with an ME when it's unlinked.  For now, this behavior
         *       has been commented out of the portals library so the
         *       MD can be unlinked when its ref count drops to zero.
         *       A new MD and ME will then be created that use the same
         *       kmalloc()'ed memory and inserted at the ring tail.
         */

        /* one more request outstanding in the active buffer; dropped
         * again in ptl_received_rpc() once the service has processed it */
        service->srv_ref_count[service->srv_md_active]++;

        if (ev->offset >= (service->srv_buf_size - 1024)) {
                printk("Unlinking ME %d\n", service->srv_me_active);

                rc = PtlMEUnlink(service->srv_me_h[service->srv_me_active]);
                /* 0 marks the slot as unlinked; ptl_received_rpc()
                 * re-creates the ME/MD pair at the ring tail */
                service->srv_me_h[service->srv_me_active] = 0;

                if (rc != PTL_OK) {
                        printk("PtlMEUnlink failed: %d\n", rc);
                        return rc;
                }

                service->srv_me_active = NEXT_INDEX(service->srv_me_active,
                        service->srv_ring_length);

                /* next slot already 0 => the whole ring is consumed and
                 * incoming rpcs have nowhere to land */
                if (service->srv_me_h[service->srv_me_active] == 0)
                        printk("All %d ring ME's are unlinked!\n",
                                service->srv_ring_length);

        }

        if (ev->type == PTL_EVENT_PUT) {
                /* a request arrived: wake the service thread */
                wake_up(service->srv_wait_queue);
        } else {
                printk("Unexpected event type: %d\n", ev->type);
        }

        return 0;
}
109
110 static int bulk_source_callback(ptl_event_t *ev, void *data)
111 {
112         struct ptlrpc_request *rpc = ev->mem_desc.user_ptr;
113
114         ENTRY;
115
116         if (ev->type == PTL_EVENT_SENT) {
117                 ;
118         } else if (ev->type == PTL_EVENT_ACK) {
119                 wake_up_interruptible(&rpc->rq_wait_for_bulk);
120         } else {
121                 printk("Unexpected event type in " __FUNCTION__ "!\n");
122         }
123
124         EXIT;
125         return 1;
126 }
127
128 static int bulk_sink_callback(ptl_event_t *ev, void *data)
129 {
130         struct ptlrpc_request *rpc = ev->mem_desc.user_ptr;
131
132         ENTRY;
133
134         if (ev->type == PTL_EVENT_PUT) {
135                 if (rpc->rq_bulkbuf != ev->mem_desc.start + ev->offset)
136                         printk(__FUNCTION__ ": bulkbuf != mem_desc -- why?\n");
137                 wake_up_interruptible(&rpc->rq_wait_for_bulk);
138         } else {
139                 printk("Unexpected event type in " __FUNCTION__ "!\n");
140         }
141
142         EXIT;
143         return 1;
144 }
145
146 int ptl_send_buf(struct ptlrpc_request *request, struct lustre_peer *peer,
147                  int portal, int is_request)
148 {
149         int rc;
150         ptl_process_id_t remote_id;
151         ptl_handle_md_t md_h;
152
153         /* FIXME: This is bad. */
154         if (request->rq_bulklen) {
155                 request->rq_req_md.start = request->rq_bulkbuf;
156                 request->rq_req_md.length = request->rq_bulklen;
157                 request->rq_req_md.eventq = bulk_source_eq;
158         } else if (is_request) {
159                 request->rq_req_md.start = request->rq_reqbuf;
160                 request->rq_req_md.length = request->rq_reqlen;
161                 request->rq_req_md.eventq = req_eq;
162         } else {
163                 request->rq_req_md.start = request->rq_repbuf;
164                 request->rq_req_md.length = request->rq_replen;
165                 request->rq_req_md.eventq = req_eq;
166         }
167         request->rq_req_md.threshold = 1;
168         request->rq_req_md.options = PTL_MD_OP_PUT;
169         request->rq_req_md.user_ptr = request;
170
171         rc = PtlMDBind(peer->peer_ni, request->rq_req_md, &md_h);
172         if (rc != 0) {
173                 printk(__FUNCTION__ ": PtlMDBind failed: %d\n", rc);
174                 return rc;
175         }
176
177         remote_id.addr_kind = PTL_ADDR_NID;
178         remote_id.nid = peer->peer_nid;
179         remote_id.pid = 0;
180
181         if (request->rq_bulklen) {
182                 rc = PtlPut(md_h, PTL_ACK_REQ, remote_id, portal, 0,
183                             request->rq_xid, 0, 0);
184         } else {
185                 rc = PtlPut(md_h, PTL_NOACK_REQ, remote_id, portal, 0,
186                             request->rq_xid, 0, 0);
187         }
188         if (rc != PTL_OK) {
189                 printk(__FUNCTION__ ": PtlPut failed: %d\n", rc);
190                 /* FIXME: tear down md */
191         }
192
193         return rc;
194 }
195
196 int ptl_send_rpc(struct ptlrpc_request *request, struct lustre_peer *peer)
197 {
198         ptl_handle_me_t me_h, bulk_me_h;
199         ptl_process_id_t local_id;
200         int rc;
201
202         ENTRY;
203
204         if (request->rq_replen == 0) {
205                 printk(__FUNCTION__ ": request->rq_replen is 0!\n");
206                 EXIT;
207                 return -EINVAL;
208         }
209
210         OBD_ALLOC(request->rq_repbuf, request->rq_replen);
211         if (!request->rq_repbuf) { 
212                 EXIT;
213                 return -ENOMEM;
214         }
215
216         local_id.addr_kind = PTL_ADDR_GID;
217         local_id.gid = PTL_ID_ANY;
218         local_id.rid = PTL_ID_ANY;
219
220         rc = PtlMEAttach(peer->peer_ni, request->rq_reply_portal, local_id,
221                          request->rq_xid, 0, PTL_UNLINK, &me_h);
222         if (rc != PTL_OK) {
223                 EXIT;
224                 /* FIXME: tear down EQ, free reqbuf */
225                 return rc;
226         }
227
228         request->rq_reply_md.start = request->rq_repbuf;
229         request->rq_reply_md.length = request->rq_replen;
230         request->rq_reply_md.threshold = 1;
231         request->rq_reply_md.options = PTL_MD_OP_PUT;
232         request->rq_reply_md.user_ptr = request;
233         request->rq_reply_md.eventq = req_eq;
234
235         rc = PtlMDAttach(me_h, request->rq_reply_md, PTL_UNLINK,
236                          &request->rq_reply_md_h);
237         if (rc != PTL_OK) {
238                 EXIT;
239                 return rc;
240         }
241
242         if (request->rq_bulklen != 0) {
243                 rc = PtlMEAttach(peer->peer_ni, request->rq_bulk_portal,
244                                  local_id, request->rq_xid, 0, PTL_UNLINK,
245                                  &bulk_me_h);
246                 if (rc != PTL_OK) {
247                         EXIT;
248                         return rc;
249                 }
250
251                 request->rq_bulk_md.start = request->rq_bulkbuf;
252                 request->rq_bulk_md.length = request->rq_bulklen;
253                 request->rq_bulk_md.threshold = 1;
254                 request->rq_bulk_md.options = PTL_MD_OP_PUT;
255                 request->rq_bulk_md.user_ptr = request;
256                 request->rq_bulk_md.eventq = bulk_sink_eq;
257
258                 rc = PtlMDAttach(bulk_me_h, request->rq_bulk_md, PTL_UNLINK,
259                                  &request->rq_bulk_md_h);
260                 if (rc != PTL_OK) {
261                         EXIT;
262                         return rc;
263                 }
264         }
265
266         return ptl_send_buf(request, peer, request->rq_req_portal, 1);
267 }
268
269 /* ptl_received_rpc() should be called by the sleeping process once
270  * it finishes processing an event.  This ensures the ref count is
271  * decremented and that the rpc ring buffer cycles properly.
272  */ 
273 int ptl_received_rpc(struct ptlrpc_service *service) {
274         int rc, index;
275
276         index = service->srv_md_active;
277         CDEBUG(D_INFO, "MD index=%d Ref Count=%d\n", index,
278                 service->srv_ref_count[index]);
279         service->srv_ref_count[index]--;
280
281         if ((service->srv_ref_count[index] <= 0) &&
282             (service->srv_me_h[index] == 0)) {
283
284                 rc = PtlMDUnlink(service->srv_md_h[index]);
285                 CDEBUG(D_INFO, "Removing MD at index %d, rc %d\n", index, rc);
286
287                 if (rc)
288                         printk(__FUNCTION__ 
289                                ": PtlMDUnlink failed: index %d rc %d\n", 
290                                index, rc);
291
292                 /* Replace the unlinked ME and MD */
293
294                 rc = PtlMEInsert(service->srv_me_h[service->srv_me_tail],
295                         service->srv_id, 0, ~0, PTL_RETAIN,
296                         PTL_INS_AFTER, &(service->srv_me_h[index]));
297                 CDEBUG(D_INFO, "Inserting new ME and MD in ring, rc %d\n", rc);
298                 service->srv_me_tail = index;
299                 service->srv_ref_count[index] = 0;
300                 
301                 if (rc != PTL_OK) {
302                         printk("PtlMEInsert failed: %d\n", rc);
303                         return rc;
304                 }
305
306                 service->srv_md[index].start        = service->srv_buf[index];
307                 service->srv_md[index].length       = service->srv_buf_size;
308                 service->srv_md[index].threshold    = PTL_MD_THRESH_INF;
309                 service->srv_md[index].options      = PTL_MD_OP_PUT;
310                 service->srv_md[index].user_ptr     = service;
311                 service->srv_md[index].eventq       = service->srv_eq_h;
312
313                 rc = PtlMDAttach(service->srv_me_h[index], service->srv_md[index],
314                         PTL_RETAIN, &(service->srv_md_h[index]));
315
316                 CDEBUG(D_INFO, "Attach MD in ring, rc %d\n", rc);
317                 if (rc != PTL_OK) {
318                         /* cleanup */
319                         printk("PtlMDAttach failed: %d\n", rc);
320                         return rc;
321                 }
322
323                 service->srv_md_active = NEXT_INDEX(index,
324                         service->srv_ring_length);
325         } 
326         
327         return 0;
328 }
329
330 int rpc_register_service(struct ptlrpc_service *service, char *uuid)
331 {
332         struct lustre_peer peer;
333         int rc, i;
334
335         rc = kportal_uuid_to_peer(uuid, &peer);
336         if (rc != 0) {
337                 printk("Invalid uuid \"%s\"\n", uuid);
338                 return -EINVAL;
339         }
340
341         service->srv_ring_length = RPC_RING_LENGTH;
342         service->srv_me_active = 0;
343         service->srv_md_active = 0;
344
345         service->srv_id.addr_kind = PTL_ADDR_GID;
346         service->srv_id.gid = PTL_ID_ANY;
347         service->srv_id.rid = PTL_ID_ANY;
348
349         rc = PtlEQAlloc(peer.peer_ni, 128, incoming_callback,
350                 service, &(service->srv_eq_h));
351
352         if (rc != PTL_OK) {
353                 printk("PtlEQAlloc failed: %d\n", rc);
354                 return rc;
355         }
356
357         /* Attach the leading ME on which we build the ring */
358         rc = PtlMEAttach(peer.peer_ni, service->srv_portal,
359                 service->srv_id, 0, ~0, PTL_RETAIN,
360                 &(service->srv_me_h[0]));
361
362         if (rc != PTL_OK) {
363                 printk("PtlMEAttach failed: %d\n", rc);
364                 return rc;
365         }
366
367         for (i = 0; i < service->srv_ring_length; i++) {
368                 OBD_ALLOC(service->srv_buf[i], service->srv_buf_size);                
369
370                 if (service->srv_buf[i] == NULL) {
371                         printk(__FUNCTION__ ": no memory\n");
372                         return -ENOMEM;
373                 }
374
375                 /* Insert additional ME's to the ring */
376                 if (i > 0) {
377                         rc = PtlMEInsert(service->srv_me_h[i-1],
378                                 service->srv_id, 0, ~0, PTL_RETAIN,
379                                 PTL_INS_AFTER, &(service->srv_me_h[i]));
380                         service->srv_me_tail = i;
381
382                         if (rc != PTL_OK) {
383                                 printk("PtlMEInsert failed: %d\n", rc);
384                                 return rc;
385                         }
386                 }
387
388                 service->srv_ref_count[i] = 0;
389                 service->srv_md[i].start        = service->srv_buf[i];
390                 service->srv_md[i].length       = service->srv_buf_size;
391                 service->srv_md[i].threshold    = PTL_MD_THRESH_INF;
392                 service->srv_md[i].options      = PTL_MD_OP_PUT;
393                 service->srv_md[i].user_ptr     = service;
394                 service->srv_md[i].eventq       = service->srv_eq_h;
395
396                 rc = PtlMDAttach(service->srv_me_h[i], service->srv_md[i],
397                         PTL_RETAIN, &(service->srv_md_h[i]));
398
399                 if (rc != PTL_OK) {
400                         /* cleanup */
401                         printk("PtlMDAttach failed: %d\n", rc);
402                         return rc;
403                 }
404         }
405
406         return 0;
407 }
408
409 int rpc_unregister_service(struct ptlrpc_service *service)
410 {
411         int rc, i;
412
413         for (i = 0; i < service->srv_ring_length; i++) {
414                 rc = PtlMDUnlink(service->srv_md_h[i]);
415                 if (rc)
416                         printk(__FUNCTION__ ": PtlMDUnlink failed: %d\n", rc);
417         
418                 rc = PtlMEUnlink(service->srv_me_h[i]);
419                 if (rc)
420                         printk(__FUNCTION__ ": PtlMEUnlink failed: %d\n", rc);
421         
422                 OBD_FREE(service->srv_buf[i], service->srv_buf_size);           
423         }
424
425         rc = PtlEQFree(service->srv_eq_h);
426         if (rc)
427                 printk(__FUNCTION__ ": PtlEQFree failed: %d\n", rc);
428
429         return 0;
430 }
431
432 static int req_init_portals(void)
433 {
434         int rc;
435         const ptl_handle_ni_t *nip;
436         ptl_handle_ni_t ni;
437
438         nip = inter_module_get_request(LUSTRE_NAL "_ni", LUSTRE_NAL);
439         if (nip == NULL) {
440                 printk("get_ni failed: is the NAL module loaded?\n");
441                 return -EIO;
442         }
443         ni = *nip;
444
445         rc = PtlEQAlloc(ni, 128, request_callback, NULL, &req_eq);
446         if (rc != PTL_OK)
447                 printk("PtlEQAlloc failed: %d\n", rc);
448
449         rc = PtlEQAlloc(ni, 128, bulk_source_callback, NULL, &bulk_source_eq);
450         if (rc != PTL_OK)
451                 printk("PtlEQAlloc failed: %d\n", rc);
452
453         rc = PtlEQAlloc(ni, 128, bulk_sink_callback, NULL, &bulk_sink_eq);
454         if (rc != PTL_OK)
455                 printk("PtlEQAlloc failed: %d\n", rc);
456
457         return rc;
458 }
459
460 static int __init ptlrpc_init(void)
461 {
462         return req_init_portals();
463 }
464
465 static void __exit ptlrpc_exit(void)
466 {
467         PtlEQFree(req_eq);
468         PtlEQFree(bulk_source_eq);
469         PtlEQFree(bulk_sink_eq);
470
471         inter_module_put(LUSTRE_NAL "_ni");
472
473         return;
474 }
475
476 MODULE_AUTHOR("Peter J. Braam <braam@clusterfs.com>");
477 MODULE_DESCRIPTION("Lustre Request Processor v1.0");
478 MODULE_LICENSE("GPL"); 
479
480 module_init(ptlrpc_init);
481 module_exit(ptlrpc_exit);