1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
4 * Copyright (C) 2002 Cluster File Systems, Inc.
6 * This file is part of Lustre, http://www.lustre.org.
8 * Lustre is free software; you can redistribute it and/or
9 * modify it under the terms of version 2 of the GNU General Public
10 * License as published by the Free Software Foundation.
12 * Lustre is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
17 * You should have received a copy of the GNU General Public License
18 * along with Lustre; if not, write to the Free Software
19 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
25 #include <linux/config.h>
26 #include <linux/module.h>
27 #include <linux/kernel.h>
29 #define DEBUG_SUBSYSTEM S_RPC
31 #include <linux/obd_support.h>
32 #include <linux/lustre_net.h>
/* Module-wide Portals event queues, allocated in req_init_portals():
 * req_eq receives request send/reply events (request_callback),
 * bulk_source_eq the bulk-sender events (bulk_source_callback), and
 * bulk_sink_eq the bulk-receiver events (bulk_sink_callback). */
34 static ptl_handle_eq_t req_eq, bulk_source_eq, bulk_sink_eq;
37 * 1. Free the request buffer after it has gone out on the wire
38 * 2. Wake up the thread waiting for the reply once it comes in.
40 static int request_callback(ptl_event_t *ev, void *data)
42 struct ptlrpc_request *rpc = ev->mem_desc.user_ptr;
46 if (ev->type == PTL_EVENT_SENT) {
47 OBD_FREE(ev->mem_desc.start, ev->mem_desc.length);
48 } else if (ev->type == PTL_EVENT_PUT) {
49 rpc->rq_repbuf = ev->mem_desc.start + ev->offset;
50 wake_up_interruptible(&rpc->rq_wait_for_rep);
57 static int incoming_callback(ptl_event_t *ev, void *data)
59 struct ptlrpc_service *service = data;
62 if (ev->rlength != ev->mlength)
63 printk("Warning: Possibly truncated rpc (%d/%d)\n",
64 ev->mlength, ev->rlength);
66 /* The ME is unlinked when there is less than 1024 bytes free
67 * on its MD. This ensures we are always able to handle the rpc,
68 * although the 1024 value is a guess as to the size of a
69 * large rpc (the known safe margin should be determined).
71 * NOTE: The portals API by default unlinks all MD's associated
72 * with an ME when it's unlinked. For now, this behavior
73 * has been commented out of the portals library so the
74 * MD can be unlinked when its ref count drops to zero.
75 * A new MD and ME will then be created that use the same
76 * kmalloc()'ed memory and inserted at the ring tail.
79 service->srv_ref_count[service->srv_md_active]++;
81 if (ev->offset >= (service->srv_buf_size - 1024)) {
82 printk("Unlinking ME %d\n", service->srv_me_active);
84 rc = PtlMEUnlink(service->srv_me_h[service->srv_me_active]);
85 service->srv_me_h[service->srv_me_active] = 0;
88 printk("PtlMEUnlink failed: %d\n", rc);
92 service->srv_me_active = NEXT_INDEX(service->srv_me_active,
93 service->srv_ring_length);
95 if (service->srv_me_h[service->srv_me_active] == 0)
96 printk("All %d ring ME's are unlinked!\n",
97 service->srv_ring_length);
101 if (ev->type == PTL_EVENT_PUT) {
102 wake_up(service->srv_wait_queue);
104 printk("Unexpected event type: %d\n", ev->type);
110 static int bulk_source_callback(ptl_event_t *ev, void *data)
112 struct ptlrpc_request *rpc = ev->mem_desc.user_ptr;
116 if (ev->type == PTL_EVENT_SENT) {
118 } else if (ev->type == PTL_EVENT_ACK) {
119 wake_up_interruptible(&rpc->rq_wait_for_bulk);
121 printk("Unexpected event type in " __FUNCTION__ "!\n");
128 static int bulk_sink_callback(ptl_event_t *ev, void *data)
130 struct ptlrpc_request *rpc = ev->mem_desc.user_ptr;
134 if (ev->type == PTL_EVENT_PUT) {
135 if (rpc->rq_bulkbuf != ev->mem_desc.start + ev->offset)
136 printk(__FUNCTION__ ": bulkbuf != mem_desc -- why?\n");
137 wake_up_interruptible(&rpc->rq_wait_for_bulk);
139 printk("Unexpected event type in " __FUNCTION__ "!\n");
146 int ptl_send_buf(struct ptlrpc_request *request, struct lustre_peer *peer,
147 int portal, int is_request)
150 ptl_process_id_t remote_id;
151 ptl_handle_md_t md_h;
153 /* FIXME: This is bad. */
154 if (request->rq_bulklen) {
155 request->rq_req_md.start = request->rq_bulkbuf;
156 request->rq_req_md.length = request->rq_bulklen;
157 request->rq_req_md.eventq = bulk_source_eq;
158 } else if (is_request) {
159 request->rq_req_md.start = request->rq_reqbuf;
160 request->rq_req_md.length = request->rq_reqlen;
161 request->rq_req_md.eventq = req_eq;
163 request->rq_req_md.start = request->rq_repbuf;
164 request->rq_req_md.length = request->rq_replen;
165 request->rq_req_md.eventq = req_eq;
167 request->rq_req_md.threshold = 1;
168 request->rq_req_md.options = PTL_MD_OP_PUT;
169 request->rq_req_md.user_ptr = request;
171 rc = PtlMDBind(peer->peer_ni, request->rq_req_md, &md_h);
173 printk(__FUNCTION__ ": PtlMDBind failed: %d\n", rc);
177 remote_id.addr_kind = PTL_ADDR_NID;
178 remote_id.nid = peer->peer_nid;
181 if (request->rq_bulklen) {
182 rc = PtlPut(md_h, PTL_ACK_REQ, remote_id, portal, 0,
183 request->rq_xid, 0, 0);
185 rc = PtlPut(md_h, PTL_NOACK_REQ, remote_id, portal, 0,
186 request->rq_xid, 0, 0);
189 printk(__FUNCTION__ ": PtlPut failed: %d\n", rc);
190 /* FIXME: tear down md */
/*
 * ptl_send_rpc - post the reply (and optional bulk) sink buffers for a
 * request, then send the request itself via ptl_send_buf().
 *
 * An ME/MD pair is attached on rq_reply_portal, matching rq_xid, so
 * the reply is deposited into a freshly allocated rq_repbuf (events go
 * to req_eq / request_callback).  When rq_bulklen is non-zero a second
 * ME/MD pair is attached on rq_bulk_portal for the incoming bulk data
 * (events go to bulk_sink_eq / bulk_sink_callback).
 *
 * NOTE(review): this listing is truncated -- error-handling lines and
 * closing braces are missing below; comments describe only the code
 * that is visible.
 */
196 int ptl_send_rpc(struct ptlrpc_request *request, struct lustre_peer *peer)
198 ptl_handle_me_t me_h, bulk_me_h;
199 ptl_process_id_t local_id;
        /* A reply buffer cannot be posted without a known length. */
204 if (request->rq_replen == 0) {
205 printk(__FUNCTION__ ": request->rq_replen is 0!\n");
        /* Buffer the reply will be deposited into; freed elsewhere
         * once the reply is consumed -- TODO confirm owner. */
210 OBD_ALLOC(request->rq_repbuf, request->rq_replen);
211 if (!request->rq_repbuf) {
        /* Wildcard source: accept the reply from any peer; the match
         * bits (rq_xid) select the right message. */
216 local_id.addr_kind = PTL_ADDR_GID;
217 local_id.gid = PTL_ID_ANY;
218 local_id.rid = PTL_ID_ANY;
220 rc = PtlMEAttach(peer->peer_ni, request->rq_reply_portal, local_id,
221 request->rq_xid, 0, PTL_UNLINK, &me_h);
224 /* FIXME: tear down EQ, free reqbuf */
        /* Single-use MD over the reply buffer; completion events are
         * delivered to req_eq (request_callback). */
228 request->rq_reply_md.start = request->rq_repbuf;
229 request->rq_reply_md.length = request->rq_replen;
230 request->rq_reply_md.threshold = 1;
231 request->rq_reply_md.options = PTL_MD_OP_PUT;
232 request->rq_reply_md.user_ptr = request;
233 request->rq_reply_md.eventq = req_eq;
235 rc = PtlMDAttach(me_h, request->rq_reply_md, PTL_UNLINK,
236 &request->rq_reply_md_h);
        /* Optional bulk sink: same wildcard/match-bits scheme on the
         * bulk portal, events to bulk_sink_eq (bulk_sink_callback). */
242 if (request->rq_bulklen != 0) {
243 rc = PtlMEAttach(peer->peer_ni, request->rq_bulk_portal,
244 local_id, request->rq_xid, 0, PTL_UNLINK,
251 request->rq_bulk_md.start = request->rq_bulkbuf;
252 request->rq_bulk_md.length = request->rq_bulklen;
253 request->rq_bulk_md.threshold = 1;
254 request->rq_bulk_md.options = PTL_MD_OP_PUT;
255 request->rq_bulk_md.user_ptr = request;
256 request->rq_bulk_md.eventq = bulk_sink_eq;
258 rc = PtlMDAttach(bulk_me_h, request->rq_bulk_md, PTL_UNLINK,
259 &request->rq_bulk_md_h);
        /* Everything is posted; now actually send the request. */
266 return ptl_send_buf(request, peer, request->rq_req_portal, 1);
269 /* ptl_received_rpc() should be called by the sleeping process once
270 * it finishes processing an event. This ensures the ref count is
271 * decremented and that the rpc ring buffer cycles properly.
        /*
         * Drops the reference taken by incoming_callback() on the
         * active MD.  When the reference count drains to zero AND the
         * ME slot was zeroed by incoming_callback() (i.e. the ME was
         * unlinked because the MD filled up), the MD is unlinked and a
         * fresh ME/MD pair over the same buffer is inserted after the
         * current ring tail, then srv_md_active advances.
         *
         * NOTE(review): listing is truncated -- declarations of
         * index/rc, several braces and error branches are missing;
         * comments describe only the visible code.
         */
273 int ptl_received_rpc(struct ptlrpc_service *service) {
276 index = service->srv_md_active;
277 CDEBUG(D_INFO, "MD index=%d Ref Count=%d\n", index,
278 service->srv_ref_count[index]);
279 service->srv_ref_count[index]--;
        /* Recycle only when no rpc still references this MD and its ME
         * has been unlinked (slot zeroed in incoming_callback()). */
281 if ((service->srv_ref_count[index] <= 0) &&
282 (service->srv_me_h[index] == 0)) {
284 rc = PtlMDUnlink(service->srv_md_h[index]);
285 CDEBUG(D_INFO, "Removing MD at index %d, rc %d\n", index, rc);
289 ": PtlMDUnlink failed: index %d rc %d\n",
292 /* Replace the unlinked ME and MD */
        /* Re-insert this entry's ME after the current tail so the
         * ring keeps its order, then make it the new tail. */
294 rc = PtlMEInsert(service->srv_me_h[service->srv_me_tail],
295 service->srv_id, 0, ~0, PTL_RETAIN,
296 PTL_INS_AFTER, &(service->srv_me_h[index]));
297 CDEBUG(D_INFO, "Inserting new ME and MD in ring, rc %d\n", rc);
298 service->srv_me_tail = index;
299 service->srv_ref_count[index] = 0;
302 printk("PtlMEInsert failed: %d\n", rc);
        /* Rebuild the MD over the same kmalloc()'ed buffer, exactly as
         * rpc_register_service() set it up originally. */
306 service->srv_md[index].start = service->srv_buf[index];
307 service->srv_md[index].length = service->srv_buf_size;
308 service->srv_md[index].threshold = PTL_MD_THRESH_INF;
309 service->srv_md[index].options = PTL_MD_OP_PUT;
310 service->srv_md[index].user_ptr = service;
311 service->srv_md[index].eventq = service->srv_eq_h;
313 rc = PtlMDAttach(service->srv_me_h[index], service->srv_md[index],
314 PTL_RETAIN, &(service->srv_md_h[index]));
316 CDEBUG(D_INFO, "Attach MD in ring, rc %d\n", rc);
319 printk("PtlMDAttach failed: %d\n", rc);
        /* Advance the active slot around the ring. */
323 service->srv_md_active = NEXT_INDEX(index,
324 service->srv_ring_length);
/*
 * rpc_register_service - set up the incoming-request ring for a
 * service: resolve the peer from its uuid, allocate the service event
 * queue (with incoming_callback), attach a leading ME on srv_portal,
 * then build the rest of the ring by allocating a buffer and inserting
 * an ME + MD for each of the RPC_RING_LENGTH slots.
 *
 * NOTE(review): listing is truncated -- declarations of rc/i, several
 * braces and error-return lines are missing; comments describe only
 * the visible code.
 */
330 int rpc_register_service(struct ptlrpc_service *service, char *uuid)
332 struct lustre_peer peer;
335 rc = kportal_uuid_to_peer(uuid, &peer);
337 printk("Invalid uuid \"%s\"\n", uuid);
        /* Ring bookkeeping starts at slot 0. */
341 service->srv_ring_length = RPC_RING_LENGTH;
342 service->srv_me_active = 0;
343 service->srv_md_active = 0;
        /* Wildcard id: accept requests from any peer. */
345 service->srv_id.addr_kind = PTL_ADDR_GID;
346 service->srv_id.gid = PTL_ID_ANY;
347 service->srv_id.rid = PTL_ID_ANY;
        /* Event queue all ring MDs deliver into; incoming_callback
         * gets this service as its private data. */
349 rc = PtlEQAlloc(peer.peer_ni, 128, incoming_callback,
350 service, &(service->srv_eq_h));
353 printk("PtlEQAlloc failed: %d\n", rc);
357 /* Attach the leading ME on which we build the ring */
358 rc = PtlMEAttach(peer.peer_ni, service->srv_portal,
359 service->srv_id, 0, ~0, PTL_RETAIN,
360 &(service->srv_me_h[0]));
363 printk("PtlMEAttach failed: %d\n", rc);
        /* Build the rest of the ring: one buffer + ME + MD per slot. */
367 for (i = 0; i < service->srv_ring_length; i++) {
368 OBD_ALLOC(service->srv_buf[i], service->srv_buf_size);
370 if (service->srv_buf[i] == NULL) {
371 printk(__FUNCTION__ ": no memory\n");
375 /* Insert additional ME's to the ring */
377 rc = PtlMEInsert(service->srv_me_h[i-1],
378 service->srv_id, 0, ~0, PTL_RETAIN,
379 PTL_INS_AFTER, &(service->srv_me_h[i]));
380 service->srv_me_tail = i;
383 printk("PtlMEInsert failed: %d\n", rc);
        /* MD over the slot's buffer; infinite threshold so it stays
         * usable until explicitly recycled (see ptl_received_rpc()). */
388 service->srv_ref_count[i] = 0;
389 service->srv_md[i].start = service->srv_buf[i];
390 service->srv_md[i].length = service->srv_buf_size;
391 service->srv_md[i].threshold = PTL_MD_THRESH_INF;
392 service->srv_md[i].options = PTL_MD_OP_PUT;
393 service->srv_md[i].user_ptr = service;
394 service->srv_md[i].eventq = service->srv_eq_h;
396 rc = PtlMDAttach(service->srv_me_h[i], service->srv_md[i],
397 PTL_RETAIN, &(service->srv_md_h[i]));
401 printk("PtlMDAttach failed: %d\n", rc);
409 int rpc_unregister_service(struct ptlrpc_service *service)
413 for (i = 0; i < service->srv_ring_length; i++) {
414 rc = PtlMDUnlink(service->srv_md_h[i]);
416 printk(__FUNCTION__ ": PtlMDUnlink failed: %d\n", rc);
418 rc = PtlMEUnlink(service->srv_me_h[i]);
420 printk(__FUNCTION__ ": PtlMEUnlink failed: %d\n", rc);
422 OBD_FREE(service->srv_buf[i], service->srv_buf_size);
425 rc = PtlEQFree(service->srv_eq_h);
427 printk(__FUNCTION__ ": PtlEQFree failed: %d\n", rc);
432 static int req_init_portals(void)
435 const ptl_handle_ni_t *nip;
438 nip = inter_module_get_request(LUSTRE_NAL "_ni", LUSTRE_NAL);
440 printk("get_ni failed: is the NAL module loaded?\n");
445 rc = PtlEQAlloc(ni, 128, request_callback, NULL, &req_eq);
447 printk("PtlEQAlloc failed: %d\n", rc);
449 rc = PtlEQAlloc(ni, 128, bulk_source_callback, NULL, &bulk_source_eq);
451 printk("PtlEQAlloc failed: %d\n", rc);
453 rc = PtlEQAlloc(ni, 128, bulk_sink_callback, NULL, &bulk_sink_eq);
455 printk("PtlEQAlloc failed: %d\n", rc);
460 static int __init ptlrpc_init(void)
462 return req_init_portals();
465 static void __exit ptlrpc_exit(void)
468 PtlEQFree(bulk_source_eq);
469 PtlEQFree(bulk_sink_eq);
471 inter_module_put(LUSTRE_NAL "_ni");
/* Module metadata and init/exit hooks (ptlrpc_init / ptlrpc_exit). */
476 MODULE_AUTHOR("Peter J. Braam <braam@clusterfs.com>");
477 MODULE_DESCRIPTION("Lustre Request Processor v1.0");
478 MODULE_LICENSE("GPL");
480 module_init(ptlrpc_init);
481 module_exit(ptlrpc_exit);