/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * Copyright (C) 2002 Cluster File Systems, Inc.
 *
 * This file is part of Lustre, http://www.lustre.org.
 *
 * Lustre is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * Lustre is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with Lustre; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/config.h>
#include <linux/module.h>
#include <linux/kernel.h>

#define DEBUG_SUBSYSTEM S_RPC

#include <linux/obd_support.h>
#include <linux/lustre_net.h>

static ptl_handle_eq_t req_eq, bulk_source_eq, bulk_sink_eq;
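/* The three event queues above are allocated in req_init_portals()
 * below: req_eq carries request/reply traffic, bulk_source_eq the
 * sending side of bulk transfers, and bulk_sink_eq the receiving
 * side.  Each dispatches to its registered callback. */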
/* This client callback has two jobs:
 * 1. Free the request buffer after it has gone out on the wire.
 * 2. Wake up the thread waiting for the reply once it comes in.
 */
static int client_packet_callback(ptl_event_t *ev, void *data)
{
        struct ptlrpc_request *rpc = ev->mem_desc.user_ptr;

        /* XXX make sure we understand all events, including ACKs */
        if (ev->type == PTL_EVENT_SENT) {
                OBD_FREE(ev->mem_desc.start, ev->mem_desc.length);
        } else if (ev->type == PTL_EVENT_PUT) {
                rpc->rq_repbuf = ev->mem_desc.start + ev->offset;
                wake_up_interruptible(&rpc->rq_wait_for_rep);
        }

        return 1;
}
static int server_request_callback(ptl_event_t *ev, void *data)
{
        struct ptlrpc_service *service = data;
        int rc;

        if (ev->rlength != ev->mlength)
                CERROR("Warning: possibly truncated rpc (%d/%d)\n",
                       ev->mlength, ev->rlength);

        /* The ME is unlinked when there are fewer than 1024 bytes free
         * on its MD.  This ensures we are always able to handle the rpc,
         * although the 1024 value is a guess as to the size of a
         * large rpc (the known safe margin should be determined).
         *
         * NOTE: The portals API by default unlinks all MDs associated
         *       with an ME when the ME is unlinked.  For now, this
         *       behavior has been commented out of the portals library
         *       so the MD can be unlinked when its ref count drops to
         *       zero.  A new MD and ME will then be created that use
         *       the same kmalloc()'ed memory and be inserted at the
         *       ring tail.
         */
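        /* Illustrative sketch of the ring arithmetic: NEXT_INDEX comes
         * from a Lustre header, not this file, and is assumed to be a
         * simple modular increment, something like
         *
         *      #define NEXT_INDEX(index, length)  (((index) + 1) % (length))
         *
         * so srv_me_active and srv_md_active walk 0, 1, ...,
         * srv_ring_length - 1 and wrap back to 0 as buffers fill and
         * are replaced at the tail. */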
        service->srv_ref_count[service->srv_md_active]++;

        if (ev->offset >= (service->srv_buf_size - 1024)) {
                CDEBUG(D_INODE, "Unlinking ME %d\n", service->srv_me_active);

                rc = PtlMEUnlink(service->srv_me_h[service->srv_me_active]);
                service->srv_me_h[service->srv_me_active] = 0;
                if (rc != PTL_OK)
                        CERROR("PtlMEUnlink failed - DROPPING soon: %d\n", rc);

                service->srv_me_active = NEXT_INDEX(service->srv_me_active,
                                                    service->srv_ring_length);

                if (service->srv_me_h[service->srv_me_active] == 0)
                        CERROR("All %d ring MEs are unlinked!\n",
                               service->srv_ring_length);
        }

        if (ev->type == PTL_EVENT_PUT) {
                wake_up(service->srv_wait_queue);
        } else {
                CERROR("Unexpected event type: %d\n", ev->type);
        }

        return 1;
}
static int bulk_source_callback(ptl_event_t *ev, void *data)
{
        struct ptlrpc_request *rpc = ev->mem_desc.user_ptr;

        if (ev->type == PTL_EVENT_SENT) {
                ; /* nothing to do; we wait for the ACK instead */
        } else if (ev->type == PTL_EVENT_ACK) {
                wake_up_interruptible(&rpc->rq_wait_for_bulk);
        } else {
                CERROR("Unexpected event type!\n");
        }
        return 1;
}
static int bulk_sink_callback(ptl_event_t *ev, void *data)
{
        struct ptlrpc_request *rpc = ev->mem_desc.user_ptr;

        if (ev->type == PTL_EVENT_PUT) {
                if (rpc->rq_bulkbuf != ev->mem_desc.start + ev->offset)
                        CERROR("bulkbuf != mem_desc -- why?\n");
                wake_up_interruptible(&rpc->rq_wait_for_bulk);
        } else {
                CERROR("Unexpected event type!\n");
        }
        return 1;
}
int ptl_send_buf(struct ptlrpc_request *request, struct lustre_peer *peer,
                 int portal, int is_request)
{
        int rc;
        ptl_process_id_t remote_id;
        ptl_handle_md_t md_h;

        /* FIXME: This is bad. */
        if (request->rq_bulklen) {
                request->rq_req_md.start = request->rq_bulkbuf;
                request->rq_req_md.length = request->rq_bulklen;
                request->rq_req_md.eventq = bulk_source_eq;
        } else if (is_request) {
                request->rq_req_md.start = request->rq_reqbuf;
                request->rq_req_md.length = request->rq_reqlen;
                request->rq_req_md.eventq = req_eq;
        } else {
                request->rq_req_md.start = request->rq_repbuf;
                request->rq_req_md.length = request->rq_replen;
                request->rq_req_md.eventq = req_eq;
        }
        request->rq_req_md.threshold = 1;
        request->rq_req_md.options = PTL_MD_OP_PUT;
        request->rq_req_md.user_ptr = request;

        rc = PtlMDBind(peer->peer_ni, request->rq_req_md, &md_h);
        if (rc != PTL_OK) {
                CERROR("PtlMDBind failed: %d\n", rc);
                return rc;
        }

        remote_id.addr_kind = PTL_ADDR_NID;
        remote_id.nid = peer->peer_nid;

        /* Request an ACK only for bulk data, so bulk_source_callback()
         * can wake the waiter once the transfer is acknowledged. */
        if (request->rq_bulklen) {
                rc = PtlPut(md_h, PTL_ACK_REQ, remote_id, portal, 0,
                            request->rq_xid, 0, 0);
        } else {
                rc = PtlPut(md_h, PTL_NOACK_REQ, remote_id, portal, 0,
                            request->rq_xid, 0, 0);
        }
        if (rc != PTL_OK) {
                CERROR("PtlPut failed: %d\n", rc);
                /* FIXME: tear down md */
        }

        return rc;
}
int ptl_send_rpc(struct ptlrpc_request *request, struct lustre_peer *peer)
{
        ptl_handle_me_t me_h, bulk_me_h;
        ptl_process_id_t local_id;
        int rc;

        if (request->rq_replen == 0) {
                CERROR("request->rq_replen is 0!\n");
                return -EINVAL;
        }

        OBD_ALLOC(request->rq_repbuf, request->rq_replen);
        if (!request->rq_repbuf)
                return -ENOMEM;

        local_id.addr_kind = PTL_ADDR_GID;
        local_id.gid = PTL_ID_ANY;
        local_id.rid = PTL_ID_ANY;

        /* Attach an ME and MD so the reply has somewhere to land. */
        rc = PtlMEAttach(peer->peer_ni, request->rq_reply_portal, local_id,
                         request->rq_xid, 0, PTL_UNLINK, &me_h);
        if (rc != PTL_OK) {
                CERROR("PtlMEAttach failed: %d\n", rc);
                /* FIXME: tear down EQ, free reqbuf */
                return rc;
        }

        request->rq_reply_md.start = request->rq_repbuf;
        request->rq_reply_md.length = request->rq_replen;
        request->rq_reply_md.threshold = 1;
        request->rq_reply_md.options = PTL_MD_OP_PUT;
        request->rq_reply_md.user_ptr = request;
        request->rq_reply_md.eventq = req_eq;

        rc = PtlMDAttach(me_h, request->rq_reply_md, PTL_UNLINK,
                         &request->rq_reply_md_h);
        if (rc != PTL_OK) {
                CERROR("PtlMDAttach failed: %d\n", rc);
                return rc;
        }

        /* If there is bulk data, set up its sink the same way. */
        if (request->rq_bulklen != 0) {
                rc = PtlMEAttach(peer->peer_ni, request->rq_bulk_portal,
                                 local_id, request->rq_xid, 0, PTL_UNLINK,
                                 &bulk_me_h);
                if (rc != PTL_OK) {
                        CERROR("PtlMEAttach failed: %d\n", rc);
                        return rc;
                }

                request->rq_bulk_md.start = request->rq_bulkbuf;
                request->rq_bulk_md.length = request->rq_bulklen;
                request->rq_bulk_md.threshold = 1;
                request->rq_bulk_md.options = PTL_MD_OP_PUT;
                request->rq_bulk_md.user_ptr = request;
                request->rq_bulk_md.eventq = bulk_sink_eq;

                rc = PtlMDAttach(bulk_me_h, request->rq_bulk_md, PTL_UNLINK,
                                 &request->rq_bulk_md_h);
                if (rc != PTL_OK) {
                        CERROR("PtlMDAttach failed: %d\n", rc);
                        return rc;
                }
        }

        return ptl_send_buf(request, peer, request->rq_req_portal, 1);
}
/* ptl_received_rpc() should be called by the sleeping process once
 * it finishes processing an event.  This ensures the ref count is
 * decremented and that the rpc ring buffer cycles properly.
 */
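/* Hypothetical caller, sketched for illustration only (the service
 * thread and its request handler live elsewhere, not in this file):
 *
 *      wait_event_interruptible(*service->srv_wait_queue, ...);
 *      handle_request(service);        // hypothetical handler
 *      ptl_received_rpc(service);      // release the ring slot
 */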
int ptl_received_rpc(struct ptlrpc_service *service)
{
        int rc, index;

        index = service->srv_md_active;
        CDEBUG(D_INFO, "MD index=%d Ref Count=%d\n", index,
               service->srv_ref_count[index]);
        service->srv_ref_count[index]--;

        if ((service->srv_ref_count[index] <= 0) &&
            (service->srv_me_h[index] == 0)) {
                /* Replace the unlinked ME and MD */
                rc = PtlMEInsert(service->srv_me_h[service->srv_me_tail],
                                 service->srv_id, 0, ~0, PTL_RETAIN,
                                 PTL_INS_AFTER, &(service->srv_me_h[index]));
                CDEBUG(D_INFO, "Inserting new ME and MD in ring, rc %d\n", rc);
                service->srv_me_tail = index;
                service->srv_ref_count[index] = 0;

                if (rc != PTL_OK) {
                        CERROR("PtlMEInsert failed: %d\n", rc);
                        return rc;
                }

                service->srv_md[index].start = service->srv_buf[index];
                service->srv_md[index].length = service->srv_buf_size;
                service->srv_md[index].threshold = PTL_MD_THRESH_INF;
                service->srv_md[index].options = PTL_MD_OP_PUT;
                service->srv_md[index].user_ptr = service;
                service->srv_md[index].eventq = service->srv_eq_h;

                rc = PtlMDAttach(service->srv_me_h[index],
                                 service->srv_md[index], PTL_RETAIN,
                                 &(service->srv_md_h[index]));
                CDEBUG(D_INFO, "Attach MD in ring, rc %d\n", rc);

                if (rc != PTL_OK) {
                        CERROR("PtlMDAttach failed: %d\n", rc);
                        return rc;
                }

                service->srv_md_active = NEXT_INDEX(index,
                                                    service->srv_ring_length);
        }

        return 0;
}
int rpc_register_service(struct ptlrpc_service *service, char *uuid)
{
        struct lustre_peer peer;
        int rc, i;

        rc = kportal_uuid_to_peer(uuid, &peer);
        if (rc != 0) {
                CERROR("Invalid uuid \"%s\"\n", uuid);
                return -EINVAL;
        }

        service->srv_ring_length = RPC_RING_LENGTH;
        service->srv_me_active = 0;
        service->srv_md_active = 0;

        service->srv_id.addr_kind = PTL_ADDR_GID;
        service->srv_id.gid = PTL_ID_ANY;
        service->srv_id.rid = PTL_ID_ANY;

        rc = PtlEQAlloc(peer.peer_ni, 128, server_request_callback,
                        service, &(service->srv_eq_h));
        if (rc != PTL_OK) {
                CERROR("PtlEQAlloc failed: %d\n", rc);
                return rc;
        }

        /* Attach the leading ME on which we build the ring */
        rc = PtlMEAttach(peer.peer_ni, service->srv_portal,
                         service->srv_id, 0, ~0, PTL_RETAIN,
                         &(service->srv_me_h[0]));
        if (rc != PTL_OK) {
                CERROR("PtlMEAttach failed: %d\n", rc);
                return rc;
        }

        for (i = 0; i < service->srv_ring_length; i++) {
                OBD_ALLOC(service->srv_buf[i], service->srv_buf_size);
                if (service->srv_buf[i] == NULL) {
                        CERROR("no memory\n");
                        return -ENOMEM;
                }

                /* Insert the additional MEs into the ring */
                if (i > 0) {
                        rc = PtlMEInsert(service->srv_me_h[i - 1],
                                         service->srv_id, 0, ~0, PTL_RETAIN,
                                         PTL_INS_AFTER,
                                         &(service->srv_me_h[i]));
                        service->srv_me_tail = i;

                        if (rc != PTL_OK) {
                                CERROR("PtlMEInsert failed: %d\n", rc);
                                return rc;
                        }
                }

                service->srv_ref_count[i] = 0;
                service->srv_md[i].start = service->srv_buf[i];
                service->srv_md[i].length = service->srv_buf_size;
                service->srv_md[i].threshold = PTL_MD_THRESH_INF;
                service->srv_md[i].options = PTL_MD_OP_PUT;
                service->srv_md[i].user_ptr = service;
                service->srv_md[i].eventq = service->srv_eq_h;

                rc = PtlMDAttach(service->srv_me_h[i], service->srv_md[i],
                                 PTL_RETAIN, &(service->srv_md_h[i]));
                if (rc != PTL_OK) {
                        CERROR("PtlMDAttach failed: %d\n", rc);
                        return rc;
                }
        }

        return 0;
}
int rpc_unregister_service(struct ptlrpc_service *service)
{
        int rc, i;

        for (i = 0; i < service->srv_ring_length; i++) {
                rc = PtlMDUnlink(service->srv_md_h[i]);
                if (rc != PTL_OK)
                        CERROR("PtlMDUnlink failed: %d\n", rc);
                rc = PtlMEUnlink(service->srv_me_h[i]);
                if (rc != PTL_OK)
                        CERROR("PtlMEUnlink failed: %d\n", rc);
                OBD_FREE(service->srv_buf[i], service->srv_buf_size);
        }

        rc = PtlEQFree(service->srv_eq_h);
        if (rc != PTL_OK)
                CERROR("PtlEQFree failed: %d\n", rc);
        return rc;
}
static int req_init_portals(void)
{
        int rc;
        ptl_handle_ni_t ni;
        const ptl_handle_ni_t *nip;

        nip = inter_module_get_request(LUSTRE_NAL "_ni", LUSTRE_NAL);
        if (nip == NULL) {
                CERROR("get_ni failed: is the NAL module loaded?\n");
                return -EIO;
        }
        ni = *nip;

        rc = PtlEQAlloc(ni, 128, client_packet_callback, NULL, &req_eq);
        if (rc != PTL_OK)
                CERROR("PtlEQAlloc failed: %d\n", rc);
        rc = PtlEQAlloc(ni, 128, bulk_source_callback, NULL, &bulk_source_eq);
        if (rc != PTL_OK)
                CERROR("PtlEQAlloc failed: %d\n", rc);
        rc = PtlEQAlloc(ni, 128, bulk_sink_callback, NULL, &bulk_sink_eq);
        if (rc != PTL_OK)
                CERROR("PtlEQAlloc failed: %d\n", rc);

        return rc;
}
static int __init ptlrpc_init(void)
{
        return req_init_portals();
}

static void __exit ptlrpc_exit(void)
{
        PtlEQFree(req_eq);
        PtlEQFree(bulk_source_eq);
        PtlEQFree(bulk_sink_eq);

        inter_module_put(LUSTRE_NAL "_ni");
}
MODULE_AUTHOR("Peter J. Braam <braam@clusterfs.com>");
MODULE_DESCRIPTION("Lustre Request Processor v1.0");
MODULE_LICENSE("GPL");

module_init(ptlrpc_init);
module_exit(ptlrpc_exit);