lustre/ptlrpc/niobuf.c: reply message initialisation fix
/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 *  Copyright (C) 2002 Cluster File Systems, Inc.
 *
 *   This file is part of Lustre, http://www.lustre.org.
 *
 *   Lustre is free software; you can redistribute it and/or
 *   modify it under the terms of version 2 of the GNU General Public
 *   License as published by the Free Software Foundation.
 *
 *   Lustre is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with Lustre; if not, write to the Free Software
 *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 */

#define DEBUG_SUBSYSTEM S_RPC

#include <linux/obd_support.h>
#include <linux/lustre_net.h>
#include <linux/lustre_lib.h>

extern ptl_handle_eq_t request_out_eq, reply_in_eq, reply_out_eq,
        bulk_source_eq, bulk_sink_eq;
static ptl_process_id_t local_id = {PTL_NID_ANY, PTL_PID_ANY};

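/*
 * Bind the message buffer of a request or reply as a memory descriptor
 * and PtlPut() it to the given portal on the peer.  The peer matches on
 * rq_xid; no ACK is requested for either message type.
 */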
static int ptl_send_buf(struct ptlrpc_request *request,
                        struct ptlrpc_connection *conn, int portal)
{
        int rc;
        ptl_process_id_t remote_id;
        ptl_handle_md_t md_h;
        ptl_ack_req_t ack;

        switch (request->rq_type) {
        case PTL_RPC_TYPE_REQUEST:
                request->rq_req_md.start = request->rq_reqmsg;
                request->rq_req_md.length = request->rq_reqlen;
                request->rq_req_md.eventq = request_out_eq;
                request->rq_req_md.threshold = 1;
                ack = PTL_NOACK_REQ;
                break;
        case PTL_RPC_TYPE_REPLY:
                request->rq_req_md.start = request->rq_repmsg;
                request->rq_req_md.length = request->rq_replen;
                request->rq_req_md.eventq = reply_out_eq;
                request->rq_req_md.threshold = 1;
                ack = PTL_NOACK_REQ;
                break;
        default:
                LBUG();
                return -1; /* notreached */
        }
        request->rq_req_md.options = PTL_MD_OP_PUT;
        request->rq_req_md.user_ptr = request;

        rc = PtlMDBind(conn->c_peer.peer_ni, request->rq_req_md, &md_h);
        if (rc != PTL_OK) {
                CERROR("PtlMDBind failed: %d\n", rc);
                LBUG();
                return rc;
        }

        remote_id.nid = conn->c_peer.peer_nid;
        remote_id.pid = 0;

        CDEBUG(D_NET, "Sending %d bytes to portal %d, xid %Ld\n",
               request->rq_req_md.length, portal, request->rq_xid);

        rc = PtlPut(md_h, ack, remote_id, portal, 0, request->rq_xid, 0, 0);
        if (rc != PTL_OK) {
                CERROR("PtlPut(%Lu, %d, %Ld) failed: %d\n", remote_id.nid,
                       portal, request->rq_xid, rc);
                PtlMDUnlink(md_h);
        }

        return rc;
}

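/*
 * Return the descriptor's preallocated iovec array when the pages fit,
 * otherwise allocate one sized for bd_page_count entries.
 * ptlrpc_put_bulk_iov() frees only what its counterpart allocated.
 */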
static inline struct iovec *
ptlrpc_get_bulk_iov (struct ptlrpc_bulk_desc *desc)
{
        struct iovec *iov;

        if (desc->bd_page_count <= sizeof (desc->bd_iov)/sizeof (struct iovec))
                return (desc->bd_iov);

        OBD_ALLOC (iov, desc->bd_page_count * sizeof (struct iovec));
        if (iov == NULL)
                LBUG();

        return (iov);
}

static inline void
ptlrpc_put_bulk_iov (struct ptlrpc_bulk_desc *desc, struct iovec *iov)
{
        if (desc->bd_page_count <= sizeof (desc->bd_iov)/sizeof (struct iovec))
                return;

        OBD_FREE (iov, desc->bd_page_count * sizeof (struct iovec));
}

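/*
 * Source side of a bulk transfer: gather every queued page into a
 * single iovec MD and PtlPut() it to the sink's portal, matched by the
 * xid the sink registered with.  The MD threshold is 2 so both the
 * SENT and ACK events are delivered before it goes away.
 */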
int ptlrpc_send_bulk(struct ptlrpc_bulk_desc *desc)
{
        int rc;
        struct list_head *tmp, *next;
        ptl_process_id_t remote_id;
        __u32 xid = 0;
        struct iovec *iov;
        ENTRY;

        iov = ptlrpc_get_bulk_iov (desc);
        if (iov == NULL)
                RETURN (-ENOMEM);

        desc->bd_md.start = iov;
        desc->bd_md.niov = 0;
        desc->bd_md.length = 0;
        desc->bd_md.eventq = bulk_source_eq;
        desc->bd_md.threshold = 2; /* SENT and ACK */
        desc->bd_md.options = PTL_MD_OP_PUT | PTL_MD_IOV;
        desc->bd_md.user_ptr = desc;

        list_for_each_safe(tmp, next, &desc->bd_page_list) {
                struct ptlrpc_bulk_page *bulk;
                bulk = list_entry(tmp, struct ptlrpc_bulk_page, bp_link);

                LASSERT (desc->bd_md.niov < desc->bd_page_count);

                if (desc->bd_md.niov == 0)
                        xid = bulk->bp_xid;
                LASSERT (xid == bulk->bp_xid);   /* should all be the same */

                iov[desc->bd_md.niov].iov_base = bulk->bp_buf;
                iov[desc->bd_md.niov].iov_len = bulk->bp_buflen;
                desc->bd_md.niov++;
                desc->bd_md.length += bulk->bp_buflen;
        }

        LASSERT (desc->bd_md.niov == desc->bd_page_count);
        LASSERT (desc->bd_md.niov != 0);

        rc = PtlMDBind(desc->bd_connection->c_peer.peer_ni, desc->bd_md,
                       &desc->bd_md_h);

        /* could be moved below PtlPut() to shave latency off the send */
        ptlrpc_put_bulk_iov (desc, iov);

        if (rc != PTL_OK) {
                CERROR("PtlMDBind failed: %d\n", rc);
                LBUG();
                RETURN(rc);
        }

        remote_id.nid = desc->bd_connection->c_peer.peer_nid;
        remote_id.pid = 0;

        CDEBUG(D_NET, "Sending %u pages %u bytes to portal %d nid %Lx "
               "pid %d xid %d\n", desc->bd_md.niov, desc->bd_md.length,
               desc->bd_portal, remote_id.nid, remote_id.pid, xid);

        rc = PtlPut(desc->bd_md_h, PTL_ACK_REQ, remote_id,
                    desc->bd_portal, 0, xid, 0, 0);
        if (rc != PTL_OK) {
                CERROR("PtlPut(%Lu, %d, %d) failed: %d\n",
                       remote_id.nid, desc->bd_portal, xid, rc);
                PtlMDUnlink(desc->bd_md_h);
                LBUG();
                RETURN(rc);
        }

        RETURN(0);
}

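/*
 * Sink side of a bulk transfer: attach a match entry for the expected
 * xid on the bulk portal and an iovec MD spanning every queued page,
 * so the source's PtlPut() lands directly in the sink buffers.
 */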
int ptlrpc_register_bulk(struct ptlrpc_bulk_desc *desc)
{
        struct list_head *tmp, *next;
        int rc;
        __u32 xid = 0;
        struct iovec *iov;
        ENTRY;

        iov = ptlrpc_get_bulk_iov (desc);
        if (iov == NULL)
                RETURN(-ENOMEM);

        desc->bd_md.start = iov;
        desc->bd_md.niov = 0;
        desc->bd_md.length = 0;
        desc->bd_md.threshold = 1;
        desc->bd_md.options = PTL_MD_OP_PUT | PTL_MD_IOV;
        desc->bd_md.user_ptr = desc;
        desc->bd_md.eventq = bulk_sink_eq;

        list_for_each_safe(tmp, next, &desc->bd_page_list) {
                struct ptlrpc_bulk_page *bulk;
                bulk = list_entry(tmp, struct ptlrpc_bulk_page, bp_link);

                LASSERT (desc->bd_md.niov < desc->bd_page_count);

                if (desc->bd_md.niov == 0)
                        xid = bulk->bp_xid;
                LASSERT (xid == bulk->bp_xid);   /* should all be the same */

                iov[desc->bd_md.niov].iov_base = bulk->bp_buf;
                iov[desc->bd_md.niov].iov_len = bulk->bp_buflen;
                desc->bd_md.niov++;
                desc->bd_md.length += bulk->bp_buflen;
        }

        LASSERT (desc->bd_md.niov == desc->bd_page_count);
        LASSERT (desc->bd_md.niov != 0);

        rc = PtlMEAttach(desc->bd_connection->c_peer.peer_ni,
                         desc->bd_portal, local_id, xid, 0,
                         PTL_UNLINK, PTL_INS_AFTER, &desc->bd_me_h);

        ptlrpc_put_bulk_iov (desc, iov);

        if (rc != PTL_OK) {
                CERROR("PtlMEAttach failed: %d\n", rc);
                LBUG();
                GOTO(cleanup, rc);
        }

        rc = PtlMDAttach(desc->bd_me_h, desc->bd_md, PTL_UNLINK,
                         &desc->bd_md_h);
        if (rc != PTL_OK) {
                CERROR("PtlMDAttach failed: %d\n", rc);
                LBUG();
                GOTO(cleanup, rc);
        }

        CDEBUG(D_NET, "Setup bulk sink buffers: %u pages %u bytes, xid %u, "
               "portal %u\n", desc->bd_md.niov, desc->bd_md.length,
               xid, desc->bd_portal);

        RETURN(0);

 cleanup:
        ptlrpc_abort_bulk(desc);

        RETURN(rc);
}

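/* Unlink the bulk ME/MD posted by ptlrpc_register_bulk(). */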
int ptlrpc_abort_bulk(struct ptlrpc_bulk_desc *desc)
{
        /* This should be safe: these handles are initialized to be
         * invalid in ptlrpc_prep_bulk() */
        PtlMDUnlink(desc->bd_md_h);
        PtlMEUnlink(desc->bd_me_h);

        return 0;
}

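/*
 * Send the reply packed into rq_repmsg back on the service's reply
 * portal.  Callers with no reply message to send must use
 * ptlrpc_error() instead.
 */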
int ptlrpc_reply(struct ptlrpc_service *svc, struct ptlrpc_request *req)
{
        if (req->rq_repmsg == NULL) {
                CERROR("bad: someone called ptlrpc_reply when they meant "
                       "ptlrpc_error\n");
                return -EINVAL;
        }

        /* FIXME: we need to increment the count of handled events */
        req->rq_type = PTL_RPC_TYPE_REPLY;
        //req->rq_repmsg->conn = req->rq_connection->c_remote_conn;
        //req->rq_repmsg->token = req->rq_connection->c_remote_token;
        req->rq_repmsg->status = HTON__u32(req->rq_status);
        req->rq_repmsg->type = HTON__u32(req->rq_type);
        return ptl_send_buf(req, req->rq_connection, svc->srv_rep_portal);
}

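/*
 * Pack an empty reply so a request that failed before a reply buffer
 * was prepared can still report rq_status to the client.
 */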
int ptlrpc_error(struct ptlrpc_service *svc, struct ptlrpc_request *req)
{
        int rc;
        ENTRY;

        if (req->rq_repmsg) {
                CERROR("req already has repmsg\n");
                LBUG();
        }

        rc = lustre_pack_msg(0, NULL, NULL, &req->rq_replen, &req->rq_repmsg);
        if (rc)
                RETURN(rc);

        req->rq_repmsg->type = HTON__u32(PTL_RPC_MSG_ERR);

        rc = ptlrpc_reply(svc, req);
        RETURN(rc);
}

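/*
 * Post the buffer the reply will land in (matched by rq_xid on the
 * client's reply portal), then send the request itself.  On failure
 * the reply ME/MD are torn down again.
 */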
int ptl_send_rpc(struct ptlrpc_request *request)
{
        int rc;
        char *repbuf;

        ENTRY;

        if (NTOH__u32(request->rq_reqmsg->type) != PTL_RPC_MSG_REQUEST) {
                CERROR("wrong packet type sent %d\n",
                       NTOH__u32(request->rq_reqmsg->type));
                LBUG();
                RETURN(-EINVAL);
        }
        if (request->rq_replen == 0) {
                CERROR("request->rq_replen is 0!\n");
                RETURN(-EINVAL);
        }

        /* request->rq_repmsg is set only when the reply comes in, in
         * client_packet_callback() */
        if (request->rq_reply_md.start)
                OBD_FREE(request->rq_reply_md.start, request->rq_replen);

        OBD_ALLOC(repbuf, request->rq_replen);
        if (!repbuf) {
                LBUG();
                RETURN(-ENOMEM);
        }

        // down(&request->rq_client->cli_rpc_sem);

        rc = PtlMEAttach(request->rq_connection->c_peer.peer_ni,
                         request->rq_import->imp_client->cli_reply_portal,
                         local_id, request->rq_xid, 0, PTL_UNLINK,
                         PTL_INS_AFTER, &request->rq_reply_me_h);
        if (rc != PTL_OK) {
                CERROR("PtlMEAttach failed: %d\n", rc);
                LBUG();
                GOTO(cleanup, rc);
        }

        request->rq_type = PTL_RPC_TYPE_REQUEST;
        request->rq_reply_md.start = repbuf;
        request->rq_reply_md.length = request->rq_replen;
        request->rq_reply_md.threshold = 1;
        request->rq_reply_md.options = PTL_MD_OP_PUT;
        request->rq_reply_md.user_ptr = request;
        request->rq_reply_md.eventq = reply_in_eq;

        rc = PtlMDAttach(request->rq_reply_me_h, request->rq_reply_md,
                         PTL_UNLINK, &request->rq_reply_md_h);
        if (rc != PTL_OK) {
                CERROR("PtlMDAttach failed: %d\n", rc);
                LBUG();
                GOTO(cleanup2, rc);
        }

        CDEBUG(D_NET, "Setup reply buffer: %u bytes, xid %Lu, portal %u\n",
               request->rq_replen, request->rq_xid,
               request->rq_import->imp_client->cli_reply_portal);

        rc = ptl_send_buf(request, request->rq_connection,
                          request->rq_import->imp_client->cli_request_portal);
        RETURN(rc);

 cleanup2:
        PtlMEUnlink(request->rq_reply_me_h);
 cleanup:
        OBD_FREE(repbuf, request->rq_replen);
        // up(&request->rq_client->cli_rpc_sem);

        RETURN(rc);
}

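/*
 * (Re)link match entry i of the service's request ring: attach a
 * wildcard ME on the request portal and an auto-unlinking MD over
 * srv_buf[i] so the slot can absorb another run of incoming requests.
 */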
void ptlrpc_link_svc_me(struct ptlrpc_service *service, int i)
{
        int rc;
        ptl_md_t dummy;
        ptl_handle_md_t md_h;

        /* Attach the leading ME on which we build the ring */
        rc = PtlMEAttach(service->srv_self.peer_ni, service->srv_req_portal,
                         local_id, 0, ~0, PTL_RETAIN, PTL_INS_BEFORE,
                         &(service->srv_me_h[i]));
        if (rc != PTL_OK) {
                CERROR("PtlMEAttach failed: %d\n", rc);
                LBUG();
        }

        if (service->srv_ref_count[i])
                LBUG();

        dummy.start         = service->srv_buf[i];
        dummy.length        = service->srv_buf_size;
        dummy.max_offset    = service->srv_buf_size;
        dummy.threshold     = PTL_MD_THRESH_INF;
        dummy.options       = PTL_MD_OP_PUT | PTL_MD_AUTO_UNLINK;
        dummy.user_ptr      = service;
        dummy.eventq        = service->srv_eq_h;

        rc = PtlMDAttach(service->srv_me_h[i], dummy, PTL_UNLINK, &md_h);
        if (rc != PTL_OK) {
                /* cleanup */
                CERROR("PtlMDAttach failed: %d\n", rc);
                LBUG();
        }
}

/* ptl_handled_rpc() should be called by the sleeping process once
 * it finishes processing an event.  This ensures the ref count is
 * decremented and that the rpc ring buffer cycles properly.
 */
int ptl_handled_rpc(struct ptlrpc_service *service, void *start)
{
        int index;

        spin_lock(&service->srv_lock);
        for (index = 0; index < service->srv_ring_length; index++)
                if (service->srv_buf[index] == start)
                        break;

        if (index == service->srv_ring_length)
                LBUG();

        CDEBUG(D_INFO, "MD index=%d Ref Count=%d\n", index,
               service->srv_ref_count[index]);
        service->srv_ref_count[index]--;

        if (service->srv_ref_count[index] < 0)
                LBUG();

        if (service->srv_ref_count[index] == 0 &&
            !ptl_is_valid_handle(&(service->srv_me_h[index]))) {
                CDEBUG(D_NET, "relinking %d\n", index);
                ptlrpc_link_svc_me(service, index);
        }

        spin_unlock(&service->srv_lock);
        return 0;
}