Fixed service request buffer race
lustre/ptlrpc/niobuf.c  (fs/lustre-release.git)
/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 *  Copyright (C) 2002 Cluster File Systems, Inc.
 *
 *   This file is part of Lustre, http://www.lustre.org.
 *
 *   Lustre is free software; you can redistribute it and/or
 *   modify it under the terms of version 2 of the GNU General Public
 *   License as published by the Free Software Foundation.
 *
 *   Lustre is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with Lustre; if not, write to the Free Software
 *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 */

#define DEBUG_SUBSYSTEM S_RPC

#include <linux/obd_support.h>
#include <linux/lustre_net.h>
#include <linux/lustre_lib.h>

extern ptl_handle_eq_t request_out_eq, reply_in_eq, reply_out_eq,
        bulk_source_eq, bulk_sink_eq;

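/*
 * Bind the request or reply message described by @request as a memory
 * descriptor and PtlPut() it to @portal on the peer named by @conn.
 * Depending on rq_type the MD points at rq_reqmsg or rq_repmsg and its
 * completion events arrive on request_out_eq or reply_out_eq.
 */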
static int ptl_send_buf(struct ptlrpc_request *request,
                        struct ptlrpc_connection *conn, int portal)
{
        int rc;
        ptl_process_id_t remote_id;
        ptl_handle_md_t md_h;

        switch (request->rq_type) {
        case PTL_RPC_MSG_REQUEST:
                request->rq_reqmsg->type = HTON__u32(request->rq_type);
                request->rq_req_md.start = request->rq_reqmsg;
                request->rq_req_md.length = request->rq_reqlen;
                request->rq_req_md.eventq = request_out_eq;
                break;
        case PTL_RPC_MSG_REPLY:
                request->rq_repmsg->type = HTON__u32(request->rq_type);
                request->rq_req_md.start = request->rq_repmsg;
                request->rq_req_md.length = request->rq_replen;
                request->rq_req_md.eventq = reply_out_eq;
                break;
        default:
                LBUG();
                return -1; /* notreached */
        }
        request->rq_req_md.threshold = 1;
        request->rq_req_md.options = PTL_MD_OP_PUT;
        request->rq_req_md.user_ptr = request;

        rc = PtlMDBind(conn->c_peer.peer_ni, request->rq_req_md, &md_h);
        if (rc != PTL_OK) {
                CERROR("PtlMDBind failed: %d\n", rc);
                LBUG();
                return rc;
        }

        remote_id.nid = conn->c_peer.peer_nid;
        remote_id.pid = 0;

        CDEBUG(D_NET, "Sending %d bytes to portal %d, xid %Ld\n",
               request->rq_req_md.length, portal, request->rq_xid);

        if (!portal)
                LBUG();
        rc = PtlPut(md_h, PTL_NOACK_REQ, remote_id, portal, 0, request->rq_xid,
                    0, 0);
        if (rc != PTL_OK) {
                CERROR("PtlPut(%Lu, %d, %Ld) failed: %d\n", remote_id.nid,
                       portal, request->rq_xid, rc);
                PtlMDUnlink(md_h);
        }

        return rc;
}

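/*
 * Return an iovec array large enough for every page in the bulk
 * descriptor: the embedded bd_iov array when it fits, otherwise a
 * temporary allocation that ptlrpc_put_bulk_iov() releases again.
 */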
static inline struct iovec *
ptlrpc_get_bulk_iov (struct ptlrpc_bulk_desc *desc)
{
        struct iovec *iov;

        if (desc->bd_page_count <= sizeof (desc->bd_iov)/sizeof (struct iovec))
                return (desc->bd_iov);

        OBD_ALLOC (iov, desc->bd_page_count * sizeof (struct iovec));
        if (iov == NULL)
                LBUG();

        return (iov);
}

static inline void
ptlrpc_put_bulk_iov (struct ptlrpc_bulk_desc *desc, struct iovec *iov)
{
        if (desc->bd_page_count <= sizeof (desc->bd_iov)/sizeof (struct iovec))
                return;

        OBD_FREE (iov, desc->bd_page_count * sizeof (struct iovec));
}

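/*
 * Source side of a bulk transfer: gather every page on bd_page_list
 * into one IOV memory descriptor, bind it and PtlPut() it to the peer's
 * bulk portal with an ACK requested.  The threshold of 2 accounts for
 * the SENT and ACK events delivered on bulk_source_eq.
 */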
int ptlrpc_send_bulk(struct ptlrpc_bulk_desc *desc)
{
        int rc;
        struct list_head *tmp, *next;
        ptl_process_id_t remote_id;
        __u32 xid = 0;
        struct iovec *iov;
        ENTRY;

        iov = ptlrpc_get_bulk_iov (desc);
        if (iov == NULL)
                RETURN (-ENOMEM);

        desc->bd_md.start = iov;
        desc->bd_md.niov = 0;
        desc->bd_md.length = 0;
        desc->bd_md.eventq = bulk_source_eq;
        desc->bd_md.threshold = 2; /* SENT and ACK */
        desc->bd_md.options = PTL_MD_OP_PUT | PTL_MD_IOV;
        desc->bd_md.user_ptr = desc;

        list_for_each_safe(tmp, next, &desc->bd_page_list) {
                struct ptlrpc_bulk_page *bulk;
                bulk = list_entry(tmp, struct ptlrpc_bulk_page, bp_link);

                LASSERT (desc->bd_md.niov < desc->bd_page_count);

                if (desc->bd_md.niov == 0)
                        xid = bulk->bp_xid;
                LASSERT (xid == bulk->bp_xid);   /* should all be the same */

                iov[desc->bd_md.niov].iov_base = bulk->bp_buf;
                iov[desc->bd_md.niov].iov_len = bulk->bp_buflen;
                desc->bd_md.niov++;
                desc->bd_md.length += bulk->bp_buflen;
        }

        LASSERT (desc->bd_md.niov == desc->bd_page_count);
        LASSERT (desc->bd_md.niov != 0);

        rc = PtlMDBind(desc->bd_connection->c_peer.peer_ni, desc->bd_md,
                       &desc->bd_md_h);

        ptlrpc_put_bulk_iov (desc, iov);        /* move down to reduce latency to send */

        if (rc != PTL_OK) {
                CERROR("PtlMDBind failed: %d\n", rc);
                LBUG();
                RETURN(rc);
        }

        remote_id.nid = desc->bd_connection->c_peer.peer_nid;
        remote_id.pid = 0;

        CDEBUG(D_NET, "Sending %u pages %u bytes to portal %d nid %Lx pid %d xid %d\n",
               desc->bd_md.niov, desc->bd_md.length,
               desc->bd_portal, remote_id.nid, remote_id.pid, xid);

        rc = PtlPut(desc->bd_md_h, PTL_ACK_REQ, remote_id,
                    desc->bd_portal, 0, xid, 0, 0);
        if (rc != PTL_OK) {
                CERROR("PtlPut(%Lu, %d, %d) failed: %d\n",
                       remote_id.nid, desc->bd_portal, xid, rc);
                PtlMDUnlink(desc->bd_md_h);
                LBUG();
                RETURN(rc);
        }

        RETURN(0);
}

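/*
 * Sink side of a bulk transfer: attach a match entry on the bulk portal
 * keyed on the bulk xid and peer NID, then attach an IOV memory
 * descriptor covering every page on bd_page_list so incoming data lands
 * directly in the bulk pages; events arrive on bulk_sink_eq.
 */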
int ptlrpc_register_bulk(struct ptlrpc_bulk_desc *desc)
{
        struct list_head *tmp, *next;
        int rc;
        __u32 xid = 0;
        struct iovec *iov;
        ptl_process_id_t source_id;
        ENTRY;

        iov = ptlrpc_get_bulk_iov (desc);
        if (iov == NULL)
                return (-ENOMEM);

        desc->bd_md.start = iov;
        desc->bd_md.niov = 0;
        desc->bd_md.length = 0;
        desc->bd_md.threshold = 1;
        desc->bd_md.options = PTL_MD_OP_PUT | PTL_MD_IOV;
        desc->bd_md.user_ptr = desc;
        desc->bd_md.eventq = bulk_sink_eq;

        list_for_each_safe(tmp, next, &desc->bd_page_list) {
                struct ptlrpc_bulk_page *bulk;
                bulk = list_entry(tmp, struct ptlrpc_bulk_page, bp_link);

                LASSERT (desc->bd_md.niov < desc->bd_page_count);

                if (desc->bd_md.niov == 0)
                        xid = bulk->bp_xid;
                LASSERT (xid == bulk->bp_xid);   /* should all be the same */

                iov[desc->bd_md.niov].iov_base = bulk->bp_buf;
                iov[desc->bd_md.niov].iov_len = bulk->bp_buflen;
                desc->bd_md.niov++;
                desc->bd_md.length += bulk->bp_buflen;
        }

        LASSERT (desc->bd_md.niov == desc->bd_page_count);
        LASSERT (desc->bd_md.niov != 0);

        source_id.nid = desc->bd_connection->c_peer.peer_nid;
        source_id.pid = PTL_PID_ANY;

        rc = PtlMEAttach(desc->bd_connection->c_peer.peer_ni,
                         desc->bd_portal, source_id, xid, 0,
                         PTL_UNLINK, PTL_INS_AFTER, &desc->bd_me_h);

        ptlrpc_put_bulk_iov (desc, iov);

        if (rc != PTL_OK) {
                CERROR("PtlMEAttach failed: %d\n", rc);
                LBUG();
                GOTO(cleanup, rc);
        }

        rc = PtlMDAttach(desc->bd_me_h, desc->bd_md, PTL_UNLINK,
                         &desc->bd_md_h);
        if (rc != PTL_OK) {
                CERROR("PtlMDAttach failed: %d\n", rc);
                LBUG();
                GOTO(cleanup, rc);
        }

        CDEBUG(D_NET, "Setup bulk sink buffers: %u pages %u bytes, xid %u, "
               "portal %u\n", desc->bd_md.niov, desc->bd_md.length,
               xid, desc->bd_portal);

        RETURN(0);

 cleanup:
        ptlrpc_abort_bulk(desc);

        return rc;
}

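/*
 * Unlink the bulk MD and ME so no further bulk events can be delivered
 * for this descriptor (used as the cleanup path above).
 */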
int ptlrpc_abort_bulk(struct ptlrpc_bulk_desc *desc)
{
        /* This should be safe: these handles are initialized to be
         * invalid in ptlrpc_prep_bulk() */
        PtlMDUnlink(desc->bd_md_h);
        PtlMEUnlink(desc->bd_me_h);

        return 0;
}

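/*
 * Send the reply a request handler has packed into rq_repmsg back to
 * the client through the service's reply portal.
 */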
int ptlrpc_reply(struct ptlrpc_service *svc, struct ptlrpc_request *req)
{
        if (req->rq_repmsg == NULL) {
                CERROR("bad: someone called ptlrpc_reply when they meant "
                       "ptlrpc_error\n");
                return -EINVAL;
        }

        /* FIXME: we need to increment the count of handled events */
        if (req->rq_type != PTL_RPC_MSG_ERR)
                req->rq_type = PTL_RPC_MSG_REPLY;
        //req->rq_repmsg->conn = req->rq_connection->c_remote_conn;
        //req->rq_repmsg->token = req->rq_connection->c_remote_token;
        req->rq_repmsg->status = HTON__u32(req->rq_status);
        return ptl_send_buf(req, req->rq_connection, svc->srv_rep_portal);
}

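/*
 * Reply with an error when no reply message has been packed yet: pack
 * an empty reply, stamp it PTL_RPC_MSG_ERR and send it via
 * ptlrpc_reply().
 */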
int ptlrpc_error(struct ptlrpc_service *svc, struct ptlrpc_request *req)
{
        int rc;
        ENTRY;

        if (req->rq_repmsg) {
                CERROR("req already has repmsg\n");
                LBUG();
        }

        rc = lustre_pack_msg(0, NULL, NULL, &req->rq_replen, &req->rq_repmsg);
        if (rc)
                RETURN(rc);

        req->rq_repmsg->type = HTON__u32(PTL_RPC_MSG_ERR);

        rc = ptlrpc_reply(svc, req);
        RETURN(rc);
}

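/*
 * Client-side send: post the reply buffer first (an ME on the client's
 * reply portal matched on rq_xid, with an MD feeding reply_in_eq), then
 * push the request itself to the server's request portal through
 * ptl_send_buf().
 */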
int ptl_send_rpc(struct ptlrpc_request *request)
{
        int rc;
        char *repbuf;
        ptl_process_id_t source_id;

        ENTRY;

        if (request->rq_type != PTL_RPC_MSG_REQUEST) {
                CERROR("wrong packet type sent %d\n", request->rq_type);
                LBUG();
                RETURN(EINVAL);
        }
        if (request->rq_replen == 0) {
                CERROR("request->rq_replen is 0!\n");
                RETURN(EINVAL);
        }

        /* request->rq_repmsg is set only when the reply comes in, in
         * client_packet_callback() */
        if (request->rq_reply_md.start)
                OBD_FREE(request->rq_reply_md.start, request->rq_replen);

        OBD_ALLOC(repbuf, request->rq_replen);
        if (!repbuf) {
                LBUG();
                RETURN(ENOMEM);
        }

        // down(&request->rq_client->cli_rpc_sem);

        source_id.nid = request->rq_connection->c_peer.peer_nid;
        source_id.pid = PTL_PID_ANY;

        rc = PtlMEAttach(request->rq_connection->c_peer.peer_ni,
                         request->rq_import->imp_client->cli_reply_portal,
                         source_id, request->rq_xid, 0, PTL_UNLINK,
                         PTL_INS_AFTER, &request->rq_reply_me_h);
        if (rc != PTL_OK) {
                CERROR("PtlMEAttach failed: %d\n", rc);
                LBUG();
                GOTO(cleanup, rc);
        }

        request->rq_reply_md.start = repbuf;
        request->rq_reply_md.length = request->rq_replen;
        request->rq_reply_md.threshold = 1;
        request->rq_reply_md.options = PTL_MD_OP_PUT;
        request->rq_reply_md.user_ptr = request;
        request->rq_reply_md.eventq = reply_in_eq;

        rc = PtlMDAttach(request->rq_reply_me_h, request->rq_reply_md,
                         PTL_UNLINK, &request->rq_reply_md_h);
        if (rc != PTL_OK) {
                CERROR("PtlMDAttach failed: %d\n", rc);
                LBUG();
                GOTO(cleanup2, rc);
        }

        CDEBUG(D_NET, "Setup reply buffer: %u bytes, xid %Lu, portal %u\n",
               request->rq_replen, request->rq_xid,
               request->rq_import->imp_client->cli_reply_portal);

        rc = ptl_send_buf(request, request->rq_connection,
                          request->rq_import->imp_client->cli_request_portal);
        RETURN(rc);

 cleanup2:
        PtlMEUnlink(request->rq_reply_me_h);
 cleanup:
        OBD_FREE(repbuf, request->rq_replen);
        // up(&request->rq_client->cli_rpc_sem);

        return rc;
}

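/*
 * Post a service request buffer: attach a wildcard ME on the service's
 * request portal and hang the buffer off it as a single-use
 * (threshold 1, PTL_UNLINK) MD whose events go to the service EQ.
 */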
void ptlrpc_link_svc_me(struct ptlrpc_request_buffer_desc *rqbd)
{
        struct ptlrpc_service *service = rqbd->rqbd_service;
        static ptl_process_id_t match_id = {PTL_NID_ANY, PTL_PID_ANY};
        int rc;
        ptl_md_t dummy;
        ptl_handle_md_t md_h;

        /* Attach the leading ME on which we build the ring */
        rc = PtlMEAttach(service->srv_self.peer_ni, service->srv_req_portal,
                         match_id, 0, ~0,
                         PTL_UNLINK, PTL_INS_AFTER, &rqbd->rqbd_me_h);
        if (rc != PTL_OK) {
                CERROR("PtlMEAttach failed: %d\n", rc);
                LBUG();
        }

        dummy.start         = rqbd->rqbd_buffer;
        dummy.length        = service->srv_buf_size;
        dummy.max_offset    = service->srv_buf_size;
        dummy.threshold     = 1;
        dummy.options       = PTL_MD_OP_PUT;
        dummy.user_ptr      = rqbd;
        dummy.eventq        = service->srv_eq_h;

        rc = PtlMDAttach(rqbd->rqbd_me_h, dummy, PTL_UNLINK, &md_h);
        if (rc != PTL_OK) {
                /* cleanup */
                CERROR("PtlMDAttach failed: %d\n", rc);
                LBUG();
        }
}