lustre/ptlrpc/niobuf.c
/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 *  Copyright (C) 2002 Cluster File Systems, Inc.
 *
 *   This file is part of Lustre, http://www.lustre.org.
 *
 *   Lustre is free software; you can redistribute it and/or
 *   modify it under the terms of version 2 of the GNU General Public
 *   License as published by the Free Software Foundation.
 *
 *   Lustre is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with Lustre; if not, write to the Free Software
 *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 */

#define DEBUG_SUBSYSTEM S_RPC

#include <linux/obd_support.h>
#include <linux/lustre_net.h>
#include <linux/lustre_lib.h>
#include <linux/obd.h>

extern ptl_handle_eq_t request_out_eq, reply_in_eq, reply_out_eq,
        bulk_source_eq, bulk_sink_eq;

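/* Bind an MD to the request's outgoing message and PtlPut() it to the
 * given portal on the peer.  rq_type selects the message: requests go out
 * via request_out_eq, replies and errors via reply_out_eq.  The request's
 * xid is used as the match bits for the put. */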
static int ptl_send_buf(struct ptlrpc_request *request,
                        struct ptlrpc_connection *conn, int portal)
{
        int rc;
        ptl_process_id_t remote_id;
        ptl_handle_md_t md_h;

        LASSERT(conn);

        switch (request->rq_type) {
        case PTL_RPC_MSG_REQUEST:
                request->rq_reqmsg->type = HTON__u32(request->rq_type);
                request->rq_req_md.start = request->rq_reqmsg;
                request->rq_req_md.length = request->rq_reqlen;
                request->rq_req_md.eventq = request_out_eq;
                break;
        case PTL_RPC_MSG_ERR:
        case PTL_RPC_MSG_REPLY:
                request->rq_repmsg->type = HTON__u32(request->rq_type);
                request->rq_req_md.start = request->rq_repmsg;
                request->rq_req_md.length = request->rq_replen;
                request->rq_req_md.eventq = reply_out_eq;
                break;
        default:
                LBUG();
                return -1; /* notreached */
        }
        request->rq_req_md.threshold = 1;
        request->rq_req_md.options = PTL_MD_OP_PUT;
        request->rq_req_md.user_ptr = request;

        rc = PtlMDBind(conn->c_peer.peer_ni, request->rq_req_md, &md_h);
        if (rc != PTL_OK) {
                CERROR("PtlMDBind failed: %d\n", rc);
                LBUG();
                return rc;
        }

        remote_id.nid = conn->c_peer.peer_nid;
        remote_id.pid = 0;

        CDEBUG(D_NET, "Sending %d bytes to portal %d, xid "LPD64"\n",
               request->rq_req_md.length, portal, request->rq_xid);

        if (!portal)
                LBUG();
        rc = PtlPut(md_h, PTL_NOACK_REQ, remote_id, portal, 0, request->rq_xid,
                    0, 0);
        if (rc != PTL_OK) {
                CERROR("PtlPut("LPU64", %d, "LPD64") failed: %d\n",
                       remote_id.nid, portal, request->rq_xid, rc);
                PtlMDUnlink(md_h);
        }

        return rc;
}

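/* Helpers to get an iovec big enough for all of a bulk descriptor's
 * pages: the array embedded in the descriptor when it fits, otherwise a
 * temporary allocation.  ptlrpc_put_bulk_iov() frees only the allocated
 * case. */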
static inline struct iovec *
ptlrpc_get_bulk_iov (struct ptlrpc_bulk_desc *desc)
{
        struct iovec *iov;

        if (desc->bd_page_count <= sizeof (desc->bd_iov)/sizeof (struct iovec))
                return (desc->bd_iov);

        OBD_ALLOC (iov, desc->bd_page_count * sizeof (struct iovec));
        if (iov == NULL)
                LBUG();

        return (iov);
}

static inline void
ptlrpc_put_bulk_iov (struct ptlrpc_bulk_desc *desc, struct iovec *iov)
{
        if (desc->bd_page_count <= sizeof (desc->bd_iov)/sizeof (struct iovec))
                return;

        OBD_FREE (iov, desc->bd_page_count * sizeof (struct iovec));
}

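/* Source side of a bulk transfer: gather every page on bd_page_list into
 * a single scatter/gather MD and PtlPut() it to the peer's bulk portal,
 * requesting an ACK.  All pages must carry the same xid, which becomes
 * the match bits of the put; the SENT and ACK events both fire on
 * bulk_source_eq. */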
int ptlrpc_send_bulk(struct ptlrpc_bulk_desc *desc)
{
        int rc;
        struct list_head *tmp, *next;
        ptl_process_id_t remote_id;
        __u32 xid = 0;
        struct iovec *iov;
        ENTRY;

        iov = ptlrpc_get_bulk_iov (desc);
        if (iov == NULL)
                RETURN (-ENOMEM);

        desc->bd_md.start = iov;
        desc->bd_md.niov = 0;
        desc->bd_md.length = 0;
        desc->bd_md.eventq = bulk_source_eq;
        desc->bd_md.threshold = 2; /* SENT and ACK */
        desc->bd_md.options = PTL_MD_OP_PUT | PTL_MD_IOV;
        desc->bd_md.user_ptr = desc;

        atomic_set (&desc->bd_source_callback_count, 2);

        list_for_each_safe(tmp, next, &desc->bd_page_list) {
                struct ptlrpc_bulk_page *bulk;
                bulk = list_entry(tmp, struct ptlrpc_bulk_page, bp_link);

                LASSERT (desc->bd_md.niov < desc->bd_page_count);

                if (desc->bd_md.niov == 0)
                        xid = bulk->bp_xid;
                LASSERT (xid == bulk->bp_xid);   /* should all be the same */

                iov[desc->bd_md.niov].iov_base = bulk->bp_buf;
                iov[desc->bd_md.niov].iov_len = bulk->bp_buflen;
                desc->bd_md.niov++;
                desc->bd_md.length += bulk->bp_buflen;
        }

        LASSERT (desc->bd_md.niov == desc->bd_page_count);
        LASSERT (desc->bd_md.niov != 0);

        rc = PtlMDBind(desc->bd_connection->c_peer.peer_ni, desc->bd_md,
                       &desc->bd_md_h);

        ptlrpc_put_bulk_iov (desc, iov); /* move down to reduce latency to send */

        if (rc != PTL_OK) {
                CERROR("PtlMDBind failed: %d\n", rc);
                LBUG();
                RETURN(rc);
        }

        remote_id.nid = desc->bd_connection->c_peer.peer_nid;
        remote_id.pid = 0;

        CDEBUG(D_NET, "Sending %u pages %u bytes to portal %d nid "LPX64" pid "
               "%d xid %d\n", desc->bd_md.niov, desc->bd_md.length,
               desc->bd_portal, remote_id.nid, remote_id.pid, xid);

        rc = PtlPut(desc->bd_md_h, PTL_ACK_REQ, remote_id,
                    desc->bd_portal, 0, xid, 0, 0);
        if (rc != PTL_OK) {
                CERROR("PtlPut("LPU64", %d, %d) failed: %d\n",
                       remote_id.nid, desc->bd_portal, xid, rc);
                PtlMDUnlink(desc->bd_md_h);
                LBUG();
                RETURN(rc);
        }

        RETURN(0);
}

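/* Sink side of a bulk transfer: attach an ME on the bulk portal matching
 * the peer's xid and hang a scatter/gather MD off it, so the incoming
 * PtlPut() lands directly in the descriptor's pages and signals
 * bulk_sink_eq. */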
int ptlrpc_register_bulk(struct ptlrpc_bulk_desc *desc)
{
        struct list_head *tmp, *next;
        int rc;
        __u32 xid = 0;
        struct iovec *iov;
        ptl_process_id_t source_id;
        ENTRY;

        if (desc->bd_page_count > PTL_MD_MAX_IOV) {
                CERROR("iov longer than %d pages not supported (count=%d)\n",
                       PTL_MD_MAX_IOV, desc->bd_page_count);
                RETURN(-EINVAL);
        }

        iov = ptlrpc_get_bulk_iov (desc);
        if (iov == NULL)
                RETURN(-ENOMEM);

        desc->bd_md.start = iov;
        desc->bd_md.niov = 0;
        desc->bd_md.length = 0;
        desc->bd_md.threshold = 1;
        desc->bd_md.options = PTL_MD_OP_PUT | PTL_MD_IOV;
        desc->bd_md.user_ptr = desc;
        desc->bd_md.eventq = bulk_sink_eq;

        list_for_each_safe(tmp, next, &desc->bd_page_list) {
                struct ptlrpc_bulk_page *bulk;
                bulk = list_entry(tmp, struct ptlrpc_bulk_page, bp_link);

                LASSERT (desc->bd_md.niov < desc->bd_page_count);

                if (desc->bd_md.niov == 0)
                        xid = bulk->bp_xid;
                LASSERT (xid == bulk->bp_xid);   /* should all be the same */

                iov[desc->bd_md.niov].iov_base = bulk->bp_buf;
                iov[desc->bd_md.niov].iov_len = bulk->bp_buflen;
                desc->bd_md.niov++;
                desc->bd_md.length += bulk->bp_buflen;
        }

        LASSERT (desc->bd_md.niov == desc->bd_page_count);
        LASSERT (desc->bd_md.niov != 0);

        source_id.nid = desc->bd_connection->c_peer.peer_nid;
        source_id.pid = PTL_PID_ANY;

        rc = PtlMEAttach(desc->bd_connection->c_peer.peer_ni,
                         desc->bd_portal, source_id, xid, 0,
                         PTL_UNLINK, PTL_INS_AFTER, &desc->bd_me_h);

        if (rc != PTL_OK) {
                CERROR("PtlMEAttach failed: %d\n", rc);
                LBUG();
                GOTO(cleanup, rc);
        }

        rc = PtlMDAttach(desc->bd_me_h, desc->bd_md, PTL_UNLINK,
                         &desc->bd_md_h);
        if (rc != PTL_OK) {
                CERROR("PtlMDAttach failed: %d\n", rc);
                LBUG();
                GOTO(cleanup, rc);
        }

        ptlrpc_put_bulk_iov (desc, iov);

        CDEBUG(D_NET, "Setup bulk sink buffers: %u pages %u bytes, xid %u, "
               "portal %u\n", desc->bd_md.niov, desc->bd_md.length,
               xid, desc->bd_portal);

        RETURN(0);

 cleanup:
        ptlrpc_put_bulk_iov (desc, iov);
        ptlrpc_abort_bulk(desc);

        RETURN(rc);
}

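/* Tear down the ME/MD a bulk descriptor may have registered. */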
int ptlrpc_abort_bulk(struct ptlrpc_bulk_desc *desc)
{
        /* This should be safe: these handles are initialized to be
         * invalid in ptlrpc_prep_bulk() */
        PtlMDUnlink(desc->bd_md_h);
        PtlMEUnlink(desc->bd_me_h);

        return 0;
}

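/* Chain a bulk descriptor onto a brw set, taking a reference on each. */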
void obd_brw_set_add(struct obd_brw_set *set, struct ptlrpc_bulk_desc *desc)
{
        atomic_inc(&desc->bd_refcount);
        atomic_inc(&set->brw_refcount);
        desc->bd_brw_set = set;
        list_add(&desc->bd_set_chain, &set->brw_desc_head);
}

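/* Allocate and initialize an empty brw set; returns NULL if the
 * allocation fails. */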
struct obd_brw_set *obd_brw_set_new(void)
{
        struct obd_brw_set *set;

        OBD_ALLOC(set, sizeof(*set));

        if (set != NULL) {
                init_waitqueue_head(&set->brw_waitq);
                INIT_LIST_HEAD(&set->brw_desc_head);
                atomic_set(&set->brw_refcount, 0);
        }

        return set;
}

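/* Free a brw set, complaining about (and aborting) any bulk descriptors
 * still chained on it. */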
void obd_brw_set_free(struct obd_brw_set *set)
{
        struct list_head *tmp, *next;
        ENTRY;

        list_for_each_safe(tmp, next, &set->brw_desc_head) {
                struct ptlrpc_bulk_desc *desc =
                        list_entry(tmp, struct ptlrpc_bulk_desc, bd_set_chain);

                CERROR("Unfinished bulk descriptor: %p\n", desc);

                ptlrpc_abort_bulk(desc);
        }
        OBD_FREE(set, sizeof(*set));
        EXIT;
        return;
}

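/* Send the reply packed in rq_repmsg back to the client on the service's
 * reply portal, preserving rq_type if it is already PTL_RPC_MSG_ERR. */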
int ptlrpc_reply(struct ptlrpc_service *svc, struct ptlrpc_request *req)
{
        if (req->rq_repmsg == NULL) {
                CERROR("bad: someone called ptlrpc_reply when they meant "
                       "ptlrpc_error\n");
                return -EINVAL;
        }

        /* FIXME: we need to increment the count of handled events */
        if (req->rq_type != PTL_RPC_MSG_ERR)
                req->rq_type = PTL_RPC_MSG_REPLY;
        //req->rq_repmsg->conn = req->rq_connection->c_remote_conn;
        //req->rq_repmsg->token = req->rq_connection->c_remote_token;
        req->rq_repmsg->status = HTON__u32(req->rq_status);
        return ptl_send_buf(req, req->rq_connection, svc->srv_rep_portal);
}

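/* Send an error reply: pack an empty message, mark the request
 * PTL_RPC_MSG_ERR and hand it to ptlrpc_reply(), which carries rq_status
 * back to the client. */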
int ptlrpc_error(struct ptlrpc_service *svc, struct ptlrpc_request *req)
{
        int rc;
        ENTRY;

        if (req->rq_repmsg) {
                CERROR("req already has repmsg\n");
                LBUG();
        }

        rc = lustre_pack_msg(0, NULL, NULL, &req->rq_replen, &req->rq_repmsg);
        if (rc)
                RETURN(rc);

        req->rq_type = PTL_RPC_MSG_ERR;

        rc = ptlrpc_reply(svc, req);
        RETURN(rc);
}

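/* Send a request to its server.  If a reply is expected (rq_replen != 0)
 * a reply buffer is posted first: an ME matching our xid on the reply
 * portal, with an MD that delivers the incoming reply to reply_in_eq.
 * Any reply buffer left over from a previous send of this request is
 * unlinked and freed before the new one is posted. */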
int ptl_send_rpc(struct ptlrpc_request *request)
{
        int rc;
        char *repbuf;
        ptl_process_id_t source_id;

        ENTRY;

        if (request->rq_type != PTL_RPC_MSG_REQUEST) {
                CERROR("wrong packet type sent %d\n", request->rq_type);
                LBUG();
                RETURN(-EINVAL);
        }

        source_id.nid = request->rq_connection->c_peer.peer_nid;
        source_id.pid = PTL_PID_ANY;

        /* add a ref, which will be balanced in request_out_callback */
        atomic_inc(&request->rq_refcount);
        if (request->rq_replen != 0) {
                /* request->rq_repmsg is set only when the reply comes in, in
                 * client_packet_callback() */
                if (request->rq_reply_md.start) {
                        PtlMEUnlink(request->rq_reply_me_h);
                        OBD_FREE(request->rq_reply_md.start,
                                 request->rq_replen);
                        /* If we're resending, rq_repmsg needs to be NULLed out
                         * again so that ptlrpc_check_reply doesn't trip early.
                         */
                        request->rq_repmsg = NULL;
                }
                OBD_ALLOC(repbuf, request->rq_replen);
                if (!repbuf) {
                        LBUG();
                        RETURN(-ENOMEM);
                }

                rc = PtlMEAttach(request->rq_connection->c_peer.peer_ni,
                             request->rq_reply_portal,/* XXX FIXME bug 625069 */
                                 source_id, request->rq_xid, 0, PTL_UNLINK,
                                 PTL_INS_AFTER, &request->rq_reply_me_h);
                if (rc != PTL_OK) {
                        CERROR("PtlMEAttach failed: %d\n", rc);
                        LBUG();
                        GOTO(cleanup, rc);
                }

                request->rq_reply_md.start = repbuf;
                request->rq_reply_md.length = request->rq_replen;
                request->rq_reply_md.threshold = 1;
                request->rq_reply_md.options = PTL_MD_OP_PUT;
                request->rq_reply_md.user_ptr = request;
                request->rq_reply_md.eventq = reply_in_eq;

                rc = PtlMDAttach(request->rq_reply_me_h, request->rq_reply_md,
                                 PTL_UNLINK, NULL);
                if (rc != PTL_OK) {
                        CERROR("PtlMDAttach failed: %d\n", rc);
                        LBUG();
                        GOTO(cleanup2, rc);
                }

                CDEBUG(D_NET, "Setup reply buffer: %u bytes, xid "LPU64
                       ", portal %u\n",
                       request->rq_replen, request->rq_xid,
                       request->rq_reply_portal);
        }

        /* Clear any flags that may be present from previous sends,
         * except for REPLAY. */
        request->rq_flags &= PTL_RPC_FL_REPLAY;
        rc = ptl_send_buf(request, request->rq_connection,
                          request->rq_request_portal);
        RETURN(rc);

 cleanup2:
        PtlMEUnlink(request->rq_reply_me_h);
 cleanup:
        OBD_FREE(repbuf, request->rq_replen);
        // up(&request->rq_client->cli_rpc_sem);

        RETURN(rc);
}

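/* Post a request buffer on the service's request portal: an ME matching
 * any peer, with an auto-unlinking MD that feeds incoming requests to the
 * service's event queue. */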
void ptlrpc_link_svc_me(struct ptlrpc_request_buffer_desc *rqbd)
{
        struct ptlrpc_service *service = rqbd->rqbd_service;
        static ptl_process_id_t match_id = {PTL_NID_ANY, PTL_PID_ANY};
        int rc;
        ptl_md_t dummy;
        ptl_handle_md_t md_h;

        LASSERT (atomic_read (&rqbd->rqbd_refcount) == 0);

        /* Attach the leading ME on which we build the ring */
        rc = PtlMEAttach(service->srv_self.peer_ni, service->srv_req_portal,
                         match_id, 0, ~0,
                         PTL_UNLINK, PTL_INS_AFTER, &rqbd->rqbd_me_h);
        if (rc != PTL_OK) {
                CERROR("PtlMEAttach failed: %d\n", rc);
                LBUG();
        }

        dummy.start      = rqbd->rqbd_buffer;
        dummy.length     = service->srv_buf_size;
        dummy.max_size   = service->srv_max_req_size;
        dummy.threshold  = PTL_MD_THRESH_INF;
        dummy.options    = PTL_MD_OP_PUT | PTL_MD_MAX_SIZE | PTL_MD_AUTO_UNLINK;
        dummy.user_ptr   = rqbd;
        dummy.eventq     = service->srv_eq_h;

        atomic_inc (&service->srv_nrqbds_receiving);
        atomic_set (&rqbd->rqbd_refcount, 1);   /* 1 ref for portals */

        rc = PtlMDAttach(rqbd->rqbd_me_h, dummy, PTL_UNLINK, &md_h);
        if (rc != PTL_OK) {
                CERROR("PtlMDAttach failed: %d\n", rc);
                LBUG();
#warning proper cleanup required
                PtlMEUnlink (rqbd->rqbd_me_h);
                atomic_set (&rqbd->rqbd_refcount, 0);
                atomic_dec (&service->srv_nrqbds_receiving);
        }
}