/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 *  Copyright (c) 2002, 2003 Cluster File Systems, Inc.
 *
 *   This file is part of Lustre, http://www.lustre.org.
 *
 *   Lustre is free software; you can redistribute it and/or
 *   modify it under the terms of version 2 of the GNU General Public
 *   License as published by the Free Software Foundation.
 *
 *   Lustre is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with Lustre; if not, write to the Free Software
 *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 */

#define DEBUG_SUBSYSTEM S_RPC

#include <linux/obd_support.h>
#include <linux/lustre_net.h>
#include <linux/lustre_lib.h>
#include <linux/obd.h>

extern ptl_handle_eq_t request_out_eq, reply_in_eq, reply_out_eq,
        bulk_put_source_eq, bulk_put_sink_eq,
        bulk_get_source_eq, bulk_get_sink_eq;

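/*
 * Send a request or reply message to the peer with a single
 * (unacknowledged) PtlPut.  The message buffer, length and event
 * queue are staged in rq_req_md according to the message type, and
 * the request XID is used as the match bits.
 */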
static int ptl_send_buf(struct ptlrpc_request *request,
                        struct ptlrpc_connection *conn, int portal)
{
        int rc;
        ptl_process_id_t remote_id;
        ptl_handle_md_t md_h;

        LASSERT(conn);

        switch (request->rq_type) {
        case PTL_RPC_MSG_REQUEST:
                request->rq_reqmsg->type = HTON__u32(request->rq_type);
                request->rq_req_md.start = request->rq_reqmsg;
                request->rq_req_md.length = request->rq_reqlen;
                request->rq_req_md.eventq = request_out_eq;
                break;
        case PTL_RPC_MSG_ERR:
        case PTL_RPC_MSG_REPLY:
                request->rq_repmsg->type = HTON__u32(request->rq_type);
                request->rq_req_md.start = request->rq_repmsg;
                request->rq_req_md.length = request->rq_replen;
                request->rq_req_md.eventq = reply_out_eq;
                break;
        default:
                LBUG();
                return -1; /* notreached */
        }
        request->rq_req_md.threshold = 1;
        request->rq_req_md.options = PTL_MD_OP_PUT;
        request->rq_req_md.user_ptr = request;

        rc = PtlMDBind(conn->c_peer.peer_ni, request->rq_req_md, &md_h);
        if (rc != PTL_OK) {
                CERROR("PtlMDBind failed: %d\n", rc);
                LBUG();
                return rc;
        }

        remote_id.nid = conn->c_peer.peer_nid;
        remote_id.pid = 0;

        CDEBUG(D_NET, "Sending %d bytes to portal %d, xid "LPD64"\n",
               request->rq_req_md.length, portal, request->rq_xid);

        if (!portal)
                LBUG();
        rc = PtlPut(md_h, PTL_NOACK_REQ, remote_id, portal, 0, request->rq_xid,
                    0, 0);
        if (rc != PTL_OK) {
                CERROR("PtlPut("LPU64", %d, "LPD64") failed: %d\n",
                       remote_id.nid, portal, request->rq_xid, rc);
                PtlMDUnlink(md_h);
        }

        return rc;
}

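/*
 * Return an iovec with room for the descriptor's pages: the
 * descriptor's embedded bd_iov array if it is big enough, otherwise a
 * freshly allocated one.  ptlrpc_put_bulk_iov() below releases
 * whatever this hands out.
 */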
static inline struct iovec *
ptlrpc_get_bulk_iov (struct ptlrpc_bulk_desc *desc)
{
        struct iovec *iov;

        if (desc->bd_page_count <= sizeof (desc->bd_iov)/sizeof (struct iovec))
                return (desc->bd_iov);

        OBD_ALLOC (iov, desc->bd_page_count * sizeof (struct iovec));
        if (iov == NULL)
                LBUG();

        return (iov);
}

static inline void
ptlrpc_put_bulk_iov (struct ptlrpc_bulk_desc *desc, struct iovec *iov)
{
        if (desc->bd_page_count <= sizeof (desc->bd_iov)/sizeof (struct iovec))
                return;

        OBD_FREE (iov, desc->bd_page_count * sizeof (struct iovec));
}

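/*
 * Active side of a bulk PUT: gather the descriptor's pages into one
 * iovec MD, bind it, and PtlPut the data to the peer's bulk portal.
 * The threshold of 2 covers the SENT and ACK events that must both
 * arrive before the transfer is considered complete.
 */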
int ptlrpc_bulk_put(struct ptlrpc_bulk_desc *desc)
{
        int rc;
        struct list_head *tmp, *next;
        ptl_process_id_t remote_id;
        __u32 xid = 0;
        struct iovec *iov;
        ENTRY;

        iov = ptlrpc_get_bulk_iov (desc);
        if (iov == NULL)
                RETURN (-ENOMEM);

        desc->bd_md.start = iov;
        desc->bd_md.niov = 0;
        desc->bd_md.length = 0;
        desc->bd_md.eventq = bulk_put_source_eq;
        desc->bd_md.threshold = 2; /* SENT and ACK */
        desc->bd_md.options = PTL_MD_OP_PUT | PTL_MD_IOV;
        desc->bd_md.user_ptr = desc;

        atomic_set(&desc->bd_source_callback_count, 2);

        list_for_each_safe(tmp, next, &desc->bd_page_list) {
                struct ptlrpc_bulk_page *bulk;
                bulk = list_entry(tmp, struct ptlrpc_bulk_page, bp_link);

                LASSERT(desc->bd_md.niov < desc->bd_page_count);

                if (desc->bd_md.niov == 0)
                        xid = bulk->bp_xid;
                LASSERT(xid == bulk->bp_xid);   /* should all be the same */

                iov[desc->bd_md.niov].iov_base = bulk->bp_buf;
                iov[desc->bd_md.niov].iov_len = bulk->bp_buflen;
                if (iov[desc->bd_md.niov].iov_len <= 0) {
                        CERROR("bad bp_buflen[%d] @ %p: %d\n", desc->bd_md.niov,
                               bulk->bp_buf, bulk->bp_buflen);
                        CERROR("desc: xid %u, pages %d, ptl %d, ref %d\n",
                               xid, desc->bd_page_count, desc->bd_portal,
                               atomic_read(&desc->bd_refcount));
                        LBUG();
                }
                desc->bd_md.niov++;
                desc->bd_md.length += bulk->bp_buflen;
        }

        LASSERT(desc->bd_md.niov == desc->bd_page_count);
        LASSERT(desc->bd_md.niov != 0);

        rc = PtlMDBind(desc->bd_connection->c_peer.peer_ni, desc->bd_md,
                       &desc->bd_md_h);

        ptlrpc_put_bulk_iov (desc, iov); /* moved down to reduce send latency */

        if (rc != PTL_OK) {
                CERROR("PtlMDBind failed: %d\n", rc);
                LBUG();
                RETURN(rc);
        }

        remote_id.nid = desc->bd_connection->c_peer.peer_nid;
        remote_id.pid = 0;

        CDEBUG(D_NET, "Sending %u pages %u bytes to portal %d nid "LPX64" pid "
               "%d xid %d\n", desc->bd_md.niov, desc->bd_md.length,
               desc->bd_portal, remote_id.nid, remote_id.pid, xid);

        rc = PtlPut(desc->bd_md_h, PTL_ACK_REQ, remote_id,
                    desc->bd_portal, 0, xid, 0, 0);
        if (rc != PTL_OK) {
                CERROR("PtlPut("LPU64", %d, %d) failed: %d\n",
                       remote_id.nid, desc->bd_portal, xid, rc);
                PtlMDUnlink(desc->bd_md_h);
                LBUG();
                RETURN(rc);
        }

        RETURN(0);
}

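/*
 * Active side of a bulk GET: gather the descriptor's pages into one
 * iovec MD, bind it, and PtlGet the data from the peer.  The
 * threshold of 2 covers the SENT and REPLY events that signal
 * completion.
 */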
int ptlrpc_bulk_get(struct ptlrpc_bulk_desc *desc)
{
        int rc;
        struct list_head *tmp, *next;
        ptl_process_id_t remote_id;
        __u32 xid = 0;
        struct iovec *iov;
        ENTRY;

        iov = ptlrpc_get_bulk_iov (desc);
        if (iov == NULL)
                RETURN (-ENOMEM);

        desc->bd_md.start = iov;
        desc->bd_md.niov = 0;
        desc->bd_md.length = 0;
        desc->bd_md.eventq = bulk_get_sink_eq;
        desc->bd_md.threshold = 2; /* SENT and REPLY */
        desc->bd_md.options = PTL_MD_OP_GET | PTL_MD_IOV;
        desc->bd_md.user_ptr = desc;

        atomic_set(&desc->bd_source_callback_count, 2);

        list_for_each_safe(tmp, next, &desc->bd_page_list) {
                struct ptlrpc_bulk_page *bulk;
                bulk = list_entry(tmp, struct ptlrpc_bulk_page, bp_link);

                LASSERT(desc->bd_md.niov < desc->bd_page_count);

                if (desc->bd_md.niov == 0)
                        xid = bulk->bp_xid;
                LASSERT(xid == bulk->bp_xid);   /* should all be the same */

                iov[desc->bd_md.niov].iov_base = bulk->bp_buf;
                iov[desc->bd_md.niov].iov_len = bulk->bp_buflen;
                if (iov[desc->bd_md.niov].iov_len <= 0) {
                        CERROR("bad bp_buflen[%d] @ %p: %d\n", desc->bd_md.niov,
                               bulk->bp_buf, bulk->bp_buflen);
                        CERROR("desc: xid %u, pages %d, ptl %d, ref %d\n",
                               xid, desc->bd_page_count, desc->bd_portal,
                               atomic_read(&desc->bd_refcount));
                        LBUG();
                }
                desc->bd_md.niov++;
                desc->bd_md.length += bulk->bp_buflen;
        }

        LASSERT(desc->bd_md.niov == desc->bd_page_count);
        LASSERT(desc->bd_md.niov != 0);

        rc = PtlMDBind(desc->bd_connection->c_peer.peer_ni, desc->bd_md,
                       &desc->bd_md_h);

        ptlrpc_put_bulk_iov (desc, iov); /* moved down to reduce send latency */

        if (rc != PTL_OK) {
                CERROR("PtlMDBind failed: %d\n", rc);
                LBUG();
                RETURN(rc);
        }

        remote_id.nid = desc->bd_connection->c_peer.peer_nid;
        remote_id.pid = 0;

        CDEBUG(D_NET, "Fetching %u pages %u bytes from portal %d nid "LPX64
               " pid %d xid %d\n", desc->bd_md.niov, desc->bd_md.length,
               desc->bd_portal, remote_id.nid, remote_id.pid, xid);

        rc = PtlGet(desc->bd_md_h, remote_id, desc->bd_portal, 0, xid, 0);
        if (rc != PTL_OK) {
                CERROR("PtlGet("LPU64", %d, %d) failed: %d\n",
                       remote_id.nid, desc->bd_portal, xid, rc);
                PtlMDUnlink(desc->bd_md_h);
                LBUG();
                RETURN(rc);
        }

        RETURN(0);
}

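/*
 * Passive side setup shared by bulk GET sources and bulk PUT sinks:
 * build the iovec MD, attach a match entry for the bulk XID on the
 * bulk portal, and attach the MD so the peer's GET or PUT can land.
 */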
static int ptlrpc_register_bulk_shared(struct ptlrpc_bulk_desc *desc)
{
        struct list_head *tmp, *next;
        int rc;
        __u32 xid = 0;
        struct iovec *iov;
        ptl_process_id_t source_id;
        ENTRY;

        if (desc->bd_page_count > PTL_MD_MAX_IOV) {
                CERROR("iov longer than %d pages not supported (count=%d)\n",
                       PTL_MD_MAX_IOV, desc->bd_page_count);
                RETURN(-EINVAL);
        }

        iov = ptlrpc_get_bulk_iov (desc);
        if (iov == NULL)
                RETURN(-ENOMEM);

        desc->bd_md.start = iov;
        desc->bd_md.niov = 0;
        desc->bd_md.length = 0;
        desc->bd_md.threshold = 1;
        desc->bd_md.user_ptr = desc;

        list_for_each_safe(tmp, next, &desc->bd_page_list) {
                struct ptlrpc_bulk_page *bulk;
                bulk = list_entry(tmp, struct ptlrpc_bulk_page, bp_link);

                LASSERT(desc->bd_md.niov < desc->bd_page_count);

                if (desc->bd_md.niov == 0)
                        xid = bulk->bp_xid;
                LASSERT(xid == bulk->bp_xid);   /* should all be the same */

                iov[desc->bd_md.niov].iov_base = bulk->bp_buf;
                iov[desc->bd_md.niov].iov_len = bulk->bp_buflen;
                desc->bd_md.niov++;
                desc->bd_md.length += bulk->bp_buflen;
        }

        LASSERT(desc->bd_md.niov == desc->bd_page_count);
        LASSERT(desc->bd_md.niov != 0);

        source_id.nid = desc->bd_connection->c_peer.peer_nid;
        source_id.pid = PTL_PID_ANY;

        rc = PtlMEAttach(desc->bd_connection->c_peer.peer_ni,
                         desc->bd_portal, source_id, xid, 0,
                         PTL_UNLINK, PTL_INS_AFTER, &desc->bd_me_h);
        if (rc != PTL_OK) {
                CERROR("PtlMEAttach failed: %d\n", rc);
                LBUG();
                GOTO(cleanup, rc);
        }

        rc = PtlMDAttach(desc->bd_me_h, desc->bd_md, PTL_UNLINK,
                         &desc->bd_md_h);
        if (rc != PTL_OK) {
                CERROR("PtlMDAttach failed: %d\n", rc);
                LBUG();
                GOTO(cleanup, rc);
        }

        ptlrpc_put_bulk_iov (desc, iov);

        CDEBUG(D_NET, "Setup bulk sink buffers: %u pages %u bytes, xid %u, "
               "portal %u\n", desc->bd_md.niov, desc->bd_md.length,
               xid, desc->bd_portal);

        RETURN(0);

 cleanup:
        ptlrpc_put_bulk_iov (desc, iov);
        ptlrpc_abort_bulk(desc);

        RETURN(rc);
}

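/*
 * Register as the source of a bulk GET or the sink of a bulk PUT; the
 * two cases differ only in the MD options and the event queue used.
 */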
int ptlrpc_register_bulk_get(struct ptlrpc_bulk_desc *desc)
{
        desc->bd_md.options = PTL_MD_OP_GET | PTL_MD_IOV;
        desc->bd_md.eventq = bulk_get_source_eq;

        return ptlrpc_register_bulk_shared(desc);
}

int ptlrpc_register_bulk_put(struct ptlrpc_bulk_desc *desc)
{
        desc->bd_md.options = PTL_MD_OP_PUT | PTL_MD_IOV;
        desc->bd_md.eventq = bulk_put_sink_eq;

        return ptlrpc_register_bulk_shared(desc);
}

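/* Tear down the ME and MD registered for a bulk transfer. */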
int ptlrpc_abort_bulk(struct ptlrpc_bulk_desc *desc)
{
        /* This should be safe: these handles are initialized to be
         * invalid in ptlrpc_prep_bulk() */
        PtlMDUnlink(desc->bd_md_h);
        PtlMEUnlink(desc->bd_me_h);

        return 0;
}

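/*
 * Chain a bulk descriptor onto a brw set, taking a reference on the
 * descriptor and bumping the set's refcount.
 */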
void obd_brw_set_add(struct obd_brw_set *set, struct ptlrpc_bulk_desc *desc)
{
        LASSERT(list_empty(&desc->bd_set_chain));

        ptlrpc_bulk_addref(desc);
        atomic_inc(&set->brw_refcount);
        desc->bd_brw_set = set;
        list_add(&desc->bd_set_chain, &set->brw_desc_head);
}

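/* Allocate and initialize an empty brw set. */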
struct obd_brw_set *obd_brw_set_new(void)
{
        struct obd_brw_set *set;

        OBD_ALLOC(set, sizeof(*set));

        if (set != NULL) {
                init_waitqueue_head(&set->brw_waitq);
                INIT_LIST_HEAD(&set->brw_desc_head);
                atomic_set(&set->brw_refcount, 0);
        }

        return set;
}

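/*
 * Free a brw set.  Any bulk descriptors still chained on it never
 * completed; complain about them and abort their transfers.
 */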
void obd_brw_set_free(struct obd_brw_set *set)
{
        struct list_head *tmp, *next;
        ENTRY;

        /* The original early return on !list_empty() made this loop
         * unreachable; drop it so stragglers are actually cleaned up. */
        list_for_each_safe(tmp, next, &set->brw_desc_head) {
                struct ptlrpc_bulk_desc *desc =
                        list_entry(tmp, struct ptlrpc_bulk_desc, bd_set_chain);

                CERROR("Unfinished bulk descriptor: %p\n", desc);

                ptlrpc_abort_bulk(desc);
        }
        OBD_FREE(set, sizeof(*set));
        EXIT;
        return;
}

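/*
 * Send a normal RPC reply.  Callers signalling an error should use
 * ptlrpc_error() instead, which packs an empty reply if necessary.
 */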
int ptlrpc_reply(struct ptlrpc_service *svc, struct ptlrpc_request *req)
{
        if (req->rq_repmsg == NULL) {
                CERROR("bad: someone called ptlrpc_reply when they meant "
                       "ptlrpc_error\n");
                return -EINVAL;
        }

        /* FIXME: we need to increment the count of handled events */
        if (req->rq_type != PTL_RPC_MSG_ERR)
                req->rq_type = PTL_RPC_MSG_REPLY;
        //req->rq_repmsg->conn = req->rq_connection->c_remote_conn;
        //req->rq_repmsg->token = req->rq_connection->c_remote_token;
        req->rq_repmsg->status = HTON__u32(req->rq_status);
        return ptl_send_buf(req, req->rq_connection, svc->srv_rep_portal);
}

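/*
 * Send an error reply: pack an empty reply message if none exists
 * yet, mark the request PTL_RPC_MSG_ERR and send it as a reply.
 */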
int ptlrpc_error(struct ptlrpc_service *svc, struct ptlrpc_request *req)
{
        int rc;
        ENTRY;

        if (!req->rq_repmsg) {
                rc = lustre_pack_msg(0, NULL, NULL, &req->rq_replen,
                                     &req->rq_repmsg);
                if (rc)
                        RETURN(rc);
        }

        req->rq_type = PTL_RPC_MSG_ERR;

        rc = ptlrpc_reply(svc, req);
        RETURN(rc);
}

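/*
 * Send an RPC request: post the match entry and MD for the expected
 * reply (reusing the existing reply buffer if one survives from a
 * previous send), then PUT the request message to the peer's request
 * portal.
 */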
int ptl_send_rpc(struct ptlrpc_request *request)
{
        int rc;
        char *repbuf;
        ptl_process_id_t source_id;

        ENTRY;

        if (request->rq_type != PTL_RPC_MSG_REQUEST) {
                CERROR("wrong packet type sent %d\n",
                       NTOH__u32(request->rq_reqmsg->type));
                LBUG();
                RETURN(-EINVAL);
        }

        source_id.nid = request->rq_connection->c_peer.peer_nid;
        source_id.pid = PTL_PID_ANY;

        /* add a ref, which will be balanced in request_out_callback */
        ptlrpc_request_addref(request);
        if (request->rq_replen != 0) {
                if (request->rq_reply_md.start != NULL) {
                        rc = PtlMEUnlink(request->rq_reply_me_h);
                        if (rc != PTL_OK && rc != PTL_INV_ME) {
                                CERROR("rc %d\n", rc);
                                LBUG();
                        }
                        repbuf = (char *)request->rq_reply_md.start;
                        request->rq_repmsg = NULL;
                } else {
                        OBD_ALLOC(repbuf, request->rq_replen);
                        if (!repbuf) {
                                LBUG();
                                RETURN(-ENOMEM);
                        }
                }

                rc = PtlMEAttach(request->rq_connection->c_peer.peer_ni,
                                 request->rq_reply_portal, /* XXX FIXME bug 625069 */
                                 source_id, request->rq_xid, 0, PTL_UNLINK,
                                 PTL_INS_AFTER, &request->rq_reply_me_h);
                if (rc != PTL_OK) {
                        CERROR("PtlMEAttach failed: %d\n", rc);
                        LBUG();
                        GOTO(cleanup, rc);
                }

                request->rq_reply_md.start = repbuf;
                request->rq_reply_md.length = request->rq_replen;
                request->rq_reply_md.threshold = 1;
                request->rq_reply_md.options = PTL_MD_OP_PUT;
                request->rq_reply_md.user_ptr = request;
                request->rq_reply_md.eventq = reply_in_eq;

                rc = PtlMDAttach(request->rq_reply_me_h, request->rq_reply_md,
                                 PTL_UNLINK, NULL);
                if (rc != PTL_OK) {
                        CERROR("PtlMDAttach failed: %d\n", rc);
                        LBUG();
                        GOTO(cleanup2, rc);
                }

                CDEBUG(D_NET, "Setup reply buffer: %u bytes, xid "LPU64
                       ", portal %u\n",
                       request->rq_replen, request->rq_xid,
                       request->rq_reply_portal);
        }

        /* Clear any flags that may be present from previous sends,
         * except for REPLAY. */
        request->rq_flags &= PTL_RPC_FL_REPLAY;
        rc = ptl_send_buf(request, request->rq_connection,
                          request->rq_request_portal);
        RETURN(rc);

 cleanup2:
        PtlMEUnlink(request->rq_reply_me_h);
 cleanup:
        OBD_FREE(repbuf, request->rq_replen);
        // up(&request->rq_client->cli_rpc_sem);

        return rc;
}

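/*
 * Post a request buffer descriptor on the service's request portal:
 * attach a wildcard match entry and an auto-unlinking MD so incoming
 * client requests can be delivered into rqbd_buffer.
 */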
void ptlrpc_link_svc_me(struct ptlrpc_request_buffer_desc *rqbd)
{
        struct ptlrpc_service *service = rqbd->rqbd_service;
        static ptl_process_id_t match_id = {PTL_NID_ANY, PTL_PID_ANY};
        int rc;
        ptl_md_t dummy;
        ptl_handle_md_t md_h;

        LASSERT(atomic_read(&rqbd->rqbd_refcount) == 0);

        /* Attach the leading ME on which we build the ring */
        rc = PtlMEAttach(service->srv_self.peer_ni, service->srv_req_portal,
                         match_id, 0, ~0,
                         PTL_UNLINK, PTL_INS_AFTER, &rqbd->rqbd_me_h);
        if (rc != PTL_OK) {
                CERROR("PtlMEAttach failed: %d\n", rc);
                LBUG();
        }

        dummy.start      = rqbd->rqbd_buffer;
        dummy.length     = service->srv_buf_size;
        dummy.max_size   = service->srv_max_req_size;
        dummy.threshold  = PTL_MD_THRESH_INF;
        dummy.options    = PTL_MD_OP_PUT | PTL_MD_MAX_SIZE | PTL_MD_AUTO_UNLINK;
        dummy.user_ptr   = rqbd;
        dummy.eventq     = service->srv_eq_h;

        atomic_inc(&service->srv_nrqbds_receiving);
        atomic_set(&rqbd->rqbd_refcount, 1);   /* 1 ref for portals */

        rc = PtlMDAttach(rqbd->rqbd_me_h, dummy, PTL_UNLINK, &md_h);
        if (rc != PTL_OK) {
                CERROR("PtlMDAttach failed: %d\n", rc);
                LBUG();
#warning proper cleanup required
                PtlMEUnlink (rqbd->rqbd_me_h);
                atomic_set(&rqbd->rqbd_refcount, 0);
                atomic_dec(&service->srv_nrqbds_receiving);
        }
}