/* lustre/ptlrpc/niobuf.c */
/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 *  Copyright (c) 2002, 2003 Cluster File Systems, Inc.
 *
 *   This file is part of Lustre, http://www.lustre.org.
 *
 *   Lustre is free software; you can redistribute it and/or
 *   modify it under the terms of version 2 of the GNU General Public
 *   License as published by the Free Software Foundation.
 *
 *   Lustre is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with Lustre; if not, write to the Free Software
 *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 */

#define DEBUG_SUBSYSTEM S_RPC
#ifndef __KERNEL__
#include <liblustre.h>
#endif
#include <linux/obd_support.h>
#include <linux/lustre_net.h>
#include <linux/lustre_lib.h>
#include <linux/obd.h>
#include "ptlrpc_internal.h"

static int ptl_send_buf (ptl_handle_md_t *mdh, void *base, int len,
                         ptl_ack_req_t ack, struct ptlrpc_cb_id *cbid,
                         struct ptlrpc_connection *conn, int portal, __u64 xid)
{
        int              rc;
        ptl_md_t         md;
        ENTRY;

        LASSERT (portal != 0);
        LASSERT (conn != NULL);
        CDEBUG (D_INFO, "conn=%p id %s\n", conn, libcfs_id2str(conn->c_peer));
        md.start     = base;
        md.length    = len;
        md.threshold = (ack == PTL_ACK_REQ) ? 2 : 1;
        md.options   = PTLRPC_MD_OPTIONS;
        md.user_ptr  = cbid;
        md.eq_handle = ptlrpc_eq_h;

        if (ack == PTL_ACK_REQ &&
            OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_ACK | OBD_FAIL_ONCE)) {
                /* don't ask for the ack to simulate failing client */
                ack = PTL_NOACK_REQ;
                obd_fail_loc |= OBD_FAIL_ONCE | OBD_FAILED;
        }

        rc = PtlMDBind (ptlrpc_ni_h, md, PTL_UNLINK, mdh);
        if (rc != PTL_OK) {
                CERROR ("PtlMDBind failed: %d\n", rc);
                LASSERT (rc == PTL_NO_SPACE);
                RETURN (-ENOMEM);
        }

        CDEBUG(D_NET, "Sending %d bytes to portal %d, xid "LPD64"\n",
               len, portal, xid);

        rc = PtlPut (*mdh, ack, conn->c_peer, portal, 0, xid, 0, 0);
        if (rc != PTL_OK) {
                int rc2;
                /* We're going to get an UNLINK event when I unlink below,
                 * which will complete just like any other failed send, so
                 * I fall through and return success here! */
                CERROR("PtlPut(%s, %d, "LPD64") failed: %d\n",
                       libcfs_id2str(conn->c_peer), portal, xid, rc);
                rc2 = PtlMDUnlink(*mdh);
                LASSERTF(rc2 == PTL_OK, "rc2 = %d\n", rc2);
        }

        RETURN (0);
}
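
/*
 * Illustrative sketch (not part of the original file): how a caller
 * might drive ptl_send_buf().  The buffer, callback id and portal
 * number below are assumptions for illustration only.  Note the
 * threshold accounting above: with PTL_ACK_REQ the MD must absorb two
 * events (SEND and ACK) before it auto-unlinks; with PTL_NOACK_REQ,
 * only one (SEND).
 */
#if 0
static void example_ptl_send_buf(struct ptlrpc_connection *conn,
                                 struct ptlrpc_cb_id *cbid, __u64 xid)
{
        static char     buf[128];  /* must stay valid until the MD unlinks */
        ptl_handle_md_t mdh;
        int             rc;

        /* asks for an ACK, so the MD threshold inside will be 2 */
        rc = ptl_send_buf(&mdh, buf, sizeof(buf), PTL_ACK_REQ,
                          cbid, conn, /* portal: illustrative */ 10, xid);
        /* rc == 0 even if PtlPut() failed; send failure is reported
         * through the event queue callback (cbid->cbid_fn), never here.
         * The only error return is -ENOMEM from PtlMDBind(). */
}
#endif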

int ptlrpc_start_bulk_transfer (struct ptlrpc_bulk_desc *desc)
{
        int                 rc;
        int                 rc2;
        ptl_process_id_t    peer;
        ptl_md_t            md;
        __u64               xid;
        ENTRY;

        if (OBD_FAIL_CHECK_ONCE(OBD_FAIL_PTLRPC_BULK_PUT_NET))
                RETURN(0);

        /* NB no locking required until desc is on the network */
        LASSERT (!desc->bd_network_rw);
        LASSERT (desc->bd_type == BULK_PUT_SOURCE ||
                 desc->bd_type == BULK_GET_SINK);
        desc->bd_success = 0;
        peer = desc->bd_export->exp_connection->c_peer;

        md.user_ptr = &desc->bd_cbid;
        md.eq_handle = ptlrpc_eq_h;
        md.threshold = 2; /* SENT and ACK/REPLY */
        md.options = PTLRPC_MD_OPTIONS;
        ptlrpc_fill_bulk_md(&md, desc);

        LASSERT (desc->bd_cbid.cbid_fn == server_bulk_callback);
        LASSERT (desc->bd_cbid.cbid_arg == desc);

        /* NB total length may be 0 for a read past EOF, so we send a 0
         * length bulk, since the client expects a bulk event. */

        rc = PtlMDBind(ptlrpc_ni_h, md, PTL_UNLINK, &desc->bd_md_h);
        if (rc != PTL_OK) {
                CERROR("PtlMDBind failed: %d\n", rc);
                LASSERT (rc == PTL_NO_SPACE);
                RETURN(-ENOMEM);
        }

        /* Client's bulk and reply matchbits are the same */
        xid = desc->bd_req->rq_xid;
        CDEBUG(D_NET, "Transferring %u pages %u bytes via portal %d "
               "id %s xid "LPX64"\n", desc->bd_iov_count,
               desc->bd_nob, desc->bd_portal, libcfs_id2str(peer), xid);

        /* Network is about to get at the memory */
        desc->bd_network_rw = 1;

        if (desc->bd_type == BULK_PUT_SOURCE)
                rc = PtlPut (desc->bd_md_h, PTL_ACK_REQ, peer,
                             desc->bd_portal, 0, xid, 0, 0);
        else
                rc = PtlGet (desc->bd_md_h, peer,
                             desc->bd_portal, 0, xid, 0);

        if (rc != PTL_OK) {
                /* Can't send, so we unlink the MD bound above.  The UNLINK
                 * event this creates will signal completion with failure,
                 * so we return SUCCESS here! */
                CERROR("Transfer(%s, %d, "LPX64") failed: %d\n",
                       libcfs_id2str(peer), desc->bd_portal, xid, rc);
                rc2 = PtlMDUnlink(desc->bd_md_h);
                LASSERT (rc2 == PTL_OK);
        }

        RETURN(0);
}
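
/*
 * Illustrative sketch (not part of the original file): the server end
 * of a bulk transfer picks the Portals verb from the descriptor type.
 * For a bulk read (client receives file data) the server side is
 * BULK_PUT_SOURCE: it PUTs the data into the client's registered sink
 * MD.  For a bulk write it is BULK_GET_SINK: it GETs the data from the
 * client's source MD.  The matchbits are always the client's rq_xid.
 */
#if 0
static void example_bulk_direction(struct ptlrpc_bulk_desc *desc)
{
        switch (desc->bd_type) {
        case BULK_PUT_SOURCE:   /* server -> client: e.g. read reply data */
        case BULK_GET_SINK:     /* client -> server: e.g. write data */
                (void)ptlrpc_start_bulk_transfer(desc);
                break;
        default:                /* PUT_SINK/GET_SOURCE are client-side only */
                LBUG();
        }
}
#endif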

void ptlrpc_abort_bulk (struct ptlrpc_bulk_desc *desc)
{
        /* Server side bulk abort. Idempotent. Not thread-safe (i.e. only
         * serialises with completion callback) */
        struct l_wait_info lwi;
        int                rc;

        LASSERT (!in_interrupt ());             /* might sleep */

        if (!ptlrpc_bulk_active(desc))          /* completed or */
                return;                         /* never started */

        /* The unlink ensures the callback happens ASAP and is the last
         * one.  If it fails, it must be because completion just happened,
         * but we must still l_wait_event() in this case, to give liblustre
         * a chance to run server_bulk_callback() */

        PtlMDUnlink (desc->bd_md_h);

        for (;;) {
                /* Network access will complete in finite time but the HUGE
                 * timeout lets us CWARN for visibility of sluggish NALs */
                lwi = LWI_TIMEOUT (300 * HZ, NULL, NULL);
                rc = l_wait_event(desc->bd_waitq,
                                  !ptlrpc_bulk_active(desc), &lwi);
                if (rc == 0)
                        return;

                LASSERT(rc == -ETIMEDOUT);
                CWARN("Unexpectedly long timeout: desc %p\n", desc);
        }
}
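
/*
 * Illustrative sketch (not part of the original file): the
 * unlink-then-wait idiom above is the generic way to tear down an MD
 * that may be mid-transfer.  PtlMDUnlink() can race with completion,
 * so the loop keys off the "still active" predicate rather than the
 * unlink's return code.  Names below are assumptions for illustration.
 */
#if 0
static void example_unlink_then_wait(ptl_handle_md_t mdh,
                                     wait_queue_head_t *waitq,
                                     int (*still_active)(void))
{
        struct l_wait_info lwi;

        (void)PtlMDUnlink(mdh);         /* result deliberately ignored */
        do {
                /* huge timeout only so a stuck NAL gets noticed */
                lwi = LWI_TIMEOUT(300 * HZ, NULL, NULL);
        } while (l_wait_event(*waitq, !still_active(), &lwi) == -ETIMEDOUT);
}
#endif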

int ptlrpc_register_bulk (struct ptlrpc_request *req)
{
        struct ptlrpc_bulk_desc *desc = req->rq_bulk;
        ptl_process_id_t peer;
        int rc;
        int rc2;
        ptl_handle_me_t  me_h;
        ptl_md_t         md;
        ENTRY;

        if (OBD_FAIL_CHECK_ONCE(OBD_FAIL_PTLRPC_BULK_GET_NET))
                RETURN(0);

        /* NB no locking required until desc is on the network */
        LASSERT (desc->bd_nob > 0);
        LASSERT (!desc->bd_network_rw);
        LASSERT (desc->bd_iov_count <= PTLRPC_MAX_BRW_PAGES);
        LASSERT (desc->bd_req != NULL);
        LASSERT (desc->bd_type == BULK_PUT_SINK ||
                 desc->bd_type == BULK_GET_SOURCE);

        desc->bd_success = 0;

        peer = desc->bd_import->imp_connection->c_peer;

        md.user_ptr = &desc->bd_cbid;
        md.eq_handle = ptlrpc_eq_h;
        md.threshold = 1;                       /* PUT or GET */
        md.options = PTLRPC_MD_OPTIONS |
                     ((desc->bd_type == BULK_GET_SOURCE) ?
                      PTL_MD_OP_GET : PTL_MD_OP_PUT);
        ptlrpc_fill_bulk_md(&md, desc);

        LASSERT (desc->bd_cbid.cbid_fn == client_bulk_callback);
        LASSERT (desc->bd_cbid.cbid_arg == desc);

        /* XXX Registering the same xid on retried bulk makes my head
         * explode trying to understand how the original request's bulk
         * might interfere with the retried request -eeb */
        LASSERTF (!desc->bd_registered || req->rq_xid != desc->bd_last_xid,
                  "registered: %d  rq_xid: "LPU64" bd_last_xid: "LPU64"\n",
                  desc->bd_registered, req->rq_xid, desc->bd_last_xid);
        desc->bd_registered = 1;
        desc->bd_last_xid = req->rq_xid;

        rc = PtlMEAttach(ptlrpc_ni_h, desc->bd_portal, peer,
                         req->rq_xid, 0, PTL_UNLINK, PTL_INS_AFTER, &me_h);
        if (rc != PTL_OK) {
                CERROR("PtlMEAttach failed: %d\n", rc);
                LASSERT (rc == PTL_NO_SPACE);
                RETURN (-ENOMEM);
        }

        /* About to let the network at it... */
        desc->bd_network_rw = 1;
        rc = PtlMDAttach(me_h, md, PTL_UNLINK, &desc->bd_md_h);
        if (rc != PTL_OK) {
                CERROR("PtlMDAttach failed: %d\n", rc);
                LASSERT (rc == PTL_NO_SPACE);
                desc->bd_network_rw = 0;
                rc2 = PtlMEUnlink (me_h);
                LASSERT (rc2 == PTL_OK);
                RETURN (-ENOMEM);
        }

        CDEBUG(D_NET, "Setup bulk %s buffers: %u pages %u bytes, xid "LPX64", "
               "portal %u\n",
               desc->bd_type == BULK_GET_SOURCE ? "get-source" : "put-sink",
               desc->bd_iov_count, desc->bd_nob,
               req->rq_xid, desc->bd_portal);
        RETURN(0);
}
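
/*
 * Illustrative sketch (not part of the original file): the ME/MD pair
 * attached above is the passive half of the bulk handshake.  The
 * match entry selects on (peer, matchbits == rq_xid, no ignore bits),
 * so only a PUT or GET from that peer with those matchbits can touch
 * the buffers.  A minimal ME+MD attach under the same assumptions:
 */
#if 0
static int example_passive_bulk(ptl_process_id_t peer, __u64 xid,
                                int portal, ptl_md_t md,
                                ptl_handle_md_t *mdh)
{
        ptl_handle_me_t me_h;
        int             rc;

        /* match exactly this peer and xid; no wildcard (ignore) bits */
        rc = PtlMEAttach(ptlrpc_ni_h, portal, peer, xid, 0,
                         PTL_UNLINK, PTL_INS_AFTER, &me_h);
        if (rc != PTL_OK)
                return -ENOMEM;

        rc = PtlMDAttach(me_h, md, PTL_UNLINK, mdh);
        if (rc != PTL_OK) {
                (void)PtlMEUnlink(me_h);        /* undo: MD never armed */
                return -ENOMEM;
        }
        return 0;
}
#endif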

void ptlrpc_unregister_bulk (struct ptlrpc_request *req)
{
        /* Disconnect a bulk desc from the network. Idempotent. Not
         * thread-safe (i.e. only interlocks with completion callback). */
        struct ptlrpc_bulk_desc *desc = req->rq_bulk;
        wait_queue_head_t       *wq;
        struct l_wait_info       lwi;
        int                      rc;

        LASSERT (!in_interrupt ());     /* might sleep */

        if (!ptlrpc_bulk_active(desc))  /* completed or */
                return;                 /* never registered */

        LASSERT (desc->bd_req == req);  /* bd_req NULL until registered */

        /* the unlink ensures the callback happens ASAP and is the last
         * one.  If it fails, it must be because completion just happened,
         * but we must still l_wait_event() in this case to give liblustre
         * a chance to run client_bulk_callback() */

        PtlMDUnlink (desc->bd_md_h);

        if (req->rq_set != NULL)
                wq = &req->rq_set->set_waitq;
        else
                wq = &req->rq_reply_waitq;

        for (;;) {
                /* Network access will complete in finite time but the HUGE
                 * timeout lets us CWARN for visibility of sluggish NALs */
                lwi = LWI_TIMEOUT (300 * HZ, NULL, NULL);
                rc = l_wait_event(*wq, !ptlrpc_bulk_active(desc), &lwi);
                if (rc == 0)
                        return;

                LASSERT (rc == -ETIMEDOUT);
                DEBUG_REQ(D_WARNING, req, "Unexpectedly long timeout: desc %p\n",
                          desc);
        }
}

int ptlrpc_send_reply (struct ptlrpc_request *req, int may_be_difficult)
{
        struct ptlrpc_service     *svc = req->rq_rqbd->rqbd_service;
        struct ptlrpc_reply_state *rs = req->rq_reply_state;
        struct ptlrpc_connection  *conn;
        int                        rc;

        /* We must already have a reply buffer (only ptlrpc_error() may be
         * called without one).  We must also have a request buffer which
         * is either the actual (swabbed) incoming request, or a saved copy
         * if this is a req saved in target_queue_final_reply(). */
        LASSERT (req->rq_reqmsg != NULL);
        LASSERT (rs != NULL);
        LASSERT (req->rq_repmsg != NULL);
        LASSERT (may_be_difficult || !rs->rs_difficult);
        LASSERT (req->rq_repmsg == &rs->rs_msg);
        LASSERT (rs->rs_cb_id.cbid_fn == reply_out_callback);
        LASSERT (rs->rs_cb_id.cbid_arg == rs);

        if (req->rq_export && req->rq_export->exp_obd &&
            req->rq_export->exp_obd->obd_fail) {
                /* Failed obd's only send ENODEV */
                req->rq_type = PTL_RPC_MSG_ERR;
                req->rq_status = -ENODEV;
                CDEBUG(D_HA, "sending ENODEV from failed obd %d\n",
                       req->rq_export->exp_obd->obd_minor);
        }

        if (req->rq_type != PTL_RPC_MSG_ERR)
                req->rq_type = PTL_RPC_MSG_REPLY;

        req->rq_repmsg->type   = req->rq_type;
        req->rq_repmsg->status = req->rq_status;
        req->rq_repmsg->opc    = req->rq_reqmsg->opc;

        if (req->rq_export == NULL)
                conn = ptlrpc_get_connection(req->rq_peer, NULL);
        else
                conn = ptlrpc_connection_addref(req->rq_export->exp_connection);

        atomic_inc (&svc->srv_outstanding_replies);
        ptlrpc_rs_addref(rs);                   /* +1 ref for the network */

        rc = ptl_send_buf (&rs->rs_md_h, req->rq_repmsg, req->rq_replen,
                           rs->rs_difficult ? PTL_ACK_REQ : PTL_NOACK_REQ,
                           &rs->rs_cb_id, conn,
                           svc->srv_rep_portal, req->rq_xid);
        if (rc != 0) {
                atomic_dec (&svc->srv_outstanding_replies);
                ptlrpc_rs_decref(rs);
        }
        ptlrpc_put_connection(conn);
        return rc;
}

int ptlrpc_reply (struct ptlrpc_request *req)
{
        return (ptlrpc_send_reply (req, 0));
}

int ptlrpc_error(struct ptlrpc_request *req)
{
        int rc;
        ENTRY;

        if (!req->rq_repmsg) {
                rc = lustre_pack_reply(req, 0, NULL, NULL);
                if (rc)
                        RETURN(rc);
        }

        req->rq_type = PTL_RPC_MSG_ERR;

        rc = ptlrpc_send_reply (req, 0);
        RETURN(rc);
}

int ptl_send_rpc(struct ptlrpc_request *request)
{
        int rc;
        int rc2;
        struct ptlrpc_connection *connection;
        unsigned long flags;
        ptl_handle_me_t  reply_me_h;
        ptl_md_t         reply_md;
        ENTRY;

        OBD_FAIL_RETURN(OBD_FAIL_PTLRPC_DROP_RPC, 0);

        LASSERT (request->rq_type == PTL_RPC_MSG_REQUEST);

        /* If this is a re-transmit, we're required to have disengaged
         * cleanly from the previous attempt */
        LASSERT (!request->rq_receiving_reply);

        if (request->rq_import->imp_obd &&
            request->rq_import->imp_obd->obd_fail) {
                CDEBUG(D_HA, "muting rpc for failed imp obd %s\n",
                       request->rq_import->imp_obd->obd_name);
                /* this prevents us from waiting in ptlrpc_queue_wait */
                request->rq_err = 1;
                RETURN(-ENODEV);
        }

        connection = request->rq_import->imp_connection;

        if (request->rq_bulk != NULL) {
                rc = ptlrpc_register_bulk (request);
                if (rc != 0)
                        RETURN(rc);
        }

        request->rq_reqmsg->handle = request->rq_import->imp_remote_handle;
        request->rq_reqmsg->type = PTL_RPC_MSG_REQUEST;
        request->rq_reqmsg->conn_cnt = request->rq_import->imp_conn_cnt;

        LASSERT (request->rq_replen != 0);
        if (request->rq_repmsg == NULL)
                OBD_ALLOC(request->rq_repmsg, request->rq_replen);
        if (request->rq_repmsg == NULL)
                GOTO(cleanup_bulk, rc = -ENOMEM);

        rc = PtlMEAttach(ptlrpc_ni_h,
                         request->rq_reply_portal, /* XXX FIXME bug 249 */
                         connection->c_peer, request->rq_xid, 0,
                         PTL_UNLINK, PTL_INS_AFTER, &reply_me_h);
        if (rc != PTL_OK) {
                CERROR("PtlMEAttach failed: %d\n", rc);
                LASSERT (rc == PTL_NO_SPACE);
                GOTO(cleanup_repmsg, rc = -ENOMEM);
        }

        spin_lock_irqsave (&request->rq_lock, flags);
        /* If the MD attach succeeds, there _will_ be a reply_in callback */
        request->rq_receiving_reply = 1;
        /* Clear any flags that may be present from previous sends. */
        request->rq_replied = 0;
        request->rq_err = 0;
        request->rq_timedout = 0;
        request->rq_net_err = 0;
        request->rq_resend = 0;
        request->rq_restart = 0;
        spin_unlock_irqrestore (&request->rq_lock, flags);

        reply_md.start     = request->rq_repmsg;
        reply_md.length    = request->rq_replen;
        reply_md.threshold = 1;
        reply_md.options   = PTLRPC_MD_OPTIONS | PTL_MD_OP_PUT;
        reply_md.user_ptr  = &request->rq_reply_cbid;
        reply_md.eq_handle = ptlrpc_eq_h;

        rc = PtlMDAttach(reply_me_h, reply_md, PTL_UNLINK,
                         &request->rq_reply_md_h);
        if (rc != PTL_OK) {
                CERROR("PtlMDAttach failed: %d\n", rc);
                LASSERT (rc == PTL_NO_SPACE);
                spin_lock_irqsave (&request->rq_lock, flags);
                /* ...but the MD attach didn't succeed... */
                request->rq_receiving_reply = 0;
                spin_unlock_irqrestore (&request->rq_lock, flags);
                GOTO(cleanup_me, rc = -ENOMEM);
        }

        CDEBUG(D_NET, "Setup reply buffer: %u bytes, xid "LPU64
               ", portal %u\n",
               request->rq_replen, request->rq_xid,
               request->rq_reply_portal);

        ptlrpc_request_addref(request);       /* +1 ref for the SENT callback */

        request->rq_sent = CURRENT_SECONDS;
        ptlrpc_pinger_sending_on_import(request->rq_import);
        rc = ptl_send_buf(&request->rq_req_md_h,
                          request->rq_reqmsg, request->rq_reqlen,
                          PTL_NOACK_REQ, &request->rq_req_cbid,
                          connection,
                          request->rq_request_portal,
                          request->rq_xid);
        if (rc == 0) {
                ptlrpc_lprocfs_rpc_sent(request);
                RETURN(rc);
        }

        ptlrpc_req_finished (request);          /* drop callback ref */

 cleanup_me:
        /* MEUnlink is safe; the PUT didn't even get off the ground, and
         * nobody apart from the PUT's target has the right nid+XID to
         * access the reply buffer. */
        rc2 = PtlMEUnlink(reply_me_h);
        LASSERT (rc2 == PTL_OK);
        /* UNLINKED callback called synchronously */
        LASSERT (!request->rq_receiving_reply);

 cleanup_repmsg:
        OBD_FREE(request->rq_repmsg, request->rq_replen);
        request->rq_repmsg = NULL;

 cleanup_bulk:
        if (request->rq_bulk != NULL)
                ptlrpc_unregister_bulk(request);

        return rc;
}
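
/*
 * Illustrative sketch (not part of the original file): ptl_send_rpc()
 * sets resources up in the order bulk -> reply buffer -> reply ME ->
 * reply MD -> request send, and the cleanup_* labels unwind them in
 * exactly the reverse order.  The same pattern in miniature, with
 * hypothetical setup/teardown helpers:
 */
#if 0
static int example_unwind_pattern(void)
{
        int rc;

        if ((rc = setup_a()) != 0)
                goto out;
        if ((rc = setup_b()) != 0)      /* a is live; must undo it */
                goto cleanup_a;
        if ((rc = setup_c()) != 0)      /* a and b are live */
                goto cleanup_b;
        return 0;

 cleanup_b:
        teardown_b();
 cleanup_a:
        teardown_a();
 out:
        return rc;
}
#endif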

int ptlrpc_register_rqbd (struct ptlrpc_request_buffer_desc *rqbd)
{
        struct ptlrpc_service   *service = rqbd->rqbd_service;
        static ptl_process_id_t  match_id = {PTL_NID_ANY, PTL_PID_ANY};
        int                      rc;
        ptl_md_t                 md;
        ptl_handle_me_t          me_h;

        CDEBUG(D_NET, "PtlMEAttach: portal %d\n",
               service->srv_req_portal);

        if (OBD_FAIL_CHECK_ONCE(OBD_FAIL_PTLRPC_RQBD))
                return (-ENOMEM);

        rc = PtlMEAttach(ptlrpc_ni_h, service->srv_req_portal,
                         match_id, 0, ~0, PTL_UNLINK, PTL_INS_AFTER, &me_h);
        if (rc != PTL_OK) {
                CERROR("PtlMEAttach failed: %d\n", rc);
                return (-ENOMEM);
        }

        LASSERT(rqbd->rqbd_refcount == 0);
        rqbd->rqbd_refcount = 1;

        md.start     = rqbd->rqbd_buffer;
        md.length    = service->srv_buf_size;
        md.max_size  = service->srv_max_req_size;
        md.threshold = PTL_MD_THRESH_INF;
        md.options   = PTLRPC_MD_OPTIONS | PTL_MD_OP_PUT | PTL_MD_MAX_SIZE;
        md.user_ptr  = &rqbd->rqbd_cbid;
        md.eq_handle = ptlrpc_eq_h;

        rc = PtlMDAttach(me_h, md, PTL_UNLINK, &rqbd->rqbd_md_h);
        if (rc == PTL_OK)
                return (0);

        CERROR("PtlMDAttach failed: %d\n", rc);
        LASSERT (rc == PTL_NO_SPACE);
        rc = PtlMEUnlink (me_h);
        LASSERT (rc == PTL_OK);
        rqbd->rqbd_refcount = 0;

        return (-ENOMEM);
}
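
/*
 * Illustrative sketch (not part of the original file): unlike the
 * reply and bulk MDs above, a request buffer matches any peer
 * (PTL_NID_ANY/PTL_PID_ANY, matchbits 0 with all ignore bits set) and
 * stays linked across many incoming PUTs: PTL_MD_THRESH_INF disables
 * the event countdown, and PTL_MD_MAX_SIZE caps each individual
 * request at srv_max_req_size while the buffer itself is srv_buf_size
 * long.  The sizes below are assumptions for illustration only.
 */
#if 0
static void example_rqbd_md(ptl_md_t *md, void *buf,
                            struct ptlrpc_cb_id *cbid)
{
        md->start     = buf;
        md->length    = 64 * 1024;      /* illustrative srv_buf_size */
        md->max_size  = 8 * 1024;       /* illustrative srv_max_req_size */
        md->threshold = PTL_MD_THRESH_INF;  /* never auto-unlink on count */
        md->options   = PTLRPC_MD_OPTIONS | PTL_MD_OP_PUT | PTL_MD_MAX_SIZE;
        md->user_ptr  = cbid;
        md->eq_handle = ptlrpc_eq_h;
}
#endif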