lustre/ptlrpc/niobuf.c
/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2008 Sun Microsystems, Inc. All rights reserved
 * Use is subject to license terms.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 */

#define DEBUG_SUBSYSTEM S_RPC
#ifndef __KERNEL__
#include <liblustre.h>
#endif
#include <obd_support.h>
#include <lustre_net.h>
#include <lustre_lib.h>
#include <obd.h>
#include "ptlrpc_internal.h"

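/* Bind the buffer [base, base + len) in a memory descriptor and LNetPut()
 * it to conn's peer at the given portal, match bits (xid) and offset.
 * cbid identifies the completion callback; requesting an ACK raises the
 * MD threshold to 2 so both the SENT and ACK events are delivered.
 * If the LNetPut() itself fails, the MD is unlinked and the resulting
 * UNLINK event completes the send, so this function still returns 0. */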
static int ptl_send_buf (lnet_handle_md_t *mdh, void *base, int len,
                         lnet_ack_req_t ack, struct ptlrpc_cb_id *cbid,
                         struct ptlrpc_connection *conn, int portal, __u64 xid,
                         unsigned int offset)
{
        int              rc;
        lnet_md_t         md;
        ENTRY;

        LASSERT (portal != 0);
        LASSERT (conn != NULL);
        CDEBUG (D_INFO, "conn=%p id %s\n", conn, libcfs_id2str(conn->c_peer));
        md.start     = base;
        md.length    = len;
        md.threshold = (ack == LNET_ACK_REQ) ? 2 : 1;
        md.options   = PTLRPC_MD_OPTIONS;
        md.user_ptr  = cbid;
        md.eq_handle = ptlrpc_eq_h;

        if (unlikely(ack == LNET_ACK_REQ &&
                     OBD_FAIL_CHECK_ORSET(OBD_FAIL_PTLRPC_ACK, OBD_FAIL_ONCE))){
                /* don't ask for the ack to simulate failing client */
                ack = LNET_NOACK_REQ;
        }

        rc = LNetMDBind (md, LNET_UNLINK, mdh);
        if (unlikely(rc != 0)) {
                CERROR ("LNetMDBind failed: %d\n", rc);
                LASSERT (rc == -ENOMEM);
                RETURN (-ENOMEM);
        }

        CDEBUG(D_NET, "Sending %d bytes to portal %d, xid "LPD64", offset %u\n",
               len, portal, xid, offset);

        rc = LNetPut (conn->c_self, *mdh, ack,
                      conn->c_peer, portal, xid, offset, 0);
        if (unlikely(rc != 0)) {
                int rc2;
                /* We're going to get an UNLINK event when I unlink below,
                 * which will complete just like any other failed send, so
                 * I fall through and return success here! */
                CERROR("LNetPut(%s, %d, "LPD64") failed: %d\n",
                       libcfs_id2str(conn->c_peer), portal, xid, rc);
                rc2 = LNetMDUnlink(*mdh);
                LASSERTF(rc2 == 0, "rc2 = %d\n", rc2);
        }

        RETURN (0);
}

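/* Server side: start the bulk transfer described by desc, either PUTting
 * the data to the client (BULK_PUT_SOURCE) or GETting it from the client
 * (BULK_GET_SINK).  The match bits are the client request's xid. */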
int ptlrpc_start_bulk_transfer(struct ptlrpc_bulk_desc *desc)
{
        struct ptlrpc_connection *conn = desc->bd_export->exp_connection;
        int                       rc;
        int                       rc2;
        lnet_md_t                 md;
        __u64                     xid;
        ENTRY;

        if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_BULK_PUT_NET))
                RETURN(0);

        /* NB no locking required until desc is on the network */
        LASSERT (!desc->bd_network_rw);
        LASSERT (desc->bd_type == BULK_PUT_SOURCE ||
                 desc->bd_type == BULK_GET_SINK);
        desc->bd_success = 0;

        md.user_ptr = &desc->bd_cbid;
        md.eq_handle = ptlrpc_eq_h;
        md.threshold = 2; /* SENT and ACK/REPLY */
        md.options = PTLRPC_MD_OPTIONS;
        ptlrpc_fill_bulk_md(&md, desc);

        LASSERT (desc->bd_cbid.cbid_fn == server_bulk_callback);
        LASSERT (desc->bd_cbid.cbid_arg == desc);

        /* NB total length may be 0 for a read past EOF, so we send a 0
         * length bulk, since the client expects a bulk event. */

        rc = LNetMDBind(md, LNET_UNLINK, &desc->bd_md_h);
        if (rc != 0) {
                CERROR("LNetMDBind failed: %d\n", rc);
                LASSERT (rc == -ENOMEM);
                RETURN(-ENOMEM);
        }

        /* Client's bulk and reply matchbits are the same */
        xid = desc->bd_req->rq_xid;
        CDEBUG(D_NET, "Transferring %u pages %u bytes via portal %d "
               "id %s xid "LPX64"\n", desc->bd_iov_count,
               desc->bd_nob, desc->bd_portal,
               libcfs_id2str(conn->c_peer), xid);

        /* Network is about to get at the memory */
        desc->bd_network_rw = 1;

        if (desc->bd_type == BULK_PUT_SOURCE)
                rc = LNetPut (conn->c_self, desc->bd_md_h, LNET_ACK_REQ,
                              conn->c_peer, desc->bd_portal, xid, 0, 0);
        else
                rc = LNetGet (conn->c_self, desc->bd_md_h,
                              conn->c_peer, desc->bd_portal, xid, 0);

        if (rc != 0) {
                /* Can't send, so we unlink the MD bound above.  The UNLINK
                 * event this creates will signal completion with failure,
                 * so we return SUCCESS here! */
                CERROR("Transfer(%s, %d, "LPX64") failed: %d\n",
                       libcfs_id2str(conn->c_peer), desc->bd_portal, xid, rc);
                rc2 = LNetMDUnlink(desc->bd_md_h);
                LASSERT (rc2 == 0);
        }

        RETURN(0);
}

/* Server side bulk abort. Idempotent. Not thread-safe (i.e. only
 * serialises with completion callback) */
void ptlrpc_abort_bulk(struct ptlrpc_bulk_desc *desc)
{
        struct l_wait_info       lwi;
        int                      rc;

        LASSERT(!cfs_in_interrupt());           /* might sleep */

        if (!ptlrpc_server_bulk_active(desc))   /* completed or */
                return;                         /* never started */

        /* We used to poison the pages with 0xab here because we did not want to
         * send any meaningful data over the wire for evicted clients (bug 9297)
         * However, this is no longer safe now that we use the page cache on the
         * OSS (bug 20560) */

        /* The unlink ensures the callback happens ASAP and is the last
         * one.  If it fails, it must be because completion just happened,
         * but we must still l_wait_event() in this case, to give liblustre
         * a chance to run server_bulk_callback() */

        LNetMDUnlink(desc->bd_md_h);

        for (;;) {
                /* Network access will complete in finite time but the HUGE
                 * timeout lets us CWARN for visibility of sluggish NALs */
                lwi = LWI_TIMEOUT_INTERVAL(cfs_time_seconds(LONG_UNLINK),
                                           cfs_time_seconds(1), NULL, NULL);
                rc = l_wait_event(desc->bd_waitq,
                                  !ptlrpc_server_bulk_active(desc), &lwi);
                if (rc == 0)
                        return;

                LASSERT(rc == -ETIMEDOUT);
                CWARN("Unexpectedly long timeout: desc %p\n", desc);
        }
}

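/* Client side: attach an ME/MD pair on the bulk portal so the server can
 * GET the outgoing data (BULK_GET_SOURCE) or PUT the incoming data
 * (BULK_PUT_SINK).  The match bits are the request xid, which the bulk
 * shares with the reply. */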
int ptlrpc_register_bulk(struct ptlrpc_request *req)
{
        struct ptlrpc_bulk_desc *desc = req->rq_bulk;
        lnet_process_id_t peer;
        int rc;
        int rc2;
        lnet_handle_me_t  me_h;
        lnet_md_t         md;
        ENTRY;

        if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_BULK_GET_NET))
                RETURN(0);

        /* NB no locking required until desc is on the network */
        LASSERT (desc->bd_nob > 0);
        LASSERT (!desc->bd_network_rw);
        LASSERT (desc->bd_iov_count <= PTLRPC_MAX_BRW_PAGES);
        LASSERT (desc->bd_req != NULL);
        LASSERT (desc->bd_type == BULK_PUT_SINK ||
                 desc->bd_type == BULK_GET_SOURCE);

        desc->bd_success = 0;

        peer = desc->bd_import->imp_connection->c_peer;

        md.user_ptr = &desc->bd_cbid;
        md.eq_handle = ptlrpc_eq_h;
        md.threshold = 1;                       /* PUT or GET */
        md.options = PTLRPC_MD_OPTIONS |
                     ((desc->bd_type == BULK_GET_SOURCE) ?
                      LNET_MD_OP_GET : LNET_MD_OP_PUT);
        ptlrpc_fill_bulk_md(&md, desc);

        LASSERT (desc->bd_cbid.cbid_fn == client_bulk_callback);
        LASSERT (desc->bd_cbid.cbid_arg == desc);

        /* XXX Registering the same xid on retried bulk makes my head
         * explode trying to understand how the original request's bulk
         * might interfere with the retried request -eeb */
        LASSERTF (!desc->bd_registered || req->rq_xid != desc->bd_last_xid,
                  "registered: %d  rq_xid: "LPU64" bd_last_xid: "LPU64"\n",
                  desc->bd_registered, req->rq_xid, desc->bd_last_xid);
        desc->bd_registered = 1;
        desc->bd_last_xid = req->rq_xid;

        rc = LNetMEAttach(desc->bd_portal, peer,
                          req->rq_xid, 0, LNET_UNLINK, LNET_INS_AFTER, &me_h);
        if (rc != 0) {
                CERROR("LNetMEAttach failed: %d\n", rc);
                LASSERT (rc == -ENOMEM);
                RETURN (-ENOMEM);
        }

        /* About to let the network at it... */
        desc->bd_network_rw = 1;
        rc = LNetMDAttach(me_h, md, LNET_UNLINK, &desc->bd_md_h);
        if (rc != 0) {
                CERROR("LNetMDAttach failed: %d\n", rc);
                LASSERT (rc == -ENOMEM);
                desc->bd_network_rw = 0;
                rc2 = LNetMEUnlink (me_h);
                LASSERT (rc2 == 0);
                RETURN (-ENOMEM);
        }

        CDEBUG(D_NET, "Setup bulk %s buffers: %u pages %u bytes, xid "LPU64", "
               "portal %u\n",
               desc->bd_type == BULK_GET_SOURCE ? "get-source" : "put-sink",
               desc->bd_iov_count, desc->bd_nob,
               req->rq_xid, desc->bd_portal);
        RETURN(0);
}

/* Disconnect a bulk desc from the network. Idempotent. Not
 * thread-safe (i.e. only interlocks with completion callback). */
int ptlrpc_unregister_bulk(struct ptlrpc_request *req, int async)
{
        struct ptlrpc_bulk_desc *desc = req->rq_bulk;
        cfs_waitq_t             *wq;
        struct l_wait_info       lwi;
        int                      rc;
        ENTRY;

        LASSERT(!cfs_in_interrupt());     /* might sleep */

        /* Let's setup deadline for bulk unlink. */
        if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_BULK_UNLINK) &&
            async && req->rq_bulk_deadline == 0)
                req->rq_bulk_deadline = cfs_time_current_sec() + LONG_UNLINK;

        if (!ptlrpc_client_bulk_active(req))  /* completed or */
                RETURN(1);                    /* never registered */

        LASSERT(desc->bd_req == req);  /* bd_req NULL until registered */

        /* the unlink ensures the callback happens ASAP and is the last
         * one.  If it fails, it must be because completion just happened,
         * but we must still l_wait_event() in this case to give liblustre
         * a chance to run client_bulk_callback() */

        LNetMDUnlink(desc->bd_md_h);

        if (!ptlrpc_client_bulk_active(req))  /* completed or */
                RETURN(1);                    /* never registered */

        /* Move to "Unregistering" phase as bulk was not unlinked yet. */
        ptlrpc_rqphase_move(req, RQ_PHASE_UNREGISTERING);

        /* Do not wait for unlink to finish. */
        if (async)
                RETURN(0);

        if (req->rq_set != NULL)
                wq = &req->rq_set->set_waitq;
        else
                wq = &req->rq_reply_waitq;

        for (;;) {
                /* Network access will complete in finite time but the HUGE
                 * timeout lets us CWARN for visibility of sluggish NALs */
                lwi = LWI_TIMEOUT_INTERVAL(cfs_time_seconds(LONG_UNLINK),
                                           cfs_time_seconds(1), NULL, NULL);
                rc = l_wait_event(*wq, !ptlrpc_client_bulk_active(req), &lwi);
                if (rc == 0) {
                        ptlrpc_rqphase_move(req, req->rq_next_phase);
                        RETURN(1);
                }

                LASSERT(rc == -ETIMEDOUT);
                DEBUG_REQ(D_WARNING, req, "Unexpectedly long timeout: desc %p",
                          desc);
        }
        RETURN(0);
}

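/* Fill in the adaptive-timeout fields of the reply: record the actual
 * service time for the client's latency calculation, fold it into the
 * service estimate (early replies, errors and recovery requests are
 * excluded), and advertise the current estimate back to the client. */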
static void ptlrpc_at_set_reply(struct ptlrpc_request *req, int flags)
{
        struct ptlrpc_service *svc = req->rq_rqbd->rqbd_service;
        int service_time = max_t(int, cfs_time_current_sec() -
                                 req->rq_arrival_time.tv_sec, 1);

        if (!(flags & PTLRPC_REPLY_EARLY) &&
            (req->rq_type != PTL_RPC_MSG_ERR) &&
            (req->rq_reqmsg != NULL) &&
            !(lustre_msg_get_flags(req->rq_reqmsg) &
              (MSG_RESENT | MSG_REPLAY |
               MSG_REQ_REPLAY_DONE | MSG_LOCK_REPLAY_DONE))) {
                /* early replies, errors and recovery requests don't count
                 * toward our service time estimate */
                int oldse = at_add(&svc->srv_at_estimate, service_time);
                if (oldse != 0)
                        DEBUG_REQ(D_ADAPTTO, req,
                                  "svc %s changed estimate from %d to %d",
                                  svc->srv_name, oldse,
                                  at_get(&svc->srv_at_estimate));
        }
        /* Report actual service time for client latency calc */
        lustre_msg_set_service_time(req->rq_repmsg, service_time);
        /* Report service time estimate for future client reqs, but report 0
         * (to be ignored by client) if it's an error reply during recovery.
         * (bz15815) */
        if (req->rq_type == PTL_RPC_MSG_ERR &&
            (req->rq_export == NULL || req->rq_export->exp_obd->obd_recovering))
                lustre_msg_set_timeout(req->rq_repmsg, 0);
        else
                lustre_msg_set_timeout(req->rq_repmsg,
                                       at_get(&svc->srv_at_estimate));

        if (req->rq_reqmsg &&
            !(lustre_msghdr_get_flags(req->rq_reqmsg) & MSGHDR_AT_SUPPORT)) {
                CDEBUG(D_ADAPTTO, "No early reply support: flags=%#x "
                       "req_flags=%#x magic=%d:%x/%x len=%d\n",
                       flags, lustre_msg_get_flags(req->rq_reqmsg),
                       lustre_msg_is_v1(req->rq_reqmsg),
                       lustre_msg_get_magic(req->rq_reqmsg),
                       lustre_msg_get_magic(req->rq_repmsg), req->rq_replen);
        }
}

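/* Send the reply already packed in req->rq_reply_state.  Failed obds
 * reply with ENODEV; "difficult" replies (unless rs_no_ack is set)
 * request an ACK from the client, so the reply state is kept until the
 * client has acknowledged it. */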
int ptlrpc_send_reply(struct ptlrpc_request *req, int flags)
{
        struct ptlrpc_service     *svc = req->rq_rqbd->rqbd_service;
        struct ptlrpc_reply_state *rs = req->rq_reply_state;
        struct ptlrpc_connection  *conn;
        int                        rc;

        /* We must already have a reply buffer (only ptlrpc_error() may be
         * called without one). A reply generated by the sptlrpc layer (e.g.
         * error notify, etc.) might have a NULL rq_reqmsg; otherwise we must
         * have a request buffer which is either the actual (swabbed) incoming
         * request, or a saved copy if this is a req saved in
         * target_queue_final_reply().
         */
        LASSERT (req->rq_no_reply == 0);
        LASSERT (req->rq_reqbuf != NULL);
        LASSERT (rs != NULL);
        LASSERT ((flags & PTLRPC_REPLY_MAYBE_DIFFICULT) || !rs->rs_difficult);
        LASSERT (req->rq_repmsg != NULL);
        LASSERT (req->rq_repmsg == rs->rs_msg);
        LASSERT (rs->rs_cb_id.cbid_fn == reply_out_callback);
        LASSERT (rs->rs_cb_id.cbid_arg == rs);

        /* There may be no rq_export during failover */

        if (unlikely(req->rq_export && req->rq_export->exp_obd &&
                     req->rq_export->exp_obd->obd_fail)) {
                /* Failed obd's only send ENODEV */
                req->rq_type = PTL_RPC_MSG_ERR;
                req->rq_status = -ENODEV;
                CDEBUG(D_HA, "sending ENODEV from failed obd %d\n",
                       req->rq_export->exp_obd->obd_minor);
        }

        if (req->rq_type != PTL_RPC_MSG_ERR)
                req->rq_type = PTL_RPC_MSG_REPLY;

        lustre_msg_set_type(req->rq_repmsg, req->rq_type);
        lustre_msg_set_status(req->rq_repmsg, req->rq_status);
        lustre_msg_set_opc(req->rq_repmsg,
                req->rq_reqmsg ? lustre_msg_get_opc(req->rq_reqmsg) : 0);

        target_pack_pool_reply(req);

        ptlrpc_at_set_reply(req, flags);

        if (req->rq_export == NULL || req->rq_export->exp_connection == NULL)
                conn = ptlrpc_connection_get(req->rq_peer, req->rq_self, NULL);
        else
                conn = ptlrpc_connection_addref(req->rq_export->exp_connection);

        if (unlikely(conn == NULL)) {
                CERROR("not replying on NULL connection\n"); /* bug 9635 */
                return -ENOTCONN;
        }
        cfs_atomic_inc (&svc->srv_outstanding_replies);
        ptlrpc_rs_addref(rs);                   /* +1 ref for the network */

        rc = sptlrpc_svc_wrap_reply(req);
        if (unlikely(rc))
                goto out;

        req->rq_sent = cfs_time_current_sec();

        rc = ptl_send_buf (&rs->rs_md_h, rs->rs_repbuf, rs->rs_repdata_len,
                           (rs->rs_difficult && !rs->rs_no_ack) ?
                           LNET_ACK_REQ : LNET_NOACK_REQ,
                           &rs->rs_cb_id, conn, svc->srv_rep_portal,
                           req->rq_xid, req->rq_reply_off);
out:
        if (unlikely(rc != 0)) {
                cfs_atomic_dec (&svc->srv_outstanding_replies);
                ptlrpc_req_drop_rs(req);
        }
        ptlrpc_connection_put(conn);
        return rc;
}

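/* Send a normal (non-error) reply, unless the request needs no reply. */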
int ptlrpc_reply (struct ptlrpc_request *req)
{
        if (req->rq_no_reply)
                return 0;
        else
                return (ptlrpc_send_reply(req, 0));
}

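/* Pack a reply buffer if necessary and send an error reply carrying
 * req->rq_status. */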
int ptlrpc_send_error(struct ptlrpc_request *req, int may_be_difficult)
{
        int rc;
        ENTRY;

        if (req->rq_no_reply)
                RETURN(0);

        if (!req->rq_repmsg) {
                rc = lustre_pack_reply(req, 1, NULL, NULL);
                if (rc)
                        RETURN(rc);
        }

        req->rq_type = PTL_RPC_MSG_ERR;

        rc = ptlrpc_send_reply(req, may_be_difficult);
        RETURN(rc);
}

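/* Convenience wrapper: send a non-difficult error reply. */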
int ptlrpc_error(struct ptlrpc_request *req)
{
        return ptlrpc_send_error(req, 0);
}

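/* Client side: send the request over the network.  The request's bulk, if
 * any, is registered first, and unless noreply is set an ME/MD is attached
 * on the reply portal with an infinite threshold so early replies can
 * reuse it.  The request deadline and the pinger are updated as the
 * request goes out. */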
int ptl_send_rpc(struct ptlrpc_request *request, int noreply)
{
        int rc;
        int rc2;
        struct ptlrpc_connection *connection;
        lnet_handle_me_t  reply_me_h;
        lnet_md_t         reply_md;
        struct obd_device *obd = request->rq_import->imp_obd;
        ENTRY;

        if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_DROP_RPC))
                RETURN(0);

        LASSERT(request->rq_type == PTL_RPC_MSG_REQUEST);
        LASSERT(request->rq_wait_ctx == 0);

        /* If this is a re-transmit, we're required to have disengaged
         * cleanly from the previous attempt */
        LASSERT(!request->rq_receiving_reply);

        if (request->rq_import->imp_obd &&
            request->rq_import->imp_obd->obd_fail) {
                CDEBUG(D_HA, "muting rpc for failed imp obd %s\n",
                       request->rq_import->imp_obd->obd_name);
                /* this prevents us from waiting in ptlrpc_queue_wait */
                request->rq_err = 1;
                request->rq_status = -ENODEV;
                RETURN(-ENODEV);
        }

        connection = request->rq_import->imp_connection;

        lustre_msg_set_handle(request->rq_reqmsg,
                              &request->rq_import->imp_remote_handle);
        lustre_msg_set_type(request->rq_reqmsg, PTL_RPC_MSG_REQUEST);
        lustre_msg_set_conn_cnt(request->rq_reqmsg,
                                request->rq_import->imp_conn_cnt);
        lustre_msghdr_set_flags(request->rq_reqmsg,
                                request->rq_import->imp_msghdr_flags);

        if (request->rq_resend)
                lustre_msg_add_flags(request->rq_reqmsg, MSG_RESENT);

        rc = sptlrpc_cli_wrap_request(request);
        if (rc)
                RETURN(rc);

        /* bulk register should be done after wrap_request() */
        if (request->rq_bulk != NULL) {
                rc = ptlrpc_register_bulk (request);
                if (rc != 0)
                        RETURN(rc);
        }

        if (!noreply) {
                LASSERT (request->rq_replen != 0);
                if (request->rq_repbuf == NULL) {
                        LASSERT(request->rq_repdata == NULL);
                        LASSERT(request->rq_repmsg == NULL);
                        rc = sptlrpc_cli_alloc_repbuf(request,
                                                      request->rq_replen);
                        if (rc) {
                                /* this prevents us from looping in
                                 * ptlrpc_queue_wait */
                                request->rq_err = 1;
                                request->rq_status = rc;
                                GOTO(cleanup_bulk, rc);
                        }
                } else {
                        request->rq_repdata = NULL;
                        request->rq_repmsg = NULL;
                }

                rc = LNetMEAttach(request->rq_reply_portal,/*XXX FIXME bug 249*/
                                  connection->c_peer, request->rq_xid, 0,
                                  LNET_UNLINK, LNET_INS_AFTER, &reply_me_h);
                if (rc != 0) {
                        CERROR("LNetMEAttach failed: %d\n", rc);
                        LASSERT (rc == -ENOMEM);
                        GOTO(cleanup_bulk, rc = -ENOMEM);
                }
        }

        cfs_spin_lock(&request->rq_lock);
        /* If the MD attach succeeds, there _will_ be a reply_in callback */
        request->rq_receiving_reply = !noreply;
        /* We are responsible for unlinking the reply buffer */
        request->rq_must_unlink = !noreply;
        /* Clear any flags that may be present from previous sends. */
        request->rq_replied = 0;
        request->rq_err = 0;
        request->rq_timedout = 0;
        request->rq_net_err = 0;
        request->rq_resend = 0;
        request->rq_restart = 0;
        request->rq_reply_truncate = 0;
        cfs_spin_unlock(&request->rq_lock);

        if (!noreply) {
                reply_md.start     = request->rq_repbuf;
                reply_md.length    = request->rq_repbuf_len;
                /* Allow multiple early replies */
                reply_md.threshold = LNET_MD_THRESH_INF;
                /* Manage remote for early replies */
                reply_md.options   = PTLRPC_MD_OPTIONS | LNET_MD_OP_PUT |
                        LNET_MD_MANAGE_REMOTE |
                        LNET_MD_TRUNCATE; /* allow truncation, which shows up
                                           * as an EOVERFLOW error */
                reply_md.user_ptr  = &request->rq_reply_cbid;
                reply_md.eq_handle = ptlrpc_eq_h;

                /* We must see the unlink callback to unset rq_must_unlink,
                   so we can't auto-unlink */
                rc = LNetMDAttach(reply_me_h, reply_md, LNET_RETAIN,
                                  &request->rq_reply_md_h);
                if (rc != 0) {
                        CERROR("LNetMDAttach failed: %d\n", rc);
                        LASSERT (rc == -ENOMEM);
                        cfs_spin_lock(&request->rq_lock);
                        /* ...but the MD attach didn't succeed... */
                        request->rq_receiving_reply = 0;
                        cfs_spin_unlock(&request->rq_lock);
                        GOTO(cleanup_me, rc = -ENOMEM);
                }

                CDEBUG(D_NET, "Setup reply buffer: %u bytes, xid "LPU64
                       ", portal %u\n",
                       request->rq_repbuf_len, request->rq_xid,
                       request->rq_reply_portal);
        }

        /* add references on request for request_out_callback */
        ptlrpc_request_addref(request);
        if (obd->obd_svc_stats != NULL)
                lprocfs_counter_add(obd->obd_svc_stats, PTLRPC_REQACTIVE_CNTR,
                        cfs_atomic_read(&request->rq_import->imp_inflight));

        OBD_FAIL_TIMEOUT(OBD_FAIL_PTLRPC_DELAY_SEND, request->rq_timeout + 5);

        cfs_gettimeofday(&request->rq_arrival_time);
        request->rq_sent = cfs_time_current_sec();
        /* We give the server rq_timeout secs to process the req, and
           add the network latency for our local timeout. */
        request->rq_deadline = request->rq_sent + request->rq_timeout +
                ptlrpc_at_get_net_latency(request);

        ptlrpc_pinger_sending_on_import(request->rq_import);

        DEBUG_REQ(D_INFO, request, "send flg=%x",
                  lustre_msg_get_flags(request->rq_reqmsg));
        rc = ptl_send_buf(&request->rq_req_md_h,
                          request->rq_reqbuf, request->rq_reqdata_len,
                          LNET_NOACK_REQ, &request->rq_req_cbid,
                          connection,
                          request->rq_request_portal,
                          request->rq_xid, 0);
        if (rc == 0)
                RETURN(rc);

        ptlrpc_req_finished(request);
        if (noreply)
                RETURN(rc);

 cleanup_me:
        /* MEUnlink is safe; the PUT didn't even get off the ground, and
         * nobody apart from the PUT's target has the right nid+XID to
         * access the reply buffer. */
        rc2 = LNetMEUnlink(reply_me_h);
        LASSERT (rc2 == 0);
        /* UNLINKED callback called synchronously */
        LASSERT(!request->rq_receiving_reply);

 cleanup_bulk:
        /* We do the sync unlink here because there was no real transfer, so
         * the chance of a long unlink due to a sluggish net is smaller. */
        ptlrpc_unregister_bulk(request, 0);
        return rc;
}

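/* Post a request buffer descriptor on the service's request portal so
 * incoming client RPCs can land in it.  The MD accepts PUTs of up to
 * srv_max_req_size each and stays attached until the buffer is consumed. */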
int ptlrpc_register_rqbd(struct ptlrpc_request_buffer_desc *rqbd)
{
        struct ptlrpc_service   *service = rqbd->rqbd_service;
        static lnet_process_id_t  match_id = {LNET_NID_ANY, LNET_PID_ANY};
        int                      rc;
        lnet_md_t                 md;
        lnet_handle_me_t          me_h;

        CDEBUG(D_NET, "LNetMEAttach: portal %d\n",
               service->srv_req_portal);

        if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_RQBD))
                return (-ENOMEM);

        rc = LNetMEAttach(service->srv_req_portal,
                          match_id, 0, ~0, LNET_UNLINK, LNET_INS_AFTER, &me_h);
        if (rc != 0) {
                CERROR("LNetMEAttach failed: %d\n", rc);
                return (-ENOMEM);
        }

        LASSERT(rqbd->rqbd_refcount == 0);
        rqbd->rqbd_refcount = 1;

        md.start     = rqbd->rqbd_buffer;
        md.length    = service->srv_buf_size;
        md.max_size  = service->srv_max_req_size;
        md.threshold = LNET_MD_THRESH_INF;
        md.options   = PTLRPC_MD_OPTIONS | LNET_MD_OP_PUT | LNET_MD_MAX_SIZE;
        md.user_ptr  = &rqbd->rqbd_cbid;
        md.eq_handle = ptlrpc_eq_h;

        rc = LNetMDAttach(me_h, md, LNET_UNLINK, &rqbd->rqbd_md_h);
        if (rc == 0)
                return (0);

        CERROR("LNetMDAttach failed: %d\n", rc);
        LASSERT (rc == -ENOMEM);
        rc = LNetMEUnlink (me_h);
        LASSERT (rc == 0);
        rqbd->rqbd_refcount = 0;

        return (-ENOMEM);
}