/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2012, 2017, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 */

#define DEBUG_SUBSYSTEM S_RPC
#include <obd_support.h>
#include <lustre_net.h>
#include <lustre_lib.h>
#include <obd.h>
#include <obd_class.h>
#include "ptlrpc_internal.h"
#include <lnet/lib-lnet.h> /* for CFS_FAIL_PTLRPC_OST_BULK_CB2 */

/**
 * Helper function. Sends \a len bytes from \a base at offset \a offset
 * to portal \a portal of peer \a peer_id.
 * Returns 0 on success or error code.
 */
static int ptl_send_buf(struct lnet_handle_md *mdh, void *base, int len,
                        enum lnet_ack_req ack, struct ptlrpc_cb_id *cbid,
                        lnet_nid_t self, struct lnet_process_id peer_id,
                        int portal, __u64 xid, unsigned int offset,
                        struct lnet_handle_md *bulk_cookie)
{
        int rc;
        struct lnet_md md;
        ENTRY;

        LASSERT(portal != 0);
        CDEBUG(D_INFO, "peer_id %s\n", libcfs_id2str(peer_id));
        md.start     = base;
        md.length    = len;
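        /* the MD accepts this many network operations before auto-unlink:
         * the SEND event, plus the ACK event when one was requested */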
        md.threshold = (ack == LNET_ACK_REQ) ? 2 : 1;
        md.options   = PTLRPC_MD_OPTIONS;
        md.user_ptr  = cbid;
        md.eq_handle = ptlrpc_eq;
        LNetInvalidateMDHandle(&md.bulk_handle);

        if (bulk_cookie) {
                md.bulk_handle = *bulk_cookie;
                md.options |= LNET_MD_BULK_HANDLE;
        }

        if (unlikely(ack == LNET_ACK_REQ &&
                     OBD_FAIL_CHECK_ORSET(OBD_FAIL_PTLRPC_ACK,
                                          OBD_FAIL_ONCE))) {
                /* don't ask for the ack to simulate failing client */
                ack = LNET_NOACK_REQ;
        }

        rc = LNetMDBind(md, LNET_UNLINK, mdh);
        if (unlikely(rc != 0)) {
                CERROR("LNetMDBind failed: %d\n", rc);
                LASSERT(rc == -ENOMEM);
                RETURN(-ENOMEM);
        }

        CDEBUG(D_NET, "Sending %d bytes to portal %d, xid %lld, offset %u\n",
               len, portal, xid, offset);

        rc = LNetPut(self, *mdh, ack,
                     peer_id, portal, xid, offset, 0);
        if (unlikely(rc != 0)) {
                int rc2;
                /* We're going to get an UNLINK event when we unlink below,
                 * which will complete just like any other failed send, so
                 * we fall through and return success here! */
                CERROR("LNetPut(%s, %d, %lld) failed: %d\n",
                       libcfs_id2str(peer_id), portal, xid, rc);
                rc2 = LNetMDUnlink(*mdh);
                LASSERTF(rc2 == 0, "rc2 = %d\n", rc2);
        }

        RETURN(0);
}

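/* Unlink every MD in an array, e.g. all per-brw MDs of a bulk descriptor */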
static void mdunlink_iterate_helper(struct lnet_handle_md *bd_mds, int count)
{
        int i;

        for (i = 0; i < count; i++)
                LNetMDUnlink(bd_mds[i]);
}

#ifdef HAVE_SERVER_SUPPORT
/**
 * Prepare bulk descriptor for specified incoming request \a req that
 * can fit \a nfrags * pages. \a type is bulk type. \a portal is where
 * the bulk is to be sent. Used on the server side after the request
 * has been received.
 * Returns pointer to newly allocated initialized bulk descriptor or NULL on
 * error.
 */
struct ptlrpc_bulk_desc *ptlrpc_prep_bulk_exp(struct ptlrpc_request *req,
                                              unsigned nfrags, unsigned max_brw,
                                              unsigned int type,
                                              unsigned portal,
                                              const struct ptlrpc_bulk_frag_ops
                                                *ops)
{
        struct obd_export *exp = req->rq_export;
        struct ptlrpc_bulk_desc *desc;

        ENTRY;
        LASSERT(ptlrpc_is_bulk_op_active(type));

        desc = ptlrpc_new_bulk(nfrags, max_brw, type, portal, ops);
        if (desc == NULL)
                RETURN(NULL);

        desc->bd_export = class_export_get(exp);
        desc->bd_req = req;

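        /* events on the bulk MDs are dispatched to server_bulk_callback(),
         * which receives this cbid as md.user_ptr */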
        desc->bd_cbid.cbid_fn  = server_bulk_callback;
        desc->bd_cbid.cbid_arg = desc;

        /* NB we don't assign rq_bulk here; server-side requests are
         * re-used, and the handler frees the bulk desc explicitly. */

        return desc;
}
EXPORT_SYMBOL(ptlrpc_prep_bulk_exp);

/**
 * Starts bulk transfer for descriptor \a desc on the server.
 * Returns 0 on success or error code.
 */
int ptlrpc_start_bulk_transfer(struct ptlrpc_bulk_desc *desc)
{
        struct obd_export *exp = desc->bd_export;
        lnet_nid_t self_nid;
        struct lnet_process_id peer_id;
        int rc = 0;
        __u64 mbits;
        int posted_md;
        int total_md;
        struct lnet_md md;
        ENTRY;

        if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_BULK_PUT_NET))
                RETURN(0);

        /* NB no locking required until desc is on the network */
        LASSERT(desc->bd_md_count == 0);
        LASSERT(ptlrpc_is_bulk_op_active(desc->bd_type));

        LASSERT(desc->bd_cbid.cbid_fn == server_bulk_callback);
        LASSERT(desc->bd_cbid.cbid_arg == desc);

        /*
         * Multi-Rail: get the preferred self and peer NIDs from the
         * request, so they are based on the route taken by the
         * message.
         */
        self_nid = desc->bd_req->rq_self;
        peer_id = desc->bd_req->rq_source;

        /* NB total length may be 0 for a read past EOF, so we send 0
         * length bulks, since the client expects bulk events.
         *
         * The client may not need all of the bulk mbits for the RPC. The RPC
         * carries the mbits of the highest bulk MD it needs; the server
         * clears the low bits to recover the first mbits, and masks off the
         * high bits to get the bulk MD count for this RPC. LU-1431 */
        mbits = desc->bd_req->rq_mbits & ~((__u64)desc->bd_md_max_brw - 1);
        total_md = desc->bd_req->rq_mbits - mbits + 1;
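        /* e.g. with bd_md_max_brw == 8 and rq_mbits ending in ...101,
         * mbits starts at ...000 and total_md == 6 */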

        desc->bd_md_count = total_md;
        desc->bd_failure = 0;

        md.user_ptr = &desc->bd_cbid;
        md.eq_handle = ptlrpc_eq;
        md.threshold = 2; /* SENT and ACK/REPLY */

        for (posted_md = 0; posted_md < total_md; mbits++) {
                md.options = PTLRPC_MD_OPTIONS;

                /* NB it's assumed that source and sink buffer frags are
                 * page-aligned. Otherwise we'd have to send client bulk
                 * sizes over and split server buffer accordingly */
                ptlrpc_fill_bulk_md(&md, desc, posted_md);
                rc = LNetMDBind(md, LNET_UNLINK, &desc->bd_mds[posted_md]);
                if (rc != 0) {
                        CERROR("%s: LNetMDBind failed for MD %u: rc = %d\n",
                               exp->exp_obd->obd_name, posted_md, rc);
                        LASSERT(rc == -ENOMEM);
                        if (posted_md == 0) {
                                desc->bd_md_count = 0;
                                RETURN(-ENOMEM);
                        }
                        break;
                }

                /* sanity.sh 224c: let's skip the last md */
                if (posted_md == desc->bd_md_max_brw - 1)
                        OBD_FAIL_CHECK_RESET(OBD_FAIL_PTLRPC_CLIENT_BULK_CB3,
                                             CFS_FAIL_PTLRPC_OST_BULK_CB2);

                /* Network is about to get at the memory */
                if (ptlrpc_is_bulk_put_source(desc->bd_type))
                        rc = LNetPut(self_nid, desc->bd_mds[posted_md],
                                     LNET_ACK_REQ, peer_id,
                                     desc->bd_portal, mbits, 0, 0);
                else
                        rc = LNetGet(self_nid, desc->bd_mds[posted_md],
                                     peer_id, desc->bd_portal, mbits, 0, false);

                posted_md++;
                if (rc != 0) {
                        CERROR("%s: failed bulk transfer with %s:%u x%llu: "
                               "rc = %d\n", exp->exp_obd->obd_name,
                               libcfs_id2str(peer_id), desc->bd_portal,
                               mbits, rc);
                        break;
                }
        }

        if (rc != 0) {
                /* Can't send, so we unlink the MD bound above.  The UNLINK
                 * event this creates will signal completion with failure,
                 * so we return SUCCESS here! */
                spin_lock(&desc->bd_lock);
                desc->bd_md_count -= total_md - posted_md;
                spin_unlock(&desc->bd_lock);
                LASSERT(desc->bd_md_count >= 0);

                mdunlink_iterate_helper(desc->bd_mds, posted_md);
                RETURN(0);
        }

        CDEBUG(D_NET, "Transferring %u pages %u bytes via portal %d "
               "id %s mbits %#llx-%#llx\n", desc->bd_iov_count,
               desc->bd_nob, desc->bd_portal, libcfs_id2str(peer_id),
               mbits - posted_md, mbits - 1);

        RETURN(0);
}

/**
 * Server side bulk abort. Idempotent. Not thread-safe (i.e. only
 * serialises with completion callback)
 */
void ptlrpc_abort_bulk(struct ptlrpc_bulk_desc *desc)
{
        LASSERT(!in_interrupt());               /* might sleep */

        if (!ptlrpc_server_bulk_active(desc))   /* completed or */
                return;                         /* never started */

        /* We used to poison the pages with 0xab here because we did not want
         * to send any meaningful data over the wire for evicted clients (bug
         * 9297). However, this is no longer safe now that we use the page
         * cache on the OSS (bug 20560). */

        /* The unlink ensures the callback happens ASAP and is the last
         * one.  If it fails, it must be because completion just happened,
         * but we must still wait_event_idle_timeout() in this case, to give
         * us a chance to run server_bulk_callback()
         */
        mdunlink_iterate_helper(desc->bd_mds, desc->bd_md_max_brw);

        for (;;) {
                /* Network access will complete in finite time but the HUGE
                 * timeout lets us CWARN for visibility of sluggish NALs */
                int seconds = LONG_UNLINK;

                while (seconds > 0 &&
                       wait_event_idle_timeout(desc->bd_waitq,
                                               !ptlrpc_server_bulk_active(desc),
                                               cfs_time_seconds(1)) == 0)
                        seconds -= 1;
                if (seconds > 0)
                        return;

                CWARN("Unexpectedly long timeout: desc %p\n", desc);
        }
}
#endif /* HAVE_SERVER_SUPPORT */

/**
 * Register bulk at the sender for later transfer.
 * Returns 0 on success or error code.
 */
int ptlrpc_register_bulk(struct ptlrpc_request *req)
{
        struct ptlrpc_bulk_desc *desc = req->rq_bulk;
        struct lnet_process_id peer;
        int rc = 0;
        int posted_md;
        int total_md;
        __u64 mbits;
        struct lnet_me *me;
        struct lnet_md md;
        ENTRY;

        if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_BULK_GET_NET))
                RETURN(0);

        /* NB no locking required until desc is on the network */
        LASSERT(desc->bd_nob > 0);
        LASSERT(desc->bd_md_count == 0);
        LASSERT(desc->bd_md_max_brw <= PTLRPC_BULK_OPS_COUNT);
        LASSERT(desc->bd_iov_count <= PTLRPC_MAX_BRW_PAGES);
        LASSERT(desc->bd_req != NULL);
        LASSERT(ptlrpc_is_bulk_op_passive(desc->bd_type));

        /* clean up the state of the bulk as it will be reused */
        if (req->rq_resend || req->rq_send_state == LUSTRE_IMP_REPLAY)
                desc->bd_nob_transferred = 0;
        else if (desc->bd_nob_transferred != 0)
                /* If the network failed after an RPC was sent, this condition
                 * could happen.  Rather than assert (was here before), return
                 * an EIO error. */
                RETURN(-EIO);

        desc->bd_failure = 0;

        peer = desc->bd_import->imp_connection->c_peer;

        LASSERT(desc->bd_cbid.cbid_fn == client_bulk_callback);
        LASSERT(desc->bd_cbid.cbid_arg == desc);

        total_md = (desc->bd_iov_count + LNET_MAX_IOV - 1) / LNET_MAX_IOV;
        /* rq_mbits is matchbits of the final bulk */
        mbits = req->rq_mbits - total_md + 1;
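        /* e.g. 2 * LNET_MAX_IOV pages need total_md == 2 MDs, matched
         * on rq_mbits - 1 and rq_mbits */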

        LASSERTF(mbits == (req->rq_mbits & PTLRPC_BULK_OPS_MASK),
                 "first mbits = x%llu, last mbits = x%llu\n",
                 mbits, req->rq_mbits);
        LASSERTF(!(desc->bd_registered &&
                   req->rq_send_state != LUSTRE_IMP_REPLAY) ||
                 mbits != desc->bd_last_mbits,
                 "registered: %d  rq_mbits: %llu bd_last_mbits: %llu\n",
                 desc->bd_registered, mbits, desc->bd_last_mbits);

        desc->bd_registered = 1;
        desc->bd_last_mbits = mbits;
        desc->bd_md_count = total_md;
        md.user_ptr = &desc->bd_cbid;
        md.eq_handle = ptlrpc_eq;
        md.threshold = 1;                       /* PUT or GET */

        for (posted_md = 0; posted_md < total_md; posted_md++, mbits++) {
                md.options = PTLRPC_MD_OPTIONS |
                             (ptlrpc_is_bulk_op_get(desc->bd_type) ?
                              LNET_MD_OP_GET : LNET_MD_OP_PUT);
                ptlrpc_fill_bulk_md(&md, desc, posted_md);

                if (posted_md > 0 && posted_md + 1 == total_md &&
                    OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_BULK_ATTACH)) {
                        rc = -ENOMEM;
                } else {
                        me = LNetMEAttach(desc->bd_portal, peer, mbits, 0,
                                          LNET_UNLINK, LNET_INS_AFTER);
                        rc = PTR_ERR_OR_ZERO(me);
                }
                if (rc != 0) {
                        CERROR("%s: LNetMEAttach failed x%llu/%d: rc = %d\n",
                               desc->bd_import->imp_obd->obd_name, mbits,
                               posted_md, rc);
                        break;
                }

                /* About to let the network at it... */
                rc = LNetMDAttach(me, md, LNET_UNLINK,
                                  &desc->bd_mds[posted_md]);
                if (rc != 0) {
                        CERROR("%s: LNetMDAttach failed x%llu/%d: rc = %d\n",
                               desc->bd_import->imp_obd->obd_name, mbits,
                               posted_md, rc);
                        LNetMEUnlink(me);
                        break;
                }
        }

        if (rc != 0) {
                LASSERT(rc == -ENOMEM);
                spin_lock(&desc->bd_lock);
                desc->bd_md_count -= total_md - posted_md;
                spin_unlock(&desc->bd_lock);
                LASSERT(desc->bd_md_count >= 0);
                mdunlink_iterate_helper(desc->bd_mds, desc->bd_md_max_brw);
                req->rq_status = -ENOMEM;
                desc->bd_registered = 0;
                RETURN(-ENOMEM);
        }

        spin_lock(&desc->bd_lock);
        /* Holler if the peer manages to touch buffers before it knows
         * the mbits */
        if (desc->bd_md_count != total_md)
                CWARN("%s: Peer %s touched %d buffers while I registered\n",
                      desc->bd_import->imp_obd->obd_name, libcfs_id2str(peer),
                      total_md - desc->bd_md_count);
        spin_unlock(&desc->bd_lock);

        CDEBUG(D_NET, "Setup %u bulk %s buffers: %u pages %u bytes, "
               "mbits x%#llx-%#llx, portal %u\n", desc->bd_md_count,
               ptlrpc_is_bulk_op_get(desc->bd_type) ? "get-source" : "put-sink",
               desc->bd_iov_count, desc->bd_nob,
               desc->bd_last_mbits, req->rq_mbits, desc->bd_portal);

        RETURN(0);
}

/**
 * Disconnect a bulk desc from the network. Idempotent. Not
 * thread-safe (i.e. only interlocks with completion callback).
 * Returns 1 on success or 0 if network unregistration failed for whatever
 * reason.
 */
int ptlrpc_unregister_bulk(struct ptlrpc_request *req, int async)
{
        struct ptlrpc_bulk_desc *desc = req->rq_bulk;
        ENTRY;

        LASSERT(!in_interrupt());               /* might sleep */

        /* Let's set up a deadline for the bulk unlink. */
        if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_BULK_UNLINK) &&
            async && req->rq_bulk_deadline == 0 && cfs_fail_val == 0)
                req->rq_bulk_deadline = ktime_get_real_seconds() + LONG_UNLINK;

        if (ptlrpc_client_bulk_active(req) == 0)        /* completed or */
                RETURN(1);                              /* never registered */

        LASSERT(desc->bd_req == req);  /* bd_req NULL until registered */

        /* the unlink ensures the callback happens ASAP and is the last
         * one.  If it fails, it must be because completion just happened,
         * but we must still wait_event_idle_timeout() in this case to give
         * us a chance to run client_bulk_callback()
         */
        mdunlink_iterate_helper(desc->bd_mds, desc->bd_md_max_brw);

        if (ptlrpc_client_bulk_active(req) == 0)        /* completed or */
                RETURN(1);                              /* never registered */

        /* Move to "Unregistering" phase as bulk was not unlinked yet. */
        ptlrpc_rqphase_move(req, RQ_PHASE_UNREG_BULK);

        /* Do not wait for unlink to finish. */
        if (async)
                RETURN(0);

        for (;;) {
                /* The wq argument is ignored by user-space wait_event macros */
                wait_queue_head_t *wq = (req->rq_set != NULL) ?
                                        &req->rq_set->set_waitq :
                                        &req->rq_reply_waitq;
                /*
                 * Network access will complete in finite time but the HUGE
                 * timeout lets us CWARN for visibility of sluggish NALs.
                 */
                int seconds = LONG_UNLINK;

                while (seconds > 0 &&
                       wait_event_idle_timeout(*wq,
                                               !ptlrpc_client_bulk_active(req),
                                               cfs_time_seconds(1)) == 0)
                        seconds -= 1;
                if (seconds > 0) {
                        ptlrpc_rqphase_move(req, req->rq_next_phase);
                        RETURN(1);
                }

                DEBUG_REQ(D_WARNING, req, "Unexpectedly long timeout: desc %p",
                          desc);
        }
        RETURN(0);
}

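/**
 * Fill the adaptive-timeout fields of the reply for \a req: the measured
 * service time for this request, and the service time estimate for future
 * requests (or 0, which clients ignore, for an error reply during recovery).
 */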
static void ptlrpc_at_set_reply(struct ptlrpc_request *req, int flags)
{
        struct ptlrpc_service_part *svcpt = req->rq_rqbd->rqbd_svcpt;
        struct ptlrpc_service *svc = svcpt->scp_service;
        int service_time = max_t(int, ktime_get_real_seconds() -
                                 req->rq_arrival_time.tv_sec, 1);

        if (!(flags & PTLRPC_REPLY_EARLY) &&
            (req->rq_type != PTL_RPC_MSG_ERR) &&
            (req->rq_reqmsg != NULL) &&
            !(lustre_msg_get_flags(req->rq_reqmsg) &
              (MSG_RESENT | MSG_REPLAY |
               MSG_REQ_REPLAY_DONE | MSG_LOCK_REPLAY_DONE))) {
                /* early replies, errors and recovery requests don't count
                 * toward our service time estimate */
                int oldse = at_measured(&svcpt->scp_at_estimate, service_time);

                if (oldse != 0) {
                        DEBUG_REQ(D_ADAPTTO, req,
                                  "svc %s changed estimate from %d to %d",
                                  svc->srv_name, oldse,
                                  at_get(&svcpt->scp_at_estimate));
                }
        }
        /* Report actual service time for client latency calc */
        lustre_msg_set_service_time(req->rq_repmsg, service_time);
        /* Report service time estimate for future client reqs, but report 0
         * (to be ignored by client) if it's an error reply during recovery.
         * b=15815
         */
        if (req->rq_type == PTL_RPC_MSG_ERR &&
            (req->rq_export == NULL ||
             req->rq_export->exp_obd->obd_recovering)) {
                lustre_msg_set_timeout(req->rq_repmsg, 0);
        } else {
                time64_t timeout;

                if (req->rq_export && req->rq_reqmsg != NULL &&
                    (flags & PTLRPC_REPLY_EARLY) &&
                    lustre_msg_get_flags(req->rq_reqmsg) &
                    (MSG_REPLAY | MSG_REQ_REPLAY_DONE | MSG_LOCK_REPLAY_DONE)) {
                        struct obd_device *exp_obd = req->rq_export->exp_obd;

                        timeout = ktime_get_real_seconds() -
                                  req->rq_arrival_time.tv_sec +
                                  min_t(time64_t, at_extra,
                                        exp_obd->obd_recovery_timeout / 4);
                } else {
                        timeout = at_get(&svcpt->scp_at_estimate);
                }
                lustre_msg_set_timeout(req->rq_repmsg, timeout);
        }

        if (req->rq_reqmsg &&
            !(lustre_msghdr_get_flags(req->rq_reqmsg) & MSGHDR_AT_SUPPORT)) {
                CDEBUG(D_ADAPTTO, "No early reply support: flags=%#x "
                       "req_flags=%#x magic=%x/%x len=%d\n",
                       flags, lustre_msg_get_flags(req->rq_reqmsg),
                       lustre_msg_get_magic(req->rq_reqmsg),
                       lustre_msg_get_magic(req->rq_repmsg), req->rq_replen);
        }
}

/**
 * Send request reply from request \a req reply buffer.
 * \a flags defines reply types.
 * Returns 0 on success or error code.
 */
int ptlrpc_send_reply(struct ptlrpc_request *req, int flags)
{
        struct ptlrpc_reply_state *rs = req->rq_reply_state;
        struct ptlrpc_connection *conn;
        int rc;

        /* We must already have a reply buffer (only ptlrpc_error() may be
         * called without one). The reply generated by sptlrpc layer (e.g.
         * error notify, etc.) might have NULL req->rq_reqmsg; otherwise we
         * must have a request buffer which is either the actual (swabbed)
         * incoming request, or a saved copy if this is a req saved in
         * target_queue_final_reply().
         */
        LASSERT(req->rq_no_reply == 0);
        LASSERT(req->rq_reqbuf != NULL);
        LASSERT(rs != NULL);
        LASSERT((flags & PTLRPC_REPLY_MAYBE_DIFFICULT) || !rs->rs_difficult);
        LASSERT(req->rq_repmsg != NULL);
        LASSERT(req->rq_repmsg == rs->rs_msg);
        LASSERT(rs->rs_cb_id.cbid_fn == reply_out_callback);
        LASSERT(rs->rs_cb_id.cbid_arg == rs);

        /* There may be no rq_export during failover */

        if (unlikely(req->rq_export && req->rq_export->exp_obd &&
                     req->rq_export->exp_obd->obd_fail)) {
                /* Failed obd's only send ENODEV */
                req->rq_type = PTL_RPC_MSG_ERR;
                req->rq_status = -ENODEV;
                CDEBUG(D_HA, "sending ENODEV from failed obd %d\n",
                       req->rq_export->exp_obd->obd_minor);
        }

        if (req->rq_type != PTL_RPC_MSG_ERR)
                req->rq_type = PTL_RPC_MSG_REPLY;

        lustre_msg_set_type(req->rq_repmsg, req->rq_type);
        lustre_msg_set_status(req->rq_repmsg,
                              ptlrpc_status_hton(req->rq_status));
        lustre_msg_set_opc(req->rq_repmsg, req->rq_reqmsg ?
                           lustre_msg_get_opc(req->rq_reqmsg) : 0);

        target_pack_pool_reply(req);

        ptlrpc_at_set_reply(req, flags);

        if (req->rq_export == NULL || req->rq_export->exp_connection == NULL)
                conn = ptlrpc_connection_get(req->rq_peer, req->rq_self, NULL);
        else
                conn = ptlrpc_connection_addref(req->rq_export->exp_connection);

        if (unlikely(conn == NULL)) {
                CERROR("not replying on NULL connection\n"); /* bug 9635 */
                return -ENOTCONN;
        }
        ptlrpc_rs_addref(rs);                   /* +1 ref for the network */

        rc = sptlrpc_svc_wrap_reply(req);
        if (unlikely(rc))
                goto out;

        req->rq_sent = ktime_get_real_seconds();

        rc = ptl_send_buf(&rs->rs_md_h, rs->rs_repbuf, rs->rs_repdata_len,
                          (rs->rs_difficult && !rs->rs_no_ack) ?
                          LNET_ACK_REQ : LNET_NOACK_REQ,
                          &rs->rs_cb_id, req->rq_self, req->rq_source,
                          ptlrpc_req2svc(req)->srv_rep_portal,
                          req->rq_xid, req->rq_reply_off, NULL);
out:
        if (unlikely(rc != 0))
                ptlrpc_req_drop_rs(req);
        ptlrpc_connection_put(conn);
        return rc;
}

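/**
 * Send a normal reply for request \a req, unless the request needs
 * no reply (rq_no_reply is set).
 */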
int ptlrpc_reply(struct ptlrpc_request *req)
{
        if (req->rq_no_reply)
                return 0;

        return ptlrpc_send_reply(req, 0);
}

/**
 * For request \a req send an error reply back. Create empty
 * reply buffers if necessary.
 */
int ptlrpc_send_error(struct ptlrpc_request *req, int may_be_difficult)
{
        int rc;
        ENTRY;

        if (req->rq_no_reply)
                RETURN(0);

        if (!req->rq_repmsg) {
                rc = lustre_pack_reply(req, 1, NULL, NULL);
                if (rc)
                        RETURN(rc);
        }

        if (req->rq_status != -ENOSPC && req->rq_status != -EACCES &&
            req->rq_status != -EPERM && req->rq_status != -ENOENT &&
            req->rq_status != -EINPROGRESS && req->rq_status != -EDQUOT)
                req->rq_type = PTL_RPC_MSG_ERR;

        rc = ptlrpc_send_reply(req, may_be_difficult);
        RETURN(rc);
}

int ptlrpc_error(struct ptlrpc_request *req)
{
        return ptlrpc_send_error(req, 0);
}

/**
 * Send request \a request.
 * If \a noreply is set, don't expect any reply back and don't set up
 * reply buffers.
 * Returns 0 on success or error code.
 */
int ptl_send_rpc(struct ptlrpc_request *request, int noreply)
{
        int rc;
        int mpflag = 0;
        struct lnet_handle_md bulk_cookie;
        struct ptlrpc_connection *connection;
        struct lnet_me *reply_me = NULL;
        struct lnet_md reply_md;
        struct obd_import *imp = request->rq_import;
        struct obd_device *obd = imp->imp_obd;
        ENTRY;

        LNetInvalidateMDHandle(&bulk_cookie);

        if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_DROP_RPC))
                RETURN(0);

        LASSERT(request->rq_type == PTL_RPC_MSG_REQUEST);
        LASSERT(request->rq_wait_ctx == 0);

        /* If this is a re-transmit, we're required to have disengaged
         * cleanly from the previous attempt */
        LASSERT(!request->rq_receiving_reply);
        LASSERT(!((lustre_msg_get_flags(request->rq_reqmsg) & MSG_REPLAY) &&
                (imp->imp_state == LUSTRE_IMP_FULL)));

        if (unlikely(obd != NULL && obd->obd_fail)) {
                CDEBUG(D_HA, "muting rpc for failed imp obd %s\n",
                       obd->obd_name);
                /* this prevents us from waiting in ptlrpc_queue_wait */
                spin_lock(&request->rq_lock);
                request->rq_err = 1;
                spin_unlock(&request->rq_lock);
                request->rq_status = -ENODEV;
                RETURN(-ENODEV);
        }

        connection = imp->imp_connection;

        lustre_msg_set_handle(request->rq_reqmsg,
                              &imp->imp_remote_handle);
        lustre_msg_set_type(request->rq_reqmsg, PTL_RPC_MSG_REQUEST);
        lustre_msg_set_conn_cnt(request->rq_reqmsg,
                                imp->imp_conn_cnt);
        lustre_msghdr_set_flags(request->rq_reqmsg,
                                imp->imp_msghdr_flags);

        /* If this is the first resend of a request for EINPROGRESS, we need
         * to allocate a new XID (see after_reply()); this differs from a
         * resend after a reply timeout. */
        if (request->rq_nr_resend != 0 &&
            list_empty(&request->rq_unreplied_list)) {
                __u64 min_xid = 0;
                /* resend for EINPROGRESS, allocate new xid to avoid reply
                 * reconstruction */
                spin_lock(&imp->imp_lock);
                ptlrpc_assign_next_xid_nolock(request);
                min_xid = ptlrpc_known_replied_xid(imp);
                spin_unlock(&imp->imp_lock);

                lustre_msg_set_last_xid(request->rq_reqmsg, min_xid);
                DEBUG_REQ(D_RPCTRACE, request,
                          "Allocating new XID for resend on EINPROGRESS");
        }

        if (request->rq_bulk != NULL) {
                ptlrpc_set_bulk_mbits(request);
                lustre_msg_set_mbits(request->rq_reqmsg, request->rq_mbits);
        }

        if (list_empty(&request->rq_unreplied_list) ||
            request->rq_xid <= imp->imp_known_replied_xid) {
                DEBUG_REQ(D_ERROR, request,
                          "xid=%llu, replied=%llu, list_empty=%d",
                          request->rq_xid, imp->imp_known_replied_xid,
                          list_empty(&request->rq_unreplied_list));
                LBUG();
        }

        /* With AT enabled, all requests should have AT_SUPPORT in the
         * FULL import state when OBD_CONNECT_AT is set */
        LASSERT(AT_OFF || imp->imp_state != LUSTRE_IMP_FULL ||
                (imp->imp_msghdr_flags & MSGHDR_AT_SUPPORT) ||
                !(imp->imp_connect_data.ocd_connect_flags &
                  OBD_CONNECT_AT));

        if (request->rq_resend) {
                lustre_msg_add_flags(request->rq_reqmsg, MSG_RESENT);
                if (request->rq_resend_cb != NULL)
                        request->rq_resend_cb(request, &request->rq_async_args);
        }
        if (request->rq_memalloc)
                mpflag = cfs_memory_pressure_get_and_set();

        rc = sptlrpc_cli_wrap_request(request);
        if (rc)
                GOTO(out, rc);

        /* bulk register should be done after wrap_request() */
        if (request->rq_bulk != NULL) {
                rc = ptlrpc_register_bulk(request);
                if (rc != 0)
                        GOTO(cleanup_bulk, rc);
                /*
                 * All the MDs in the request will have the same cpt
                 * encoded in the cookie, so we can just take the first
                 * one.
                 */
                bulk_cookie = request->rq_bulk->bd_mds[0];
        }

        if (!noreply) {
                LASSERT(request->rq_replen != 0);
                if (request->rq_repbuf == NULL) {
                        LASSERT(request->rq_repdata == NULL);
                        LASSERT(request->rq_repmsg == NULL);
                        rc = sptlrpc_cli_alloc_repbuf(request,
                                                      request->rq_replen);
                        if (rc) {
                                /* this prevents us from looping in
                                 * ptlrpc_queue_wait */
                                spin_lock(&request->rq_lock);
                                request->rq_err = 1;
                                spin_unlock(&request->rq_lock);
                                request->rq_status = rc;
                                GOTO(cleanup_bulk, rc);
                        }
                } else {
                        request->rq_repdata = NULL;
                        request->rq_repmsg = NULL;
                }

                reply_me = LNetMEAttach(request->rq_reply_portal,
                                        connection->c_peer, request->rq_xid, 0,
                                        LNET_UNLINK, LNET_INS_AFTER);
                if (IS_ERR(reply_me)) {
                        rc = PTR_ERR(reply_me);
                        CERROR("LNetMEAttach failed: %d\n", rc);
                        LASSERT(rc == -ENOMEM);
                        GOTO(cleanup_bulk, rc = -ENOMEM);
                }
        }

        spin_lock(&request->rq_lock);
        /* We are responsible for unlinking the reply buffer */
        request->rq_reply_unlinked = noreply;
        request->rq_receiving_reply = !noreply;
        /* Clear any flags that may be present from previous sends. */
        request->rq_req_unlinked = 0;
        request->rq_replied = 0;
        request->rq_err = 0;
        request->rq_timedout = 0;
        request->rq_net_err = 0;
        request->rq_resend = 0;
        request->rq_restart = 0;
        request->rq_reply_truncated = 0;
        spin_unlock(&request->rq_lock);

        if (!noreply) {
                reply_md.start     = request->rq_repbuf;
                reply_md.length    = request->rq_repbuf_len;
                /* Allow multiple early replies */
                reply_md.threshold = LNET_MD_THRESH_INF;
                /* Manage remote for early replies */
                reply_md.options   = PTLRPC_MD_OPTIONS | LNET_MD_OP_PUT |
                        LNET_MD_MANAGE_REMOTE |
                        LNET_MD_TRUNCATE; /* allow truncated replies,
                                           * reported as EOVERFLOW */
                reply_md.user_ptr  = &request->rq_reply_cbid;
                reply_md.eq_handle = ptlrpc_eq;

                /* We must see the unlink callback to set rq_reply_unlinked,
                 * so we can't auto-unlink */
                rc = LNetMDAttach(reply_me, reply_md, LNET_RETAIN,
                                  &request->rq_reply_md_h);
                if (rc != 0) {
                        CERROR("LNetMDAttach failed: %d\n", rc);
                        LASSERT(rc == -ENOMEM);
                        spin_lock(&request->rq_lock);
                        /* ...but the MD attach didn't succeed... */
                        request->rq_receiving_reply = 0;
                        spin_unlock(&request->rq_lock);
                        GOTO(cleanup_me, rc = -ENOMEM);
                }

                CDEBUG(D_NET,
                       "Setup reply buffer: %u bytes, xid %llu, portal %u\n",
                       request->rq_repbuf_len, request->rq_xid,
                       request->rq_reply_portal);
        }

        /* add references on request for request_out_callback */
        ptlrpc_request_addref(request);
        if (obd != NULL && obd->obd_svc_stats != NULL)
                lprocfs_counter_add(obd->obd_svc_stats, PTLRPC_REQACTIVE_CNTR,
                                    atomic_read(&imp->imp_inflight));

        OBD_FAIL_TIMEOUT(OBD_FAIL_PTLRPC_DELAY_SEND, request->rq_timeout + 5);

        request->rq_sent_ns = ktime_get_real();
        request->rq_sent = ktime_get_real_seconds();
        /* We give the server rq_timeout secs to process the req, and
         * add the network latency for our local timeout. */
        request->rq_deadline = request->rq_sent + request->rq_timeout +
                ptlrpc_at_get_net_latency(request);

        ptlrpc_pinger_sending_on_import(imp);

        DEBUG_REQ(D_INFO, request, "send flags=%x",
                  lustre_msg_get_flags(request->rq_reqmsg));
        rc = ptl_send_buf(&request->rq_req_md_h,
                          request->rq_reqbuf, request->rq_reqdata_len,
                          LNET_NOACK_REQ, &request->rq_req_cbid,
                          LNET_NID_ANY, connection->c_peer,
                          request->rq_request_portal,
                          request->rq_xid, 0, &bulk_cookie);
        if (likely(rc == 0))
                GOTO(out, rc);

        request->rq_req_unlinked = 1;
        ptlrpc_req_finished(request);
        if (noreply)
                GOTO(out, rc);

 cleanup_me:
        /* MEUnlink is safe; the PUT didn't even get off the ground, and
         * nobody apart from the PUT's target has the right nid+XID to
         * access the reply buffer.
         */
        LNetMEUnlink(reply_me);
        /* UNLINKED callback called synchronously */
        LASSERT(!request->rq_receiving_reply);

 cleanup_bulk:
        /* We do a sync unlink here as no real transfer has happened, so the
         * chance of a long unlink on a sluggish net is smaller. */
        ptlrpc_unregister_bulk(request, 0);
        if (request->rq_bulk != NULL)
                request->rq_bulk->bd_registered = 0;
 out:
        if (rc == -ENOMEM) {
                /* set rq_sent so that this request is treated
                 * as a delayed send in the upper layers */
                request->rq_sent = ktime_get_real_seconds();
        }

        if (request->rq_memalloc)
                cfs_memory_pressure_restore(mpflag);

        return rc;
}
EXPORT_SYMBOL(ptl_send_rpc);

/**
 * Register request buffer descriptor for request receiving.
 */
int ptlrpc_register_rqbd(struct ptlrpc_request_buffer_desc *rqbd)
{
        struct ptlrpc_service *service = rqbd->rqbd_svcpt->scp_service;
        static struct lnet_process_id match_id = {
                .nid = LNET_NID_ANY,
                .pid = LNET_PID_ANY
        };
        int rc;
        struct lnet_md md;
        struct lnet_me *me;

        CDEBUG(D_NET, "LNetMEAttach: portal %d\n",
               service->srv_req_portal);

        if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_RQBD))
                return -ENOMEM;

        /* NB: CPT affinity service should use new LNet flag LNET_INS_LOCAL,
         * which means buffer can only be attached on local CPT, and LND
         * threads can find it by grabbing a local lock */
        me = LNetMEAttach(service->srv_req_portal,
                          match_id, 0, ~0, LNET_UNLINK,
                          rqbd->rqbd_svcpt->scp_cpt >= 0 ?
                          LNET_INS_LOCAL : LNET_INS_AFTER);
        if (IS_ERR(me)) {
                CERROR("LNetMEAttach failed: %ld\n", PTR_ERR(me));
                return -ENOMEM;
        }

        LASSERT(rqbd->rqbd_refcount == 0);
        rqbd->rqbd_refcount = 1;

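        /* NB with LNET_MD_MAX_SIZE below, LNet unlinks the buffer once fewer
         * than srv_max_req_size bytes remain free, so an incoming request of
         * up to the maximum size always fits */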
        md.start     = rqbd->rqbd_buffer;
        md.length    = service->srv_buf_size;
        md.max_size  = service->srv_max_req_size;
        md.threshold = LNET_MD_THRESH_INF;
        md.options   = PTLRPC_MD_OPTIONS | LNET_MD_OP_PUT | LNET_MD_MAX_SIZE;
        md.user_ptr  = &rqbd->rqbd_cbid;
        md.eq_handle = ptlrpc_eq;

        rc = LNetMDAttach(me, md, LNET_UNLINK, &rqbd->rqbd_md_h);
        if (rc == 0)
                return 0;

        CERROR("LNetMDAttach failed: %d\n", rc);
        LASSERT(rc == -ENOMEM);
        LNetMEUnlink(me);
        rqbd->rqbd_refcount = 0;

        return -ENOMEM;
}