/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2012, 2017, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 */

#define DEBUG_SUBSYSTEM S_RPC
#include <libcfs/linux/linux-mem.h>
#include <obd_support.h>
#include <lustre_net.h>
#include <lustre_lib.h>
#include <obd.h>
#include <obd_class.h>
#include "ptlrpc_internal.h"
#include <lnet/lib-lnet.h> /* for CFS_FAIL_PTLRPC_OST_BULK_CB2 */

/**
 * Helper function. Sends \a len bytes from \a base at offset \a offset
 * to portal \a portal of peer \a peer_id.
 * Returns 0 on success or error code.
 */
static int ptl_send_buf(struct lnet_handle_md *mdh, void *base, int len,
                        enum lnet_ack_req ack, struct ptlrpc_cb_id *cbid,
                        lnet_nid_t self4, struct lnet_processid *peer_id,
                        int portal, __u64 xid, unsigned int offset,
                        struct lnet_handle_md *bulk_cookie)
{
        int rc;
        struct lnet_md md;
        struct lnet_nid self;
        ENTRY;

        lnet_nid4_to_nid(self4, &self);

        LASSERT(portal != 0);
        CDEBUG(D_INFO, "peer_id %s\n", libcfs_idstr(peer_id));
        md.start     = base;
        md.length    = len;
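        /* The MD threshold is the number of LNet events expected on this
         * MD: the SEND event, plus the ACK event when an ack is requested. */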
        md.threshold = (ack == LNET_ACK_REQ) ? 2 : 1;
        md.options   = PTLRPC_MD_OPTIONS;
        md.user_ptr  = cbid;
        md.handler   = ptlrpc_handler;
        LNetInvalidateMDHandle(&md.bulk_handle);

        if (bulk_cookie) {
                md.bulk_handle = *bulk_cookie;
                md.options |= LNET_MD_BULK_HANDLE;
        }

        if (unlikely(ack == LNET_ACK_REQ &&
                     OBD_FAIL_CHECK_ORSET(OBD_FAIL_PTLRPC_ACK, OBD_FAIL_ONCE))) {
                /* don't ask for the ack to simulate failing client */
                ack = LNET_NOACK_REQ;
        }

        rc = LNetMDBind(&md, LNET_UNLINK, mdh);
        if (unlikely(rc != 0)) {
                CERROR("LNetMDBind failed: %d\n", rc);
                LASSERT(rc == -ENOMEM);
                RETURN(-ENOMEM);
        }

        CDEBUG(D_NET, "Sending %d bytes to portal %d, xid %lld, offset %u\n",
               len, portal, xid, offset);

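        /* ptlrpc_pending tracks LNet operations still in flight; the
         * reference taken here is dropped by the event handler, letting
         * module shutdown wait for outstanding network activity. */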
        percpu_ref_get(&ptlrpc_pending);

        rc = LNetPut(&self, *mdh, ack,
                     peer_id, portal, xid, offset, 0);
        if (unlikely(rc != 0)) {
                int rc2;
                /* We're going to get an UNLINK event when I unlink below,
                 * which will complete just like any other failed send, so
                 * I fall through and return success here! */
                CERROR("LNetPut(%s, %d, %lld) failed: %d\n",
                       libcfs_idstr(peer_id), portal, xid, rc);
                rc2 = LNetMDUnlink(*mdh);
                LASSERTF(rc2 == 0, "rc2 = %d\n", rc2);
        }

        RETURN(0);
}

#define mdunlink_iterate_helper(mds, count) \
                __mdunlink_iterate_helper(mds, count, false)
static void __mdunlink_iterate_helper(struct lnet_handle_md *bd_mds,
                                      int count, bool discard)
{
        int i;

        for (i = 0; i < count; i++)
                __LNetMDUnlink(bd_mds[i], discard);
}

#ifdef HAVE_SERVER_SUPPORT
/**
 * Prepare bulk descriptor for specified incoming request \a req that
 * can fit \a nfrags * pages. \a type is bulk type. \a portal is where
 * the bulk is to be sent. Used on the server side after the request
 * has already been received.
 * Returns a pointer to the newly allocated initialized bulk descriptor
 * or NULL on error.
 */
struct ptlrpc_bulk_desc *ptlrpc_prep_bulk_exp(struct ptlrpc_request *req,
                                              unsigned nfrags, unsigned max_brw,
                                              unsigned int type,
                                              unsigned portal,
                                              const struct ptlrpc_bulk_frag_ops
                                                *ops)
{
        struct obd_export *exp = req->rq_export;
        struct ptlrpc_bulk_desc *desc;

        ENTRY;
        LASSERT(ptlrpc_is_bulk_op_active(type));

        desc = ptlrpc_new_bulk(nfrags, max_brw, type, portal, ops);
        if (desc == NULL)
                RETURN(NULL);

        desc->bd_export = class_export_get(exp);
        desc->bd_req = req;

        desc->bd_cbid.cbid_fn  = server_bulk_callback;
        desc->bd_cbid.cbid_arg = desc;

        /* NB we don't assign rq_bulk here; server-side requests are
         * re-used, and the handler frees the bulk desc explicitly. */

        return desc;
}
EXPORT_SYMBOL(ptlrpc_prep_bulk_exp);
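
/*
 * Illustrative usage sketch (hypothetical, not from this file): a server
 * read handler would prepare a PUT-source descriptor, attach its pages
 * and start the transfer, roughly:
 *
 *	desc = ptlrpc_prep_bulk_exp(req, npages, 1, PTLRPC_BULK_PUT_SOURCE,
 *				    OST_BULK_PORTAL,
 *				    &ptlrpc_bulk_kiov_nopin_ops);
 *	if (desc == NULL)
 *		RETURN(-ENOMEM);
 *	for each page:
 *		desc->bd_frag_ops->add_kiov_frag(desc, page, off, len);
 *	rc = ptlrpc_start_bulk_transfer(desc);
 */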

/**
 * Starts bulk transfer for descriptor \a desc on the server.
 * Returns 0 on success or error code.
 */
int ptlrpc_start_bulk_transfer(struct ptlrpc_bulk_desc *desc)
{
        struct obd_export       *exp = desc->bd_export;
        struct lnet_nid          self_nid;
        struct lnet_processid    peer_id;
        int                      rc = 0;
        __u64                    mbits;
        int                      posted_md;
        int                      total_md;
        struct lnet_md           md;
        ENTRY;

        if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_BULK_PUT_NET))
                RETURN(0);

        /* NB no locking required until desc is on the network */
        LASSERT(ptlrpc_is_bulk_op_active(desc->bd_type));

        LASSERT(desc->bd_cbid.cbid_fn == server_bulk_callback);
        LASSERT(desc->bd_cbid.cbid_arg == desc);

        /*
         * Multi-Rail: get the preferred self and peer NIDs from the
         * request, so they are based on the route taken by the
         * message.
         */
        self_nid = desc->bd_req->rq_self;
        peer_id = desc->bd_req->rq_source;

        /* NB total length may be 0 for a read past EOF, so we send 0
         * length bulks, since the client expects bulk events.
         *
         * The client may not need all of the bulk mbits for the RPC. The RPC
         * carries the mbits of the highest bulk needed, and the server masks
         * off the high bits to get the bulk count for this RPC. LU-1431 */
        mbits = desc->bd_req->rq_mbits & ~((__u64)desc->bd_md_max_brw - 1);
        total_md = desc->bd_req->rq_mbits - mbits + 1;
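        /* Worked example (illustrative): with bd_md_max_brw == 4 and
         * rq_mbits == 0x1006, mbits starts at 0x1004 (low bits masked off)
         * and total_md == 3, i.e. this RPC posts three bulk MDs with match
         * bits 0x1004, 0x1005 and 0x1006. */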
        desc->bd_refs = total_md;
        desc->bd_failure = 0;

        md.user_ptr = &desc->bd_cbid;
        md.handler = ptlrpc_handler;
        md.threshold = 2; /* SENT and ACK/REPLY */

        for (posted_md = 0; posted_md < total_md; mbits++) {
                md.options = PTLRPC_MD_OPTIONS;

                /* NB it's assumed that source and sink buffer frags are
                 * page-aligned. Otherwise we'd have to send client bulk
                 * sizes over and split server buffer accordingly */
                ptlrpc_fill_bulk_md(&md, desc, posted_md);
                rc = LNetMDBind(&md, LNET_UNLINK, &desc->bd_mds[posted_md]);
                if (rc != 0) {
                        CERROR("%s: LNetMDBind failed for MD %u: rc = %d\n",
                               exp->exp_obd->obd_name, posted_md, rc);
                        LASSERT(rc == -ENOMEM);
                        if (posted_md == 0) {
                                desc->bd_md_count = 0;
                                RETURN(-ENOMEM);
                        }
                        break;
                }
                percpu_ref_get(&ptlrpc_pending);

                /* sanity.sh 224c: let's skip the last md */
                if (posted_md == desc->bd_md_max_brw - 1)
                        OBD_FAIL_CHECK_RESET(OBD_FAIL_PTLRPC_CLIENT_BULK_CB3,
                                             CFS_FAIL_PTLRPC_OST_BULK_CB2);

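                /* Direction (for clarity): when the server is the PUT
                 * source (client read), it PUTs the data into the client's
                 * registered sink buffer; when it is the GET sink (client
                 * write), it GETs the data from the client's source buffer. */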
                /* Network is about to get at the memory */
                if (ptlrpc_is_bulk_put_source(desc->bd_type))
                        rc = LNetPut(&self_nid, desc->bd_mds[posted_md],
                                     LNET_ACK_REQ, &peer_id,
                                     desc->bd_portal, mbits, 0, 0);
                else
                        rc = LNetGet(&self_nid, desc->bd_mds[posted_md],
                                     &peer_id, desc->bd_portal,
                                     mbits, 0, false);

                posted_md++;
                if (rc != 0) {
                        CERROR("%s: failed bulk transfer with %s:%u x%llu: "
                               "rc = %d\n", exp->exp_obd->obd_name,
                               libcfs_idstr(&peer_id), desc->bd_portal,
                               mbits, rc);
                        break;
                }
        }

        if (rc != 0) {
                /* Can't send, so we unlink the MD bound above.  The UNLINK
                 * event this creates will signal completion with failure,
                 * so we return SUCCESS here! */
                spin_lock(&desc->bd_lock);
                desc->bd_refs -= total_md - posted_md;
                spin_unlock(&desc->bd_lock);
                LASSERT(desc->bd_refs >= 0);

                mdunlink_iterate_helper(desc->bd_mds, posted_md);
                RETURN(0);
        }

        CDEBUG(D_NET, "Transferring %u pages %u bytes via portal %d "
               "id %s mbits %#llx-%#llx\n", desc->bd_iov_count,
               desc->bd_nob, desc->bd_portal, libcfs_idstr(&peer_id),
               mbits - posted_md, mbits - 1);

        RETURN(0);
}

/**
 * Server side bulk abort. Idempotent. Not thread-safe (i.e. only
 * serialises with completion callback)
 */
void ptlrpc_abort_bulk(struct ptlrpc_bulk_desc *desc)
{
        LASSERT(!in_interrupt());               /* might sleep */

        if (!ptlrpc_server_bulk_active(desc))   /* completed or */
                return;                         /* never started */

        /* We used to poison the pages with 0xab here because we did not want
         * to send any meaningful data over the wire for evicted clients (bug
         * 9297). However, this is no longer safe now that we use the page
         * cache on the OSS (bug 20560) */

        /* The unlink ensures the callback happens ASAP and is the last
         * one.  If it fails, it must be because completion just happened,
         * but we must still wait_event_idle_timeout() in this case, to give
         * us a chance to run server_bulk_callback()
         */
        __mdunlink_iterate_helper(desc->bd_mds, desc->bd_md_max_brw, true);

        for (;;) {
                /* Network access will complete in finite time but the HUGE
                 * timeout lets us CWARN for visibility of sluggish NALs */
                int seconds = PTLRPC_REQ_LONG_UNLINK;

                while (seconds > 0 &&
                       wait_event_idle_timeout(desc->bd_waitq,
                                               !ptlrpc_server_bulk_active(desc),
                                               cfs_time_seconds(1)) == 0)
                        seconds -= 1;
                if (seconds > 0)
                        return;

                CWARN("Unexpectedly long timeout: desc %p\n", desc);
        }
}
#endif /* HAVE_SERVER_SUPPORT */

/**
 * Register bulk at the sender for later transfer.
 * Returns 0 on success or error code.
 */
int ptlrpc_register_bulk(struct ptlrpc_request *req)
{
        struct ptlrpc_bulk_desc *desc = req->rq_bulk;
        struct lnet_processid peer;
        int rc = 0;
        int posted_md;
        int total_md;
        __u64 mbits;
        struct lnet_me *me;
        struct lnet_md md;
        ENTRY;

        if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_BULK_GET_NET))
                RETURN(0);

        /* NB no locking required until desc is on the network */
        LASSERT(desc->bd_nob > 0);
        LASSERT(desc->bd_md_max_brw <= PTLRPC_BULK_OPS_COUNT);
        LASSERT(desc->bd_iov_count <= PTLRPC_MAX_BRW_PAGES);
        LASSERT(desc->bd_req != NULL);
        LASSERT(ptlrpc_is_bulk_op_passive(desc->bd_type));

        /* clean up the state of the bulk because it will be reused */
        if (req->rq_resend || req->rq_send_state == LUSTRE_IMP_REPLAY)
                desc->bd_nob_transferred = 0;
        else if (desc->bd_nob_transferred != 0)
                /* If the network failed after an RPC was sent, this condition
                 * could happen.  Rather than assert (was here before), return
                 * an EIO error. */
                RETURN(-EIO);

        desc->bd_failure = 0;

        peer = desc->bd_import->imp_connection->c_peer;

        LASSERT(desc->bd_cbid.cbid_fn == client_bulk_callback);
        LASSERT(desc->bd_cbid.cbid_arg == desc);

        total_md = desc->bd_md_count;
        /* rq_mbits is matchbits of the final bulk */
        mbits = req->rq_mbits - desc->bd_md_count + 1;
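        /* e.g. (illustrative): with bd_md_count == 2 and rq_mbits == 0x2001,
         * MEs are attached at match bits 0x2000 and 0x2001, one per bulk MD. */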

        LASSERTF(mbits == (req->rq_mbits & PTLRPC_BULK_OPS_MASK),
                 "first mbits = x%llu, last mbits = x%llu\n",
                 mbits, req->rq_mbits);
        LASSERTF(!(desc->bd_registered &&
                   req->rq_send_state != LUSTRE_IMP_REPLAY) ||
                 mbits != desc->bd_last_mbits,
                 "registered: %d  rq_mbits: %llu bd_last_mbits: %llu\n",
                 desc->bd_registered, mbits, desc->bd_last_mbits);

        desc->bd_registered = 1;
        desc->bd_last_mbits = mbits;
        desc->bd_refs = total_md;
        md.user_ptr = &desc->bd_cbid;
        md.handler = ptlrpc_handler;
        md.threshold = 1;                       /* PUT or GET */
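        /* Unlike the server-side MDs (threshold 2: SENT plus ACK/REPLY),
         * each client-side bulk MD sees exactly one event: the peer's PUT
         * or GET landing on it. */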

        for (posted_md = 0; posted_md < desc->bd_md_count;
             posted_md++, mbits++) {
                md.options = PTLRPC_MD_OPTIONS |
                             (ptlrpc_is_bulk_op_get(desc->bd_type) ?
                              LNET_MD_OP_GET : LNET_MD_OP_PUT);
                ptlrpc_fill_bulk_md(&md, desc, posted_md);

                if (posted_md > 0 && posted_md + 1 == desc->bd_md_count &&
                    OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_BULK_ATTACH)) {
                        rc = -ENOMEM;
                } else {
                        me = LNetMEAttach(desc->bd_portal, &peer, mbits, 0,
                                          LNET_UNLINK, LNET_INS_AFTER);
                        rc = PTR_ERR_OR_ZERO(me);
                }
                if (rc != 0) {
                        CERROR("%s: LNetMEAttach failed x%llu/%d: rc = %d\n",
                               desc->bd_import->imp_obd->obd_name, mbits,
                               posted_md, rc);
                        break;
                }
                percpu_ref_get(&ptlrpc_pending);

                /* About to let the network at it... */
                rc = LNetMDAttach(me, &md, LNET_UNLINK,
                                  &desc->bd_mds[posted_md]);
                if (rc != 0) {
                        CERROR("%s: LNetMDAttach failed x%llu/%d: rc = %d\n",
                               desc->bd_import->imp_obd->obd_name, mbits,
                               posted_md, rc);
                        break;
                }
        }

        if (rc != 0) {
                LASSERT(rc == -ENOMEM);
                spin_lock(&desc->bd_lock);
                desc->bd_refs -= total_md - posted_md;
                spin_unlock(&desc->bd_lock);
                LASSERT(desc->bd_refs >= 0);
                mdunlink_iterate_helper(desc->bd_mds, desc->bd_md_max_brw);
                req->rq_status = -ENOMEM;
                desc->bd_registered = 0;
                RETURN(-ENOMEM);
        }

        spin_lock(&desc->bd_lock);
        /* Holler if peer manages to touch buffers before he knows the mbits */
        if (desc->bd_refs != total_md)
                CWARN("%s: Peer %s touched %d buffers while I registered\n",
                      desc->bd_import->imp_obd->obd_name, libcfs_idstr(&peer),
                      total_md - desc->bd_refs);
        spin_unlock(&desc->bd_lock);

        CDEBUG(D_NET,
               "Setup %u bulk %s buffers: %u pages %u bytes, mbits x%#llx-%#llx, portal %u\n",
               desc->bd_refs,
               ptlrpc_is_bulk_op_get(desc->bd_type) ? "get-source" : "put-sink",
               desc->bd_iov_count, desc->bd_nob,
               desc->bd_last_mbits, req->rq_mbits, desc->bd_portal);

        RETURN(0);
}

/**
 * Disconnect a bulk desc from the network. Idempotent. Not
 * thread-safe (i.e. only interlocks with completion callback).
 * Returns 1 on success or 0 if network unregistration failed for whatever
 * reason.
 */
int ptlrpc_unregister_bulk(struct ptlrpc_request *req, int async)
{
        struct ptlrpc_bulk_desc *desc = req->rq_bulk;
        ENTRY;

        LASSERT(!in_interrupt());               /* might sleep */

        if (desc)
                desc->bd_registered = 0;

        /* Let's setup deadline for reply unlink. */
        if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_BULK_UNLINK) &&
            async && req->rq_bulk_deadline == 0 && cfs_fail_val == 0)
                req->rq_bulk_deadline = ktime_get_real_seconds() +
                                        PTLRPC_REQ_LONG_UNLINK;

        if (ptlrpc_client_bulk_active(req) == 0)        /* completed or */
                RETURN(1);                              /* never registered */

        LASSERT(desc->bd_req == req);  /* bd_req NULL until registered */

        /* the unlink ensures the callback happens ASAP and is the last
         * one.  If it fails, it must be because completion just happened,
         * but we must still wait_event_idle_timeout() in this case to give
         * us a chance to run client_bulk_callback()
         */
        mdunlink_iterate_helper(desc->bd_mds, desc->bd_md_max_brw);

        if (ptlrpc_client_bulk_active(req) == 0)        /* completed or */
                RETURN(1);                              /* never registered */

        /* Move to "Unregistering" phase as bulk was not unlinked yet. */
        ptlrpc_rqphase_move(req, RQ_PHASE_UNREG_BULK);

        /* Do not wait for unlink to finish. */
        if (async)
                RETURN(0);

        for (;;) {
                /* The wq argument is ignored by user-space wait_event macros */
                wait_queue_head_t *wq = (req->rq_set != NULL) ?
                                        &req->rq_set->set_waitq :
                                        &req->rq_reply_waitq;
                /*
                 * Network access will complete in finite time but the HUGE
                 * timeout lets us CWARN for visibility of sluggish NALs.
                 */
                int seconds = PTLRPC_REQ_LONG_UNLINK;

                while (seconds > 0 &&
                       wait_event_idle_timeout(*wq,
                                               !ptlrpc_client_bulk_active(req),
                                               cfs_time_seconds(1)) == 0)
                        seconds -= 1;
                if (seconds > 0) {
                        ptlrpc_rqphase_move(req, req->rq_next_phase);
                        RETURN(1);
                }

                DEBUG_REQ(D_WARNING, req, "Unexpectedly long timeout: desc %p",
                          desc);
        }
        RETURN(0);
}

static void ptlrpc_at_set_reply(struct ptlrpc_request *req, int flags)
{
        struct ptlrpc_service_part      *svcpt = req->rq_rqbd->rqbd_svcpt;
        struct ptlrpc_service           *svc = svcpt->scp_service;
        timeout_t service_timeout;

        service_timeout = clamp_t(timeout_t, ktime_get_real_seconds() -
                                             req->rq_arrival_time.tv_sec, 1,
                                  (AT_OFF ? obd_timeout * 3 / 2 : at_max));
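        /* Illustrative: a request that arrived 7s ago yields a measured
         * service_timeout of 7, clamped to at least 1s and at most at_max
         * (or 1.5 * obd_timeout when adaptive timeouts are disabled). */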
        if (!(flags & PTLRPC_REPLY_EARLY) &&
            (req->rq_type != PTL_RPC_MSG_ERR) &&
            (req->rq_reqmsg != NULL) &&
            !(lustre_msg_get_flags(req->rq_reqmsg) &
              (MSG_RESENT | MSG_REPLAY |
               MSG_REQ_REPLAY_DONE | MSG_LOCK_REPLAY_DONE))) {
                /* early replies, errors and recovery requests don't count
                 * toward our service time estimate
                 */
                timeout_t oldse = at_measured(&svcpt->scp_at_estimate,
                                              service_timeout);

                if (oldse != 0) {
                        DEBUG_REQ(D_ADAPTTO, req,
                                  "svc %s changed estimate from %d to %d",
                                  svc->srv_name, oldse,
                                  at_get(&svcpt->scp_at_estimate));
                }
        }
        /* Report actual service time for client latency calc */
        lustre_msg_set_service_timeout(req->rq_repmsg, service_timeout);
        /* Report service time estimate for future client reqs, but report 0
         * (to be ignored by client) if it's an error reply during recovery.
         * b=15815
         */
        if (req->rq_type == PTL_RPC_MSG_ERR &&
            (req->rq_export == NULL ||
             req->rq_export->exp_obd->obd_recovering)) {
                lustre_msg_set_timeout(req->rq_repmsg, 0);
        } else {
                timeout_t timeout;

                if (req->rq_export && req->rq_reqmsg != NULL &&
                    (flags & PTLRPC_REPLY_EARLY) &&
                    lustre_msg_get_flags(req->rq_reqmsg) &
                    (MSG_REPLAY | MSG_REQ_REPLAY_DONE | MSG_LOCK_REPLAY_DONE)) {
                        struct obd_device *exp_obd = req->rq_export->exp_obd;

                        timeout = ktime_get_real_seconds() -
                                  req->rq_arrival_time.tv_sec +
                                  min_t(timeout_t, at_extra,
                                        exp_obd->obd_recovery_timeout / 4);
                } else {
                        timeout = at_get(&svcpt->scp_at_estimate);
                }
                lustre_msg_set_timeout(req->rq_repmsg, timeout);
        }

        if (req->rq_reqmsg &&
            !(lustre_msghdr_get_flags(req->rq_reqmsg) & MSGHDR_AT_SUPPORT)) {
                CDEBUG(D_ADAPTTO, "No early reply support: flags=%#x "
                       "req_flags=%#x magic=%x/%x len=%d\n",
                       flags, lustre_msg_get_flags(req->rq_reqmsg),
                       lustre_msg_get_magic(req->rq_reqmsg),
                       lustre_msg_get_magic(req->rq_repmsg), req->rq_replen);
        }
}

/**
 * Send request reply from request \a req reply buffer.
 * \a flags defines reply types.
 * Returns 0 on success or error code.
 */
int ptlrpc_send_reply(struct ptlrpc_request *req, int flags)
{
        struct ptlrpc_reply_state *rs = req->rq_reply_state;
        struct ptlrpc_connection  *conn;
        int                        rc;

        /* We must already have a reply buffer (only ptlrpc_error() may be
         * called without one). The reply generated by the sptlrpc layer (e.g.
         * error notify, etc.) might have NULL req->rq_reqmsg; otherwise we
         * must have a request buffer which is either the actual (swabbed)
         * incoming request, or a saved copy if this is a req saved in
         * target_queue_final_reply().
         */
        LASSERT(req->rq_no_reply == 0);
        LASSERT(req->rq_reqbuf != NULL);
        LASSERT(rs != NULL);
        LASSERT((flags & PTLRPC_REPLY_MAYBE_DIFFICULT) || !rs->rs_difficult);
        LASSERT(req->rq_repmsg != NULL);
        LASSERT(req->rq_repmsg == rs->rs_msg);
        LASSERT(rs->rs_cb_id.cbid_fn == reply_out_callback);
        LASSERT(rs->rs_cb_id.cbid_arg == rs);

        /* There may be no rq_export during failover */

        if (unlikely(req->rq_export && req->rq_export->exp_obd &&
                     req->rq_export->exp_obd->obd_fail)) {
                /* Failed obd's only send ENODEV */
                req->rq_type = PTL_RPC_MSG_ERR;
                req->rq_status = -ENODEV;
                CDEBUG(D_HA, "sending ENODEV from failed obd %d\n",
                       req->rq_export->exp_obd->obd_minor);
        }

        if (req->rq_type != PTL_RPC_MSG_ERR)
                req->rq_type = PTL_RPC_MSG_REPLY;

        lustre_msg_set_type(req->rq_repmsg, req->rq_type);
        lustre_msg_set_status(req->rq_repmsg,
                              ptlrpc_status_hton(req->rq_status));
        lustre_msg_set_opc(req->rq_repmsg,
                req->rq_reqmsg ? lustre_msg_get_opc(req->rq_reqmsg) : 0);

        target_pack_pool_reply(req);

        ptlrpc_at_set_reply(req, flags);

        if (req->rq_export == NULL || req->rq_export->exp_connection == NULL)
                conn = ptlrpc_connection_get(&req->rq_peer, &req->rq_self,
                                             NULL);
        else
                conn = ptlrpc_connection_addref(req->rq_export->exp_connection);

        if (unlikely(conn == NULL)) {
                CERROR("not replying on NULL connection\n"); /* bug 9635 */
                return -ENOTCONN;
        }
        ptlrpc_rs_addref(rs);                   /* +1 ref for the network */

        rc = sptlrpc_svc_wrap_reply(req);
        if (unlikely(rc))
                goto out;

        req->rq_sent = ktime_get_real_seconds();

        rc = ptl_send_buf(&rs->rs_md_h, rs->rs_repbuf, rs->rs_repdata_len,
                          (rs->rs_difficult && !rs->rs_no_ack) ?
                          LNET_ACK_REQ : LNET_NOACK_REQ,
                          &rs->rs_cb_id, lnet_nid_to_nid4(&req->rq_self),
                          &req->rq_source,
                          ptlrpc_req2svc(req)->srv_rep_portal,
                          req->rq_rep_mbits ? req->rq_rep_mbits : req->rq_xid,
                          req->rq_reply_off, NULL);
out:
        if (unlikely(rc != 0))
                ptlrpc_req_drop_rs(req);
        ptlrpc_connection_put(conn);
        return rc;
}

int ptlrpc_reply(struct ptlrpc_request *req)
{
        if (req->rq_no_reply)
                return 0;
        else
                return ptlrpc_send_reply(req, 0);
}

/**
 * For request \a req send an error reply back. Create empty
 * reply buffers if necessary.
 */
int ptlrpc_send_error(struct ptlrpc_request *req, int may_be_difficult)
{
        int rc;
        ENTRY;

        if (req->rq_no_reply)
                RETURN(0);

        if (!req->rq_repmsg) {
                rc = lustre_pack_reply(req, 1, NULL, NULL);
                if (rc)
                        RETURN(rc);
        }

        if (req->rq_status != -ENOSPC && req->rq_status != -EACCES &&
            req->rq_status != -EPERM && req->rq_status != -ENOENT &&
            req->rq_status != -EINPROGRESS && req->rq_status != -EDQUOT &&
            req->rq_status != -EROFS)
                req->rq_type = PTL_RPC_MSG_ERR;

        rc = ptlrpc_send_reply(req, may_be_difficult);
        RETURN(rc);
}

int ptlrpc_error(struct ptlrpc_request *req)
{
        return ptlrpc_send_error(req, 0);
}

/**
 * Send request \a request.
 * If \a noreply is set, don't expect any reply back and don't set up
 * reply buffers.
 * Returns 0 on success or error code.
 */
int ptl_send_rpc(struct ptlrpc_request *request, int noreply)
{
        int rc;
        __u32 opc;
        int mpflag = 0;
        bool rep_mbits = false;
        struct lnet_handle_md bulk_cookie;
        struct lnet_processid peer;
        struct ptlrpc_connection *connection;
        struct lnet_me *reply_me = NULL;
        struct lnet_md reply_md;
        struct obd_import *imp = request->rq_import;
        struct obd_device *obd = imp->imp_obd;
        ENTRY;

        LNetInvalidateMDHandle(&bulk_cookie);

        if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_DROP_RPC))
                RETURN(0);

        if (unlikely(OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_DELAY_RECOV) &&
                     lustre_msg_get_opc(request->rq_reqmsg) == MDS_CONNECT &&
                     strcmp(obd->obd_type->typ_name, LUSTRE_OSP_NAME) == 0)) {
                RETURN(0);
        }

        LASSERT(request->rq_type == PTL_RPC_MSG_REQUEST);
        LASSERT(request->rq_wait_ctx == 0);

        /* If this is a re-transmit, we're required to have disengaged
         * cleanly from the previous attempt */
        LASSERT(!request->rq_receiving_reply);
        LASSERT(!((lustre_msg_get_flags(request->rq_reqmsg) & MSG_REPLAY) &&
                  (imp->imp_state == LUSTRE_IMP_FULL)));

        if (unlikely(obd != NULL && obd->obd_fail)) {
                CDEBUG(D_HA, "muting rpc for failed imp obd %s\n",
                       obd->obd_name);
                /* this prevents us from waiting in ptlrpc_queue_wait */
                spin_lock(&request->rq_lock);
                request->rq_err = 1;
                spin_unlock(&request->rq_lock);
                request->rq_status = -ENODEV;
                RETURN(-ENODEV);
        }

        connection = imp->imp_connection;

        lustre_msg_set_handle(request->rq_reqmsg,
                              &imp->imp_remote_handle);
        lustre_msg_set_type(request->rq_reqmsg, PTL_RPC_MSG_REQUEST);
        lustre_msg_set_conn_cnt(request->rq_reqmsg,
                                imp->imp_conn_cnt);
        lustre_msghdr_set_flags(request->rq_reqmsg,
                                imp->imp_msghdr_flags);

        /* If this is the first resend of the request for EINPROGRESS, we
         * need to allocate a new XID (see after_reply()); this differs from
         * a resend due to reply timeout. */
        if (request->rq_nr_resend != 0 &&
            list_empty(&request->rq_unreplied_list)) {
                __u64 min_xid = 0;
                /* resend for EINPROGRESS, allocate new xid to avoid reply
                 * reconstruction */
                spin_lock(&imp->imp_lock);
                ptlrpc_assign_next_xid_nolock(request);
                min_xid = ptlrpc_known_replied_xid(imp);
                spin_unlock(&imp->imp_lock);

                lustre_msg_set_last_xid(request->rq_reqmsg, min_xid);
                DEBUG_REQ(D_RPCTRACE, request,
                          "Allocating new XID for resend on EINPROGRESS");
        }

        opc = lustre_msg_get_opc(request->rq_reqmsg);
        if (opc != OST_CONNECT && opc != MDS_CONNECT &&
            opc != MGS_CONNECT && OCD_HAS_FLAG(&imp->imp_connect_data, FLAGS2))
                rep_mbits = imp->imp_connect_data.ocd_connect_flags2 &
                        OBD_CONNECT2_REP_MBITS;
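        /* With OBD_CONNECT2_REP_MBITS the reply is matched by dedicated
         * match bits (rq_mbits) instead of the request XID, so the reply
         * buffer below is attached under rq_mbits as well. */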

        if ((request->rq_bulk != NULL) || rep_mbits) {
                ptlrpc_set_mbits(request);
                lustre_msg_set_mbits(request->rq_reqmsg, request->rq_mbits);
        }

        if (list_empty(&request->rq_unreplied_list) ||
            request->rq_xid <= imp->imp_known_replied_xid) {
                DEBUG_REQ(D_ERROR, request,
                          "xid=%llu, replied=%llu, list_empty=%d",
                          request->rq_xid, imp->imp_known_replied_xid,
                          list_empty(&request->rq_unreplied_list));
                LBUG();
        }

        /* With AT enabled, all requests should have AT_SUPPORT in the
         * FULL import state when OBD_CONNECT_AT is set */
        LASSERT(AT_OFF || imp->imp_state != LUSTRE_IMP_FULL ||
                (imp->imp_msghdr_flags & MSGHDR_AT_SUPPORT) ||
                !(imp->imp_connect_data.ocd_connect_flags &
                  OBD_CONNECT_AT));

        if (request->rq_resend) {
                lustre_msg_add_flags(request->rq_reqmsg, MSG_RESENT);
                if (request->rq_resend_cb != NULL)
                        request->rq_resend_cb(request, &request->rq_async_args);
        }
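        /* memalloc_noreclaim_save() sets PF_MEMALLOC so allocations made
         * while sending this RPC do not recurse into direct reclaim; an
         * rq_memalloc request may itself be part of freeing memory. */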
        if (request->rq_memalloc)
                mpflag = memalloc_noreclaim_save();

        rc = sptlrpc_cli_wrap_request(request);
        if (rc)
                GOTO(out, rc);

        /* bulk register should be done after wrap_request() */
        if (request->rq_bulk != NULL) {
                rc = ptlrpc_register_bulk(request);
                if (rc != 0)
                        GOTO(cleanup_bulk, rc);
                /*
                 * All the mds in the request will have the same cpt
                 * encoded in the cookie. So we can just get the first
                 * one.
                 */
                bulk_cookie = request->rq_bulk->bd_mds[0];
        }

        if (!noreply) {
                LASSERT(request->rq_replen != 0);
                if (request->rq_repbuf == NULL) {
                        LASSERT(request->rq_repdata == NULL);
                        LASSERT(request->rq_repmsg == NULL);
                        rc = sptlrpc_cli_alloc_repbuf(request,
                                                      request->rq_replen);
                        if (rc) {
                                /* this prevents us from looping in
                                 * ptlrpc_queue_wait */
                                spin_lock(&request->rq_lock);
                                request->rq_err = 1;
                                spin_unlock(&request->rq_lock);
                                request->rq_status = rc;
                                GOTO(cleanup_bulk, rc);
                        }
                } else {
                        request->rq_repdata = NULL;
                        request->rq_repmsg = NULL;
                }

                peer = connection->c_peer;
                if (request->rq_bulk &&
                    OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_BULK_REPLY_ATTACH)) {
                        reply_me = ERR_PTR(-ENOMEM);
                } else {
                        reply_me = LNetMEAttach(request->rq_reply_portal,
                                                &peer,
                                                rep_mbits ? request->rq_mbits :
                                                request->rq_xid,
                                                0, LNET_UNLINK, LNET_INS_AFTER);
                }

                if (IS_ERR(reply_me)) {
                        rc = PTR_ERR(reply_me);
                        CERROR("LNetMEAttach failed: %d\n", rc);
                        LASSERT(rc == -ENOMEM);
                        GOTO(cleanup_bulk, rc = -ENOMEM);
                }
        }

        spin_lock(&request->rq_lock);
        /* We are responsible for unlinking the reply buffer */
        request->rq_reply_unlinked = noreply;
        request->rq_receiving_reply = !noreply;
        /* Clear any flags that may be present from previous sends. */
        request->rq_req_unlinked = 0;
        request->rq_replied = 0;
        request->rq_err = 0;
        request->rq_timedout = 0;
        request->rq_net_err = 0;
        request->rq_resend = 0;
        request->rq_restart = 0;
        request->rq_reply_truncated = 0;
        spin_unlock(&request->rq_lock);

        if (!noreply) {
                reply_md.start     = request->rq_repbuf;
                reply_md.length    = request->rq_repbuf_len;
                /* Allow multiple early replies */
                reply_md.threshold = LNET_MD_THRESH_INF;
                /* Manage remote for early replies */
                reply_md.options   = PTLRPC_MD_OPTIONS | LNET_MD_OP_PUT |
                        LNET_MD_MANAGE_REMOTE |
                        LNET_MD_TRUNCATE; /* allow to make EOVERFLOW error */
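                /* LNET_MD_MANAGE_REMOTE lets the sender (the server) supply
                 * the offset for each PUT, so with an infinite threshold
                 * several early replies plus the final reply can all land
                 * in this one buffer. */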
                reply_md.user_ptr  = &request->rq_reply_cbid;
                reply_md.handler = ptlrpc_handler;

                /* We must see the unlink callback to set rq_reply_unlinked,
                 * so we can't auto-unlink */
                rc = LNetMDAttach(reply_me, &reply_md, LNET_RETAIN,
                                  &request->rq_reply_md_h);
                if (rc != 0) {
                        CERROR("LNetMDAttach failed: %d\n", rc);
                        LASSERT(rc == -ENOMEM);
                        spin_lock(&request->rq_lock);
                        /* ...but the MD attach didn't succeed... */
                        request->rq_receiving_reply = 0;
                        spin_unlock(&request->rq_lock);
                        GOTO(cleanup_bulk, rc = -ENOMEM);
                }
                percpu_ref_get(&ptlrpc_pending);

                CDEBUG(D_NET,
                       "Setup reply buffer: %u bytes, xid %llu, portal %u\n",
                       request->rq_repbuf_len, request->rq_xid,
                       request->rq_reply_portal);
        }

        /* add references on request for request_out_callback */
        ptlrpc_request_addref(request);
        if (obd != NULL && obd->obd_svc_stats != NULL)
                lprocfs_counter_add(obd->obd_svc_stats, PTLRPC_REQACTIVE_CNTR,
                                    atomic_read(&imp->imp_inflight));

        OBD_FAIL_TIMEOUT(OBD_FAIL_PTLRPC_DELAY_SEND, request->rq_timeout + 5);

        request->rq_sent_ns = ktime_get_real();
        request->rq_sent = ktime_get_real_seconds();
        /* We give the server rq_timeout secs to process the req, and
         * add the network latency for our local timeout.
         */
        request->rq_deadline = request->rq_sent + request->rq_timeout +
                ptlrpc_at_get_net_latency(request);

        DEBUG_REQ(D_INFO, request, "send flags=%x",
                  lustre_msg_get_flags(request->rq_reqmsg));
        rc = ptl_send_buf(&request->rq_req_md_h,
                          request->rq_reqbuf, request->rq_reqdata_len,
                          LNET_NOACK_REQ, &request->rq_req_cbid,
                          LNET_NID_ANY,
                          &connection->c_peer,
                          request->rq_request_portal,
                          request->rq_xid, 0, &bulk_cookie);
        if (likely(rc == 0))
                GOTO(out, rc);

        request->rq_req_unlinked = 1;
        ptlrpc_req_finished(request);
        if (noreply)
                GOTO(out, rc);

        LNetMDUnlink(request->rq_reply_md_h);

        /* UNLINKED callback called synchronously */
        LASSERT(!request->rq_receiving_reply);

 cleanup_bulk:
        /* We do a sync unlink here as there was no real transfer, so the
         * chance of a long unlink due to a sluggish net is smaller here. */
        ptlrpc_unregister_bulk(request, 0);
 out:
        if (rc == -ENOMEM) {
                /* set rq_sent so that this request is treated
                 * as a delayed send in the upper layers */
                request->rq_sent = ktime_get_real_seconds();
        }

        if (request->rq_memalloc)
                memalloc_noreclaim_restore(mpflag);

        return rc;
}
EXPORT_SYMBOL(ptl_send_rpc);

/**
 * Register request buffer descriptor for request receiving.
 */
int ptlrpc_register_rqbd(struct ptlrpc_request_buffer_desc *rqbd)
{
        struct ptlrpc_service *service = rqbd->rqbd_svcpt->scp_service;
        static struct lnet_processid match_id = {
                .nid = LNET_ANY_NID,
                .pid = LNET_PID_ANY
        };
        int rc;
        struct lnet_md md;
        struct lnet_me *me;

        CDEBUG(D_NET, "%s: registering portal %d\n", service->srv_name,
               service->srv_req_portal);

        if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_RQBD))
                return -ENOMEM;

        /* NB: CPT affinity service should use new LNet flag LNET_INS_LOCAL,
         * which means buffer can only be attached on local CPT, and LND
         * threads can find it by grabbing a local lock */
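        /* Match bits 0 with ignore bits ~0 make this ME match any incoming
         * request XID on the service portal; each rqbd is a shared landing
         * buffer rather than a per-request one. */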
        me = LNetMEAttach(service->srv_req_portal,
                          &match_id, 0, ~0, LNET_UNLINK,
                          rqbd->rqbd_svcpt->scp_cpt >= 0 ?
                          LNET_INS_LOCAL : LNET_INS_AFTER);
        if (IS_ERR(me)) {
                CERROR("%s: LNetMEAttach failed: rc = %ld\n",
                       service->srv_name, PTR_ERR(me));
                return PTR_ERR(me);
        }

        LASSERT(rqbd->rqbd_refcount == 0);
        rqbd->rqbd_refcount = 1;

        md.start     = rqbd->rqbd_buffer;
        md.length    = service->srv_buf_size;
        md.max_size  = service->srv_max_req_size;
        md.threshold = LNET_MD_THRESH_INF;
        md.options   = PTLRPC_MD_OPTIONS | LNET_MD_OP_PUT | LNET_MD_MAX_SIZE;
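        /* With LNET_MD_MAX_SIZE set, LNet auto-unlinks the MD once less
         * than max_size bytes of the buffer remain unused, so a nearly-full
         * rqbd is retired and the service posts a replacement. */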
        md.user_ptr  = &rqbd->rqbd_cbid;
        md.handler   = ptlrpc_handler;

        rc = LNetMDAttach(me, &md, LNET_UNLINK, &rqbd->rqbd_md_h);
        if (rc == 0) {
                percpu_ref_get(&ptlrpc_pending);
                return 0;
        }

        CERROR("%s: LNetMDAttach failed: rc = %d\n", service->srv_name, rc);
        LASSERT(rc == -ENOMEM);
        rqbd->rqbd_refcount = 0;

        return rc;
}